diff --git a/DESCRIPTION b/DESCRIPTION
index 2758ddbc..b54c0ec1 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,8 +1,8 @@
 Package: DatabaseConnector
 Type: Package
 Title: Connecting to Various Database Platforms
-Version: 6.3.3
-Date: 2025-01-15
+Version: 6.4.0
+Date: 2025-01-29
 Authors@R: c(
   person("Martijn", "Schuemie", email = "schuemie@ohdsi.org", role = c("aut", "cre")),
   person("Marc", "Suchard", role = c("aut")),
diff --git a/DatabaseConnector.Rproj b/DatabaseConnector.Rproj
index c5ea6c45..4084b1e0 100644
--- a/DatabaseConnector.Rproj
+++ b/DatabaseConnector.Rproj
@@ -1,5 +1,4 @@
 Version: 1.0
-ProjectId: 9d51e576-41a3-432f-b696-8bfdc3eed676
 
 RestoreWorkspace: No
 SaveWorkspace: No
diff --git a/NEWS.md b/NEWS.md
index 97f8159f..0004c6c6 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,3 +1,12 @@
+DatabaseConnector 6.4.0
+=======================
+
+Changes:
+
+- Adding support for InterSystems IRIS.
+
+
+
 DatabaseConnector 6.3.3
 =======================
 
diff --git a/R/Sql.R b/R/Sql.R
index f1e990bd..54c19c70 100644
--- a/R/Sql.R
+++ b/R/Sql.R
@@ -438,7 +438,7 @@ executeSql <- function(connection,
       startQuery <- Sys.time()
       # InterSystems IRIS JDBC supports batch updates but does not have a separate
       # executeLargeBatch() method
-      if (con@dbms == "iris") {
+      if (dbms == "iris") {
         rowsAffected <- c(rowsAffected, rJava::.jcall(statement, "[I", "executeBatch"))
       } else {
         rowsAffected <- c(rowsAffected, rJava::.jcall(statement, "[J", "executeLargeBatch"))
diff --git a/man/downloadJdbcDrivers.Rd b/man/downloadJdbcDrivers.Rd
index da71f659..af3c6660 100644
--- a/man/downloadJdbcDrivers.Rd
+++ b/man/downloadJdbcDrivers.Rd
@@ -48,7 +48,7 @@ The following versions of the JDBC drivers are currently used:
 \item Spark (Databricks): V2.6.36
 \item Snowflake: V3.16.01
 \item BigQuery: v1.3.2.1003
-\item InterSystems IRIS: v3.9.0
+\item InterSystems IRIS: v3.10.2
 }
 }
 \examples{
diff --git a/man/insertTable.Rd b/man/insertTable.Rd
index 637243b7..5b08421c 100644
--- a/man/insertTable.Rd
+++ b/man/insertTable.Rd
@@ -77,6 +77,13 @@ directly into the System Environment using the following keys: Sys.setenv("AWS_A
 "some_aws_region", "AWS_BUCKET_NAME" = "some_bucket_name", "AWS_OBJECT_KEY" =
 "some_object_key", "AWS_SSE_TYPE" = "server_side_encryption_type").
 
+Spark (DataBricks): The MPP bulk loading relies upon the AzureStor library
+to test a connection to an Azure ADLS Gen2 storage container using Azure credentials.
+Credentials are configured directly into the System Environment using the
+following keys: Sys.setenv("AZR_STORAGE_ACCOUNT" =
+"some_azure_storage_account", "AZR_ACCOUNT_KEY" = "some_secret_account_key", "AZR_CONTAINER_NAME" =
+"some_container_name").
+
 PDW: The MPP bulk loading relies upon the client having a Windows OS and the
 DWLoader exe installed, and the following permissions granted: --Grant BULK Load
 permissions - needed at a server level USE master; GRANT ADMINISTER BULK OPERATIONS TO
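
Note: the two user-facing changes above (InterSystems IRIS support announced in NEWS.md, and the Azure credential keys documented in man/insertTable.Rd) could be exercised roughly as in the sketch below. This is only an illustration, not part of the diff: the connection string, host, credentials, and schema are placeholders, and the insertTable() call with bulkLoad is an assumed usage pattern; only the "iris" dbms value, the driver version, and the AZR_* environment keys come from the changes above.

library(DatabaseConnector)

# --- InterSystems IRIS (new in 6.4.0) ----------------------------------------
# Fetch the IRIS JDBC driver (v3.10.2 per the updated downloadJdbcDrivers.Rd):
downloadJdbcDrivers(dbms = "iris", pathToDriver = "~/jdbcDrivers")

# Hypothetical connection; host, port, namespace, and credentials are placeholders:
connectionDetails <- createConnectionDetails(
  dbms = "iris",
  connectionString = "jdbc:IRIS://iris.example.com:1972/MYNAMESPACE",
  user = "user",
  password = "secret",
  pathToDriver = "~/jdbcDrivers"
)
connection <- connect(connectionDetails)
disconnect(connection)

# --- Spark (DataBricks) bulk load credentials (see man/insertTable.Rd) -------
# These keys are read from the environment when bulk loading to an Azure ADLS
# Gen2 container; the values here are placeholders:
Sys.setenv(
  "AZR_STORAGE_ACCOUNT" = "some_azure_storage_account",
  "AZR_ACCOUNT_KEY" = "some_secret_account_key",
  "AZR_CONTAINER_NAME" = "some_container_name"
)

# With a Spark/DataBricks connection, bulk loading would then be requested via
# insertTable(); the 'bulkLoad' argument shown here is an assumption, not part
# of this diff:
# insertTable(sparkConnection, "scratch", "my_table", cars, bulkLoad = TRUE)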