diff --git a/NAMESPACE b/NAMESPACE index 8bdab82e9..bd539a118 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -119,6 +119,8 @@ export(ds.var) export(ds.vectorCalc) import(DSI) import(data.table) +importFrom(DSI,datashield.connections_find) +importFrom(cli,cli_abort) importFrom(stats,as.formula) importFrom(stats,na.omit) importFrom(stats,ts) diff --git a/R/ds.cor.R b/R/ds.cor.R index 53fb22db4..059ca5cc0 100644 --- a/R/ds.cor.R +++ b/R/ds.cor.R @@ -37,6 +37,7 @@ #' percentage is pre-specified by the 'nfilter.glm'). The second disclosure control checks that none of them is dichotomous #' with a level having fewer counts than the pre-specified 'nfilter.tab' threshold. #' @author DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @examples #' \dontrun{ #' @@ -79,56 +80,25 @@ #' } #' @export #' -ds.cor <- function(x=NULL, y=NULL, type="split", datasources=NULL){ +ds.cor <- function(x=NULL, y=NULL, type="split", classConsistencyCheck=TRUE, datasources=NULL){ - # look for DS connections - if(is.null(datasources)){ - datasources <- datashield.connections_find() - } - - # ensure datasources is a list of DSConnection-class - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) - } + datasources <- .set_datasources(datasources) if(is.null(x)){ stop("x=NULL. 
Please provide the name of a matrix or dataframe or the names of two numeric vectors!", call.=FALSE) - }else{ - isDefined(datasources, x) - } - - # check the type of the input objects - typ <- checkClass(datasources, x) - - if(('numeric' %in% typ) | ('integer' %in% typ) | ('factor' %in% typ)){ - if(is.null(y)){ - stop("If x is a numeric vector, y must be a numeric vector!", call.=FALSE) - }else{ - isDefined(datasources, y) - typ2 <- checkClass(datasources, y) - } - } - - if(('matrix' %in% typ) | ('data.frame' %in% typ) & !(is.null(y))){ - y <- NULL - warning("x is a matrix or a dataframe; y will be ignored and a correlation matrix computed for x!") } # name of the studies to be used in the output stdnames <- names(datasources) # call the server side function - if(('matrix' %in% typ) | ('data.frame' %in% typ)){ - calltext <- call("corDS", x, NULL) - }else{ - if(!(is.null(y))){ - calltext <- call("corDS", x, y) - }else{ - calltext <- call("corDS", x, NULL) - } - } + calltext <- call("corDS", x, y) output <- DSI::datashield.aggregate(datasources, calltext) - + + if(classConsistencyCheck){ + .checkClassConsistency(output) + } + if (type=="split"){ covariance <- list() sqrt.diag <- list() diff --git a/R/ds.corTest.R b/R/ds.corTest.R index 3c9e42a81..b53297eee 100644 --- a/R/ds.corTest.R +++ b/R/ds.corTest.R @@ -24,6 +24,7 @@ #' the default set of connections will be used: see \code{\link[DSI]{datashield.connections_default}}. #' @return \code{ds.corTest} returns to the client-side the results of the correlation test. 
#' @author DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @export #' @examples #' \dontrun{ @@ -63,17 +64,9 @@ #' #' } #' -ds.corTest <- function(x=NULL, y=NULL, method="pearson", exact=NULL, conf.level=0.95, type='split', datasources=NULL){ +ds.corTest <- function(x=NULL, y=NULL, method="pearson", exact=NULL, conf.level=0.95, type='split', classConsistencyCheck=FALSE, datasources=NULL){ - # look for DS connections - if(is.null(datasources)){ - datasources <- datashield.connections_find() - } - - # ensure datasources is a list of DSConnection-class - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) - } + datasources <- .set_datasources(datasources) if(is.null(x)){ stop("x=NULL. Please provide the names of the 1st numeric vector!", call.=FALSE) @@ -85,19 +78,18 @@ ds.corTest <- function(x=NULL, y=NULL, method="pearson", exact=NULL, conf.level= if(!(method %in% c("pearson", "kendall", "spearman"))){ stop('Function argument "method" has to be either "pearson", "kendall" or "spearman"', call.=FALSE) } - - # check if the input objects are defined in all the studies - isDefined(datasources, x) - isDefined(datasources, y) - - # call the internal function that checks the input objects are of the same class in all studies. 
- typ <- checkClass(datasources, x) - typ <- checkClass(datasources, y) # call the server side function cally <- call("corTestDS", x, y, method, exact, conf.level) out <- DSI::datashield.aggregate(datasources, cally) + if(classConsistencyCheck){ + .checkClassConsistency(out) + } + + # strip class field from results before returning + out <- lapply(out, function(r) { r$class <- NULL; r }) + if(type=="split"){ return(out) }else{ diff --git a/R/ds.cov.R b/R/ds.cov.R index c67d2e134..ec60367a8 100644 --- a/R/ds.cov.R +++ b/R/ds.cov.R @@ -47,6 +47,7 @@ #' the disclosure controls then all the output values are replaced with NAs. If all the variables are valid and pass #' the controls, then the output matrices are returned and also an error message is returned but it is replaced by NA. #' @author DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @examples #' \dontrun{ #' @@ -96,56 +97,25 @@ #' } #' @export #' -ds.cov <- function(x=NULL, y=NULL, naAction='pairwise.complete', type="split", datasources=NULL){ +ds.cov <- function(x=NULL, y=NULL, naAction='pairwise.complete', type="split", classConsistencyCheck=TRUE, datasources=NULL){ - # look for DS connections - if(is.null(datasources)){ - datasources <- datashield.connections_find() - } - - # ensure datasources is a list of DSConnection-class - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) - } + datasources <- .set_datasources(datasources) if(is.null(x)){ stop("x=NULL. 
Please provide the name of a matrix or dataframe or the names of two numeric vectors!", call.=FALSE) - }else{ - isDefined(datasources, x) - } - - # check the type of the input objects - typ <- checkClass(datasources, x) - - if(('numeric' %in% typ) | ('integer' %in% typ) | ('factor' %in% typ)){ - if(is.null(y)){ - stop("If x is a numeric vector, y must be a numeric vector!", call.=FALSE) - }else{ - isDefined(datasources, y) - typ2 <- checkClass(datasources, y) - } - } - - if(('matrix' %in% typ) | ('data.frame' %in% typ) & !(is.null(y))){ - y <- NULL - warning("x is a matrix or a dataframe; y will be ignored and a covariance matrix computed for x!") } # name of the studies to be used in the output stdnames <- names(datasources) # call the server side function - if(('matrix' %in% typ) | ('data.frame' %in% typ)){ - calltext <- call("covDS", x, NULL, naAction) - }else{ - if(!(is.null(y))){ - calltext <- call("covDS", x, y, naAction) - }else{ - calltext <- call("covDS", x, NULL, naAction) - } - } + calltext <- call("covDS", x, y, naAction) output <- DSI::datashield.aggregate(datasources, calltext) - + + if(classConsistencyCheck){ + .checkClassConsistency(output) + } + if (type=="split"){ covariance <- list() results <- list() diff --git a/R/ds.kurtosis.R b/R/ds.kurtosis.R index 974682bba..5bc43d95f 100644 --- a/R/ds.kurtosis.R +++ b/R/ds.kurtosis.R @@ -23,56 +23,40 @@ #' @return a matrix showing the kurtosis of the input numeric variable, the number of valid observations and #' the validity message. 
#' @author Demetris Avraam, for DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @export #' -ds.kurtosis <- function(x=NULL, method=1, type='both', datasources=NULL){ - - # if no opal login details are provided look for 'opal' objects in the environment - if(is.null(datasources)){ - datasources <- datashield.connections_find() - } +ds.kurtosis <- function(x=NULL, method=1, type='both', classConsistencyCheck=FALSE, datasources=NULL){ - # ensure datasources is a list of DSConnection-class - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) - } + datasources <- .set_datasources(datasources) if(is.null(x)){ stop("Please provide the name of the input vector!", call.=FALSE) } - + if(!all(method %in% c(1,2,3))){ stop("method must be an integer between 1 and 3", call.=FALSE) } - - # enable valid aliases for "type" argument + + # enable valid aliases for "type" argument if(type == 'combine' | type == 'combined' | type == 'combines' | type == 'c') type <- 'combine' if(type == 'split' | type == 'splits' | type == 's') type <- 'split' if(type == 'both' | type == 'b' ) type <- 'both' if(type != 'combine' & type != 'split' & type != 'both'){ stop('Function argument "type" has to be either "both", "combine" or "split"', call.=FALSE) } - - # check if the input object is defined in all the studies - isDefined(datasources, x) - - # call the internal function that checks the input object is of the same class in all studies. 
- typ <- checkClass(datasources, x) - - # the input object must be a numeric or an integer vector - if(typ != 'integer' & typ != 'numeric'){ - message(paste0(x, " is of type ", typ, "!")) - stop("The input object must be an integer or numeric vector.", call.=FALSE) - } - + if (type=='split' | type=='both'){ calltext.split <- call("kurtosisDS1", x, method) - output.split <- DSI::datashield.aggregate(datasources, calltext.split) - mat.split <- matrix(as.numeric(matrix(unlist(output.split), nrow=length(datasources), byrow=TRUE)[,1:2]),nrow=length(datasources)) - validity <- matrix(unlist(output.split), nrow=length(datasources), byrow=TRUE)[,3] - mat.split <- data.frame(cbind(mat.split, validity)) + output.split <- DSI::datashield.aggregate(datasources, calltext.split) + if(classConsistencyCheck){ + .checkClassConsistency(output.split) + } + mat.split <- data.frame( + Kurtosis = sapply(output.split, function(r) r$Kurtosis), + Nvalid = sapply(output.split, function(r) r$Nvalid) + ) rownames(mat.split) <- names(output.split) - colnames(mat.split) <- c('Kurtosis', 'Nvalid', 'ValidityMessage') } if (type=='combine' | type=='both'){ @@ -83,8 +67,11 @@ ds.kurtosis <- function(x=NULL, method=1, type='both', datasources=NULL){ stop("FAILED: The number of valid observations in one or more studies is less than nfilter.tab. 
\n Check that by using the argument type=='split'", call.=FALSE) }else{ calltext.combined <- call("kurtosisDS2", x, global.mean) - output.combined <- DSI::datashield.aggregate(datasources, calltext.combined) - + output.combined <- DSI::datashield.aggregate(datasources, calltext.combined) + if(classConsistencyCheck){ + .checkClassConsistency(output.combined) + } + Global.sum.quartics <- 0 Global.sum.squares <- 0 Global.Nvalid <- 0 @@ -98,19 +85,15 @@ ds.kurtosis <- function(x=NULL, method=1, type='both', datasources=NULL){ if(method==1){ Global.kurtosis <- g2.global - combinedMessage <- "VALID ANALYSIS" - } + } if(method==2){ Global.kurtosis <- ((Global.Nvalid + 1) * g2.global + 6) * (Global.Nvalid - 1)/((Global.Nvalid - 2) * (Global.Nvalid - 3)) - combinedMessage <- "VALID ANALYSIS" - } + } if(method==3){ Global.kurtosis <- (g2.global + 3) * (1 - 1/Global.Nvalid)^2 - 3 - combinedMessage <- "VALID ANALYSIS" } - mat.combined <- data.frame(cbind(Global.kurtosis, Global.Nvalid, combinedMessage)) + mat.combined <- data.frame(Kurtosis = Global.kurtosis, Nvalid = Global.Nvalid) rownames(mat.combined) <- 'studiesCombined' - colnames(mat.combined) <- c('Kurtosis', 'Nvalid', 'ValidityMessage') } } diff --git a/R/ds.mean.R b/R/ds.mean.R index f23356d56..ab870371d 100644 --- a/R/ds.mean.R +++ b/R/ds.mean.R @@ -30,11 +30,8 @@ #' \code{'split'}, \code{'splits'}, \code{'s'}, #' \code{'both'} or \code{'b'}. #' For more information see \strong{Details}. -#' @param checks logical. If TRUE optional checks of model -#' components will be undertaken. Default is FALSE to save time. -#' It is suggested that checks -#' should only be undertaken once the function call has failed. -#' @param save.mean.Nvalid logical. If TRUE generated values of the mean and + +#' @param save.mean.Nvalid logical. If TRUE generated values of the mean and #' the number of valid (non-missing) observations will be saved on the data servers. #' Default FALSE. #' For more information see \strong{Details}. 
@@ -57,6 +54,7 @@ #' \code{mean.all.studies} and \code{mean.study.specific} are written to the server-side. #' #' @author DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @seealso \code{ds.quantileMean} to compute quantiles. #' @seealso \code{ds.summary} to generate the summary of a variable. #' @export @@ -92,7 +90,6 @@ #' #' ds.mean(x = "D$LAB_TSC", #' type = "split", -#' checks = FALSE, #' save.mean.Nvalid = FALSE, #' datasources = connections) #' @@ -100,37 +97,14 @@ #' datashield.logout(connections) #' } #' -ds.mean <- function(x=NULL, type='split', checks=FALSE, save.mean.Nvalid=FALSE, datasources=NULL){ +ds.mean <- function(x=NULL, type='split', save.mean.Nvalid=FALSE, classConsistencyCheck=FALSE, datasources=NULL){ + + datasources <- .set_datasources(datasources) - # look for DS connections - if(is.null(datasources)){ - datasources <- datashield.connections_find() - } - - # ensure datasources is a list of DSConnection-class - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) - } - if(is.null(x)){ stop("Please provide the name of the input object!", call.=FALSE) } - # beginning of optional checks - the process stops and reports as soon as one check fails # - if(checks){ - - # check if the input object is defined in all the studies - isDefined(datasources, x) - - # call the internal function that checks the input object is of the same class in all studies. 
- typ <- checkClass(datasources, x) - - # the input object must be a numeric or an integer vector - if(!('integer' %in% typ) & !('numeric' %in% typ)){ - stop("The input object must be an integer or a numeric vector.", call.=FALSE) - } -} - ################################################################################################### #MODULE: EXTEND "type" argument to include "both" and enable valid alisases # if(type == 'combine' | type == 'combined' | type == 'combines' | type == 'c') type <- 'combine' # @@ -140,15 +114,21 @@ if(type != 'combine' & type != 'split' & type != 'both'){ stop('Function argument "type" has to be either "both", "combine" or "split"', call.=FALSE) # } - cally <- paste0("meanDS(", x, ")") - ss.obj <- DSI::datashield.aggregate(datasources, as.symbol(cally)) + cally <- call("meanDS", x) + ss.obj <- DSI::datashield.aggregate(datasources, cally) - Nstudies <- length(datasources) - ss.mat <- matrix(as.numeric(matrix(unlist(ss.obj),nrow=Nstudies,byrow=TRUE)[,1:4]),nrow=Nstudies) - dimnames(ss.mat) <- c(list(names(ss.obj),names(ss.obj[[1]])[1:4])) + if(classConsistencyCheck){ + .checkClassConsistency(ss.obj) + } - ValidityMessage.mat <- matrix(matrix(unlist(ss.obj),nrow=Nstudies,byrow=TRUE)[,5],nrow=Nstudies) - dimnames(ValidityMessage.mat) <- c(list(names(ss.obj),names(ss.obj[[1]])[5])) + Nstudies <- length(datasources) + ss.mat <- matrix(c( + sapply(ss.obj, function(r) r$EstimatedMean), + sapply(ss.obj, function(r) r$Nmissing), + sapply(ss.obj, function(r) r$Nvalid), + sapply(ss.obj, function(r) r$Ntotal) + ), nrow=Nstudies) + dimnames(ss.mat) <- list(names(ss.obj), c("EstimatedMean", "Nmissing", "Nvalid", "Ntotal")) ss.mat.combined <- t(matrix(ss.mat[1,])) @@ -176,37 +156,20 @@ if(type != 'combine' & type != 'split' & type != 'both'){ Nvalid.all.studies <- ss.mat.combined[1,3] DSI::datashield.assign(datasources, "mean.all.studies", as.symbol(mean.all.studies)) DSI::datashield.assign(datasources, "Nvalid.all.studies", 
as.symbol(Nvalid.all.studies)) - -############################################################################# -# MODULE 5: CHECK DATA OBJECTS SUCCESSFULLY CREATED # - key.names <- extract("mean.all.studies") # - key.varname <- key.names$elements # - key.obj2lookfor <- key.names$holders # - # - if(is.na(key.obj2lookfor)){ # - key.defined <- isDefined(datasources, key.varname) # - }else{ # - key.defined <- isDefined(datasources, key.obj2lookfor) # - } # - # -#if(key.defined==TRUE){ # -#print("Data object created successfully in all sources") # -#} # -############################################################################# } #PRIMARY FUNCTION OUTPUT SUMMARISE RESULTS FROM #AGGREGATE FUNCTION AND RETURN TO CLIENT-SIDE if (type=='split'){ - return(list(Mean.by.Study=ss.mat,Nstudies=Nstudies,ValidityMessage=ValidityMessage.mat)) + return(list(Mean.by.Study=ss.mat,Nstudies=Nstudies)) } if (type=="combine") { - return(list(Global.Mean=ss.mat.combined,Nstudies=Nstudies,ValidityMessage=ValidityMessage.mat)) + return(list(Global.Mean=ss.mat.combined,Nstudies=Nstudies)) } if (type=="both") { - return(list(Mean.by.Study=ss.mat,Global.Mean=ss.mat.combined,Nstudies=Nstudies,ValidityMessage=ValidityMessage.mat)) + return(list(Mean.by.Study=ss.mat,Global.Mean=ss.mat.combined,Nstudies=Nstudies)) } } diff --git a/R/ds.meanSdGp.R b/R/ds.meanSdGp.R index 1bd60936b..e134ecd59 100644 --- a/R/ds.meanSdGp.R +++ b/R/ds.meanSdGp.R @@ -57,17 +57,14 @@ #' This can be set as: \code{"combine"}, \code{"split"} or \code{"both"}. #' Default \code{"both"}. #' For more information see \strong{Details}. -#' @param do.checks logical. If TRUE the administrative checks -#' are undertaken to ensure that the input objects are defined in all studies and that the -#' variables are of equivalent class in each study. -#' Default is FALSE to save time. 
-#' @param datasources a list of \code{\link[DSI]{DSConnection-class}} +#' @param datasources a list of \code{\link[DSI]{DSConnection-class}} #' objects obtained after login. If the \code{datasources} argument is not specified #' the default set of connections will be used: see \code{\link[DSI]{datashield.connections_default}}. #' @return \code{ds.meanSdGp} returns to the client-side the mean, SD, Nvalid and SEM combined #' across studies and/or separately for each study, depending on the argument \code{type}. #' #' @author DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @seealso \code{\link{ds.subsetByClass}} to subset by the classes of factor vector(s). #' @seealso \code{\link{ds.subset}} to subset by complete cases (i.e. removing missing values), threshold, #' columns and rows. @@ -108,7 +105,6 @@ #' ds.meanSdGp(x = "D$age.60", #' y = "D$time.id", #' type = "combine", -#' do.checks = FALSE, #' datasources = connections) #' #' #Example 2: Calculate the mean, SD, Nvalid and SEM of the continuous variable age.60 (age in @@ -119,24 +115,15 @@ #' ds.meanSdGp(x = "D$age.60", #' y = "D$time.id", #' type = "both", -#' do.checks = FALSE, -#' datasources = connections) +#' datasources = connections) #' #' # clear the Datashield R sessions and logout #' datashield.logout(connections) #' } #' -ds.meanSdGp <- function(x=NULL, y=NULL, type='both', do.checks=FALSE, datasources=NULL){ +ds.meanSdGp <- function(x=NULL, y=NULL, type='both', classConsistencyCheck=TRUE, datasources=NULL){ - # look for DS connections - if(is.null(datasources)){ - datasources <- datashield.connections_find() - } - - # ensure datasources is a list of DSConnection-class - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) - } + datasources <- .set_datasources(datasources) if(is.null(x)){ 
stop("Please provide the name of the input vector!", call.=FALSE) @@ -146,20 +133,9 @@ ds.meanSdGp <- function(x=NULL, y=NULL, type='both', do.checks=FALSE, datasource stop("Please provide the name of the input vector!", call.=FALSE) } - if(do.checks){ - - # check if the input objects are defined in all the studies - isDefined(datasources, x) - isDefined(datasources, y) - - # call the internal function that checks the input object is of the same class in all studies. - typ1 <- checkClass(datasources, x) - typ2 <- checkClass(datasources, y) - } - # names of the studies stdnames <- names(datasources) - + # variable names xnames <- extract(x) ynames <- extract(y) @@ -168,8 +144,14 @@ ds.meanSdGp <- function(x=NULL, y=NULL, type='both', do.checks=FALSE, datasource # call the server side function that calculates mean and standard deviation # by group in each study - calltext <- paste0("meanSdGpDS(", x, ",", y, ")") - output <- DSI::datashield.aggregate(datasources, as.symbol(calltext)) + cally <- call("meanSdGpDS", x, y) + output <- DSI::datashield.aggregate(datasources, cally) + + if(classConsistencyCheck){ + # check consistency of index variable class across studies + index.classes <- lapply(output, function(r) list(class = r$class.index)) + .checkClassConsistency(index.classes) + } numsources <- length(output) diff --git a/R/ds.quantileMean.R b/R/ds.quantileMean.R index 48aa705b4..44cac4767 100644 --- a/R/ds.quantileMean.R +++ b/R/ds.quantileMean.R @@ -21,6 +21,7 @@ #' @return \code{ds.quantileMean} returns to the client-side the quantiles and statistical mean #' of a server-side numeric vector. #' @author DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @seealso \code{\link{ds.mean}} to compute the statistical mean. #' @seealso \code{\link{ds.summary}} to generate the summary of a variable. 
#' @export @@ -65,17 +66,9 @@ #' #' } #' -ds.quantileMean <- function(x=NULL, type='combine', datasources=NULL){ +ds.quantileMean <- function(x=NULL, type='combine', classConsistencyCheck=FALSE, datasources=NULL){ - # look for DS connections - if(is.null(datasources)){ - datasources <- datashield.connections_find() - } - - # ensure datasources is a list of DSConnection-class - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) - } + datasources <- .set_datasources(datasources) if(is.null(x)){ stop("Please provide the name of the input vector!", call.=FALSE) @@ -85,27 +78,23 @@ ds.quantileMean <- function(x=NULL, type='combine', datasources=NULL){ stop('Function argument "type" has to be either "combine" or "split"', call.=FALSE) } - # check if the input object is defined in all the studies - isDefined(datasources, x) - - # call the internal function that checks the input object is of the same class in all studies. 
- typ <- checkClass(datasources, x) + # get the server function that produces the quantiles + cally1 <- call("quantileMeanDS", x) + results <- DSI::datashield.aggregate(datasources, cally1) - # the input object must be a numeric or an integer vector - if(!('integer' %in% typ) & !('numeric' %in% typ)){ - message(paste0(x, " is of type ", typ, "!")) - stop("The input object must be an integer or numeric vector.", call.=FALSE) + if(classConsistencyCheck){ + .checkClassConsistency(results) } - # get the server function that produces the quantiles - cally1 <- paste0('quantileMeanDS(', x, ')') - quants <- DSI::datashield.aggregate(datasources, as.symbol(cally1)) + quants <- lapply(results, function(r) r$quantiles) # combine the vector of quantiles - using weighted sum cally2 <- call('lengthDS', x) - lengths <- DSI::datashield.aggregate(datasources, cally2) - cally3 <- paste0("numNaDS(", x, ")") - numNAs <- DSI::datashield.aggregate(datasources, as.symbol(cally3)) + length.results <- DSI::datashield.aggregate(datasources, cally2) + lengths <- lapply(length.results, function(r) r$length) + cally3 <- call("numNaDS", x) + numNA.results <- DSI::datashield.aggregate(datasources, cally3) + numNAs <- lapply(numNA.results, function(r) r$numNA) global.quantiles <- rep(0, length(quants[[1]])-1) global.mean <- 0 for(i in 1: length(datasources)){ diff --git a/R/ds.skewness.R b/R/ds.skewness.R index 0ef8d93d3..396d9d78d 100644 --- a/R/ds.skewness.R +++ b/R/ds.skewness.R @@ -37,6 +37,7 @@ #' @return \code{ds.skewness} returns a matrix showing the skewness of the input numeric variable, #' the number of valid observations and the validity message. 
#' @author Demetris Avraam, for DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @examples #' \dontrun{ #' ## Version 6, for version 5 see the Wiki @@ -77,53 +78,36 @@ #' } #' @export #' -ds.skewness <- function(x=NULL, method=1, type='both', datasources=NULL){ - - # if no opal login details are provided look for 'opal' objects in the environment - if(is.null(datasources)){ - datasources <- datashield.connections_find() - } +ds.skewness <- function(x=NULL, method=1, type='both', classConsistencyCheck=FALSE, datasources=NULL){ - # ensure datasources is a list of DSConnection-class - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) - } + datasources <- .set_datasources(datasources) if(is.null(x)){ stop("Please provide the name of the input vector!", call.=FALSE) } - + if(!all(method %in% c(1,2,3))){ stop("method must be an integer between 1 and 3", call.=FALSE) } - # enable valid aliases for "type" argument + # enable valid aliases for "type" argument if(type == 'combine' | type == 'combined' | type == 'combines' | type == 'c') type <- 'combine' if(type == 'split' | type == 'splits' | type == 's') type <- 'split' if(type == 'both' | type == 'b' ) type <- 'both' if(type != 'combine' & type != 'split' & type != 'both') stop('Function argument "type" has to be either "both", "combine" or "split"', call.=FALSE) - # check if the input object is defined in all the studies - isDefined(datasources, x) - - # call the internal function that checks the input object is of the same class in all studies. 
- typ <- checkClass(datasources, x) - - # the input object must be a numeric or an integer vector - if(typ != 'integer' & typ != 'numeric'){ - message(paste0(x, " is of type ", typ, "!")) - stop("The input object must be an integer or numeric vector.", call.=FALSE) - } - if (type=='split' | type=='both'){ calltext.split <- call("skewnessDS1", x, method) - output.split <- DSI::datashield.aggregate(datasources, calltext.split) - mat.split <- matrix(as.numeric(matrix(unlist(output.split), nrow=length(datasources), byrow=TRUE)[,1:2]),nrow=length(datasources)) - validity <- matrix(unlist(output.split), nrow=length(datasources), byrow=TRUE)[,3] - mat.split <- data.frame(cbind(mat.split, validity)) + output.split <- DSI::datashield.aggregate(datasources, calltext.split) + if(classConsistencyCheck){ + .checkClassConsistency(output.split) + } + mat.split <- data.frame( + Skewness = sapply(output.split, function(r) r$Skewness), + Nvalid = sapply(output.split, function(r) r$Nvalid) + ) rownames(mat.split) <- names(output.split) - colnames(mat.split) <- c('Skewness', 'Nvalid', 'ValidityMessage') } if (type=='combine' | type=='both'){ @@ -134,8 +118,11 @@ ds.skewness <- function(x=NULL, method=1, type='both', datasources=NULL){ stop("FAILED: The number of valid observations in one or more studies is less than nfilter.tab. 
\n Check that by using the argument type=='split'", call.=FALSE) }else{ calltext.combined <- call("skewnessDS2", x, global.mean) - output.combined <- DSI::datashield.aggregate(datasources, calltext.combined) - + output.combined <- DSI::datashield.aggregate(datasources, calltext.combined) + if(classConsistencyCheck){ + .checkClassConsistency(output.combined) + } + Global.sum.cubes <- 0 Global.sum.squares <- 0 Global.Nvalid <- 0 @@ -149,19 +136,15 @@ ds.skewness <- function(x=NULL, method=1, type='both', datasources=NULL){ if(method==1){ Global.skewness <- g1.global - combinedMessage <- "VALID ANALYSIS" - } + } if(method==2){ Global.skewness <- g1.global * sqrt(Global.Nvalid*(Global.Nvalid-1))/(Global.Nvalid-2) - combinedMessage <- "VALID ANALYSIS" - } + } if(method==3){ Global.skewness <- g1.global * ((Global.Nvalid-1)/(Global.Nvalid))^(3/2) - combinedMessage <- "VALID ANALYSIS" - } - mat.combined <- data.frame(cbind(Global.skewness, Global.Nvalid, combinedMessage)) + } + mat.combined <- data.frame(Skewness = Global.skewness, Nvalid = Global.Nvalid) rownames(mat.combined) <- 'studiesCombined' - colnames(mat.combined) <- c('Skewness', 'Nvalid', 'ValidityMessage') } } diff --git a/R/ds.summary.R b/R/ds.summary.R index 2d86287b1..82f04f93d 100644 --- a/R/ds.summary.R +++ b/R/ds.summary.R @@ -19,6 +19,7 @@ #' such as the minimum and maximum values of numeric vectors are not returned. #' The summary is given for each study separately. 
#' @author DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @export #' @examples #' \dontrun{ @@ -66,24 +67,13 @@ #' ds.summary <- function(x=NULL, datasources=NULL){ - # look for DS connections - if(is.null(datasources)){ - datasources <- datashield.connections_find() - } - - # ensure datasources is a list of DSConnection-class - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) - } + datasources <- .set_datasources(datasources) if(is.null(x)){ stop("Please provide the name of the input vector!", call.=FALSE) } - # check if the input object is defined in all the studies - isDefined(datasources, x) - - # call the internal function that checks if the input object is of the same class in all studies. + # check the type of x to drive client-side dispatch typ <- checkClass(datasources, x) # the input object must be a numeric or an integer vector @@ -102,8 +92,8 @@ ds.summary <- function(x=NULL, datasources=NULL){ validity <- DSI::datashield.aggregate(datasources[i], as.symbol(paste0('isValidDS(', x, ')')))[[1]] if(validity){ dims <- DSI::datashield.aggregate(datasources[i], call('dimDS', x)) - r <- dims[[1]][1] - c <- dims[[1]][2] + r <- dims[[1]]$dim[1] + c <- dims[[1]]$dim[2] cols <- (DSI::datashield.aggregate(datasources[i], call('colnamesDS', x)))[[1]] stdsummary <- list('class'=typ, 'number of rows'=r, 'number of columns'=c, 'variables held'=cols) finalOutput[[i]] <- stdsummary @@ -118,7 +108,7 @@ ds.summary <- function(x=NULL, datasources=NULL){ for(i in 1:numsources){ validity <- DSI::datashield.aggregate(datasources[i], as.symbol(paste0('isValidDS(', x, ')')))[[1]] if(validity){ - l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]] + l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]]$length stdsummary <- 
list('class'=typ, 'length'=l) finalOutput[[i]] <- stdsummary }else{ @@ -132,8 +122,8 @@ ds.summary <- function(x=NULL, datasources=NULL){ for(i in 1:numsources){ validity <- DSI::datashield.aggregate(datasources[i], as.symbol(paste0('isValidDS(', x, ')')))[[1]] if(validity){ - l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]] - levels.resp <- DSI::datashield.aggregate(datasources[i], as.symbol(paste0('levelsDS(', x, ')' )))[[1]] + l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]]$length + levels.resp <- DSI::datashield.aggregate(datasources[i], call('levelsDS', x))[[1]] categories <- levels.resp$Levels freq <- DSI::datashield.aggregate(datasources[i], as.symbol(paste0('table1DDS(', x, ')' )))[[1]][1] stdsummary <- list('class'=typ, 'length'=l, 'categories'=categories) @@ -153,8 +143,8 @@ ds.summary <- function(x=NULL, datasources=NULL){ for(i in 1:numsources){ validity <- DSI::datashield.aggregate(datasources[i], as.symbol(paste0('isValidDS(', x, ')')))[[1]] if(validity){ - l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]] - q <- (DSI::datashield.aggregate(datasources[i], as.symbol(paste0('quantileMeanDS(', x, ')' ))))[[1]] + l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]]$length + q <- (DSI::datashield.aggregate(datasources[i], call('quantileMeanDS', x)))[[1]]$quantiles stdsummary <- list('class'=typ, 'length'=l, 'quantiles & mean'=q) finalOutput[[i]] <- stdsummary }else{ @@ -167,7 +157,7 @@ ds.summary <- function(x=NULL, datasources=NULL){ if("list" %in% typ){ for(i in 1:numsources){ - l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]] + l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]]$length elts <- DSI::datashield.aggregate(datasources[i], call('namesDS', x)) if(length(elts) == 0){ elts <- NULL @@ -188,7 +178,7 @@ ds.summary <- function(x=NULL, datasources=NULL){ for(i in 1:numsources){ validity <- 
DSI::datashield.aggregate(datasources[i], as.symbol(paste0('isValidDS(', x, ')')))[[1]] if(validity){ - l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]] + l <- DSI::datashield.aggregate(datasources[i], call('lengthDS', x))[[1]]$length freq <- DSI::datashield.aggregate(datasources[i], as.symbol(paste0('table1DDS(', x, ')' )))[[1]][1] stdsummary <- list('class'=typ, 'length'=l) for(j in 1:length(2)){ diff --git a/R/ds.var.R b/R/ds.var.R index 178dc4436..99a5e9994 100644 --- a/R/ds.var.R +++ b/R/ds.var.R @@ -19,11 +19,7 @@ #' \code{'split'}, \code{'splits'}, \code{'s'}, #' \code{'both'} or \code{'b'}. #' For more information see \strong{Details}. -#' @param checks logical. If TRUE optional checks of model -#' components will be undertaken. Default is FALSE to save time. -#' It is suggested that checks -#' should only be undertaken once the function call has failed. -#' @param datasources a list of \code{\link[DSI]{DSConnection-class}} +#' @param datasources a list of \code{\link[DSI]{DSConnection-class}} #' objects obtained after login. If the \code{datasources} argument is not specified #' the default set of connections will be used: see \code{\link[DSI]{datashield.connections_default}}. #' @return \code{ds.var} returns to the client-side a list including:\cr @@ -37,6 +33,7 @@ #' \code{Nstudies}: number of studies being analysed. \cr #' \code{ValidityMessage}: indicates if the analysis was possible. 
\cr #' @author DataSHIELD Development Team +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands #' @export #' @examples #' \dontrun{ @@ -70,49 +67,19 @@ #' #' ds.var(x = "D$LAB_TSC", #' type = "split", -#' checks = FALSE, #' datasources = connections) #' #' # clear the Datashield R sessions and logout #' datashield.logout(connections) #' } #' -ds.var <- function(x=NULL, type='split', checks=FALSE, datasources=NULL){ - - ################################################################################################################# - #MODULE 1: IDENTIFY DEFAULT CONNECTIONS # - # look for DS connections # - if(is.null(datasources)){ # - datasources <- datashield.connections_find() # - } # - # - # ensure datasources is a list of DSConnection-class # - if(!(is.list(datasources) && all(unlist(lapply(datasources, function(d) {methods::is(d,"DSConnection")}))))){ # - stop("The 'datasources' were expected to be a list of DSConnection-class objects", call.=FALSE) # - } # - ################################################################################################################# +ds.var <- function(x=NULL, type='split', classConsistencyCheck=FALSE, datasources=NULL){ + + datasources <- .set_datasources(datasources) if(is.null(x)){ stop("Please provide the name of the input object!", call.=FALSE) } - - # check if the input object is defined in all the studies - isDefined(datasources, x) - - # beginning of optional checks - the process stops and reports as soon as one check fails - if(checks){ - - # check if the input object is defined in all the studies - isDefined(datasources, x) - - # call the internal function that checks the input object is suitable in all studies # - varClass <- checkClass(datasources, x) # - # the input object must be a numeric or an integer vector # - if(!('integer' %in% varClass) & !('numeric' %in% varClass)){ # - stop("The input object must be an integer or a numeric vector.", call.=FALSE) # - } # - } # - 
############################################################################################### ################################################################################################### #MODULE: EXTEND "type" argument to include "both" and enable valid alisases # @@ -123,8 +90,12 @@ ds.var <- function(x=NULL, type='split', checks=FALSE, datasources=NULL){ #MODIFY FUNCTION CODE TO DEAL WITH ALL THREE TYPES # ################################################################################################### - cally <- paste0("varDS(", x, ")") - ss.obj <- DSI::datashield.aggregate(datasources, as.symbol(cally)) + cally <- call("varDS", x) + ss.obj <- DSI::datashield.aggregate(datasources, cally) + + if(classConsistencyCheck){ + .checkClassConsistency(ss.obj) + } Nstudies <- length(datasources) EstimatedVar <- c() @@ -132,26 +103,23 @@ ds.var <- function(x=NULL, type='split', checks=FALSE, datasources=NULL){ Nmissing <- c() Ntotal <- c() for (i in 1:Nstudies){ - EstimatedVar[i] <- ss.obj[[i]][[2]]/(ss.obj[[i]][[4]]-1) - (ss.obj[[i]][[1]])^2/(ss.obj[[i]][[4]]*(ss.obj[[i]][[4]]-1)) - Nvalid[i] <- as.numeric(ss.obj[[i]][[4]]) - Nmissing[i] <- as.numeric(ss.obj[[i]][[3]]) - Ntotal[i] <- as.numeric(ss.obj[[i]][[5]]) + EstimatedVar[i] <- ss.obj[[i]]$SumOfSquares/(ss.obj[[i]]$Nvalid-1) - (ss.obj[[i]]$Sum)^2/(ss.obj[[i]]$Nvalid*(ss.obj[[i]]$Nvalid-1)) + Nvalid[i] <- ss.obj[[i]]$Nvalid + Nmissing[i] <- ss.obj[[i]]$Nmissing + Ntotal[i] <- ss.obj[[i]]$Ntotal } ss.mat <- matrix(c(EstimatedVar,Nmissing,Nvalid,Ntotal),nrow=Nstudies) dimnames(ss.mat) <- c(list(names(ss.obj),c('EstimatedVar','Nmissing','Nvalid','Ntotal'))) - ValidityMessage.mat <- matrix(matrix(unlist(ss.obj),nrow=Nstudies,byrow=TRUE)[,6],nrow=Nstudies) - dimnames(ValidityMessage.mat) <- c(list(names(ss.obj),names(ss.obj[[1]])[6])) - ss.mat.combined <- t(matrix(ss.mat[1,])) GlobalSum.new <- 0 GlobalSumSquares.new <- 0 GlobalNvalid.new <- 0 for (i in 1:Nstudies){ - GlobalSum <- GlobalSum.new + 
ss.obj[[i]][[1]] - GlobalSumSquares <- GlobalSumSquares.new + ss.obj[[i]][[2]] - GlobalNvalid <- GlobalNvalid.new + ss.obj[[i]][[4]] + GlobalSum <- GlobalSum.new + ss.obj[[i]]$Sum + GlobalSumSquares <- GlobalSumSquares.new + ss.obj[[i]]$SumOfSquares + GlobalNvalid <- GlobalNvalid.new + ss.obj[[i]]$Nvalid GlobalSum.new <- GlobalSum GlobalSumSquares.new <- GlobalSumSquares GlobalNvalid.new <- GlobalNvalid @@ -171,15 +139,15 @@ ds.var <- function(x=NULL, type='split', checks=FALSE, datasources=NULL){ #PRIMARY FUNCTION OUTPUT SUMMARISE RESULTS FROM #AGGREGATE FUNCTION AND RETURN TO CLIENT-SIDE if (type=='split'){ - return(list(Variance.by.Study=ss.mat,Nstudies=Nstudies,ValidityMessage=ValidityMessage.mat)) + return(list(Variance.by.Study=ss.mat,Nstudies=Nstudies)) } if (type=="combine"){ - return(list(Global.Variance=ss.mat.combined,Nstudies=Nstudies,ValidityMessage=ValidityMessage.mat)) + return(list(Global.Variance=ss.mat.combined,Nstudies=Nstudies)) } if (type=="both"){ - return(list(Variance.by.Study=ss.mat,Global.Variance=ss.mat.combined,Nstudies=Nstudies,ValidityMessage=ValidityMessage.mat)) + return(list(Variance.by.Study=ss.mat,Global.Variance=ss.mat.combined,Nstudies=Nstudies)) } } diff --git a/R/utils.R b/R/utils.R new file mode 100644 index 000000000..51ef63e20 --- /dev/null +++ b/R/utils.R @@ -0,0 +1,69 @@ +#' Retrieve datasources if not specified +#' +#' @param datasources An optional list of data sources. If not provided, the function will attempt +#' to find available data sources. +#' @importFrom DSI datashield.connections_find +#' @return A list of data sources. +#' @noRd +.get_datasources <- function(datasources) { + if (is.null(datasources)) { + datasources <- datashield.connections_find() + } + return(datasources) +} + +#' Verify that the provided data sources are of class 'DSConnection'. +#' +#' @param datasources A list of data sources. 
+#' @importFrom cli cli_abort +#' @noRd +.verify_datasources <- function(datasources) { + is_connection_class <- sapply(datasources, function(x) inherits(unlist(x), "DSConnection")) + if (!all(is_connection_class)) { + cli_abort("The 'datasources' were expected to be a list of DSConnection-class objects") + } +} + +#' Set and verify data sources. +#' +#' @param datasources An optional list of data sources. If not provided, the function will attempt +#' to find available data sources. +#' @return A list of verified data sources. +#' @noRd +.set_datasources <- function(datasources) { + datasources <- .get_datasources(datasources) + .verify_datasources(datasources) + return(datasources) +} + +#' Check cross-study class consistency from a list of server aggregate results +#' +#' Batch-refactored server functions return a list per study that includes a +#' `class` field. This helper verifies that the class field is identical across +#' all studies and aborts if not. +#' +#' @param results A named list of server-side aggregate results, one per study, +#' each containing a `class` element. +#' @importFrom cli cli_abort +#' @return Invisibly returns `NULL`. Called for its side effect (error checking). +#' @noRd +.checkClassConsistency <- function(results) { + classes <- lapply(results, function(r) r$class) + if (length(unique(lapply(classes, sort))) > 1) { + cli_abort("The input object is not of the same class in all studies!") + } +} + +#' Check That a Data Frame Name Is Provided +#' +#' Internal helper that checks whether a data frame or matrix object +#' has been provided. If `NULL`, it aborts with a user-friendly error. +#' +#' @param df A data.frame or matrix. +#' @return Invisibly returns `NULL`. Called for its side effect (error checking). 
+#' @noRd +.check_df_name_provided <- function(df) { + if(is.null(df)){ + cli_abort("Please provide the name of a data.frame or matrix!", call.=FALSE) + } +} diff --git a/REFACTOR_GUIDE.md b/REFACTOR_GUIDE.md new file mode 100644 index 000000000..f7226e83e --- /dev/null +++ b/REFACTOR_GUIDE.md @@ -0,0 +1,449 @@ +# Refactoring Plan: dsBase & dsBaseClient Function Pairs + +> **Action:** Replace `/Users/tcadman/github-repos/ds-core/dsBaseClient/REFACTOR_GUIDE.md` with this plan content so it's accessible across branches. + +## Context + +The `ds.colnames` / `colnamesDS` pair has been refactored as a reference implementation. The pattern shifts server-state validation (object existence, type checking) from client to server, reducing network round trips and centralizing validation where data lives. This needs to be applied across all remaining function pairs in both packages. + +The refactored `ds.colnames` branch (`v7.0-dev-colnames`) also introduces shared helpers: +- **Client:** `R/utils.R` with `.set_datasources()`, `.check_df_name_provided()` +- **Server:** `R/utils.R` with `.loadServersideObject()`, `.checkClass()` + +## Relationship Between Packages + +- **dsBaseClient** (`/Users/tcadman/github-repos/ds-core/dsBaseClient/R/`) — Client functions (`ds.functionName`) that validate inputs and dispatch calls to server +- **dsBase** (`/Users/tcadman/github-repos/ds-core/dsBase/R/`) — Server functions (`functionNameDS`) that execute on the data + +## What Changes Per Function Pair + +### Client-side (dsBaseClient) + +1. **Replace datasource boilerplate** with `datasources <- .set_datasources(datasources)` + - Removes: `datashield.connections_find()` + DSConnection class check (~8 lines) + +2. **Remove `isDefined()` calls** — server handles via `.loadServersideObject()` + +3. **Remove `checkClass()` calls and subsequent type guards** — server handles via `.checkClass()` + +4. 
**Add `classConsistencyCheck` parameter** — For any function where the input accepts more than one permitted class, add a `classConsistencyCheck` parameter. The server function returns `class = class(obj)` in its result list; the client checks consistency via `.checkClassConsistency()` when the parameter is TRUE, then strips the `class` field before returning to the user. Rules for the default value: + - **TRUE** when permitted classes include genuinely different types (e.g. data.frame + matrix, factor + character + integer) + - **FALSE** when permitted classes are only `numeric` and `integer` (these are effectively interchangeable) + - **No parameter** when only one class is permitted (e.g. `ds.levels` only permits factor — consistency is guaranteed by `.checkClass()`) + + **Verification:** after refactoring, inspect every `return()` in the client function — `class` (or `class.x` / `class.index` for multi-input functions) must not appear as a named element of the returned list. Stripping can be explicit (`r$class <- NULL`) or implicit (building the return from a specific subset of fields), but the absence of `class` in the final returned value must be visible at the `return()` site. + +5. **Remove `ValidityMessage`** — Server functions that returned `ValidityMessage = "VALID ANALYSIS"` should remove it. Failures should call `stop()` instead of returning a failure message. Remove `ValidityMessage` from client returns too. This is a major release so API changes are acceptable. + +6. **Remove `isAssigned()` calls** — no longer verify object creation client-side + +7. **Remove MODULE 5 boilerplate** — the ~40-80 line "CHECK KEY DATA OBJECTS SUCCESSFULLY CREATED" block + +8. **Remove `checks` parameter** — functions like `ds.dim` and `ds.length` have a `checks` parameter that gates `isDefined()`/`checkClass()` calls. Once those calls are removed, the parameter serves no purpose. Remove it from the function signature and delete the associated conditional block. + +9. 
**Replace per-study loops with single aggregate calls** — some functions (e.g. `ds.isNA`) loop over datasources one at a time (`datashield.aggregate(datasources[i], ...)`). Since `datashield.aggregate` already supports multiple datasources and returns a named list, replace these loops with a single call and process results client-side. This collapses N sequential round trips into 1 parallel call. + +10. **Keep**: null-input checks (or replace with `.check_df_name_provided()`), default `newobj` naming, the actual server call dispatch, any pure client-side logic + +### Server-side (dsBase) + +Two refactor patterns, depending on how the server function currently receives its input. Pick the one that matches. + +**Pattern A — function already receives a string name and uses `eval(parse())` internally.** + +1. Replace `eval(parse(text=x), envir=parent.frame())` with `.loadServersideObject(x)`. +2. Add `.checkClass(obj = x.val, obj_name = x, permitted_classes = …)` right after loading, where the client previously enforced type constraints. +3. Keep all computation, disclosure controls, privacy checks untouched. + +**Pattern B — function currently receives a resolved R object via dispatch-layer evaluation (`as.symbol()` or `call()` in the client).** + +1. Rename the function parameter from its descriptive body-variable name (e.g. `xvect`, `X`) to a simple string-name parameter (e.g. `x`). Do **not** rename the body-variable usages. +2. At the top of the body, load into the original body-variable name: `xvect <- .loadServersideObject(x)`. +3. Add `.checkClass(obj = xvect, obj_name = x, permitted_classes = …)`. +4. On the client, switch dispatch from `as.symbol(paste0("funcDS(", x, ")"))` to `call("funcDS", x)` so the string is passed through instead of being evaluated. +5. Update the `@param` roxygen line to describe the string-name form. + +Both patterns leave the function body untouched — **no renaming inside the body, no restyling.** Minimise diff. 
+ +### Returning class from the server + +Some client functions previously called `checkClass()` purely to drive **client-side routing** (e.g. decide which server function to dispatch, or which output format to use, or to warn about an argument being ignored). That's a separate network round trip solely to discover the class of the input object — redundant, because the server that runs the aggregate already has the object in hand. + +The batch 2 precedent (`dimDS`, `lengthDS`) is: **return the class as a field of the aggregate result**. For example: + +```r +lengthDS <- function(x){ + x.val <- .loadServersideObject(x) + .checkClass(obj = x.val, obj_name = x, permitted_classes = c(...)) + list(length = length(x.val), class = class(x.val)) +} +``` + +The client then reads `result$class` for any post-hoc routing, consistency check, or warning — no extra `checkClass()` call needed. + +Cross-study class consistency is checked via the shared helper `.checkClassConsistency(results)` (in `dsBaseClient/R/utils.R`), which aborts if the `class` field differs across studies. Always use this helper instead of inlining the check. + +**When to apply this:** +- The client was previously calling `checkClass()` to select output format, format warnings, or check class consistency across studies. +- The class information can be derived from the aggregate's input. + +**When not to apply this:** +- The client needs the class *before* choosing which server function to call (true composite dispatchers like `ds.summary`, which branch to completely different server functions per class). These still need a pre-call class lookup, for example via `call("classDS", x)` as a single lightweight aggregate. +- The class is genuinely irrelevant to the client after the call. + +Prefer this pattern over keeping a client-side `checkClass()` call whenever a client function currently does both a `checkClass()` and an aggregate call on the same object. 
+ +### Minimal-diff rule + +Refactors should change as little as possible. Do not rename variables, restyle comments, reformat whitespace, or bundle unrelated cleanups. If a variable rename is stylistically tempting but not strictly required by the refactor, skip it. If the user pushes back on a change, stop and ask — do not iterate with more edits on the same file. + +### Do not change existing behaviour + +The refactor must not alter which input types a function accepts or what it returns (beyond adding `class` to the return list). If the original function accepted data.frames, the refactored version must too. Check previous behaviour before setting permitted classes in `.checkClass()`. + +This extends to **test coverage**. If a refactor removes output fields that tests asserted on, the refactor is not done until equivalent coverage is added. Do not merely delete assertions to make tests pass — that silently reduces what the suite verifies. The change is reviewable by diffing `test-smk-*.R` against the base branch: every removed `expect_*` must be either (a) redundant because the same behaviour is covered elsewhere in the same file, or (b) replaced with an assertion covering the same server-side behaviour. + +### Adding new parameters to existing exported functions + +When adding a parameter to an already-released function (e.g. `classConsistencyCheck`, a new behaviour flag), **place it after all existing named parameters** — never in the middle of the signature. Inserting a parameter mid-signature silently breaks every caller that used positional argument order for anything to its right. Append it to the end (after `datasources=NULL` is acceptable even though `datasources` is conventionally last), and document the default value in `@param`. 
+ +### Tests + +**Server-side unit tests** (new `test-smk-functionNameDS.R` in dsBase): +- Happy path: call with valid input, assert correct output +- Unhappy: nonexistent object → `expect_error(..., "does not exist")` +- Unhappy: wrong type → `expect_error(..., "must be of type")` (only where `.checkClass()` is used) + +**Client-side end-to-end tests** (update existing `test-smk-ds.functionName.R` in dsBaseClient): +- Happy path: existing tests should still pass +- Unhappy: nonexistent object → `expect_error(..., "DataSHIELD errors")` +- Unhappy: wrong type → `expect_error(..., "DataSHIELD errors")` (where type was previously checked client-side) +- Update any tests that expected client-side error messages to expect server-originated errors +- **When MODULE 5 assertions are removed, add comparable replacements.** The old MODULE 5 block returned `$is.object.created` and `$validity.check` messages asserting that `newobj` existed on every server. When those assertions are stripped, add equivalent checks inside the same `test_that` block that verify the object was created on all sources — e.g. `ds_expect_variables(c(""))` or `expect_no_error(ds.class(""))`. Relying on the shutdown-block `ds_expect_variables()` alone is not sufficient because it can't pinpoint which test created the missing object. + +**Client-side smoke tests** (new `test-smk-ds.functionName.R` if none exists): +- If no smoke test file exists for a refactored client function, create one. Every refactored function must have at least a basic happy-path smoke test that exercises the server call and verifies the result. +- Follow the existing test pattern: `connect.studies.dataset.cnsim(...)`, `test_that("setup", ...)`, main test block, `test_that("shutdown", ...)`, `disconnect.studies.dataset.cnsim()`. + +**Client-side performance tests** (new `test-perf-ds.functionName.R` in dsBaseClient): +- Add a performance test for each refactored client function. 
Follow the pattern in `test-perf-ds.class.R`: call the function in a timed loop, compare against a reference rate from the perf profile CSV. +- Run with `PERF_DURATION_SEC=2 devtools::test(filter = "perf-")` during development; the default 30-second duration is for CI. +- **Do not** include Arjuna Technologies copyright headers in new test files. The existing headers in pre-refactor files should be left as-is, but new files we create should not carry third-party copyright. +- **The perf test must replicate the smoke test.** Before writing a perf test, read the corresponding `test-smk-ds.functionName.R` and copy: + 1. The `connect.studies.dataset.*()` line (same dataset, same columns) + 2. The `disconnect.studies.dataset.*()` line + 3. The function call (same parameters, same column names, same argument names) + + The perf test should exercise the same code path as the smoke test's happy-path call. Do not use generic placeholder calls or different datasets. + +**Design decisions:** +- Functions accepting any class: use `.loadServersideObject()` only, no `.checkClass()` +- Client tests must include unhappy paths testing server error propagation +- Start with Batch 1 (simple coercions) + +### Authorship + +After the refactor commits for a batch have landed, add `Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands` as a new `@author` roxygen line in every R/ file touched on each branch (dsBase and dsBaseClient), matching the existing `@author` line(s) below: + +``` +#' @author +#' @author Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands +``` + +Skip files that have no existing `@author` line (e.g. `R/utils.R`). Do this as a separate trailing commit per repo with message `docs: updated authorship`, not bundled with the refactor commits. + +**Only add the tag to files you actually refactored** (replaced `eval(parse())`, added `.loadServersideObject` / `.checkClass`, replaced MODULE 5, converted dispatch to `call()`, etc.). 
If a file in a batch's function list turns out not to need any substantive change — for example a server function whose inputs are client-transmitted literal data rather than object names (`dmtC2SDS` is one such case) — leave its author line as-is. Adding `@author Tim Cadman` to an untouched file is incorrect authorship attribution. + +## Excluded Functions + +**Deprecated (12):** ds.look, ds.meanByClass, ds.message, ds.recodeLevels, ds.setDefaultOpals, ds.subset, ds.subsetByClass, ds.table1D, ds.table2D, ds.vectorCalc, ds.listOpals, ds.listServersideFunctions + +**Already done:** ds.colnames / colnamesDS + +**Client-only (no server pair):** checkClass.R, isDefined.R, isAssigned.R, extract.R, glmChecks.R, getPooledMean.R, getPooledVar.R, helpers (meanByClassHelper*, subsetHelper, logical2int, colPercent, rowPercent) + +## Batches + +### Batch 1 — Simple Type Coercions (11 pairs) +Single input → single output, straightforward `eval(parse())` replacement. + +**TODO:** Add `classConsistencyCheck` parameter to batch-1 functions with >1 permitted class. For numeric/integer-only functions (ds.abs, ds.exp, ds.log, ds.sqrt) default to FALSE; for others (ds.asDataMatrix: data.frame/matrix, ds.asLogical: numeric/integer/character/matrix) default to TRUE. Server functions need to return class in results to support this. dsBase batch-1 is already merged — server changes need a new branch or inclusion in a later batch. + +**TODO:** `test-smk-asLogicalDS.R` in dsBase is missing a wrong-type test case (e.g. passing a list). Other `.checkClass()` functions (absDS, expDS, logDS, sqrtDS) have this test. dsBase batch-1 is merged — fix in a follow-up. 
+ +| Client | Server | Permitted classes | Notes | +|--------|--------|-------------------|-------| +| ds.abs | absDS | numeric, integer | | +| ds.asCharacter | asCharacterDS | * | | +| ds.asDataMatrix | asDataMatrixDS | data.frame, matrix | | +| ds.asInteger | asIntegerDS | * | | +| ds.asList | asListDS | * | **AGGREGATE** (not assign); server takes 2 params (x.name, newobj) | +| ds.asLogical | asLogicalDS | * | Server has existing type validation (numeric/integer/character/matrix) — preserve as `.checkClass()` | +| ds.asMatrix | asMatrixDS | * | | +| ds.asNumeric | asNumericDS | * | Server has complex factor/character conversion logic — preserve | +| ds.exp | **NEW: expDS** | numeric, integer | No server DS function exists — client currently calls native `exp()` via `as.symbol()`. Must create `expDS.R` | +| ds.log | **NEW: logDS** | numeric, integer | No server DS function exists — client currently calls native `log()` via `as.symbol()`. Must create `logDS.R`. Has `base` parameter | +| ds.sqrt | sqrtDS | numeric, integer | | + +`*` = accept any class — only use `.loadServersideObject()`, no `.checkClass()` needed + +**Batch 1 sub-patterns discovered:** +- **Math ops (abs, exp, log, sqrt):** Client uses `checkClass()` + `isAssigned()`, no MODULE 5 +- **Type conversions (asCharacter, asDataMatrix, asInteger, asLogical, asMatrix, asNumeric):** Client uses `isDefined()` + MODULE 5 block (except asList which has neither) +- **asList is unique:** Uses `datashield.aggregate` instead of `datashield.assign` + +### Batch 2 — Simple Aggregations (10 pairs) +Return results to client, no server-side assignment. 
+ +| Client | Server | Permitted classes | +|--------|--------|-------------------| +| ds.class | classDS | * | +| ds.dim | dimDS | data.frame, matrix | +| ds.length | lengthDS | character, factor, integer, logical, numeric, list | +| ds.names | namesDS | * | +| ds.isNA | isNaDS | character, factor, integer, logical, numeric, data.frame, matrix | +| ds.numNA | numNaDS | * | +| ds.ls | lsDS | (no object input) | +| ds.completeCases | completeCasesDS | * (no .checkClass — server handles via own branching) | +| ds.levels | levelsDS | factor | +| ds.unique | uniqueDS | * | + +**Deferred from Batch 2:** ds.isValid / isValidDS — `isValidDS` is used as an internal disclosure-control helper by `replaceNaDS` (Batch 4), `quantileMeanDS` (Batch 3), and `rowColCalcDS` (Batch 10), all passing objects directly. Cannot change `isValidDS` signature until those callers are refactored. Refactor ds.isValid/isValidDS when the last internal caller is refactored (see Batch 10 notes). + +**Batch 2 sub-patterns:** +- **Standard eval(parse()) functions (classDS, dimDS, lengthDS, namesDS, lsDS, completeCasesDS, uniqueDS):** Server uses `eval(parse(text=x), envir=parent.frame())` — replace with `.loadServersideObject()` +- **Dispatch-layer resolution functions (isNaDS, numNaDS, levelsDS):** Server receives resolved R objects via client `as.symbol()`/`call()` dispatch — change server to accept string name + `.loadServersideObject()`, change client to `call("funcDS", x)` +- **Assign functions (completeCases, unique):** Use `datashield.assign` not `datashield.aggregate` — still remove MODULE 5 / isAssigned +- **Client-side processing to preserve:** ds.dim and ds.length have `type` parameter with alias normalization and cross-study pooling; ds.isNA has per-study loop with conditional messaging; ds.ls has wildcard `*` → `_:A:_` escaping +- **Pooling functions (dimDS, lengthDS):** Return `list(dim=..., class=...)` / `list(length=..., class=...)` so client can check cross-study class consistency 
before pooling results + +### Batch 3 — Statistics (10 pairs) +Aggregate functions returning computed values. Some have multi-step server calls. + +| Client | Server | Notes | +|--------|--------|-------| +| ds.mean | meanDS | has disclosure controls | +| ds.var | varDS | has disclosure controls | +| ds.cor | corDS | two inputs | +| ds.corTest | corTestDS | two inputs | +| ds.cov | covDS | two inputs | +| ds.kurtosis | kurtosisDS1/DS2 | multi-step | +| ds.skewness | skewnessDS1/DS2 | multi-step | +| ds.quantileMean | quantileMeanDS | aggregate | +| ds.meanSdGp | meanSdGpDS | aggregate | +| ds.summary | (check server) | aggregate | + +### Batch 4 — Data Manipulation / Assign (15 pairs) +Create/modify server objects. Many have MODULE 5 blocks. + +| Client | Server | Notes | +|--------|--------|-------| +| ds.Boole | BooleDS | assign, MODULE 5 | +| ds.c | cDS | multi-input assign | +| ds.cbind | cbindDS | multi-input, permissive check | +| ds.rbind | rbindDS | multi-input | +| ds.dataFrame | dataFrameDS | multi-input, complex | +| ds.dataFrameSort | dataFrameSortDS | assign, MODULE 5 | +| ds.dataFrameSubset | dataFrameSubsetDS1/DS2 | multi-step | +| ds.dataFrameFill | dataFrameFillDS | assign | +| ds.list | listDS | assign | +| ds.unList | unListDS | assign | +| ds.merge | mergeDS | assign, MODULE 5 | +| ds.rep | repDS | assign | +| ds.seq | seqDS | assign | +| ds.replaceNA | replaceNaDS | assign, per-source loop | +| ds.recodeValues | recodeValuesDS | assign | + +### Batch 5 — Matrix Operations (8 pairs) + +| Client | Server | +|--------|--------| +| ds.matrix | matrixDS | +| ds.matrixDet | matrixDetDS1/DS2 | +| ds.matrixDet.report | matrixDetDS2 | +| ds.matrixDiag | matrixDiagDS | +| ds.matrixDimnames | matrixDimnamesDS | +| ds.matrixInvert | matrixInvertDS | +| ds.matrixMult | matrixMultDS | +| ds.matrixTranspose | matrixTransposeDS | + +### Batch 6 — Factor & Recoding (5 pairs) + +| Client | Server | +|--------|--------| +| ds.asFactor | asFactorDS1/DS2 | +| 
ds.asFactorSimple | asFactorSimpleDS | +| ds.changeRefGroup | changeRefGroupDS | +| ds.reShape | reShapeDS | +| ds.dmtC2S | dmtC2SDS | + +### Batch 7 — Modelling (8 pairs) +Most complex. Multiple server calls, complex validation logic. + +| Client | Server | +|--------|--------| +| ds.glm | glmDS1/DS2 | +| ds.glmSLMA | glmSLMADS1/DS2/assign | +| ds.glmPredict | glmPredictDS.ag/as | +| ds.glmSummary | glmSummaryDS.ag/as | +| ds.glmerSLMA | glmerSLMADS2/assign | +| ds.lmerSLMA | lmerSLMADS2/assign | +| ds.gamlss | gamlssDS | +| ds.mice | miceDS | + +### Batch 8 — Random Generation & Sampling (6 pairs) + +| Client | Server | +|--------|--------| +| ds.rBinom | rBinomDS | +| ds.rNorm | rNormDS | +| ds.rPois | rPoisDS | +| ds.rUnif | rUnifDS | +| ds.sample | sampleDS | +| ds.setSeed | setSeedDS | + +### Batch 9 — Plotting & Visualization (7 pairs) + +| Client | Server | +|--------|--------| +| ds.histogram | histogramDS1/DS2 | +| ds.heatmapPlot | heatmapPlotDS | +| ds.contourPlot | (check server name) | +| ds.densityGrid | densityGridDS | +| ds.scatterPlot | scatterPlotDS | +| ds.boxPlot | (check server) | +| ds.boxPlotGG | boxPlotGGDS | + +**Batch 9 note:** `ds.heatmapPlot`, `ds.contourPlot`, and `ds.densityGrid` call `rangeDS` which has **not** been refactored. These calls still use `as.symbol(paste0("rangeDS(", x, ")"))`. Once `rangeDS` is refactored (batch 10 or later), go back and update these three client functions to use `call("rangeDS", x=x)`. 
+ +### Batch 10 — Splines, Tables, Misc (14 pairs) + +| Client | Server | +|--------|--------| +| ds.elspline | elsplineDS | +| ds.lspline | lsplineDS | +| ds.ns | nsDS | +| ds.qlspline | qlsplineDS | +| ds.table | tableDS/tableDS.assign/tableDS2 | +| ds.tapply | tapplyDS | +| ds.tapply.assign | tapplyDS.assign | +| ds.rowColCalc | rowColCalcDS | +| ds.make | (check server) | +| ds.assign | (check server) | +| ds.metadata | metadataDS | +| ds.getWGSR | getWGSRDS | +| ds.lexis | lexisDS1/DS2/DS3 | +| ds.hetcor | hetcorDS | + +**Batch 10 dependency:** `rowColCalcDS` calls `isValidDS(result)` internally as a disclosure check. When refactoring `rowColCalcDS`, replace this with direct disclosure logic or `.loadServersideObject()` + `.checkClass()`. Once done, also refactor `ds.isValid` / `isValidDS` (deferred from Batch 2). Similarly, `replaceNaDS` (Batch 4) and `quantileMeanDS` (Batch 3) call `isValidDS()` internally — refactor those callers first before changing `isValidDS`'s signature. + +## Known Issues + +**Batch 4:** `ds.dataFrameFill` perf test cannot run — function requires columns to differ across studies, which is hard to set up in a perf loop. + +**Batch 6:** `ds.asFactor` and `ds.changeRefGroup` perf tests fail with server-side errors. The `asFactorDS1` aggregate call errors out. `ds.changeRefGroup` may have a known pre-existing issue. Both need investigation of the batch-6 server refactoring. + +**Batch 7:** `ds.gamlss` perf test fails with server-side error. May be a batch-7 refactoring issue in `gamlssDS` or a dataset availability issue (gamlss dataset may not be configured on all Armadillo instances). + +**Batch 8:** `ds.sample` smoke test fails at the `ds.length("newobj.sample")` call — this is because the batch-2 client PR has not been merged to v7.0-dev yet, so the old `ds.length` client code cannot handle the new `list(length=..., class=...)` return from the refactored `lengthDS`. 
+ +**Batch 9:** `rangeDS` has not been refactored, so `ds.heatmapPlot`, `ds.contourPlot`, and `ds.densityGrid` still use `as.symbol(paste0("rangeDS(", x, ")"))` for `rangeDS` calls. Once `rangeDS` is refactored, update these to use `call("rangeDS", x=x)`. + +## Per-Batch Workflow + +**Important:** dsBase and dsBaseClient are separate git repos. Changes must be committed and tested in the correct order since the client depends on the server package being installed. + +### Step 0 — Branch bootstrap + +When creating a new batch branch (in either repo) from `origin/v7.0-dev`: + +1. **dsBaseClient:** copy `R/utils.R` from the most recently refactored client branch (e.g. `origin/refactor/perf-batch-4`). `origin/v7.0-dev` on the client does not yet contain it — it only enters `v7.0-dev` once the batch-1 or batch-2 client PR merges. +2. **dsBaseClient:** copy `REFACTOR_GUIDE.md` from the same branch, and add `^REFACTOR_GUIDE\.md$` to `.Rbuildignore` if not already there. This keeps the guide alongside the code being refactored so rules added in later batches are visible to everyone. +3. **dsBase:** no bootstrap copy needed — `R/utils.R` with `.loadServersideObject` / `.checkClass` is already in `origin/v7.0-dev` (merged with batch-1). + +Commit the bootstrap separately (message: `chore: bootstrap batch-N from batch-M`) before starting the refactor work. + +### Step 1 — Server-side (dsBase repo) +1. Create feature branch from `v7.0-dev` in dsBase +2. Refactor server functions: + - Replace `eval(parse())` → `.loadServersideObject()` + - Add `.checkClass()` where the client had type guards +3. Write server-side unit tests (`test-smk-functionNameDS.R`) with happy + unhappy paths +4. Run `devtools::check(args = '--no-tests')` and `devtools::test()` in dsBase +5. Build package: `devtools::build()` + +### Step 2 — Install refactored dsBase on Armadillo +6. Ensure `inst/DATASHIELD` has `default.datashield.privacyControlLevel="permissive"` before building. 
**Must be the literal string `"permissive"`** — other values like `"banana"` will not work for all functions (e.g. `levelsDS` checks for `'permissive'` explicitly). +7. Rebuild the package: `devtools::build()` in dsBase (rebuild is required after the privacy-level change in step 6, even if step 5 already produced a tar) +8. Copy the built tar to dsBaseClient as `dsBase_7.0.0-permissive.tar.gz` (this is the filename the CI pipeline references in `armadillo_azure-pipelines.yml`) +9. Install on local Armadillo: `armadillo.login("http://localhost:8080")` then `armadillo.install_packages(paths = "", profile = "default")` + +### Step 3 — Client-side (dsBaseClient repo) +10. Create feature branch from `v7.0-dev` in dsBaseClient +11. Ensure `R/utils.R` exists (copy from `v7.0-dev-colnames` branch if needed) +12. Refactor client functions: + - Replace datasource boilerplate → `.set_datasources()` + - Remove `isDefined()`, `checkClass()`, `isAssigned()` calls + - Remove MODULE 5 blocks + - Replace null-input checks with `.check_df_name_provided()` where applicable +13. Update/add client end-to-end tests with happy + unhappy paths +14. Run `devtools::check(args = '--no-tests')` in dsBaseClient +15. Run tests against Armadillo, **not DSLite**. Set the driver to `"ArmadilloDriver"` in `tests/testthat/connection_to_datasets/login_details.R` (default is `"DSLiteDriver"`). DSLite uses whatever dsBase is installed locally in R, which may not match the refactored version on Armadillo. Run `devtools::test(filter = "smk-|disc|arg")` for affected functions (requires refactored dsBase to be installed on Armadillo) + +### Step 4 — Verify +16. Run full test suite to check no regressions +17. Run perf tests at 30 seconds (default): `devtools::test(filter = "perf-")` +18. Compare perf results against the v7.0-dev branch baseline to detect any regressions from the refactoring + +### Step 5 — Pre-merge audit (mandatory) + +Before marking a batch complete: + +1. 
**Diff every touched `test-smk-*.R` / `test-arg-*.R` / `test-disc-*.R` against the branch base.** For each removed `expect_*` assertion, confirm it falls into one of: + - (a) redundant — the same behaviour is covered by another assertion still present in the same file; + - (b) replaced — a new assertion was added that covers the same server-side behaviour (e.g. `ds_expect_variables` replacing `$is.object.created`, or `ds.summary`/`ds.class` on the newobj). + + Any removed assertion that doesn't fall into (a) or (b) is a coverage loss that must be restored before merge. + +2. **Inspect every `test_that(…)` block touched by the refactor.** Each block must still contain at least one `expect_*` assertion after the refactor. Blocks stripped to just the function call are not acceptable — add `ds_expect_variables`, `expect_no_error`, or a downstream property check. + +3. **Diff every signature of every exported function touched.** Confirm no parameter was added in the middle of the signature; new parameters must be at the end (see "Adding new parameters to existing exported functions" above). + +4. **Confirm docs match signature.** `@param` blocks present for every parameter, no stale `@param` for removed arguments, `@return` not promising MODULE 5 output fields. + +5. **Grep for residual patterns that should have been removed:** `isDefined(`, `isAssigned(`, `CLIENTSIDE MODULE`, `testObjExistsDS`, `is.object.created`, `validity.check`, `studyside.messages` in source files (acceptable in test files only if the MODULE 5 pattern is being deliberately preserved with a replacement). 
+ +## Key Files + +### Reference implementation +- Client refactored: `git show v7.0-dev-colnames:R/ds.colnames.R` +- Server refactored: `/Users/tcadman/github-repos/ds-core/dsBase/R/colnamesDS.R` +- Client utils: `git show v7.0-dev-colnames:R/utils.R` +- Server utils: `/Users/tcadman/github-repos/ds-core/dsBase/R/utils.R` +- Server tests: `/Users/tcadman/github-repos/ds-core/dsBase/tests/testthat/test-smk-colnamesDS.R` +- Client tests: `/Users/tcadman/github-repos/ds-core/dsBaseClient/tests/testthat/test-smk-ds.colnames.R` + +### Guides +- `/Users/tcadman/github-repos/ds-core/dsBaseClient/REFACTOR_GUIDE.md` +- `/Users/tcadman/github-repos/ds-core/dsBase/.github/pull_request_template` + +## Verification + +For each batch: +1. Run server-side unit tests: `cd dsBase && devtools::test(filter = "functionNameDS")` +2. Run client-side smoke tests: `cd dsBaseClient && devtools::test(filter = "smk-ds.functionName")` +3. Run `devtools::check(args = '--no-tests')` on both packages +4. Run full test suite: `devtools::test(filter = "smk-|disc|arg")` to check no regressions +5. Run perf tests: `PERF_DURATION_SEC=2 devtools::test(filter = "perf-")` to verify no performance regression + +## Follow-up: refactor client-side `checkClass` + +The client-side helper `checkClass()` currently does two jobs in one: (a) fetches the class of a server-side object and (b) checks the class is consistent across studies. After this refactor, the first job is redundant (the server returns `class` in aggregate results), but the second is still needed by composite dispatchers (`ds.summary` and similar). + +Planned cleanup (defer to a dedicated branch): + +- Rename `checkClass` → `.checkClass` to mark it internal (matches `.checkClassConsistency`, `.set_datasources`). +- Split its responsibilities: one helper that fetches class for pre-call routing, one that checks cross-study consistency on a set of classes. 
+- Update the remaining callers (`ds.summary` and any others still holding a client-side class pre-fetch). + +Not done as part of any single batch because the rename touches callers outside that batch's function set and would break functions not yet refactored. Schedule once all batches are merged — the rename then becomes one small, isolated commit. diff --git a/dsBase_7.0.0-permissive.tar.gz b/dsBase_7.0.0-permissive.tar.gz index ab4b862e2..31a59b6c2 100644 Binary files a/dsBase_7.0.0-permissive.tar.gz and b/dsBase_7.0.0-permissive.tar.gz differ diff --git a/man/ds.cor.Rd b/man/ds.cor.Rd index 030defaed..f8d8e5ebf 100644 --- a/man/ds.cor.Rd +++ b/man/ds.cor.Rd @@ -4,7 +4,13 @@ \alias{ds.cor} \title{Calculates the correlation of R objects in the server-side} \usage{ -ds.cor(x = NULL, y = NULL, type = "split", datasources = NULL) +ds.cor( + x = NULL, + y = NULL, + type = "split", + classConsistencyCheck = TRUE, + datasources = NULL +) } \arguments{ \item{x}{a character string providing the name of the input vector, data frame or matrix.} @@ -95,4 +101,6 @@ Server function called: \code{corDS} } \author{ DataSHIELD Development Team + +Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands } diff --git a/man/ds.corTest.Rd b/man/ds.corTest.Rd index 6277df16a..f7a4d5b61 100644 --- a/man/ds.corTest.Rd +++ b/man/ds.corTest.Rd @@ -11,6 +11,7 @@ ds.corTest( exact = NULL, conf.level = 0.95, type = "split", + classConsistencyCheck = FALSE, datasources = NULL ) } @@ -92,4 +93,6 @@ Server function called: \code{corTestDS} } \author{ DataSHIELD Development Team + +Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands } diff --git a/man/ds.cov.Rd b/man/ds.cov.Rd index 3fe9ec8f8..20e366ce8 100644 --- a/man/ds.cov.Rd +++ b/man/ds.cov.Rd @@ -9,6 +9,7 @@ ds.cov( y = NULL, naAction = "pairwise.complete", type = "split", + classConsistencyCheck = TRUE, datasources = NULL ) } @@ -118,4 +119,6 @@ and the total number of missing values aggregated from all the involved studies, 
} \author{ DataSHIELD Development Team + +Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands } diff --git a/man/ds.kurtosis.Rd b/man/ds.kurtosis.Rd index 4b698adff..7c23cb0fc 100644 --- a/man/ds.kurtosis.Rd +++ b/man/ds.kurtosis.Rd @@ -4,7 +4,13 @@ \alias{ds.kurtosis} \title{Calculates the kurtosis of a numeric variable} \usage{ -ds.kurtosis(x = NULL, method = 1, type = "both", datasources = NULL) +ds.kurtosis( + x = NULL, + method = 1, + type = "both", + classConsistencyCheck = FALSE, + datasources = NULL +) } \arguments{ \item{x}{a string character, the name of a numeric variable.} @@ -41,4 +47,6 @@ This function is similar to the function \code{kurtosis} in R package \code{e107 } \author{ Demetris Avraam, for DataSHIELD Development Team + +Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands } diff --git a/man/ds.mean.Rd b/man/ds.mean.Rd index d662a3895..97c13a90d 100644 --- a/man/ds.mean.Rd +++ b/man/ds.mean.Rd @@ -7,8 +7,8 @@ ds.mean( x = NULL, type = "split", - checks = FALSE, save.mean.Nvalid = FALSE, + classConsistencyCheck = FALSE, datasources = NULL ) } @@ -21,12 +21,7 @@ This can be set as \code{'combine'}, \code{'combined'}, \code{'combines'}, \code{'both'} or \code{'b'}. For more information see \strong{Details}.} -\item{checks}{logical. If TRUE optional checks of model -components will be undertaken. Default is FALSE to save time. -It is suggested that checks -should only be undertaken once the function call has failed.} - -\item{save.mean.Nvalid}{logical. If TRUE generated values of the mean and +\item{save.mean.Nvalid}{logical. If TRUE generated values of the mean and the number of valid (non-missing) observations will be saved on the data servers. Default FALSE. 
For more information see \strong{Details}.} @@ -110,7 +105,6 @@ Server function called: \code{meanDS} ds.mean(x = "D$LAB_TSC", type = "split", - checks = FALSE, save.mean.Nvalid = FALSE, datasources = connections) @@ -126,4 +120,6 @@ Server function called: \code{meanDS} } \author{ DataSHIELD Development Team + +Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands } diff --git a/man/ds.meanSdGp.Rd b/man/ds.meanSdGp.Rd index 964a5dae7..3e83f9013 100644 --- a/man/ds.meanSdGp.Rd +++ b/man/ds.meanSdGp.Rd @@ -8,7 +8,7 @@ ds.meanSdGp( x = NULL, y = NULL, type = "both", - do.checks = FALSE, + classConsistencyCheck = TRUE, datasources = NULL ) } @@ -24,12 +24,7 @@ This can be set as: \code{"combine"}, \code{"split"} or \code{"both"}. Default \code{"both"}. For more information see \strong{Details}.} -\item{do.checks}{logical. If TRUE the administrative checks -are undertaken to ensure that the input objects are defined in all studies and that the -variables are of equivalent class in each study. -Default is FALSE to save time.} - -\item{datasources}{a list of \code{\link[DSI]{DSConnection-class}} +\item{datasources}{a list of \code{\link[DSI]{DSConnection-class}} objects obtained after login. If the \code{datasources} argument is not specified the default set of connections will be used: see \code{\link[DSI]{datashield.connections_default}}.} } @@ -126,7 +121,6 @@ Server function called: \code{meanSdGpDS} ds.meanSdGp(x = "D$age.60", y = "D$time.id", type = "combine", - do.checks = FALSE, datasources = connections) #Example 2: Calculate the mean, SD, Nvalid and SEM of the continuous variable age.60 (age in @@ -137,8 +131,7 @@ Server function called: \code{meanSdGpDS} ds.meanSdGp(x = "D$age.60", y = "D$time.id", type = "both", - do.checks = FALSE, - datasources = connections) + datasources = connections) # clear the Datashield R sessions and logout datashield.logout(connections) @@ -153,4 +146,6 @@ columns and rows. 
} \author{ DataSHIELD Development Team + +Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands } diff --git a/man/ds.quantileMean.Rd b/man/ds.quantileMean.Rd index 03b469a18..525edfcf5 100644 --- a/man/ds.quantileMean.Rd +++ b/man/ds.quantileMean.Rd @@ -4,7 +4,12 @@ \alias{ds.quantileMean} \title{Computes the quantiles of a server-side variable} \usage{ -ds.quantileMean(x = NULL, type = "combine", datasources = NULL) +ds.quantileMean( + x = NULL, + type = "combine", + classConsistencyCheck = FALSE, + datasources = NULL +) } \arguments{ \item{x}{a character string specifying the name of the numeric vector.} @@ -85,4 +90,6 @@ Server functions called: \code{quantileMeanDS}, \code{length} and \code{numNaDS} } \author{ DataSHIELD Development Team + +Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands } diff --git a/man/ds.skewness.Rd b/man/ds.skewness.Rd index 7c3bda041..b0b7e9d45 100644 --- a/man/ds.skewness.Rd +++ b/man/ds.skewness.Rd @@ -4,7 +4,13 @@ \alias{ds.skewness} \title{Calculates the skewness of a server-side numeric variable} \usage{ -ds.skewness(x = NULL, method = 1, type = "both", datasources = NULL) +ds.skewness( + x = NULL, + method = 1, + type = "both", + classConsistencyCheck = FALSE, + datasources = NULL +) } \arguments{ \item{x}{a character string specifying the name of a numeric variable.} @@ -93,4 +99,6 @@ Server functions called: \code{skewnessDS1} and \code{skewnessDS2} } \author{ Demetris Avraam, for DataSHIELD Development Team + +Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands } diff --git a/man/ds.var.Rd b/man/ds.var.Rd index 5398c6e1a..7d8b3f0ce 100644 --- a/man/ds.var.Rd +++ b/man/ds.var.Rd @@ -4,7 +4,12 @@ \alias{ds.var} \title{Computes server-side vector variance} \usage{ -ds.var(x = NULL, type = "split", checks = FALSE, datasources = NULL) +ds.var( + x = NULL, + type = "split", + classConsistencyCheck = FALSE, + datasources = NULL +) } \arguments{ \item{x}{a character specifying the name of a numerical vector.} 
@@ -15,12 +20,7 @@ This can be set as \code{'combine'}, \code{'combined'}, \code{'combines'}, \code{'both'} or \code{'b'}. For more information see \strong{Details}.} -\item{checks}{logical. If TRUE optional checks of model -components will be undertaken. Default is FALSE to save time. -It is suggested that checks -should only be undertaken once the function call has failed.} - -\item{datasources}{a list of \code{\link[DSI]{DSConnection-class}} +\item{datasources}{a list of \code{\link[DSI]{DSConnection-class}} objects obtained after login. If the \code{datasources} argument is not specified the default set of connections will be used: see \code{\link[DSI]{datashield.connections_default}}.} } @@ -85,7 +85,6 @@ Server function called: \code{varDS} ds.var(x = "D$LAB_TSC", type = "split", - checks = FALSE, datasources = connections) # clear the Datashield R sessions and logout @@ -95,4 +94,6 @@ Server function called: \code{varDS} } \author{ DataSHIELD Development Team + +Tim Cadman, Genomics Coordination Centre, UMCG, Netherlands } diff --git a/tests/testthat/definition_tests/def-assign-stats.R b/tests/testthat/definition_tests/def-assign-stats.R index c96eca5fc..06eb83a3f 100644 --- a/tests/testthat/definition_tests/def-assign-stats.R +++ b/tests/testthat/definition_tests/def-assign-stats.R @@ -29,7 +29,7 @@ ds.test_env$MAX <- 5 value <- c(0.0,0.0,0.0) # compute dispersion and centrality - mean.from.servers <- ds.mean(x=name.variable,type='combine', check=TRUE,save.mean.Nvalid=FALSE) + mean.from.servers <- ds.mean(x=name.variable,type='combine',save.mean.Nvalid=FALSE) var.from.servers <- ds.var(x=name.variable,type='combine') # compute range diff --git a/tests/testthat/definition_tests/def-ds.mean.R b/tests/testthat/definition_tests/def-ds.mean.R index 9cec838a7..090fc6c33 100644 --- a/tests/testthat/definition_tests/def-ds.mean.R +++ b/tests/testthat/definition_tests/def-ds.mean.R @@ -4,7 +4,7 @@ source("definition_tests/def-assign-stats.R") .test.mean.combined <- 
function(variable.name,some.values) { mean.local <- mean(some.values) - mean.server <- ds.mean(x=variable.name,type='combine', check=TRUE,save.mean.Nvalid=FALSE) + mean.server <- ds.mean(x=variable.name,type='combine',save.mean.Nvalid=FALSE) expect_equal(mean.server[[1]][1], mean.local, tolerance = ds.test_env$tolerance) } @@ -15,7 +15,7 @@ source("definition_tests/def-assign-stats.R") mean.local.2 <- mean(some.values.2) mean.local.3 <- mean(some.values.3) - mean.server <- ds.mean(x=variable.name,type='split', check=TRUE,save.mean.Nvalid=FALSE) + mean.server <- ds.mean(x=variable.name,type='split',save.mean.Nvalid=FALSE) expect_equal(mean.server[[1]][1], mean.local.1, tolerance = ds.test_env$low_tolerance) expect_equal(mean.server[[1]][2], mean.local.2, tolerance = ds.test_env$low_tolerance) expect_equal(mean.server[[1]][3], mean.local.3, tolerance = ds.test_env$low_tolerance) @@ -23,7 +23,7 @@ source("definition_tests/def-assign-stats.R") .test.residual.combined <- function(variable.name, some.values) { - mean.server <- ds.mean(variable.name,type='combine', check=TRUE,save.mean.Nvalid=FALSE) + mean.server <- ds.mean(variable.name,type='combine',save.mean.Nvalid=FALSE) residue <- sum(some.values - mean.server[[1]][1]) expect_equal(residue, 0, tolerance = ds.test_env$very_low_tolerance) } @@ -31,7 +31,7 @@ source("definition_tests/def-assign-stats.R") .test.residual.split <- function(variable.name, some.values.1,some.values.2,some.values.3) { - mean.server <- ds.mean(variable.name,type='split', check=TRUE,save.mean.Nvalid=FALSE) + mean.server <- ds.mean(variable.name,type='split',save.mean.Nvalid=FALSE) residue.1 <- sum(some.values.1 - mean.server[[1]][1]) residue.2 <- sum(some.values.2 - mean.server[[1]][2]) residue.3 <- sum(some.values.3 - mean.server[[1]][3]) @@ -54,7 +54,7 @@ source("definition_tests/def-assign-stats.R") ds.make(operation,variable.created) - mean.server <- ds.mean(variable.created,type='combine', check=TRUE) + mean.server <- 
ds.mean(variable.created,type='combine') expect_equal(mean.server[[1]][1], mean.local, tolerance = ds.test_env$tolerance) } @@ -67,8 +67,8 @@ source("definition_tests/def-assign-stats.R") ds.make(operation,variable.created) #calculate variances - var.no.change <- ds.mean(variable.name,type='combine',check=TRUE,save.mean.Nvalid=FALSE) - var.changes <- ds.mean(variable.created,type='combine', check=TRUE,save.mean.Nvalid=FALSE) + var.no.change <- ds.mean(variable.name,type='combine',save.mean.Nvalid=FALSE) + var.changes <- ds.mean(variable.created,type='combine',save.mean.Nvalid=FALSE) difference <- var.changes[[1]][1] - var.no.change[[1]][1] #comparison @@ -84,8 +84,8 @@ source("definition_tests/def-assign-stats.R") ds.make(operation,variable.created) #calculate variances - var.no.change <- ds.mean(variable.name,type='combine',check=TRUE,save.mean.Nvalid=FALSE) - var.changes <- ds.mean(variable.created,type='combine', check=TRUE,save.mean.Nvalid=FALSE) + var.no.change <- ds.mean(variable.name,type='combine',save.mean.Nvalid=FALSE) + var.changes <- ds.mean(variable.created,type='combine',save.mean.Nvalid=FALSE) scale <- constant.value * var.no.change[[1]][1] #comparison expect_equal(scale, var.changes[[1]][1], tolerance = ds.test_env$tolerance) diff --git a/tests/testthat/definition_tests/def-ds.var.R b/tests/testthat/definition_tests/def-ds.var.R index a8a16994e..586d96ddc 100644 --- a/tests/testthat/definition_tests/def-ds.var.R +++ b/tests/testthat/definition_tests/def-ds.var.R @@ -4,7 +4,7 @@ source("definition_tests/def-assign-stats.R") .test.var.combined <- function(variable.name,some.values) { var.local <- var(some.values) - var.server <- ds.var(variable.name,type='combine', check=TRUE) + var.server <- ds.var(variable.name,type='combine') expect_equal(var.server[[1]][1], var.local, tolerance = ds.test_env$tolerance) } @@ -15,7 +15,7 @@ source("definition_tests/def-assign-stats.R") var.local.3 <- var(some.values.3) - var.servers <- 
ds.var(x=variable.name,type='split', check=TRUE) + var.servers <- ds.var(x=variable.name,type='split') expect_equal(var.servers[[1]][1], var.local.1, tolerance = ds.test_env$low_tolerance) expect_equal(var.servers[[1]][2], var.local.2, tolerance = ds.test_env$low_tolerance) expect_equal(var.servers[[1]][3], var.local.3, tolerance = ds.test_env$low_tolerance) @@ -23,13 +23,13 @@ source("definition_tests/def-assign-stats.R") .test.variance.positive.combine <- function(variable.name) { - var.server <- ds.var(variable.name,type='combine', check=TRUE) + var.server <- ds.var(variable.name,type='combine') expect_true(var.server$Global.Variance[1] >= 0) } .test.variance.positive.split <- function(variable.name) { - var.server <- ds.var(variable.name,type='split', check=TRUE) + var.server <- ds.var(variable.name,type='split') expect_true(var.server[[1]][1] >= 0) expect_true(var.server[[1]][2] >= 0) expect_true(var.server[[1]][3] >= 0) @@ -73,7 +73,7 @@ source("definition_tests/def-assign-stats.R") ds.make(operation,variable.created) - var.server <- ds.var(variable.created,type='combine', check=TRUE) + var.server <- ds.var(variable.created,type='combine') expect_equal(var.server[[1]][1], var.local, tolerance = ds.test_env$tolerance) } @@ -86,8 +86,8 @@ source("definition_tests/def-assign-stats.R") ds.make(operation,variable.created) #calculate variances - var.no.change <- ds.var(variable.name,type='combine', check=TRUE) - var.changes <- ds.var(variable.created,type='combine', check=TRUE) + var.no.change <- ds.var(variable.name,type='combine') + var.changes <- ds.var(variable.created,type='combine') #comparison expect_equal(var.no.change[[1]][1], var.changes[[1]][1], tolerance = ds.test_env$tolerance) @@ -102,8 +102,8 @@ source("definition_tests/def-assign-stats.R") ds.make(operation,variable.created) #calculate variances - var.no.change <- ds.var(variable.name,type='combine', check=TRUE) - var.changes <- ds.var(variable.created,type='combine', check=TRUE) + var.no.change <- 
ds.var(variable.name,type='combine') + var.changes <- ds.var(variable.created,type='combine') scale <- constant.value^2 * var.no.change[[1]][1] #comparison expect_equal(scale, var.changes[[1]][1], tolerance = ds.test_env$tolerance) @@ -112,6 +112,6 @@ source("definition_tests/def-assign-stats.R") .test.large.vectors <- function(dist.name, size) { ds.rPois(samp.size = size, lambda = c(50), newobj=dist.name) - var.changes <- ds.var(dist.name,type='combine', check=TRUE) + var.changes <- ds.var(dist.name,type='combine') expect_false(is.na(var.changes[[1]][1])) } diff --git a/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-both.rds b/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-both.rds index e6a738de9..91062443c 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-both.rds and b/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-combine.rds b/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-combine.rds index 61f75f611..485f85de0 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-combine.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-split.rds b/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-split.rds index 7f950f3fc..cfeb664b1 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-split.rds and b/tests/testthat/smk_expt-results/ds.mean-DIS_AMI-split.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-both.rds b/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-both.rds index 7ae3f0a4d..dbb6ef76d 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-both.rds and b/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-combine.rds b/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-combine.rds index 1bd293f24..84205f1e9 100644 Binary files 
a/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-combine.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-split.rds b/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-split.rds index 54c9fd533..f7e566ddb 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-split.rds and b/tests/testthat/smk_expt-results/ds.mean-DIS_CVA-split.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-both.rds b/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-both.rds index ad6bae6e7..29b7e8fb0 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-both.rds and b/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-combine.rds b/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-combine.rds index ecb8c49c0..20762a1bf 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-combine.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-split.rds b/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-split.rds index f3ff37cfe..02d0f9cd5 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-split.rds and b/tests/testthat/smk_expt-results/ds.mean-DIS_DIAB-split.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-GENDER-both.rds b/tests/testthat/smk_expt-results/ds.mean-GENDER-both.rds index 4f24b59e5..3749727ab 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-GENDER-both.rds and b/tests/testthat/smk_expt-results/ds.mean-GENDER-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-GENDER-combine.rds b/tests/testthat/smk_expt-results/ds.mean-GENDER-combine.rds index 490b78611..aa6a8e7b8 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-GENDER-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-GENDER-combine.rds 
differ diff --git a/tests/testthat/smk_expt-results/ds.mean-GENDER-split.rds b/tests/testthat/smk_expt-results/ds.mean-GENDER-split.rds index 1812e0a2d..e1a0cd85c 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-GENDER-split.rds and b/tests/testthat/smk_expt-results/ds.mean-GENDER-split.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-both.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-both.rds index 2113ca0f9..90c788326 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-both.rds and b/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-combine.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-combine.rds index 6e88ef951..0f693f3e6 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-combine.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-split.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-split.rds index 721440c25..d344348ea 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-split.rds and b/tests/testthat/smk_expt-results/ds.mean-LAB_GLUC_ADJUSTED-split.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-both.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-both.rds index 4be94380e..a2db63a53 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-both.rds and b/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-combine.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-combine.rds index 9dfc55ecb..01d181e97 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-combine.rds differ diff --git 
a/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-split.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-split.rds index 030d377ac..99932d3ce 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-split.rds and b/tests/testthat/smk_expt-results/ds.mean-LAB_HDL-split.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_TRIG-both.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_TRIG-both.rds deleted file mode 100644 index ba0028eda..000000000 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_TRIG-both.rds and /dev/null differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_TRIG-combine.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_TRIG-combine.rds deleted file mode 100644 index 5d1e7b558..000000000 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_TRIG-combine.rds and /dev/null differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_TRIG-split.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_TRIG-split.rds deleted file mode 100644 index 52bdfdc31..000000000 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_TRIG-split.rds and /dev/null differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-both.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-both.rds index 1053d5a72..7359e7f73 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-both.rds and b/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-combine.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-combine.rds index 5ff25d553..8e3fdc325 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-combine.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-split.rds b/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-split.rds index 3da059ec2..dbac941e7 100644 Binary files 
a/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-split.rds and b/tests/testthat/smk_expt-results/ds.mean-LAB_TSC-split.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-both.rds b/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-both.rds index b9221fe8c..0b97cdb49 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-both.rds and b/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-combine.rds b/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-combine.rds index d0c4d65c9..29ebc3647 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-combine.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-split.rds b/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-split.rds index 15d9710ec..656fd8673 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-split.rds and b/tests/testthat/smk_expt-results/ds.mean-MEDI_LPD-split.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-both.rds b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-both.rds index 0592c5288..4a756b328 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-both.rds and b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-combine.rds b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-combine.rds index b16efc6e0..f26c04f8c 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-combine.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-split.rds b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-split.rds index 24f97ab70..4aa59c4a2 100644 Binary files 
a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-split.rds and b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CATEGORICAL-split.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-both.rds b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-both.rds index dba08251d..9869722f3 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-both.rds and b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-both.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-combine.rds b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-combine.rds index 391d6ea34..210fd1996 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-combine.rds and b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-combine.rds differ diff --git a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-split.rds b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-split.rds index 9e52cd9a4..da80ae310 100644 Binary files a/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-split.rds and b/tests/testthat/smk_expt-results/ds.mean-PM_BMI_CONTINUOUS-split.rds differ diff --git a/tests/testthat/test-arg-ds.cor.R b/tests/testthat/test-arg-ds.cor.R index 47b5a5abb..168bc5cf9 100644 --- a/tests/testthat/test-arg-ds.cor.R +++ b/tests/testthat/test-arg-ds.cor.R @@ -22,7 +22,7 @@ connect.studies.dataset.cnsim(list("LAB_TSC", "LAB_TRIG")) # context("ds.cor::arg::test errors") test_that("cor_erros", { expect_error(ds.cor(), 'x=NULL. 
Please provide the name of a matrix or dataframe or the names of two numeric vectors!', fixed=TRUE) - expect_error(ds.cor(x='D$LAB_TSC'), 'If x is a numeric vector, y must be a numeric vector!', fixed=TRUE) + expect_error(ds.cor(x='D$LAB_TSC'), "DataSHIELD errors") expect_error(ds.cor(x='D$LAB_TSC', y='D$LAB_TRIG', type='datashield'), 'Function argument "type" has to be either "combine" or "split"', fixed=TRUE) }) diff --git a/tests/testthat/test-arg-ds.cov.R b/tests/testthat/test-arg-ds.cov.R index 29330c9de..f89fa84ed 100644 --- a/tests/testthat/test-arg-ds.cov.R +++ b/tests/testthat/test-arg-ds.cov.R @@ -22,7 +22,7 @@ connect.studies.dataset.cnsim(list("LAB_TSC", "LAB_TRIG")) # context("ds.cov::arg::test errors") test_that("cov_erros", { expect_error(ds.cov(), 'x=NULL. Please provide the name of a matrix or dataframe or the names of two numeric vectors!', fixed=TRUE) - expect_error(ds.cov(x='D$LAB_TSC'), 'If x is a numeric vector, y must be a numeric vector!', fixed=TRUE) + expect_error(ds.cov(x='D$LAB_TSC'), "DataSHIELD errors") expect_error(ds.cov(x='D$LAB_TSC', y='D$LAB_TRIG', type='datashield'), 'Function argument "type" has to be either "combine" or "split"', fixed=TRUE) }) diff --git a/tests/testthat/test-arg-ds.mean.R b/tests/testthat/test-arg-ds.mean.R index 96ef958ba..4ae006e71 100644 --- a/tests/testthat/test-arg-ds.mean.R +++ b/tests/testthat/test-arg-ds.mean.R @@ -25,7 +25,7 @@ test_that("mean_erros", { expect_error(ds.mean(), "Please provide the name of the input object!", fixed=TRUE) expect_error(ds.mean(x='D$LAB_TSC', type='datashield'), 'Function argument "type" has to be either "both", "combine" or "split"', fixed=TRUE) - expect_error(ds.mean(x='not_a_numeric', checks=TRUE), "The input object must be an integer or a numeric vector.", fixed=TRUE) + expect_error(ds.mean(x='not_a_numeric'), "DataSHIELD errors") }) #context("ds.mean::arg::discordant errors") diff --git a/tests/testthat/test-arg-ds.quantileMean.R 
b/tests/testthat/test-arg-ds.quantileMean.R index 4d1587628..9b701d628 100644 --- a/tests/testthat/test-arg-ds.quantileMean.R +++ b/tests/testthat/test-arg-ds.quantileMean.R @@ -24,7 +24,7 @@ ds.asCharacter(x='D$LAB_HDL', newobj="not_a_numeric") test_that("quantileMean_erros", { expect_error(ds.quantileMean(), "Please provide the name of the input vector!", fixed=TRUE) expect_error(ds.quantileMean(x='D$LAB_HDL', type='datashield'), 'Function argument "type" has to be either "combine" or "split"', fixed=TRUE) - expect_error(ds.quantileMean(x='not_a_numeric'), "The input object must be an integer or numeric vector.", fixed=TRUE) + expect_error(ds.quantileMean(x='not_a_numeric'), "DataSHIELD errors") }) # diff --git a/tests/testthat/test-arg-ds.var.R b/tests/testthat/test-arg-ds.var.R index 8ececd0d0..c28f23dec 100644 --- a/tests/testthat/test-arg-ds.var.R +++ b/tests/testthat/test-arg-ds.var.R @@ -24,7 +24,7 @@ test_that("var_erros", { ds.asCharacter(x='D$LAB_TSC', newobj="not_a_numeric") expect_error(ds.var(), "Please provide the name of the input object!", fixed=TRUE) - expect_error(ds.var(x="not_a_numeric", checks=TRUE), "The input object must be an integer or a numeric vector.", fixed=TRUE) + expect_error(ds.var(x="not_a_numeric"), "DataSHIELD errors") }) # diff --git a/tests/testthat/test-perf-ds.cor.R b/tests/testthat/test-perf-ds.cor.R new file mode 100644 index 000000000..ffa963d52 --- /dev/null +++ b/tests/testthat/test-perf-ds.cor.R @@ -0,0 +1,59 @@ +#------------------------------------------------------------------------------- +# Copyright (c) 2024-2025 Arjuna Technologies, Newcastle upon Tyne. All rights reserved. +# +# This program and the accompanying materials +# are made available under the terms of the GNU Public License v3.0. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+#------------------------------------------------------------------------------- + +# +# Set up +# + +# context("ds.cor::perf::setup") +connect.studies.dataset.survival(list("survtime", "time.id", "female")) + +# +# Tests +# + +# context("ds.cor::perf::combine:0") +test_that("combine - performance", { + .durationSec <- 30 # seconds + .count <- 0 + .start.time <- Sys.time() + .current.time <- .start.time + + while (difftime(.current.time, .start.time, units = "secs")[[1]] < .durationSec) { + ds.cor("D$LAB_TSC", "D$LAB_TRIG") + + .count <- .count + 1 + .current.time <- Sys.time() + } + + .current.rate <- .count / (difftime(.current.time, .start.time, units = "secs")[[1]]) + .reference.rate <- perf.reference.rate("ds.cor::perf::combine:0") + if (any(length(.reference.rate) == 0) || any(is.null(.reference.rate))) { + print(paste("ds.cor::perf::combine:0 ", .current.rate, 0.5, 2.0)) + perf.reference.save("ds.cor::perf::combine:0", .current.rate, 0.5, 2.0) + } else { + print(paste("ds.cor::perf::combine:0 ", format(.current.rate, digits = 8), ", ", format(100.0 * .current.rate / .reference.rate, digits = 4), "%", sep = '')) + } + + .reference.rate <- perf.reference.rate("ds.cor::perf::combine:0") + .reference.tolerance.lower <- perf.reference.tolerance.lower("ds.cor::perf::combine:0") + .reference.tolerance.upper <- perf.reference.tolerance.upper("ds.cor::perf::combine:0") + + expect_gt(.current.rate, .reference.rate * .reference.tolerance.lower, label = "Observed rate", expected.label = "lower threshold on rate") + expect_lt(.current.rate, .reference.rate * .reference.tolerance.upper, label = "Observed rate", expected.label = "upper threshold on rate") +}) + +# +# Done +# + +# context("ds.cor::perf::shutdown") +disconnect.studies.dataset.survival() +# context("ds.cor::perf::done") diff --git a/tests/testthat/test-perf-ds.corTest.R b/tests/testthat/test-perf-ds.corTest.R new file mode 100644 index 000000000..1b4f71204 --- /dev/null +++ 
b/tests/testthat/test-perf-ds.corTest.R @@ -0,0 +1,59 @@ +#------------------------------------------------------------------------------- +# Copyright (c) 2024-2025 Arjuna Technologies, Newcastle upon Tyne. All rights reserved. +# +# This program and the accompanying materials +# are made available under the terms of the GNU Public License v3.0. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- + +# +# Set up +# + +# context("ds.corTest::perf::setup") +connect.studies.dataset.survival(list("survtime", "time.id")) + +# +# Tests +# + +# context("ds.corTest::perf::combine:0") +test_that("combine - performance", { + .durationSec <- 30 # seconds + .count <- 0 + .start.time <- Sys.time() + .current.time <- .start.time + + while (difftime(.current.time, .start.time, units = "secs")[[1]] < .durationSec) { + ds.corTest("D$LAB_TSC", "D$LAB_TRIG") + + .count <- .count + 1 + .current.time <- Sys.time() + } + + .current.rate <- .count / (difftime(.current.time, .start.time, units = "secs")[[1]]) + .reference.rate <- perf.reference.rate("ds.corTest::perf::combine:0") + if (any(length(.reference.rate) == 0) || any(is.null(.reference.rate))) { + print(paste("ds.corTest::perf::combine:0 ", .current.rate, 0.5, 2.0)) + perf.reference.save("ds.corTest::perf::combine:0", .current.rate, 0.5, 2.0) + } else { + print(paste("ds.corTest::perf::combine:0 ", format(.current.rate, digits = 8), ", ", format(100.0 * .current.rate / .reference.rate, digits = 4), "%", sep = '')) + } + + .reference.rate <- perf.reference.rate("ds.corTest::perf::combine:0") + .reference.tolerance.lower <- perf.reference.tolerance.lower("ds.corTest::perf::combine:0") + .reference.tolerance.upper <- perf.reference.tolerance.upper("ds.corTest::perf::combine:0") + + expect_gt(.current.rate, .reference.rate * .reference.tolerance.lower, label = "Observed rate", expected.label 
= "lower threshold on rate") + expect_lt(.current.rate, .reference.rate * .reference.tolerance.upper, label = "Observed rate", expected.label = "upper threshold on rate") +}) + +# +# Done +# + +# context("ds.corTest::perf::shutdown") +disconnect.studies.dataset.survival() +# context("ds.corTest::perf::done") diff --git a/tests/testthat/test-perf-ds.cov.R b/tests/testthat/test-perf-ds.cov.R new file mode 100644 index 000000000..4342c130a --- /dev/null +++ b/tests/testthat/test-perf-ds.cov.R @@ -0,0 +1,59 @@ +#------------------------------------------------------------------------------- +# Copyright (c) 2024-2025 Arjuna Technologies, Newcastle upon Tyne. All rights reserved. +# +# This program and the accompanying materials +# are made available under the terms of the GNU Public License v3.0. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- + +# +# Set up +# + +# context("ds.cov::perf::setup") +connect.studies.dataset.survival(list("survtime", "time.id", "female")) + +# +# Tests +# + +# context("ds.cov::perf::combine:0") +test_that("combine - performance", { + .durationSec <- 30 # seconds + .count <- 0 + .start.time <- Sys.time() + .current.time <- .start.time + + while (difftime(.current.time, .start.time, units = "secs")[[1]] < .durationSec) { + ds.cov("D$LAB_TSC", "D$LAB_TRIG") + + .count <- .count + 1 + .current.time <- Sys.time() + } + + .current.rate <- .count / (difftime(.current.time, .start.time, units = "secs")[[1]]) + .reference.rate <- perf.reference.rate("ds.cov::perf::combine:0") + if (any(length(.reference.rate) == 0) || any(is.null(.reference.rate))) { + print(paste("ds.cov::perf::combine:0 ", .current.rate, 0.5, 2.0)) + perf.reference.save("ds.cov::perf::combine:0", .current.rate, 0.5, 2.0) + } else { + print(paste("ds.cov::perf::combine:0 ", format(.current.rate, digits = 8), ", ", format(100.0 * 
.current.rate / .reference.rate, digits = 4), "%", sep = '')) + } + + .reference.rate <- perf.reference.rate("ds.cov::perf::combine:0") + .reference.tolerance.lower <- perf.reference.tolerance.lower("ds.cov::perf::combine:0") + .reference.tolerance.upper <- perf.reference.tolerance.upper("ds.cov::perf::combine:0") + + expect_gt(.current.rate, .reference.rate * .reference.tolerance.lower, label = "Observed rate", expected.label = "lower threshold on rate") + expect_lt(.current.rate, .reference.rate * .reference.tolerance.upper, label = "Observed rate", expected.label = "upper threshold on rate") +}) + +# +# Done +# + +# context("ds.cov::perf::shutdown") +disconnect.studies.dataset.survival() +# context("ds.cov::perf::done") diff --git a/tests/testthat/test-perf-ds.kurtosis.R b/tests/testthat/test-perf-ds.kurtosis.R new file mode 100644 index 000000000..d0998b5d9 --- /dev/null +++ b/tests/testthat/test-perf-ds.kurtosis.R @@ -0,0 +1,59 @@ +#------------------------------------------------------------------------------- +# Copyright (c) 2024-2025 Arjuna Technologies, Newcastle upon Tyne. All rights reserved. +# +# This program and the accompanying materials +# are made available under the terms of the GNU Public License v3.0. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+#------------------------------------------------------------------------------- + +# +# Set up +# + +# context("ds.kurtosis::perf::setup") +connect.studies.dataset.cnsim(list("LAB_TSC", "LAB_TRIG", "LAB_HDL", "LAB_GLUC_ADJUSTED", "PM_BMI_CONTINUOUS")) + +# +# Tests +# + +# context("ds.kurtosis::perf::combine:0") +test_that("combine - performance", { + .durationSec <- 30 # seconds + .count <- 0 + .start.time <- Sys.time() + .current.time <- .start.time + + while (difftime(.current.time, .start.time, units = "secs")[[1]] < .durationSec) { + ds.kurtosis("D$LAB_TSC") + + .count <- .count + 1 + .current.time <- Sys.time() + } + + .current.rate <- .count / (difftime(.current.time, .start.time, units = "secs")[[1]]) + .reference.rate <- perf.reference.rate("ds.kurtosis::perf::combine:0") + if (any(length(.reference.rate) == 0) || any(is.null(.reference.rate))) { + print(paste("ds.kurtosis::perf::combine:0 ", .current.rate, 0.5, 2.0)) + perf.reference.save("ds.kurtosis::perf::combine:0", .current.rate, 0.5, 2.0) + } else { + print(paste("ds.kurtosis::perf::combine:0 ", format(.current.rate, digits = 8), ", ", format(100.0 * .current.rate / .reference.rate, digits = 4), "%", sep = '')) + } + + .reference.rate <- perf.reference.rate("ds.kurtosis::perf::combine:0") + .reference.tolerance.lower <- perf.reference.tolerance.lower("ds.kurtosis::perf::combine:0") + .reference.tolerance.upper <- perf.reference.tolerance.upper("ds.kurtosis::perf::combine:0") + + expect_gt(.current.rate, .reference.rate * .reference.tolerance.lower, label = "Observed rate", expected.label = "lower threshold on rate") + expect_lt(.current.rate, .reference.rate * .reference.tolerance.upper, label = "Observed rate", expected.label = "upper threshold on rate") +}) + +# +# Done +# + +# context("ds.kurtosis::perf::shutdown") +disconnect.studies.dataset.cnsim() +# context("ds.kurtosis::perf::done") diff --git a/tests/testthat/test-perf-ds.meanSdGp.R b/tests/testthat/test-perf-ds.meanSdGp.R new file mode 
100644 index 000000000..cc2347693 --- /dev/null +++ b/tests/testthat/test-perf-ds.meanSdGp.R @@ -0,0 +1,59 @@ +#------------------------------------------------------------------------------- +# Copyright (c) 2024-2025 Arjuna Technologies, Newcastle upon Tyne. All rights reserved. +# +# This program and the accompanying materials +# are made available under the terms of the GNU Public License v3.0. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- + +# +# Set up +# + +# context("ds.meanSdGp::perf::setup") +connect.studies.dataset.survival(list("age.60", "female")) + +# +# Tests +# + +# context("ds.meanSdGp::perf::combine:0") +test_that("combine - performance", { + .durationSec <- 30 # seconds + .count <- 0 + .start.time <- Sys.time() + .current.time <- .start.time + + while (difftime(.current.time, .start.time, units = "secs")[[1]] < .durationSec) { + ds.meanSdGp("D$LAB_TSC", "D$PM_BMI_CATEGORICAL") + + .count <- .count + 1 + .current.time <- Sys.time() + } + + .current.rate <- .count / (difftime(.current.time, .start.time, units = "secs")[[1]]) + .reference.rate <- perf.reference.rate("ds.meanSdGp::perf::combine:0") + if (any(length(.reference.rate) == 0) || any(is.null(.reference.rate))) { + print(paste("ds.meanSdGp::perf::combine:0 ", .current.rate, 0.5, 2.0)) + perf.reference.save("ds.meanSdGp::perf::combine:0", .current.rate, 0.5, 2.0) + } else { + print(paste("ds.meanSdGp::perf::combine:0 ", format(.current.rate, digits = 8), ", ", format(100.0 * .current.rate / .reference.rate, digits = 4), "%", sep = '')) + } + + .reference.rate <- perf.reference.rate("ds.meanSdGp::perf::combine:0") + .reference.tolerance.lower <- perf.reference.tolerance.lower("ds.meanSdGp::perf::combine:0") + .reference.tolerance.upper <- perf.reference.tolerance.upper("ds.meanSdGp::perf::combine:0") + + expect_gt(.current.rate, .reference.rate 
* .reference.tolerance.lower, label = "Observed rate", expected.label = "lower threshold on rate") + expect_lt(.current.rate, .reference.rate * .reference.tolerance.upper, label = "Observed rate", expected.label = "upper threshold on rate") +}) + +# +# Done +# + +# context("ds.meanSdGp::perf::shutdown") +disconnect.studies.dataset.survival() +# context("ds.meanSdGp::perf::done") diff --git a/tests/testthat/test-perf-ds.quantileMean.R b/tests/testthat/test-perf-ds.quantileMean.R new file mode 100644 index 000000000..b40491926 --- /dev/null +++ b/tests/testthat/test-perf-ds.quantileMean.R @@ -0,0 +1,59 @@ +#------------------------------------------------------------------------------- +# Copyright (c) 2024-2025 Arjuna Technologies, Newcastle upon Tyne. All rights reserved. +# +# This program and the accompanying materials +# are made available under the terms of the GNU Public License v3.0. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+#------------------------------------------------------------------------------- + +# +# Set up +# + +# context("ds.quantileMean::perf::setup") +connect.studies.dataset.cnsim(list('LAB_HDL')) + +# +# Tests +# + +# context("ds.quantileMean::perf::combine:0") +test_that("combine - performance", { + .durationSec <- 30 # seconds + .count <- 0 + .start.time <- Sys.time() + .current.time <- .start.time + + while (difftime(.current.time, .start.time, units = "secs")[[1]] < .durationSec) { + ds.quantileMean("D$LAB_TSC") + + .count <- .count + 1 + .current.time <- Sys.time() + } + + .current.rate <- .count / (difftime(.current.time, .start.time, units = "secs")[[1]]) + .reference.rate <- perf.reference.rate("ds.quantileMean::perf::combine:0") + if (any(length(.reference.rate) == 0) || any(is.null(.reference.rate))) { + print(paste("ds.quantileMean::perf::combine:0 ", .current.rate, 0.5, 2.0)) + perf.reference.save("ds.quantileMean::perf::combine:0", .current.rate, 0.5, 2.0) + } else { + print(paste("ds.quantileMean::perf::combine:0 ", format(.current.rate, digits = 8), ", ", format(100.0 * .current.rate / .reference.rate, digits = 4), "%", sep = '')) + } + + .reference.rate <- perf.reference.rate("ds.quantileMean::perf::combine:0") + .reference.tolerance.lower <- perf.reference.tolerance.lower("ds.quantileMean::perf::combine:0") + .reference.tolerance.upper <- perf.reference.tolerance.upper("ds.quantileMean::perf::combine:0") + + expect_gt(.current.rate, .reference.rate * .reference.tolerance.lower, label = "Observed rate", expected.label = "lower threshold on rate") + expect_lt(.current.rate, .reference.rate * .reference.tolerance.upper, label = "Observed rate", expected.label = "upper threshold on rate") +}) + +# +# Done +# + +# context("ds.quantileMean::perf::shutdown") +disconnect.studies.dataset.cnsim() +# context("ds.quantileMean::perf::done") diff --git a/tests/testthat/test-perf-ds.skewness.R b/tests/testthat/test-perf-ds.skewness.R new file mode 100644 index 
000000000..724ec929b --- /dev/null +++ b/tests/testthat/test-perf-ds.skewness.R @@ -0,0 +1,59 @@ +#------------------------------------------------------------------------------- +# Copyright (c) 2024-2025 Arjuna Technologies, Newcastle upon Tyne. All rights reserved. +# +# This program and the accompanying materials +# are made available under the terms of the GNU Public License v3.0. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- + +# +# Set up +# + +# context("ds.skewness::perf::setup") +connect.studies.dataset.cnsim(list("LAB_TSC", "LAB_TRIG", "LAB_HDL", "LAB_GLUC_ADJUSTED", "PM_BMI_CONTINUOUS")) + +# +# Tests +# + +# context("ds.skewness::perf::combine:0") +test_that("combine - performance", { + .durationSec <- 30 # seconds + .count <- 0 + .start.time <- Sys.time() + .current.time <- .start.time + + while (difftime(.current.time, .start.time, units = "secs")[[1]] < .durationSec) { + ds.skewness("D$LAB_TSC") + + .count <- .count + 1 + .current.time <- Sys.time() + } + + .current.rate <- .count / (difftime(.current.time, .start.time, units = "secs")[[1]]) + .reference.rate <- perf.reference.rate("ds.skewness::perf::combine:0") + if (any(length(.reference.rate) == 0) || any(is.null(.reference.rate))) { + print(paste("ds.skewness::perf::combine:0 ", .current.rate, 0.5, 2.0)) + perf.reference.save("ds.skewness::perf::combine:0", .current.rate, 0.5, 2.0) + } else { + print(paste("ds.skewness::perf::combine:0 ", format(.current.rate, digits = 8), ", ", format(100.0 * .current.rate / .reference.rate, digits = 4), "%", sep = '')) + } + + .reference.rate <- perf.reference.rate("ds.skewness::perf::combine:0") + .reference.tolerance.lower <- perf.reference.tolerance.lower("ds.skewness::perf::combine:0") + .reference.tolerance.upper <- perf.reference.tolerance.upper("ds.skewness::perf::combine:0") + + expect_gt(.current.rate, 
.reference.rate * .reference.tolerance.lower, label = "Observed rate", expected.label = "lower threshold on rate") + expect_lt(.current.rate, .reference.rate * .reference.tolerance.upper, label = "Observed rate", expected.label = "upper threshold on rate") +}) + +# +# Done +# + +# context("ds.skewness::perf::shutdown") +disconnect.studies.dataset.cnsim() +# context("ds.skewness::perf::done") diff --git a/tests/testthat/test-perf-ds.summary.R b/tests/testthat/test-perf-ds.summary.R new file mode 100644 index 000000000..65b65841e --- /dev/null +++ b/tests/testthat/test-perf-ds.summary.R @@ -0,0 +1,50 @@ + +# +# Set up +# + +# context("ds.summary::perf::setup") +connect.studies.dataset.cnsim(list('LAB_TSC', 'LAB_TRIG', 'LAB_HDL', 'LAB_GLUC_ADJUSTED', 'PM_BMI_CONTINUOUS', 'DIS_CVA', 'MEDI_LPD', 'DIS_DIAB', 'DIS_AMI', 'GENDER', 'PM_BMI_CATEGORICAL')) + +# +# Tests +# + +# context("ds.summary::perf::0") +test_that("performance", { + .durationSec <- 30 # seconds + .count <- 0 + .start.time <- Sys.time() + .current.time <- .start.time + + while (difftime(.current.time, .start.time, units = "secs")[[1]] < .durationSec) { + ds.summary("D$LAB_TSC") + + .count <- .count + 1 + .current.time <- Sys.time() + } + + .current.rate <- .count / (difftime(.current.time, .start.time, units = "secs")[[1]]) + .reference.rate <- perf.reference.rate("ds.summary::perf::0") + if (any(length(.reference.rate) == 0) || any(is.null(.reference.rate))) { + print(paste("ds.summary::perf::0 ", .current.rate, 0.5, 2.0)) + perf.reference.save("ds.summary::perf::0", .current.rate, 0.5, 2.0) + } else { + print(paste("ds.summary::perf::0 ", format(.current.rate, digits = 8), ", ", format(100.0 * .current.rate / .reference.rate, digits = 4), "%", sep = '')) + } + + .reference.rate <- perf.reference.rate("ds.summary::perf::0") + .reference.tolerance.lower <- perf.reference.tolerance.lower("ds.summary::perf::0") + .reference.tolerance.upper <- perf.reference.tolerance.upper("ds.summary::perf::0") + + 
expect_gt(.current.rate, .reference.rate * .reference.tolerance.lower, label = "Observed rate", expected.label = "lower threshold on rate") + expect_lt(.current.rate, .reference.rate * .reference.tolerance.upper, label = "Observed rate", expected.label = "upper threshold on rate") +}) + +# +# Done +# + +# context("ds.summary::perf::shutdown") +disconnect.studies.dataset.cnsim() +# context("ds.summary::perf::done") diff --git a/tests/testthat/test-perf-ds.var.R b/tests/testthat/test-perf-ds.var.R new file mode 100644 index 000000000..7b5537f6d --- /dev/null +++ b/tests/testthat/test-perf-ds.var.R @@ -0,0 +1,59 @@ +#------------------------------------------------------------------------------- +# Copyright (c) 2024-2025 Arjuna Technologies, Newcastle upon Tyne. All rights reserved. +# +# This program and the accompanying materials +# are made available under the terms of the GNU Public License v3.0. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+#------------------------------------------------------------------------------- + +# +# Set up +# + +# context("ds.var::perf::setup") +connect.studies.dataset.cnsim(list("LAB_TSC")) + +# +# Tests +# + +# context("ds.var::perf::combine:0") +test_that("combine - performance", { + .durationSec <- 30 # seconds + .count <- 0 + .start.time <- Sys.time() + .current.time <- .start.time + + while (difftime(.current.time, .start.time, units = "secs")[[1]] < .durationSec) { + ds.var("D$LAB_TSC") + + .count <- .count + 1 + .current.time <- Sys.time() + } + + .current.rate <- .count / (difftime(.current.time, .start.time, units = "secs")[[1]]) + .reference.rate <- perf.reference.rate("ds.var::perf::combine:0") + if (any(length(.reference.rate) == 0) || any(is.null(.reference.rate))) { + print(paste("ds.var::perf::combine:0 ", .current.rate, 0.5, 2.0)) + perf.reference.save("ds.var::perf::combine:0", .current.rate, 0.5, 2.0) + } else { + print(paste("ds.var::perf::combine:0 ", format(.current.rate, digits = 8), ", ", format(100.0 * .current.rate / .reference.rate, digits = 4), "%", sep = '')) + } + + .reference.rate <- perf.reference.rate("ds.var::perf::combine:0") + .reference.tolerance.lower <- perf.reference.tolerance.lower("ds.var::perf::combine:0") + .reference.tolerance.upper <- perf.reference.tolerance.upper("ds.var::perf::combine:0") + + expect_gt(.current.rate, .reference.rate * .reference.tolerance.lower, label = "Observed rate", expected.label = "lower threshold on rate") + expect_lt(.current.rate, .reference.rate * .reference.tolerance.upper, label = "Observed rate", expected.label = "upper threshold on rate") +}) + +# +# Done +# + +# context("ds.var::perf::shutdown") +disconnect.studies.dataset.cnsim() +# context("ds.var::perf::done") diff --git a/tests/testthat/test-smk-ds.kurtosis.R b/tests/testthat/test-smk-ds.kurtosis.R index 297f87e46..97695f91d 100644 --- a/tests/testthat/test-smk-ds.kurtosis.R +++ b/tests/testthat/test-smk-ds.kurtosis.R @@ -33,19 +33,15 @@ 
test_that("simple kurtosis, method 1, split, on LAB_TSC", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.171744367103707"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("0.574419687874713"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("0.674414218958169"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1807") - expect_equal(kurtosis.res$Nvalid[2], "2539") - expect_equal(kurtosis.res$Nvalid[3], "3479") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1807) + expect_equal(kurtosis.res$Nvalid[2], 2539) + expect_equal(kurtosis.res$Nvalid[3], 3479) }) test_that("simple kurtosis, method 1, split, on LAB_TRIG", { @@ -53,19 +49,15 @@ test_that("simple kurtosis, method 1, split, on LAB_TRIG", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("1.76749454806425"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("1.04629523942343"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("1.02128294035219"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1801") - expect_equal(kurtosis.res$Nvalid[2], "2526") - expect_equal(kurtosis.res$Nvalid[3], "3473") - expect_length(kurtosis.res$ValidityMessage, 
3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1801) + expect_equal(kurtosis.res$Nvalid[2], 2526) + expect_equal(kurtosis.res$Nvalid[3], 3473) }) test_that("simple kurtosis, method 1, split, on LAB_HDL", { @@ -73,19 +65,15 @@ test_that("simple kurtosis, method 1, split, on LAB_HDL", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.290702025953629"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("0.494573359163136"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("0.549162800210091"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1803") - expect_equal(kurtosis.res$Nvalid[2], "2533") - expect_equal(kurtosis.res$Nvalid[3], "3473") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1803) + expect_equal(kurtosis.res$Nvalid[2], 2533) + expect_equal(kurtosis.res$Nvalid[3], 3473) }) test_that("simple kurtosis, method 1, split, on LAB_GLUC_ADJUSTED", { @@ -93,19 +81,15 @@ test_that("simple kurtosis, method 1, split, on LAB_GLUC_ADJUSTED", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("4.32162963839166"), tolerance = ds.test_env$tolerance) 
expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("4.38468288434594"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("3.72493030465797"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1822") - expect_equal(kurtosis.res$Nvalid[2], "2583") - expect_equal(kurtosis.res$Nvalid[3], "3519") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1822) + expect_equal(kurtosis.res$Nvalid[2], 2583) + expect_equal(kurtosis.res$Nvalid[3], 3519) }) test_that("simple kurtosis, method 1, split, on PM_BMI_CONTINUOUS", { @@ -113,19 +97,15 @@ test_that("simple kurtosis, method 1, split, on PM_BMI_CONTINUOUS", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.671416534303503"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("0.251325359087079"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("0.187132199973004"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "2066") - expect_equal(kurtosis.res$Nvalid[2], "2938") - expect_equal(kurtosis.res$Nvalid[3], "3923") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 2066) + expect_equal(kurtosis.res$Nvalid[2], 2938) + 
expect_equal(kurtosis.res$Nvalid[3], 3923) }) # context("ds.kurtosis::smk::method 1::combine") @@ -134,13 +114,11 @@ test_that("simple kurtosis, combine, on LAB_TSC", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.515598613390042"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7825") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7825) }) test_that("simple kurtosis, method 1, combine, on LAB_TRIG", { @@ -148,13 +126,11 @@ test_that("simple kurtosis, method 1, combine, on LAB_TRIG", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("1.21679529801477"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7800") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7800) }) test_that("simple kurtosis, method 1, combine, on LAB_HDL", { @@ -162,13 +138,11 @@ test_that("simple kurtosis, method 1, combine, on LAB_HDL", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.472661436116919"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7809") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + 
expect_equal(kurtosis.res$Nvalid[1], 7809) }) test_that("simple kurtosis, method 1, combine, on LAB_GLUC_ADJUSTED", { @@ -176,13 +150,11 @@ test_that("simple kurtosis, method 1, combine, on LAB_GLUC_ADJUSTED", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("4.08935226175995"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7924") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7924) }) test_that("simple kurtosis, method 1, combine, on PM_BMI_CONTINUOUS", { @@ -190,13 +162,11 @@ test_that("simple kurtosis, method 1, combine, on PM_BMI_CONTINUOUS", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.335021586102938"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "8927") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 8927) }) # context("ds.kurtosis::smk::method 1::both") @@ -206,26 +176,20 @@ test_that("simple kurtosis, both, on LAB_TSC", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("0.171744367103707"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("0.574419687874713"), 
tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("0.674414218958169"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1807") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2539") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3479") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1807) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2539) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3479) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("0.515598613390042"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7825") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7825) }) test_that("simple kurtosis, method 1, both, on LAB_TRIG", { @@ -234,26 +198,20 @@ test_that("simple kurtosis, method 1, both, on LAB_TRIG", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), 
as.double("1.76749454806425"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("1.04629523942343"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("1.02128294035219"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1801") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2526") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3473") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1801) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2526) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3473) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("1.21679529801477"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7800") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7800) }) test_that("simple kurtosis, method 1, both, on LAB_HDL", { @@ -262,26 +220,20 @@ test_that("simple kurtosis, method 1, both, on LAB_HDL", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + 
expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("0.290702025953629"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("0.494573359163136"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("0.549162800210091"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1803") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2533") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3473") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1803) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2533) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3473) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("0.472661436116919"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7809") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7809) }) test_that("simple kurtosis, method 1, both, on LAB_GLUC_ADJUSTED", { @@ -290,26 +242,20 @@ 
test_that("simple kurtosis, method 1, both, on LAB_GLUC_ADJUSTED", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("4.32162963839166"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("4.38468288434594"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("3.72493030465797"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1822") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2583") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3519") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1822) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2583) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3519) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("4.08935226175995"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7924") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], 
"VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7924) }) test_that("simple kurtosis, method 1, both, on PM_BMI_CONTINUOUS", { @@ -318,26 +264,20 @@ test_that("simple kurtosis, method 1, both, on PM_BMI_CONTINUOUS", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("0.671416534303503"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("0.251325359087079"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("0.187132199973004"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "2066") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2938") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3923") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 2066) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2938) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3923) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("0.335021586102938"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - 
expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "8927") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 8927) }) # Method 2 @@ -348,19 +288,15 @@ test_that("simple kurtosis, method 2, split, on LAB_TSC", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.175548320198465"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("0.577919349817977"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("0.677111105785997"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1807") - expect_equal(kurtosis.res$Nvalid[2], "2539") - expect_equal(kurtosis.res$Nvalid[3], "3479") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1807) + expect_equal(kurtosis.res$Nvalid[2], 2539) + expect_equal(kurtosis.res$Nvalid[3], 3479) }) test_that("simple kurtosis, method 2, split, on LAB_TRIG", { @@ -368,19 +304,15 @@ test_that("simple kurtosis, method 2, split, on LAB_TRIG", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("1.7757502518397"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("1.0507483099711"), tolerance = ds.test_env$tolerance) 
expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("1.02448438876989"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1801") - expect_equal(kurtosis.res$Nvalid[2], "2526") - expect_equal(kurtosis.res$Nvalid[3], "3473") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1801) + expect_equal(kurtosis.res$Nvalid[2], 2526) + expect_equal(kurtosis.res$Nvalid[3], 3473) }) test_that("simple kurtosis, method 2, split, on LAB_HDL", { @@ -388,19 +320,15 @@ test_that("simple kurtosis, method 2, split, on LAB_HDL", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.294844984757316"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("0.497923487075878"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("0.551683842646188"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1803") - expect_equal(kurtosis.res$Nvalid[2], "2533") - expect_equal(kurtosis.res$Nvalid[3], "3473") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1803) + expect_equal(kurtosis.res$Nvalid[2], 2533) + expect_equal(kurtosis.res$Nvalid[3], 3473) }) test_that("simple kurtosis, method 2, split, on LAB_GLUC_ADJUSTED", { @@ -408,19 +336,15 @@ 
test_that("simple kurtosis, method 2, split, on LAB_GLUC_ADJUSTED", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("4.33681301852393"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("4.39550878961539"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("3.73193529182726"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1822") - expect_equal(kurtosis.res$Nvalid[2], "2583") - expect_equal(kurtosis.res$Nvalid[3], "3519") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1822) + expect_equal(kurtosis.res$Nvalid[2], 2583) + expect_equal(kurtosis.res$Nvalid[3], 3519) }) test_that("simple kurtosis, method 2, split, on PM_BMI_CONTINUOUS", { @@ -428,19 +352,15 @@ test_that("simple kurtosis, method 2, split, on PM_BMI_CONTINUOUS", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.675954084252309"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("0.253798588114679"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("0.188901928135923"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "2066") - expect_equal(kurtosis.res$Nvalid[2], "2938") - expect_equal(kurtosis.res$Nvalid[3], "3923") - 
expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 2066) + expect_equal(kurtosis.res$Nvalid[2], 2938) + expect_equal(kurtosis.res$Nvalid[3], 3923) }) # context("ds.kurtosis::smk::method 2::combine") @@ -449,13 +369,11 @@ test_that("simple kurtosis, combine, on LAB_TSC", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.516695386307489"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7825") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7825) }) test_that("simple kurtosis, method 2, combine, on LAB_TRIG", { @@ -463,13 +381,11 @@ test_that("simple kurtosis, method 2, combine, on LAB_TRIG", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("1.21834528057683"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7800") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7800) }) test_that("simple kurtosis, method 2, combine, on LAB_HDL", { @@ -477,13 +393,11 @@ test_that("simple kurtosis, method 2, combine, on LAB_HDL", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) 
expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.473732952559591"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7809") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7809) }) test_that("simple kurtosis, method 2, combine, on LAB_GLUC_ADJUSTED", { @@ -491,13 +405,11 @@ test_that("simple kurtosis, method 2, combine, on LAB_GLUC_ADJUSTED", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("4.09269136885493"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7924") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7924) }) test_that("simple kurtosis, method 2, combine, on PM_BMI_CONTINUOUS", { @@ -505,13 +417,11 @@ test_that("simple kurtosis, method 2, combine, on PM_BMI_CONTINUOUS", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.335881726489728"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "8927") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 8927) }) # context("ds.kurtosis::smk::method 2::both") @@ -521,26 +431,20 @@ test_that("simple kurtosis, both, on LAB_TSC", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + 
expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("0.175548320198465"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("0.577919349817977"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("0.677111105785997"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1807") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2539") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3479") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1807) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2539) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3479) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("0.516695386307489"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7825") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7825) }) test_that("simple kurtosis, method 2, both, on LAB_TRIG", { @@ -549,26 +453,20 @@ test_that("simple kurtosis, 
method 2, both, on LAB_TRIG", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("1.7757502518397"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("1.0507483099711"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("1.02448438876989"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1801") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2526") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3473") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1801) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2526) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3473) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("1.21834528057683"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7800") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + 
expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7800) }) test_that("simple kurtosis, method 2, both, on LAB_HDL", { @@ -577,26 +475,20 @@ test_that("simple kurtosis, method 2, both, on LAB_HDL", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("0.294844984757316"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("0.497923487075878"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("0.551683842646188"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1803") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2533") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3473") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1803) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2533) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3473) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("0.473732952559591"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - 
expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7809") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7809) }) test_that("simple kurtosis, method 2, both, on LAB_GLUC_ADJUSTED", { @@ -605,26 +497,20 @@ test_that("simple kurtosis, method 2, both, on LAB_GLUC_ADJUSTED", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("4.33681301852393"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("4.39550878961539"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("3.73193529182726"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1822") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2583") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3519") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1822) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2583) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3519) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) 
expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("4.09269136885493"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7924") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7924) }) test_that("simple kurtosis, method 2, both, on PM_BMI_CONTINUOUS", { @@ -633,26 +519,20 @@ test_that("simple kurtosis, method 2, both, on PM_BMI_CONTINUOUS", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("0.675954084252309"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("0.253798588114679"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("0.188901928135923"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "2066") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2938") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3923") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 2066) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 
2938) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3923) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("0.335881726489728"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "8927") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 8927) }) # Method 3 @@ -663,19 +543,15 @@ test_that("simple kurtosis, method 3, split, on LAB_TSC", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.16823483003675"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("0.571604630147399"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("0.672302183238634"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1807") - expect_equal(kurtosis.res$Nvalid[2], "2539") - expect_equal(kurtosis.res$Nvalid[3], "3479") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1807) + expect_equal(kurtosis.res$Nvalid[2], 2539) + expect_equal(kurtosis.res$Nvalid[3], 3479) }) test_that("simple kurtosis, method 3, split, on LAB_TRIG", { @@ -683,19 +559,15 @@ test_that("simple kurtosis, method 3, split, on LAB_TRIG", { expect_equal(class(kurtosis.res), "data.frame") - 
expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("1.76220174297892"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("1.04309215604256"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("1.01896753349628"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1801") - expect_equal(kurtosis.res$Nvalid[2], "2526") - expect_equal(kurtosis.res$Nvalid[3], "3473") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1801) + expect_equal(kurtosis.res$Nvalid[2], 2526) + expect_equal(kurtosis.res$Nvalid[3], 3473) }) test_that("simple kurtosis, method 3, split, on LAB_HDL", { @@ -703,19 +575,15 @@ test_that("simple kurtosis, method 3, split, on LAB_HDL", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.287052786394011"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("0.491814667058934"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("0.54711923451172"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1803") - expect_equal(kurtosis.res$Nvalid[2], "2533") - expect_equal(kurtosis.res$Nvalid[3], "3473") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - 
expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1803) + expect_equal(kurtosis.res$Nvalid[2], 2533) + expect_equal(kurtosis.res$Nvalid[3], 3473) }) test_that("simple kurtosis, method 3, split, on LAB_GLUC_ADJUSTED", { @@ -723,19 +591,15 @@ test_that("simple kurtosis, method 3, split, on LAB_GLUC_ADJUSTED", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("4.31359492883116"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), as.double("4.37896607954034"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("3.72110877877708"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "1822") - expect_equal(kurtosis.res$Nvalid[2], "2583") - expect_equal(kurtosis.res$Nvalid[3], "3519") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 1822) + expect_equal(kurtosis.res$Nvalid[2], 2583) + expect_equal(kurtosis.res$Nvalid[3], 3519) }) test_that("simple kurtosis, method 3, split, on PM_BMI_CONTINUOUS", { @@ -743,19 +607,15 @@ test_that("simple kurtosis, method 3, split, on PM_BMI_CONTINUOUS", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.667863264214689"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[2]), 
as.double("0.249112444154344"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis[3]), as.double("0.185507562711274"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 3) - expect_equal(kurtosis.res$Nvalid[1], "2066") - expect_equal(kurtosis.res$Nvalid[2], "2938") - expect_equal(kurtosis.res$Nvalid[3], "3923") - expect_length(kurtosis.res$ValidityMessage, 3) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 2066) + expect_equal(kurtosis.res$Nvalid[2], 2938) + expect_equal(kurtosis.res$Nvalid[3], 3923) }) # context("ds.kurtosis::smk::method 3::combine") @@ -764,13 +624,11 @@ test_that("simple kurtosis, combine, on LAB_TSC", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.514700115249594"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7825") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7825) }) test_that("simple kurtosis, method 3, combine, on LAB_TRIG", { @@ -778,13 +636,11 @@ test_that("simple kurtosis, method 3, combine, on LAB_TRIG", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("1.21571413776076"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7800") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID 
ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7800) }) test_that("simple kurtosis, method 3, combine, on LAB_HDL", { @@ -792,13 +648,11 @@ test_that("simple kurtosis, method 3, combine, on LAB_HDL", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.47177209328527"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7809") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7809) }) test_that("simple kurtosis, method 3, combine, on LAB_GLUC_ADJUSTED", { @@ -806,13 +660,11 @@ test_that("simple kurtosis, method 3, combine, on LAB_GLUC_ADJUSTED", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("4.0875630379014"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "7924") - expect_length(kurtosis.res$ValidityMessage, 1) - expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 7924) }) test_that("simple kurtosis, method 3, combine, on PM_BMI_CONTINUOUS", { @@ -820,13 +672,11 @@ test_that("simple kurtosis, method 3, combine, on PM_BMI_CONTINUOUS", { expect_equal(class(kurtosis.res), "data.frame") - expect_length(kurtosis.res, 3) + expect_length(kurtosis.res, 2) expect_length(kurtosis.res$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Kurtosis[1]), as.double("0.334274451613855"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Nvalid, 1) - expect_equal(kurtosis.res$Nvalid[1], "8927") - expect_length(kurtosis.res$ValidityMessage, 1) - 
expect_equal(kurtosis.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Nvalid[1], 8927) }) # context("ds.kurtosis::smk::method 3::both") @@ -836,26 +686,20 @@ test_that("simple kurtosis, both, on LAB_TSC", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("0.16823483003675"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("0.571604630147399"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("0.672302183238634"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1807") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2539") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3479") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1807) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2539) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3479) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("0.514700115249594"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - 
expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7825") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7825) }) test_that("simple kurtosis, method 3, both, on LAB_TRIG", { @@ -864,26 +708,20 @@ test_that("simple kurtosis, method 3, both, on LAB_TRIG", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("1.76220174297892"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("1.04309215604256"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("1.01896753349628"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1801") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2526") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3473") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1801) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2526) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3473) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) 
expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("1.21571413776076"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7800") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7800) }) test_that("simple kurtosis, method 3, both, on LAB_HDL", { @@ -892,26 +730,20 @@ test_that("simple kurtosis, method 3, both, on LAB_HDL", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("0.287052786394011"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("0.491814667058934"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("0.54711923451172"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1803") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2533") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3473") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1803) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2533) + 
expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3473) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("0.47177209328527"), tolerance = ds.test_env$low_tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7809") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7809) }) test_that("simple kurtosis, method 3, both, on LAB_GLUC_ADJUSTED", { @@ -920,26 +752,20 @@ test_that("simple kurtosis, method 3, both, on LAB_GLUC_ADJUSTED", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("4.31359492883116"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("4.37896607954034"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("3.72110877877708"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "1822") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2583") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3519") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - 
expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 1822) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2583) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3519) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("4.0875630379014"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "7924") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 7924) }) test_that("simple kurtosis, method 3, both, on PM_BMI_CONTINUOUS", { @@ -948,26 +774,20 @@ test_that("simple kurtosis, method 3, both, on PM_BMI_CONTINUOUS", { expect_equal(class(kurtosis.res), "list") expect_length(kurtosis.res, 2) - expect_length(kurtosis.res$Kurtosis.by.Study, 3) + expect_length(kurtosis.res$Kurtosis.by.Study, 2) expect_length(kurtosis.res$Kurtosis.by.Study$Kurtosis, 3) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[1]), as.double("0.667863264214689"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[2]), as.double("0.249112444154344"), tolerance = ds.test_env$tolerance) expect_equal(as.double(kurtosis.res$Kurtosis.by.Study$Kurtosis[3]), as.double("0.185507562711274"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Kurtosis.by.Study$Nvalid, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], "2066") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], "2938") - expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], "3923") - expect_length(kurtosis.res$Kurtosis.by.Study$ValidityMessage, 3) - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[1], "VALID ANALYSIS") 
- expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(kurtosis.res$Kurtosis.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(kurtosis.res$Global.Kurtosis, 3) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[1], 2066) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[2], 2938) + expect_equal(kurtosis.res$Kurtosis.by.Study$Nvalid[3], 3923) + expect_length(kurtosis.res$Global.Kurtosis, 2) expect_length(kurtosis.res$Global.Kurtosis$Kurtosis, 1) expect_equal(as.double(kurtosis.res$Global.Kurtosis$Kurtosis[1]), as.double("0.334274451613855"), tolerance = ds.test_env$tolerance) expect_length(kurtosis.res$Global.Kurtosis$Nvalid, 1) - expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], "8927") - expect_length(kurtosis.res$Global.Kurtosis$ValidityMessage, 1) - expect_equal(kurtosis.res$Global.Kurtosis$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(kurtosis.res$Global.Kurtosis$Nvalid[1], 8927) }) # diff --git a/tests/testthat/test-smk-ds.mean.R b/tests/testthat/test-smk-ds.mean.R index 5b2d7360e..d40025d5f 100644 --- a/tests/testthat/test-smk-ds.mean.R +++ b/tests/testthat/test-smk-ds.mean.R @@ -34,17 +34,13 @@ test_that("mean values [combine]", { stat.mean <- ds.mean(x='D$LAB_TSC',type='combine') - expect_length(stat.mean, 3) + expect_length(stat.mean, 2) expect_length(stat.mean$Global.Mean, 4) expect_equal(as.numeric(stat.mean$Global.Mean[1]), 5.85192485623003, tolerance = .000000000000001) expect_equal(as.integer(stat.mean$Global.Mean[2]), 1554) expect_equal(as.integer(stat.mean$Global.Mean[3]), 7825) expect_equal(as.integer(stat.mean$Global.Mean[4]), 9379) expect_equal(stat.mean$Nstudies, 3) - expect_length(stat.mean$ValidityMessage, 3) - expect_equal(stat.mean$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[3], "VALID ANALYSIS") ls.res <- ds.ls() expect_false("mean.all.studies" %in% 
ls.res$sim1$objects.found) @@ -70,7 +66,7 @@ test_that("mean values [split]", { stat.mean <- ds.mean(x='D$LAB_TSC', type='split') - expect_length(stat.mean, 3) + expect_length(stat.mean, 2) expect_length(stat.mean$Mean.by.Study, 12) expect_equal(as.numeric(stat.mean$Mean.by.Study[1]), 5.87211344770338, tolerance = .000000000000001) expect_equal(as.numeric(stat.mean$Mean.by.Study[2]), 5.84526388341867, tolerance = .000000000000001) @@ -85,10 +81,6 @@ test_that("mean values [split]", { expect_equal(as.integer(stat.mean$Mean.by.Study[11]), 3088) expect_equal(as.integer(stat.mean$Mean.by.Study[12]), 4128) expect_equal(stat.mean$Nstudies, 3) - expect_length(stat.mean$ValidityMessage, 3) - expect_equal(stat.mean$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[3], "VALID ANALYSIS") ls.res <- ds.ls() expect_false("mean.all.studies" %in% ls.res$sim1$objects.found) @@ -114,7 +106,7 @@ test_that("mean values [both]", { stat.mean <- ds.mean(x='D$LAB_TSC', type='both') - expect_length(stat.mean, 4) + expect_length(stat.mean, 3) expect_length(stat.mean$Mean.by.Study, 12) expect_equal(as.numeric(stat.mean$Mean.by.Study[1]), 5.87211344770338, tolerance = .000000000000001) expect_equal(as.numeric(stat.mean$Mean.by.Study[2]), 5.84526388341867, tolerance = .000000000000001) @@ -134,10 +126,6 @@ test_that("mean values [both]", { expect_equal(as.integer(stat.mean$Global.Mean[3]), 7825) expect_equal(as.integer(stat.mean$Global.Mean[4]), 9379) expect_equal(stat.mean$Nstudies, 3) - expect_length(stat.mean$ValidityMessage, 3) - expect_equal(stat.mean$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[3], "VALID ANALYSIS") ls.res <- ds.ls() expect_false("mean.all.studies" %in% ls.res$sim1$objects.found) @@ -163,17 +151,13 @@ test_that("mean values [combine]", { stat.mean <- ds.mean(x='D$LAB_TSC', 
type='combine', save.mean.Nvalid=TRUE) - expect_length(stat.mean, 3) + expect_length(stat.mean, 2) expect_length(stat.mean$Global.Mean, 4) expect_equal(as.numeric(stat.mean$Global.Mean[1]), 5.85192485623003, tolerance = .000000000000001) expect_equal(as.integer(stat.mean$Global.Mean[2]), 1554) expect_equal(as.integer(stat.mean$Global.Mean[3]), 7825) expect_equal(as.integer(stat.mean$Global.Mean[4]), 9379) expect_equal(stat.mean$Nstudies, 3) - expect_length(stat.mean$ValidityMessage, 3) - expect_equal(stat.mean$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[3], "VALID ANALYSIS") ls.res <- ds.ls() expect_true("mean.all.studies" %in% ls.res$sim1$objects.found) @@ -199,7 +183,7 @@ test_that("mean values [split]", { stat.mean <- ds.mean(x='D$LAB_TSC', type='split', save.mean.Nvalid=TRUE) - expect_length(stat.mean, 3) + expect_length(stat.mean, 2) expect_length(stat.mean$Mean.by.Study, 12) expect_equal(as.numeric(stat.mean$Mean.by.Study[1]), 5.87211344770338, tolerance = .000000000000001) expect_equal(as.numeric(stat.mean$Mean.by.Study[2]), 5.84526388341867, tolerance = .000000000000001) @@ -214,10 +198,6 @@ test_that("mean values [split]", { expect_equal(as.integer(stat.mean$Mean.by.Study[11]), 3088) expect_equal(as.integer(stat.mean$Mean.by.Study[12]), 4128) expect_equal(stat.mean$Nstudies, 3) - expect_length(stat.mean$ValidityMessage, 3) - expect_equal(stat.mean$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[3], "VALID ANALYSIS") ls.res <- ds.ls() expect_true("mean.all.studies" %in% ls.res$sim1$objects.found) @@ -238,7 +218,7 @@ test_that("mean values [split]", { test_that("mean values [both]", { stat.mean <- ds.mean(x='D$LAB_TSC', type='both', save.mean.Nvalid=TRUE) - expect_length(stat.mean, 4) + expect_length(stat.mean, 3) expect_length(stat.mean$Mean.by.Study, 12) 
expect_equal(as.numeric(stat.mean$Mean.by.Study[1]), 5.87211344770338, tolerance = .000000000000001) expect_equal(as.numeric(stat.mean$Mean.by.Study[2]), 5.84526388341867, tolerance = .000000000000001) @@ -258,10 +238,6 @@ test_that("mean values [both]", { expect_equal(as.integer(stat.mean$Global.Mean[3]), 7825) expect_equal(as.integer(stat.mean$Global.Mean[4]), 9379) expect_equal(stat.mean$Nstudies, 3) - expect_length(stat.mean$ValidityMessage, 3) - expect_equal(stat.mean$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(stat.mean$ValidityMessage[3], "VALID ANALYSIS") ls.res <- ds.ls() expect_true("mean.all.studies" %in% ls.res$sim1$objects.found) diff --git a/tests/testthat/test-smk-ds.skewness.R b/tests/testthat/test-smk-ds.skewness.R index d6f7eaec4..416bc79df 100644 --- a/tests/testthat/test-smk-ds.skewness.R +++ b/tests/testthat/test-smk-ds.skewness.R @@ -33,19 +33,15 @@ test_that("simple skewness, method 1, split, on LAB_TSC", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.188034458112999"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("0.145513907236103"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.352576848495665"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1807") - expect_equal(skewness.res$Nvalid[2], "2539") - expect_equal(skewness.res$Nvalid[3], "3479") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1807) + 
expect_equal(skewness.res$Nvalid[2], 2539) + expect_equal(skewness.res$Nvalid[3], 3479) }) test_that("simple skewness, method 1, split, on LAB_TRIG", { @@ -53,19 +49,15 @@ test_that("simple skewness, method 1, split, on LAB_TRIG", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.32820558565826"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("0.220887697425414"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.105433229814455"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1801") - expect_equal(skewness.res$Nvalid[2], "2526") - expect_equal(skewness.res$Nvalid[3], "3473") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1801) + expect_equal(skewness.res$Nvalid[2], 2526) + expect_equal(skewness.res$Nvalid[3], 3473) }) test_that("simple skewness, method 1, split, on LAB_HDL", { @@ -73,19 +65,15 @@ test_that("simple skewness, method 1, split, on LAB_HDL", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("-0.257771315950979"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("-0.206165733408786"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("-0.322356008145192"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Nvalid, 3) 
- expect_equal(skewness.res$Nvalid[1], "1803") - expect_equal(skewness.res$Nvalid[2], "2533") - expect_equal(skewness.res$Nvalid[3], "3473") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1803) + expect_equal(skewness.res$Nvalid[2], 2533) + expect_equal(skewness.res$Nvalid[3], 3473) }) test_that("simple skewness, method 1, split, on LAB_GLUC_ADJUSTED", { @@ -93,19 +81,15 @@ test_that("simple skewness, method 1, split, on LAB_GLUC_ADJUSTED", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("1.09805173411495"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("1.11035058496889"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.979387780725702"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1822") - expect_equal(skewness.res$Nvalid[2], "2583") - expect_equal(skewness.res$Nvalid[3], "3519") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1822) + expect_equal(skewness.res$Nvalid[2], 2583) + expect_equal(skewness.res$Nvalid[3], 3519) }) test_that("simple skewness, method 1, split, on PM_BMI_CONTINUOUS", { @@ -113,19 +97,15 @@ test_that("simple skewness, method 1, split, on PM_BMI_CONTINUOUS", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + 
expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.211695333017453"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("0.0914245797002757"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.031209342768676"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "2066") - expect_equal(skewness.res$Nvalid[2], "2938") - expect_equal(skewness.res$Nvalid[3], "3923") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 2066) + expect_equal(skewness.res$Nvalid[2], 2938) + expect_equal(skewness.res$Nvalid[3], 3923) }) # context("ds.skewness::smk::method 1::combine") @@ -134,13 +114,11 @@ test_that("simple skewness, combine, on LAB_TSC", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.246567206666354"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7825") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7825) }) test_that("simple skewness, method 1, combine, on LAB_TRIG", { @@ -148,13 +126,11 @@ test_that("simple skewness, method 1, combine, on LAB_TRIG", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), 
as.double("0.197724380225568"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7800") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7800) }) test_that("simple skewness, method 1, combine, on LAB_HDL", { @@ -162,13 +138,11 @@ test_that("simple skewness, method 1, combine, on LAB_HDL", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("-0.269960357921624"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7809") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7809) }) test_that("simple skewness, method 1, combine, on LAB_GLUC_ADJUSTED", { @@ -176,13 +150,11 @@ test_that("simple skewness, method 1, combine, on LAB_GLUC_ADJUSTED", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("1.05104981283397"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7924") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7924) }) test_that("simple skewness, method 1, combine, on PM_BMI_CONTINUOUS", { @@ -190,13 +162,11 @@ test_that("simple skewness, method 1, combine, on PM_BMI_CONTINUOUS", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) 
expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.0960670927351586"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "8927") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 8927) }) # context("ds.skewness::smk::method 1::both") @@ -206,26 +176,20 @@ test_that("simple skewness, both, on LAB_TSC", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("0.188034458112999"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("0.145513907236103"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.352576848495665"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1807") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2539") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3479") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1807) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2539) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3479) + expect_length(skewness.res$Global.Skewness, 2) 
expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("0.246567206666354"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7825") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7825) }) test_that("simple skewness, method 1, both, on LAB_TRIG", { @@ -234,26 +198,20 @@ test_that("simple skewness, method 1, both, on LAB_TRIG", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("0.32820558565826"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("0.220887697425414"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.105433229814455"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1801") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2526") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3473") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1801) + 
expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2526) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3473) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("0.197724380225568"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7800") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7800) }) test_that("simple skewness, method 1, both, on LAB_HDL", { @@ -262,26 +220,20 @@ test_that("simple skewness, method 1, both, on LAB_HDL", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("-0.257771315950979"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("-0.206165733408786"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("-0.322356008145192"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1803") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2533") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3473") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - 
expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1803) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2533) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3473) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("-0.269960357921624"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7809") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7809) }) test_that("simple skewness, method 1, both, on LAB_GLUC_ADJUSTED", { @@ -290,26 +242,20 @@ test_that("simple skewness, method 1, both, on LAB_GLUC_ADJUSTED", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("1.09805173411495"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("1.11035058496889"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.979387780725702"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1822") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2583") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3519") - 
expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1822) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2583) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3519) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("1.05104981283397"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7924") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7924) }) test_that("simple skewness, method 1, both, on PM_BMI_CONTINUOUS", { @@ -318,26 +264,20 @@ test_that("simple skewness, method 1, both, on PM_BMI_CONTINUOUS", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("0.211695333017453"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("0.0914245797002757"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.031209342768676"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - 
expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "2066") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2938") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3923") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 2066) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2938) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3923) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("0.0960670927351586"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "8927") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 8927) }) # Method 2 @@ -348,19 +288,15 @@ test_that("simple skewness, method 2, split, on LAB_TSC", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.188190712227239"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("0.145599939437722"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.352728948755338"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1807") 
- expect_equal(skewness.res$Nvalid[2], "2539") - expect_equal(skewness.res$Nvalid[3], "3479") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1807) + expect_equal(skewness.res$Nvalid[2], 2539) + expect_equal(skewness.res$Nvalid[3], 3479) }) test_that("simple skewness, method 2, split, on LAB_TRIG", { @@ -368,19 +304,15 @@ test_that("simple skewness, method 2, split, on LAB_TRIG", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.328479229678695"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("0.221018965497232"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.10547879191455"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1801") - expect_equal(skewness.res$Nvalid[2], "2526") - expect_equal(skewness.res$Nvalid[3], "3473") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1801) + expect_equal(skewness.res$Nvalid[2], 2526) + expect_equal(skewness.res$Nvalid[3], 3473) }) test_that("simple skewness, method 2, split, on LAB_HDL", { @@ -388,19 +320,15 @@ test_that("simple skewness, method 2, split, on LAB_HDL", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) 
expect_equal(as.double(skewness.res$Skewness[1]), as.double("-0.257985996183054"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("-0.206287913742296"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("-0.322495311633619"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1803") - expect_equal(skewness.res$Nvalid[2], "2533") - expect_equal(skewness.res$Nvalid[3], "3473") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1803) + expect_equal(skewness.res$Nvalid[2], 2533) + expect_equal(skewness.res$Nvalid[3], 3473) }) test_that("simple skewness, method 2, split, on LAB_GLUC_ADJUSTED", { @@ -408,19 +336,15 @@ test_that("simple skewness, method 2, split, on LAB_GLUC_ADJUSTED", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("1.09895668040486"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("1.11099586669437"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.979805479581792"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1822") - expect_equal(skewness.res$Nvalid[2], "2583") - expect_equal(skewness.res$Nvalid[3], "3519") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - 
expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1822) + expect_equal(skewness.res$Nvalid[2], 2583) + expect_equal(skewness.res$Nvalid[3], 3519) }) test_that("simple skewness, method 2, split, on PM_BMI_CONTINUOUS", { @@ -428,19 +352,15 @@ test_that("simple skewness, method 2, split, on PM_BMI_CONTINUOUS", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.21184917516287"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("0.0914712871182399"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.0312212818198342"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "2066") - expect_equal(skewness.res$Nvalid[2], "2938") - expect_equal(skewness.res$Nvalid[3], "3923") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 2066) + expect_equal(skewness.res$Nvalid[2], 2938) + expect_equal(skewness.res$Nvalid[3], 3923) }) # context("ds.skewness::smk::method 2::combine") @@ -449,13 +369,11 @@ test_that("simple skewness, combine, on LAB_TSC", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.246614483525739"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7825") - expect_length(skewness.res$ValidityMessage, 1) - 
expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7825) }) test_that("simple skewness, method 2, combine, on LAB_TRIG", { @@ -463,13 +381,11 @@ test_that("simple skewness, method 2, combine, on LAB_TRIG", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.197762413490697"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7800") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7800) }) test_that("simple skewness, method 2, combine, on LAB_HDL", { @@ -477,13 +393,11 @@ test_that("simple skewness, method 2, combine, on LAB_HDL", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("-0.270012226272502"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7809") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7809) }) test_that("simple skewness, method 2, combine, on LAB_GLUC_ADJUSTED", { @@ -491,13 +405,11 @@ test_that("simple skewness, method 2, combine, on LAB_GLUC_ADJUSTED", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("1.05124882294985"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7924") - 
expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7924) }) test_that("simple skewness, method 2, combine, on PM_BMI_CONTINUOUS", { @@ -505,13 +417,11 @@ test_that("simple skewness, method 2, combine, on PM_BMI_CONTINUOUS", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.0960832383143017"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "8927") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 8927) }) # context("ds.skewness::smk::method 2::both") @@ -521,26 +431,20 @@ test_that("simple skewness, both, on LAB_TSC", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("0.188190712227239"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("0.145599939437722"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.352728948755338"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1807") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2539") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3479") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID 
ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1807) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2539) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3479) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("0.246614483525739"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7825") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7825) }) test_that("simple skewness, method 2, both, on LAB_TRIG", { @@ -549,26 +453,20 @@ test_that("simple skewness, method 2, both, on LAB_TRIG", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("0.328479229678695"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("0.221018965497232"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.10547879191455"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1801") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2526") - 
expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3473") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1801) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2526) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3473) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("0.197762413490697"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7800") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7800) }) test_that("simple skewness, method 2, both, on LAB_HDL", { @@ -577,26 +475,20 @@ test_that("simple skewness, method 2, both, on LAB_HDL", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("-0.257985996183054"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("-0.206287913742296"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("-0.322495311633619"), tolerance = ds.test_env$low_tolerance) 
expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1803") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2533") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3473") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1803) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2533) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3473) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("-0.270012226272502"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7809") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7809) }) test_that("simple skewness, method 2, both, on LAB_GLUC_ADJUSTED", { @@ -605,26 +497,20 @@ test_that("simple skewness, method 2, both, on LAB_GLUC_ADJUSTED", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("1.09895668040486"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), 
as.double("1.11099586669437"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.979805479581792"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1822") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2583") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3519") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1822) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2583) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3519) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("1.05124882294985"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7924") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7924) }) test_that("simple skewness, method 2, both, on PM_BMI_CONTINUOUS", { @@ -633,26 +519,20 @@ test_that("simple skewness, method 2, both, on PM_BMI_CONTINUOUS", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) 
expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("0.21184917516287"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("0.0914712871182399"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.0312212818198342"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "2066") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2938") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3923") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 2066) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2938) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3923) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("0.0960832383143017"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "8927") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 8927) }) # Method 3 @@ -663,19 +543,15 @@ test_that("simple skewness, method 3, split, on LAB_TSC", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) 
expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.187878391338523"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("0.145427948446175"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.352424842957634"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1807") - expect_equal(skewness.res$Nvalid[2], "2539") - expect_equal(skewness.res$Nvalid[3], "3479") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1807) + expect_equal(skewness.res$Nvalid[2], 2539) + expect_equal(skewness.res$Nvalid[3], 3479) }) test_that("simple skewness, method 3, split, on LAB_TRIG", { @@ -683,19 +559,15 @@ test_that("simple skewness, method 3, split, on LAB_TRIG", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.327932270814304"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("0.220756541941702"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.105387696137537"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1801") - expect_equal(skewness.res$Nvalid[2], "2526") - expect_equal(skewness.res$Nvalid[3], "3473") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - 
expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1801) + expect_equal(skewness.res$Nvalid[2], 2526) + expect_equal(skewness.res$Nvalid[3], 3473) }) test_that("simple skewness, method 3, split, on LAB_HDL", { @@ -703,19 +575,15 @@ test_that("simple skewness, method 3, split, on LAB_HDL", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("-0.257556893679228"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("-0.206043657579281"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("-0.322216791558985"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1803") - expect_equal(skewness.res$Nvalid[2], "2533") - expect_equal(skewness.res$Nvalid[3], "3473") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1803) + expect_equal(skewness.res$Nvalid[2], 2533) + expect_equal(skewness.res$Nvalid[3], 3473) }) test_that("simple skewness, method 3, split, on LAB_GLUC_ADJUSTED", { @@ -723,19 +591,15 @@ test_that("simple skewness, method 3, split, on LAB_GLUC_ADJUSTED", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("1.09714786387241"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("1.10970584448638"), tolerance = ds.test_env$tolerance) 
expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.978970339038204"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "1822") - expect_equal(skewness.res$Nvalid[2], "2583") - expect_equal(skewness.res$Nvalid[3], "3519") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 1822) + expect_equal(skewness.res$Nvalid[2], 2583) + expect_equal(skewness.res$Nvalid[3], 3519) }) test_that("simple skewness, method 3, split, on PM_BMI_CONTINUOUS", { @@ -743,19 +607,15 @@ test_that("simple skewness, method 3, split, on PM_BMI_CONTINUOUS", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 3) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.211541652198686"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[2]), as.double("0.0913779067255815"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness[3]), as.double("0.031197410311189"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 3) - expect_equal(skewness.res$Nvalid[1], "2066") - expect_equal(skewness.res$Nvalid[2], "2938") - expect_equal(skewness.res$Nvalid[3], "3923") - expect_length(skewness.res$ValidityMessage, 3) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$ValidityMessage[3], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 2066) + expect_equal(skewness.res$Nvalid[2], 2938) + expect_equal(skewness.res$Nvalid[3], 3923) }) # context("ds.skewness::smk::method 3::combine") @@ -764,13 +624,11 @@ 
test_that("simple skewness, combine, on LAB_TSC", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.246519942897225"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7825") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7825) }) test_that("simple skewness, method 3, combine, on LAB_TRIG", { @@ -778,13 +636,11 @@ test_that("simple skewness, method 3, combine, on LAB_TRIG", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.197686357525035"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7800") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7800) }) test_that("simple skewness, method 3, combine, on LAB_HDL", { @@ -792,13 +648,11 @@ test_that("simple skewness, method 3, combine, on LAB_HDL", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("-0.269908503961744"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7809") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7809) }) test_that("simple skewness, method 3, combine, on LAB_GLUC_ADJUSTED", { @@ -806,13 +660,11 
@@ test_that("simple skewness, method 3, combine, on LAB_GLUC_ADJUSTED", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("1.05085085713259"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "7924") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 7924) }) test_that("simple skewness, method 3, combine, on PM_BMI_CONTINUOUS", { @@ -820,13 +672,11 @@ test_that("simple skewness, method 3, combine, on PM_BMI_CONTINUOUS", { expect_equal(class(skewness.res), "data.frame") - expect_length(skewness.res, 3) + expect_length(skewness.res, 2) expect_length(skewness.res$Skewness, 1) expect_equal(as.double(skewness.res$Skewness[1]), as.double("0.0960509510746345"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Nvalid, 1) - expect_equal(skewness.res$Nvalid[1], "8927") - expect_length(skewness.res$ValidityMessage, 1) - expect_equal(skewness.res$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Nvalid[1], 8927) }) # context("ds.skewness::smk::method 3::both") @@ -836,26 +686,20 @@ test_that("simple skewness, both, on LAB_TSC", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("0.187878391338523"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("0.145427948446175"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.352424842957634"), 
tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1807") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2539") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3479") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1807) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2539) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3479) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("0.246519942897225"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7825") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7825) }) test_that("simple skewness, method 3, both, on LAB_TRIG", { @@ -864,26 +708,20 @@ test_that("simple skewness, method 3, both, on LAB_TRIG", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("0.327932270814304"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), 
as.double("0.220756541941702"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.105387696137537"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1801") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2526") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3473") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1801) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2526) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3473) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("0.197686357525035"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7800") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7800) }) test_that("simple skewness, method 3, both, on LAB_HDL", { @@ -892,26 +730,20 @@ test_that("simple skewness, method 3, both, on LAB_HDL", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) 
expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("-0.257556893679228"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("-0.206043657579281"), tolerance = ds.test_env$low_tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("-0.322216791558985"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1803") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2533") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3473") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1803) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2533) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3473) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("-0.269908503961744"), tolerance = ds.test_env$low_tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7809") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7809) }) test_that("simple skewness, method 3, both, on LAB_GLUC_ADJUSTED", { @@ -920,26 +752,20 @@ test_that("simple skewness, method 3, both, on LAB_GLUC_ADJUSTED", { expect_equal(class(skewness.res), 
"list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("1.09714786387241"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("1.10970584448638"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.978970339038204"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "1822") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2583") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3519") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 1822) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2583) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3519) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("1.05085085713259"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "7924") - expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 7924) }) test_that("simple 
skewness, method 3, both, on PM_BMI_CONTINUOUS", { @@ -948,26 +774,20 @@ test_that("simple skewness, method 3, both, on PM_BMI_CONTINUOUS", { expect_equal(class(skewness.res), "list") expect_length(skewness.res, 2) - expect_length(skewness.res$Skewness.by.Study, 3) + expect_length(skewness.res$Skewness.by.Study, 2) expect_length(skewness.res$Skewness.by.Study$Skewness, 3) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[1]), as.double("0.211541652198686"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[2]), as.double("0.0913779067255815"), tolerance = ds.test_env$tolerance) expect_equal(as.double(skewness.res$Skewness.by.Study$Skewness[3]), as.double("0.031197410311189"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Skewness.by.Study$Nvalid, 3) - expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], "2066") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], "2938") - expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], "3923") - expect_length(skewness.res$Skewness.by.Study$ValidityMessage, 3) - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(skewness.res$Skewness.by.Study$ValidityMessage[3], "VALID ANALYSIS") - expect_length(skewness.res$Global.Skewness, 3) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[1], 2066) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[2], 2938) + expect_equal(skewness.res$Skewness.by.Study$Nvalid[3], 3923) + expect_length(skewness.res$Global.Skewness, 2) expect_length(skewness.res$Global.Skewness$Skewness, 1) expect_equal(as.double(skewness.res$Global.Skewness$Skewness[1]), as.double("0.0960509510746345"), tolerance = ds.test_env$tolerance) expect_length(skewness.res$Global.Skewness$Nvalid, 1) - expect_equal(skewness.res$Global.Skewness$Nvalid[1], "8927") - 
expect_length(skewness.res$Global.Skewness$ValidityMessage, 1) - expect_equal(skewness.res$Global.Skewness$ValidityMessage[1], "VALID ANALYSIS") + expect_equal(skewness.res$Global.Skewness$Nvalid[1], 8927) }) # diff --git a/tests/testthat/test-smk-ds.var.R b/tests/testthat/test-smk-ds.var.R index 834ece232..defaed69d 100644 --- a/tests/testthat/test-smk-ds.var.R +++ b/tests/testthat/test-smk-ds.var.R @@ -29,7 +29,7 @@ test_that("setup", { test_that("simple var, split", { var.res <- ds.var(x = 'D$LAB_TSC', type='split') - expect_length(var.res, 3) + expect_length(var.res, 2) expect_length(var.res$Variance.by.Study, 12) expect_equal(var.res$Variance.by.Study[1], 1.229163, tolerance = .000001) expect_equal(var.res$Variance.by.Study[2], 1.140606, tolerance = .000001) @@ -44,34 +44,26 @@ test_that("simple var, split", { expect_equal(var.res$Variance.by.Study[11], 3088) expect_equal(var.res$Variance.by.Study[12], 4128) expect_equal(var.res$Nstudies, 3) - expect_length(var.res$ValidityMessage, 3) - expect_equal(var.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(var.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(var.res$ValidityMessage[3], "VALID ANALYSIS") }) # context("ds.var::smk::combine") test_that("simple var, combine", { var.res <- ds.var(x = 'D$LAB_TSC', type='combine') - expect_length(var.res, 3) + expect_length(var.res, 2) expect_length(var.res$Global.Variance, 4) expect_equal(var.res$Global.Variance[1], 1.158384, tolerance = .000001) expect_equal(var.res$Global.Variance[2], 1554) expect_equal(var.res$Global.Variance[3], 7825) expect_equal(var.res$Global.Variance[4], 9379) expect_equal(var.res$Nstudies, 3) - expect_length(var.res$ValidityMessage, 3) - expect_equal(var.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(var.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(var.res$ValidityMessage[3], "VALID ANALYSIS") }) # context("ds.var::smk::both") test_that("simple var, both", { var.res <- ds.var(x = 'D$LAB_TSC', type='both') - 
expect_length(var.res, 4) + expect_length(var.res, 3) expect_length(var.res$Variance.by.Study, 12) expect_equal(var.res$Variance.by.Study[1], 1.229163, tolerance = .000001) expect_equal(var.res$Variance.by.Study[2], 1.140606, tolerance = .000001) @@ -91,10 +83,6 @@ test_that("simple var, both", { expect_equal(var.res$Global.Variance[3], 7825) expect_equal(var.res$Global.Variance[4], 9379) expect_equal(var.res$Nstudies, 3) - expect_length(var.res$ValidityMessage, 3) - expect_equal(var.res$ValidityMessage[1], "VALID ANALYSIS") - expect_equal(var.res$ValidityMessage[2], "VALID ANALYSIS") - expect_equal(var.res$ValidityMessage[3], "VALID ANALYSIS") }) #