CRAN Package Check Results for Package BioMoR

Last updated on 2025-12-04 07:50:52 CET.

Flavor Version Tinstall Tcheck Ttotal Status Flags
r-devel-linux-x86_64-debian-clang 0.1.0 14.40 396.92 411.32 OK
r-devel-linux-x86_64-debian-gcc 0.1.0 11.11 331.43 342.54 OK
r-devel-linux-x86_64-fedora-clang 0.1.0 85.00 427.18 512.18 ERROR
r-devel-linux-x86_64-fedora-gcc 0.1.0 108.00 1029.38 1137.38 ERROR
r-devel-windows-x86_64 0.1.0 16.00 214.00 230.00 OK
r-patched-linux-x86_64 0.1.0 13.71 467.72 481.43 OK
r-release-linux-x86_64 0.1.0 13.43 489.43 502.86 OK
r-release-macos-arm64 0.1.0 OK
r-release-macos-x86_64 0.1.0 15.00 190.00 205.00 OK
r-release-windows-x86_64 0.1.0 17.00 210.00 227.00 OK
r-oldrel-macos-arm64 0.1.0 OK
r-oldrel-macos-x86_64 0.1.0 16.00 204.00 220.00 OK
r-oldrel-windows-x86_64 0.1.0 21.00 282.00 303.00 OK

Check Details

Version: 0.1.0
Check: tests
Result: ERROR Running ‘testthat.R’ [270s/571s] Running the tests in ‘tests/testthat.R’ failed. Complete output: > library(testthat) > library(BioMoR) > > test_check("BioMoR") Loading required namespace: randomForest Loading required package: ggplot2 Loading required package: lattice Loading required package: dplyr Attaching package: 'dplyr' The following objects are masked from 'package:stats': filter, lag The following objects are masked from 'package:base': intersect, setdiff, setequal, union Attaching package: 'recipes' The following object is masked from 'package:stats': step randomForest 4.7-1.2 Type rfNews() to see new features/changes/bug fixes. Attaching package: 'randomForest' The following object is masked from 'package:dplyr': combine The following object is masked from 'package:ggplot2': margin Setting direction: controls > cases note: only 1 unique complexity parameters in default grid. Truncating the grid to 1 . ------------------------------------------------------------------------------ You have loaded plyr after dplyr - this is likely to cause problems. 
If you need functions from both plyr and dplyr, please load plyr first, then dplyr: library(plyr); library(dplyr) ------------------------------------------------------------------------------ Attaching package: 'plyr' The following objects are masked from 'package:dplyr': arrange, count, desc, failwith, id, mutate, rename, summarise, summarize Saving _problems/test_models-35.R [ FAIL 1 | WARN 600 | SKIP 0 | PASS 5 ] ══ Failed tests ════════════════════════════════════════════════════════════════ ── Error ('test_models.R:35:3'): XGB model trains and predicts ───────────────── Error in `{ if (!(length(ctrl$seeds) == 1L && is.na(ctrl$seeds))) set.seed(ctrl$seeds[[iter]][parm]) loadNamespace("caret") loadNamespace("recipes") if (ctrl$verboseIter) progress(printed[parm, , drop = FALSE], names(resampleIndex), iter) if (names(resampleIndex)[iter] != "AllData") { modelIndex <- resampleIndex[[iter]] holdoutIndex <- ctrl$indexOut[[iter]] } else { modelIndex <- 1:nrow(dat) holdoutIndex <- modelIndex } if (testing) cat("pre-model\n") if (!is.null(info$submodels[[parm]]) && nrow(info$submodels[[parm]]) > 0) { submod <- info$submodels[[parm]] } else submod <- NULL mod_rec <- try(rec_model(rec, subset_x(dat, modelIndex), method = method, tuneValue = info$loop[parm, , drop = FALSE], obsLevels = lev, classProbs = ctrl$classProbs, sampling = ctrl$sampling, ...), silent = TRUE) if (testing) print(mod_rec) if (!model_failed(mod_rec)) { predicted <- try(rec_pred(method = method, object = mod_rec, newdata = subset_x(dat, holdoutIndex), param = submod), silent = TRUE) if (pred_failed(predicted)) { fail_warning(settings = printed[parm, , drop = FALSE], msg = predicted, where = "predictions", iter = names(resampleIndex)[iter], verb = ctrl$verboseIter) predicted <- fill_failed_pred(index = holdoutIndex, lev = lev, submod) } } else { fail_warning(settings = printed[parm, , drop = FALSE], msg = mod_rec, iter = names(resampleIndex)[iter], verb = ctrl$verboseIter) predicted <- 
fill_failed_pred(index = holdoutIndex, lev = lev, submod) } if (testing) print(head(predicted)) if (ctrl$classProbs) { if (!model_failed(mod_rec)) { probValues <- rec_prob(method = method, object = mod_rec, newdata = subset_x(dat, holdoutIndex), param = submod) } else { probValues <- fill_failed_prob(holdoutIndex, lev, submod) } if (testing) print(head(probValues)) } predicted <- trim_values(predicted, ctrl, is.null(lev)) ho_data <- holdout_rec(mod_rec, dat, holdoutIndex) if (!is.null(submod)) { allParam <- expandParameters(info$loop[parm, , drop = FALSE], submod) allParam <- allParam[complete.cases(allParam), , drop = FALSE] predicted <- lapply(predicted, function(x, lv, dat) { x <- outcome_conversion(x, lv = lev) dat$pred <- x dat }, lv = lev, dat = ho_data) if (testing) print(head(predicted)) if (ctrl$classProbs) predicted <- mapply(cbind, predicted, probValues, SIMPLIFY = FALSE) if (keep_pred) { tmpPred <- predicted for (modIndex in seq(along.with = tmpPred)) { tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]], allParam[modIndex, , drop = FALSE], all = TRUE) } tmpPred <- rbind.fill(tmpPred) tmpPred$Resample <- names(resampleIndex)[iter] } else tmpPred <- NULL thisResample <- lapply(predicted, ctrl$summaryFunction, lev = lev, model = method) if (testing) print(head(thisResample)) if (length(lev) > 1 && length(lev) <= 50) { cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs)) for (ind in seq(along.with = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]]) } thisResample <- do.call("rbind", thisResample) thisResample <- cbind(allParam, thisResample) } else { pred_val <- outcome_conversion(predicted, lv = lev) tmp <- ho_data tmp$pred <- pred_val if (ctrl$classProbs) tmp <- cbind(tmp, probValues) if (keep_pred) { tmpPred <- tmp tmpPred$rowIndex <- holdoutIndex tmpPred <- merge(tmpPred, info$loop[parm, , drop = FALSE], all = TRUE) tmpPred$Resample <- names(resampleIndex)[iter] } else tmpPred <- NULL thisResample <- ctrl$summaryFunction(tmp, 
lev = lev, model = method) if (length(lev) > 1 && length(lev) <= 50) thisResample <- c(thisResample, flatTable(tmp$pred, tmp$obs)) thisResample <- as.data.frame(t(thisResample), stringsAsFactors = FALSE) thisResample <- cbind(thisResample, info$loop[parm, , drop = FALSE]) } thisResample$Resample <- names(resampleIndex)[iter] thisResampleExtra <- optimism_rec(ctrl, dat, iter, lev, method, mod_rec, predicted, submod, info$loop[parm, , drop = FALSE]) if (ctrl$verboseIter) progress(printed[parm, , drop = FALSE], names(resampleIndex), iter, FALSE) if (testing) print(thisResample) list(resamples = thisResample, pred = tmpPred, resamplesExtra = thisResampleExtra) }`: task 1 failed - "$ operator is invalid for atomic vectors" Backtrace: ▆ 1. └─BioMoR::train_xgb_caret(df, "Label", ctrl) at test_models.R:35:3 2. ├─caret::train(...) 3. └─caret:::train.recipe(...) 4. └─caret:::train_rec(...) 5. └─... %op% ... 6. └─e$fun(obj, substitute(ex), parent.frame(), e$data) [ FAIL 1 | WARN 600 | SKIP 0 | PASS 5 ] Error: ! Test failures. Execution halted Flavor: r-devel-linux-x86_64-fedora-clang

Version: 0.1.0
Check: tests
Result: ERROR Running ‘testthat.R’ [15m/36m] Running the tests in ‘tests/testthat.R’ failed. Complete output: > library(testthat) > library(BioMoR) > > test_check("BioMoR") Loading required namespace: randomForest Loading required package: ggplot2 Loading required package: lattice Loading required package: dplyr Attaching package: 'dplyr' The following objects are masked from 'package:stats': filter, lag The following objects are masked from 'package:base': intersect, setdiff, setequal, union Attaching package: 'recipes' The following object is masked from 'package:stats': step randomForest 4.7-1.2 Type rfNews() to see new features/changes/bug fixes. Attaching package: 'randomForest' The following object is masked from 'package:dplyr': combine The following object is masked from 'package:ggplot2': margin Setting direction: controls > cases note: only 1 unique complexity parameters in default grid. Truncating the grid to 1 . ------------------------------------------------------------------------------ You have loaded plyr after dplyr - this is likely to cause problems. 
If you need functions from both plyr and dplyr, please load plyr first, then dplyr: library(plyr); library(dplyr) ------------------------------------------------------------------------------ Attaching package: 'plyr' The following objects are masked from 'package:dplyr': arrange, count, desc, failwith, id, mutate, rename, summarise, summarize Saving _problems/test_models-35.R [ FAIL 1 | WARN 600 | SKIP 0 | PASS 5 ] ══ Failed tests ════════════════════════════════════════════════════════════════ ── Error ('test_models.R:35:3'): XGB model trains and predicts ───────────────── Error in `{ if (!(length(ctrl$seeds) == 1L && is.na(ctrl$seeds))) set.seed(ctrl$seeds[[iter]][parm]) loadNamespace("caret") loadNamespace("recipes") if (ctrl$verboseIter) progress(printed[parm, , drop = FALSE], names(resampleIndex), iter) if (names(resampleIndex)[iter] != "AllData") { modelIndex <- resampleIndex[[iter]] holdoutIndex <- ctrl$indexOut[[iter]] } else { modelIndex <- 1:nrow(dat) holdoutIndex <- modelIndex } if (testing) cat("pre-model\n") if (!is.null(info$submodels[[parm]]) && nrow(info$submodels[[parm]]) > 0) { submod <- info$submodels[[parm]] } else submod <- NULL mod_rec <- try(rec_model(rec, subset_x(dat, modelIndex), method = method, tuneValue = info$loop[parm, , drop = FALSE], obsLevels = lev, classProbs = ctrl$classProbs, sampling = ctrl$sampling, ...), silent = TRUE) if (testing) print(mod_rec) if (!model_failed(mod_rec)) { predicted <- try(rec_pred(method = method, object = mod_rec, newdata = subset_x(dat, holdoutIndex), param = submod), silent = TRUE) if (pred_failed(predicted)) { fail_warning(settings = printed[parm, , drop = FALSE], msg = predicted, where = "predictions", iter = names(resampleIndex)[iter], verb = ctrl$verboseIter) predicted <- fill_failed_pred(index = holdoutIndex, lev = lev, submod) } } else { fail_warning(settings = printed[parm, , drop = FALSE], msg = mod_rec, iter = names(resampleIndex)[iter], verb = ctrl$verboseIter) predicted <- 
fill_failed_pred(index = holdoutIndex, lev = lev, submod) } if (testing) print(head(predicted)) if (ctrl$classProbs) { if (!model_failed(mod_rec)) { probValues <- rec_prob(method = method, object = mod_rec, newdata = subset_x(dat, holdoutIndex), param = submod) } else { probValues <- fill_failed_prob(holdoutIndex, lev, submod) } if (testing) print(head(probValues)) } predicted <- trim_values(predicted, ctrl, is.null(lev)) ho_data <- holdout_rec(mod_rec, dat, holdoutIndex) if (!is.null(submod)) { allParam <- expandParameters(info$loop[parm, , drop = FALSE], submod) allParam <- allParam[complete.cases(allParam), , drop = FALSE] predicted <- lapply(predicted, function(x, lv, dat) { x <- outcome_conversion(x, lv = lev) dat$pred <- x dat }, lv = lev, dat = ho_data) if (testing) print(head(predicted)) if (ctrl$classProbs) predicted <- mapply(cbind, predicted, probValues, SIMPLIFY = FALSE) if (keep_pred) { tmpPred <- predicted for (modIndex in seq(along.with = tmpPred)) { tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]], allParam[modIndex, , drop = FALSE], all = TRUE) } tmpPred <- rbind.fill(tmpPred) tmpPred$Resample <- names(resampleIndex)[iter] } else tmpPred <- NULL thisResample <- lapply(predicted, ctrl$summaryFunction, lev = lev, model = method) if (testing) print(head(thisResample)) if (length(lev) > 1 && length(lev) <= 50) { cells <- lapply(predicted, function(x) flatTable(x$pred, x$obs)) for (ind in seq(along.with = cells)) thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]]) } thisResample <- do.call("rbind", thisResample) thisResample <- cbind(allParam, thisResample) } else { pred_val <- outcome_conversion(predicted, lv = lev) tmp <- ho_data tmp$pred <- pred_val if (ctrl$classProbs) tmp <- cbind(tmp, probValues) if (keep_pred) { tmpPred <- tmp tmpPred$rowIndex <- holdoutIndex tmpPred <- merge(tmpPred, info$loop[parm, , drop = FALSE], all = TRUE) tmpPred$Resample <- names(resampleIndex)[iter] } else tmpPred <- NULL thisResample <- ctrl$summaryFunction(tmp, 
lev = lev, model = method) if (length(lev) > 1 && length(lev) <= 50) thisResample <- c(thisResample, flatTable(tmp$pred, tmp$obs)) thisResample <- as.data.frame(t(thisResample), stringsAsFactors = FALSE) thisResample <- cbind(thisResample, info$loop[parm, , drop = FALSE]) } thisResample$Resample <- names(resampleIndex)[iter] thisResampleExtra <- optimism_rec(ctrl, dat, iter, lev, method, mod_rec, predicted, submod, info$loop[parm, , drop = FALSE]) if (ctrl$verboseIter) progress(printed[parm, , drop = FALSE], names(resampleIndex), iter, FALSE) if (testing) print(thisResample) list(resamples = thisResample, pred = tmpPred, resamplesExtra = thisResampleExtra) }`: task 1 failed - "$ operator is invalid for atomic vectors" Backtrace: ▆ 1. └─BioMoR::train_xgb_caret(df, "Label", ctrl) at test_models.R:35:3 2. ├─caret::train(...) 3. └─caret:::train.recipe(...) 4. └─caret:::train_rec(...) 5. └─... %op% ... 6. └─e$fun(obj, substitute(ex), parent.frame(), e$data) [ FAIL 1 | WARN 600 | SKIP 0 | PASS 5 ] Error: ! Test failures. Execution halted Flavor: r-devel-linux-x86_64-fedora-gcc

These binaries (installable software) and packages are in development.
They may not be fully stable and should be used with caution; we make no claims about them.
Health statistics are visible on the Monitor page.