# nolint start
library(mlexperiments)
library(mlbench)

# See https://github.com/kapsner/mlexperiments/blob/main/R/learner_knn.R
# for implementation details of the k-nearest-neighbours learner used below.

# Load the Pima Indians diabetes data, convert it to a data.table and drop
# rows that contain missing values.
data("PimaIndiansDiabetes2")
dataset <- PimaIndiansDiabetes2 |>
  data.table::as.data.table() |>
  na.omit()

# The first eight columns are the predictors; "diabetes" is the binary outcome.
feature_cols <- colnames(dataset)[1:8]
target_col <- "diabetes"
seed <- 123

# Determine the number of CPU cores to use: honour CRAN's core limit when
# _R_CHECK_LIMIT_CORES_ is set; otherwise cap at four cores, falling back to
# whatever parallel::detectCores() reports (at least one).
if (isTRUE(as.logical(Sys.getenv("_R_CHECK_LIMIT_CORES_")))) {
  # on cran
  ncores <- 2L
} else {
  ncores <- ifelse(
    test = parallel::detectCores() > 4,
    yes = 4L,
    no = ifelse(
      test = parallel::detectCores() < 2L,
      yes = 1L,
      no = parallel::detectCores()
    )
  )
}
# Limit the number of initialization points for the Bayesian optimization.
options("mlexperiments.bayesian.max_init" = 10L)
# Stratified train/test split (70 % / 30 %) on the outcome variable.
data_split <- splitTools::partition(
  y = dataset[, get(target_col)],
  p = c(train = 0.7, test = 0.3),
  type = "stratified",
  seed = seed
)
# Design matrix without intercept and 0/1-encoded outcome for the training
# partition (factor levels are 1-based, hence the "- 1L").
train_x <- model.matrix(
  ~ -1 + .,
  dataset[data_split$train, .SD, .SDcols = feature_cols]
)
train_y <- as.integer(dataset[data_split$train, get(target_col)]) - 1L
# Design matrix without intercept and 0/1-encoded outcome for the test
# partition, built exactly like the training matrices above.
test_x <- model.matrix(
  ~ -1 + .,
  dataset[data_split$test, .SD, .SDcols = feature_cols]
)
test_y <- as.integer(dataset[data_split$test, get(target_col)]) - 1L
# Three stratified folds over the training outcome, reused for
# cross-validation and nested cross-validation below.
fold_list <- splitTools::create_folds(
  y = train_y,
  k = 3,
  type = "stratified",
  seed = seed
)
# required learner arguments, not optimized
learner_args <- list(
  l = 2,
  # evaluated lazily inside the CV loop: the test fold's feature matrix
  test = parse(text = "fold_test$x"),
  use.all = FALSE
)

# set arguments for predict function and performance metric,
# required for mlexperiments::MLCrossValidation and
# mlexperiments::MLNestedCV
predict_args <- list(type = "response")
performance_metric <- metric("acc")
performance_metric_args <- NULL
return_models <- FALSE
# required for grid search and initialization of bayesian optimization
<- expand.grid(
parameter_grid k = seq(4, 68, 6)
)# reduce to a maximum of 10 rows
if (nrow(parameter_grid) > 10) {
set.seed(123)
<- sample(seq_len(nrow(parameter_grid)), 10, FALSE)
sample_rows <- kdry::mlh_subset(parameter_grid, sample_rows)
parameter_grid
}
# required for bayesian optimization
<- list(k = c(2L, 80L))
parameter_bounds <- list(
optim_args iters.n = ncores,
kappa = 3.5,
acq = "ucb"
)
# Hyperparameter tuning of k via exhaustive grid search with 3-fold CV.
tuner <- mlexperiments::MLTuneParameters$new(
  learner = LearnerKnn$new(),
  strategy = "grid",
  ncores = ncores,
  seed = seed
)
tuner$parameter_grid <- parameter_grid
tuner$learner_args <- learner_args
tuner$split_type <- "stratified"

tuner$set_data(
  x = train_x,
  y = train_y
)

tuner_results_grid <- tuner$execute(k = 3)

head(tuner_results_grid)
#> setting_id metric_optim_mean  k l use.all
#> 1:          1         0.2224638 16 2   FALSE
#> 2:          2         0.2628019 64 2   FALSE
#> 3:          3         0.2297907 10 2   FALSE
#> 4:          4         0.2371981 34 2   FALSE
#> 5:          5         0.2627214 58 2   FALSE
#> 6:          6         0.2444444 28 2   FALSE
# Hyperparameter tuning of k via Bayesian optimization; the parameter grid
# seeds the initial design, parameter_bounds constrains the search space.
tuner <- mlexperiments::MLTuneParameters$new(
  learner = LearnerKnn$new(),
  strategy = "bayesian",
  ncores = ncores,
  seed = seed
)
tuner$parameter_grid <- parameter_grid
tuner$parameter_bounds <- parameter_bounds

tuner$learner_args <- learner_args
tuner$optim_args <- optim_args

tuner$split_type <- "stratified"

tuner$set_data(
  x = train_x,
  y = train_y
)

tuner_results_bayesian <- tuner$execute(k = 3)
#> Registering parallel backend using 4 cores.

head(tuner_results_bayesian)
#> Epoch setting_id  k gpUtility acqOptimum inBounds Elapsed      Score metric_optim_mean errorMessage l use.all
#> 1:     0          1 16        NA      FALSE     TRUE   0.024 -0.2262480         0.2262480           NA 2   FALSE
#> 2:     0          2 64        NA      FALSE     TRUE   0.026 -0.2700483         0.2700483           NA 2   FALSE
#> 3:     0          3 10        NA      FALSE     TRUE   0.023 -0.2370370         0.2370370           NA 2   FALSE
#> 4:     0          4 34        NA      FALSE     TRUE   0.025 -0.2262480         0.2262480           NA 2   FALSE
#> 5:     0          5 58        NA      FALSE     TRUE   0.008 -0.2735910         0.2735910           NA 2   FALSE
#> 6:     0          6 28        NA      FALSE     TRUE   0.006 -0.2589372         0.2589372           NA 2   FALSE
# Cross-validate the best hyperparameter setting found by the tuner.
validator <- mlexperiments::MLCrossValidation$new(
  learner = LearnerKnn$new(),
  fold_list = fold_list,
  ncores = ncores,
  seed = seed
)
# drop the first element (setting_id) and keep the tuned learner arguments
validator$learner_args <- tuner$results$best.setting[-1]

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#>
#> CV fold: Fold1
#>
#> CV fold: Fold2
#>
#> CV fold: Fold3

head(validator_results)
#>     fold performance  k l use.all
#> 1: Fold1   0.7934783 16 2   FALSE
#> 2: Fold2   0.7391304 16 2   FALSE
#> 3: Fold3   0.8000000 16 2   FALSE
# Nested cross-validation: hyperparameters are tuned by grid search inside
# each outer fold (k_tuning inner folds), then evaluated on the outer fold.
validator <- mlexperiments::MLNestedCV$new(
  learner = LearnerKnn$new(),
  strategy = "grid",
  fold_list = fold_list,
  k_tuning = 3L,
  ncores = ncores,
  seed = seed
)
validator$parameter_grid <- parameter_grid
validator$learner_args <- learner_args
validator$split_type <- "stratified"

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#>
#> CV fold: Fold1
#>
#> CV fold: Fold2
#>
#> CV fold: Fold3
#> CV progress [==========================================================================================================] 3/3 (100%)

head(validator_results)
#>     fold performance  k l use.all
#> 1: Fold1   0.7391304 22 2   FALSE
#> 2: Fold2   0.7391304 28 2   FALSE
#> 3: Fold3   0.7666667 34 2   FALSE
# Nested cross-validation with Bayesian optimization as the inner tuning
# strategy; otherwise identical in setup to the grid-search variant above.
validator <- mlexperiments::MLNestedCV$new(
  learner = LearnerKnn$new(),
  strategy = "bayesian",
  fold_list = fold_list,
  k_tuning = 3L,
  ncores = ncores,
  seed = seed
)
validator$parameter_grid <- parameter_grid
validator$learner_args <- learner_args
validator$split_type <- "stratified"

validator$parameter_bounds <- parameter_bounds
validator$optim_args <- optim_args

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#>
#> CV fold: Fold1
#>
#> Registering parallel backend using 4 cores.
#>
#> CV fold: Fold2
#> CV progress [======================================================================>-----------------------------------] 2/3 ( 67%)
#>
#> Registering parallel backend using 4 cores.
#>
#> CV fold: Fold3
#> CV progress [==========================================================================================================] 3/3 (100%)
#>
#> Registering parallel backend using 4 cores.

head(validator_results)
#>     fold performance  k l use.all
#> 1: Fold1   0.7391304 22 2   FALSE
#> 2: Fold2   0.7934783 10 2   FALSE
#> 3: Fold3   0.7888889 10 2   FALSE
These binaries (installable software) and packages are in development.
They may not be fully stable and should be used with caution. We make no claims about them.
Health statistics for this mirror are visible on the Monitor page.