xgboost: Survival Analysis, Accelerated Failure Time (AFT)

# nolint start
library(mlexperiments)
library(mlsurvlrnrs)

See https://github.com/kapsner/mlsurvlrnrs/blob/main/R/learner_surv_xgboost_aft.R for implementation details.

Preprocessing

Import and Prepare Data

dataset <- survival::colon |>
  data.table::as.data.table() |>
  na.omit()
# keep only death events (etype == 2); etype == 1 codes recurrence
dataset <- dataset[get("etype") == 2, ]

surv_cols <- c("status", "time", "rx")
# columns 3 to (ncol - 1): drop the leading "id" and "study" columns and
# the trailing "etype"; "status" and "time" are removed from the feature
# set further below
feature_cols <- colnames(dataset)[3:(ncol(dataset) - 1)]

General Configurations

seed <- 123
if (isTRUE(as.logical(Sys.getenv("_R_CHECK_LIMIT_CORES_")))) {
  # limit the number of cores on CRAN
  ncores <- 2L
} else {
  ncores <- ifelse(
    test = parallel::detectCores() > 4,
    yes = 4L,
    no = ifelse(
      test = parallel::detectCores() < 2L,
      yes = 1L,
      no = parallel::detectCores()
    )
  )
}
options("mlexperiments.bayesian.max_init" = 10L)
options("mlexperiments.optim.xgb.nrounds" = 100L)
options("mlexperiments.optim.xgb.early_stopping_rounds" = 10L)

Generate Training and Test Data

split_vector <- splitTools::multi_strata(
  df = dataset[, .SD, .SDcols = surv_cols],
  strategy = "kmeans",
  k = 4
)

data_split <- splitTools::partition(
  y = split_vector,
  p = c(train = 0.7, test = 0.3),
  type = "stratified",
  seed = seed
)

train_x <- model.matrix(
  ~ -1 + .,
  dataset[
    data_split$train, .SD, .SDcols = setdiff(feature_cols, surv_cols[1:2])
  ]
)
train_y <- survival::Surv(
  event = (dataset[data_split$train, get("status")] |>
             as.character() |>
             as.integer()),
  time = dataset[data_split$train, get("time")],
  type = "right"
)
split_vector_train <- splitTools::multi_strata(
  df = dataset[data_split$train, .SD, .SDcols = surv_cols],
  strategy = "kmeans",
  k = 4
)


test_x <- model.matrix(
  ~ -1 + .,
  dataset[data_split$test, .SD, .SDcols = setdiff(feature_cols, surv_cols[1:2])]
)
test_y <- survival::Surv(
  event = (dataset[data_split$test, get("status")] |>
             as.character() |>
             as.integer()),
  time = dataset[data_split$test, get("time")],
  type = "right"
)
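
As a quick sanity check (not part of the original workflow), one can verify that the stratified split kept the event rate comparable between the training and the test partition:

# event rate ("status") in the training vs. the test partition
prop.table(table(dataset[data_split$train, get("status")]))
prop.table(table(dataset[data_split$test, get("status")]))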

Generate Training Data Folds

fold_list <- splitTools::create_folds(
  y = split_vector_train,
  k = 3,
  type = "stratified",
  seed = seed
)
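
fold_list is a named list with one element per fold, holding the corresponding training row indices. A quick look at the fold sizes:

# number of training rows per fold
sapply(fold_list, length)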

Experiments

Prepare Experiments

# required learner arguments, not optimized
learner_args <- list(
  objective = "survival:aft",
  eval_metric = "aft-nloglik"
)
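
xgboost's survival:aft objective additionally supports aft_loss_distribution ("normal", "logistic", or "extreme") and aft_loss_distribution_scale. Assuming the learner forwards extra arguments to xgboost (an assumption, not verified here), they could be added to this list; the values shown are xgboost's documented defaults:

# hypothetical extension; assumes additional arguments are passed
# through to xgboost ("normal" and 1.0 are xgboost's defaults)
learner_args_aft <- c(
  learner_args,
  list(
    aft_loss_distribution = "normal",
    aft_loss_distribution_scale = 1.0
  )
)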

# set arguments for predict function and performance metric,
# required for mlexperiments::MLCrossValidation and
# mlexperiments::MLNestedCV
predict_args <- NULL
performance_metric <- c_index
performance_metric_args <- NULL
return_models <- FALSE

# required for grid search and initialization of bayesian optimization
parameter_grid <- expand.grid(
  subsample = seq(0.6, 1, .2),
  colsample_bytree = seq(0.6, 1, .2),
  min_child_weight = seq(1, 5, 4),
  learning_rate = seq(0.1, 0.2, 0.1),
  max_depth = seq(1, 5, 4)
)
# reduce to a maximum of 10 rows
if (nrow(parameter_grid) > 10) {
  set.seed(123)
  sample_rows <- sample(seq_len(nrow(parameter_grid)), 10, FALSE)
  parameter_grid <- kdry::mlh_subset(parameter_grid, sample_rows)
}

# required for bayesian optimization
parameter_bounds <- list(
  subsample = c(0.2, 1),
  colsample_bytree = c(0.2, 1),
  min_child_weight = c(1L, 10L),
  learning_rate = c(0.1, 0.2),
  max_depth = c(1L, 10L)
)
optim_args <- list(
  iters.n = ncores,
  kappa = 3.5,
  acq = "ucb"
)
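
iters.n, kappa, and acq match arguments of ParBayesianOptimization::bayesOpt, which mlexperiments appears to build on (an assumption): acq = "ucb" selects the upper confidence bound acquisition function, and kappa balances exploration against exploitation. A hypothetical alternative using expected improvement:

# hypothetical alternative acquisition function; "ei" and eps are
# ParBayesianOptimization::bayesOpt arguments
optim_args_ei <- list(
  iters.n = ncores,
  acq = "ei",
  eps = 0.0
)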

Hyperparameter Tuning

tuner <- mlexperiments::MLTuneParameters$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  strategy = "grid",
  ncores = ncores,
  seed = seed
)

tuner$parameter_grid <- parameter_grid
tuner$learner_args <- learner_args
tuner$split_type <- "stratified"
tuner$split_vector <- split_vector_train

tuner$set_data(
  x = train_x,
  y = train_y
)

tuner_results_grid <- tuner$execute(k = 3)
#> 
#> Parameter settings [==========================================================================================================================================] 10/10 (100%)

head(tuner_results_grid)
#>    setting_id metric_optim_mean  nrounds subsample colsample_bytree min_child_weight learning_rate max_depth    objective eval_metric
#> 1:          1          4.508734 40.00000       0.6              0.8                5           0.2         1 survival:aft aft-nloglik
#> 2:          2          4.546383 39.33333       1.0              0.8                5           0.1         5 survival:aft aft-nloglik
#> 3:          3          4.505510 69.33333       0.8              0.8                5           0.1         1 survival:aft aft-nloglik
#> 4:          4          4.578441 19.33333       0.6              0.8                5           0.2         5 survival:aft aft-nloglik
#> 5:          5          4.561942 38.33333       1.0              0.8                1           0.1         5 survival:aft aft-nloglik
#> 6:          6          4.542217 37.66667       0.8              0.8                5           0.1         5 survival:aft aft-nloglik
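
The best setting (lowest mean aft-nloglik across the folds) is stored in the tuner's results; the same field is used further below to configure the cross-validation:

# best hyperparameter setting found during tuning
tuner$results$best.setting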

Bayesian Optimization

tuner <- mlexperiments::MLTuneParameters$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  strategy = "bayesian",
  ncores = ncores,
  seed = seed
)

tuner$parameter_grid <- parameter_grid
tuner$parameter_bounds <- parameter_bounds

tuner$learner_args <- learner_args
tuner$optim_args <- optim_args

tuner$split_type <- "stratified"
tuner$split_vector <- split_vector_train

tuner$set_data(
  x = train_x,
  y = train_y
)

tuner_results_bayesian <- tuner$execute(k = 3)
#> 
#> Registering parallel backend using 4 cores.

head(tuner_results_bayesian)
#>    Epoch setting_id subsample colsample_bytree min_child_weight learning_rate max_depth gpUtility acqOptimum inBounds Elapsed     Score metric_optim_mean  nrounds
#> 1:     0          1       0.6              0.8                5           0.2         1        NA      FALSE     TRUE   3.705 -4.509285          4.509285 41.00000
#> 2:     0          2       1.0              0.8                5           0.1         5        NA      FALSE     TRUE   3.918 -4.542901          4.542901 41.66667
#> 3:     0          3       0.8              0.8                5           0.1         1        NA      FALSE     TRUE   3.980 -4.506211          4.506211 82.33333
#> 4:     0          4       0.6              0.8                5           0.2         5        NA      FALSE     TRUE   3.867 -4.582990          4.582990 22.33333
#> 5:     0          5       1.0              0.8                1           0.1         5        NA      FALSE     TRUE   2.638 -4.559373          4.559373 42.33333
#> 6:     0          6       0.8              0.8                5           0.1         5        NA      FALSE     TRUE   3.138 -4.548201          4.548201 44.00000
#>    errorMessage    objective eval_metric
#> 1:           NA survival:aft aft-nloglik
#> 2:           NA survival:aft aft-nloglik
#> 3:           NA survival:aft aft-nloglik
#> 4:           NA survival:aft aft-nloglik
#> 5:           NA survival:aft aft-nloglik
#> 6:           NA survival:aft aft-nloglik
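
Since the learner was created with metric_optimization_higher_better = FALSE, the optimizer maximizes the negated metric; Score is simply -metric_optim_mean, which can be verified directly:

# Score is the negated aft-nloglik (the optimizer maximizes Score)
all.equal(
  tuner_results_bayesian$Score,
  -tuner_results_bayesian$metric_optim_mean
)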

k-Fold Cross Validation

validator <- mlexperiments::MLCrossValidation$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  fold_list = fold_list,
  ncores = ncores,
  seed = seed
)

# drop the first element ("setting_id") of the best setting
validator$learner_args <- tuner$results$best.setting[-1]

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#> 
#> CV fold: Fold1
#> 
#> CV fold: Fold2
#> 
#> CV fold: Fold3

head(validator_results)
#>     fold performance subsample colsample_bytree min_child_weight learning_rate max_depth nrounds    objective eval_metric
#> 1: Fold1   0.3477846 0.2882211        0.9747412                1     0.1124153         1      60 survival:aft aft-nloglik
#> 2: Fold2   0.3601468 0.2882211        0.9747412                1     0.1124153         1      60 survival:aft aft-nloglik
#> 3: Fold3   0.3585996 0.2882211        0.9747412                1     0.1124153         1      60 survival:aft aft-nloglik
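
The fold-wise results can be aggregated into a single cross-validated estimate of the concordance index:

# mean and standard deviation of the per-fold C-index
mean(validator_results$performance)
sd(validator_results$performance)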

Nested Cross Validation

validator <- mlexperiments::MLNestedCV$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  strategy = "grid",
  fold_list = fold_list,
  k_tuning = 3L,
  ncores = ncores,
  seed = seed
)

validator$parameter_grid <- parameter_grid
validator$learner_args <- learner_args
validator$split_type <- "stratified"
validator$split_vector <- split_vector_train

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
validator$return_models <- return_models

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#> 
#> CV fold: Fold1
#> 
#> Parameter settings [==========================================================================================================================================] 10/10 (100%)
#> CV fold: Fold2
#> CV progress [=================================================================================================>-------------------------------------------------] 2/3 ( 67%)
#> 
#> Parameter settings [==========================================================================================================================================] 10/10 (100%)
#> CV fold: Fold3
#> CV progress [===================================================================================================================================================] 3/3 (100%)
#> 
#> Parameter settings [==========================================================================================================================================] 10/10 (100%)

head(validator_results)
#>     fold performance  nrounds subsample colsample_bytree min_child_weight learning_rate max_depth    objective eval_metric
#> 1: Fold1   0.3609538 32.66667       0.6              0.8                5           0.2         1 survival:aft aft-nloglik
#> 2: Fold2   0.3665939 31.33333       0.6              1.0                1           0.2         1 survival:aft aft-nloglik
#> 3: Fold3   0.3549842 38.33333       0.6              1.0                1           0.2         1 survival:aft aft-nloglik

Inner Bayesian Optimization

validator <- mlexperiments::MLNestedCV$new(
  learner = LearnerSurvXgboostAft$new(
    metric_optimization_higher_better = FALSE
  ),
  strategy = "bayesian",
  fold_list = fold_list,
  k_tuning = 3L,
  ncores = ncores,
  seed = 312
)

validator$parameter_grid <- parameter_grid
validator$learner_args <- learner_args
validator$split_type <- "stratified"
validator$split_vector <- split_vector_train


validator$parameter_bounds <- parameter_bounds
validator$optim_args <- optim_args

validator$predict_args <- predict_args
validator$performance_metric <- performance_metric
validator$performance_metric_args <- performance_metric_args
# keep the fitted models: required for the holdout predictions below
validator$return_models <- TRUE

validator$set_data(
  x = train_x,
  y = train_y
)

validator_results <- validator$execute()
#> 
#> CV fold: Fold1
#> 
#> Registering parallel backend using 4 cores.
#> 
#> CV fold: Fold2
#> CV progress [=================================================================================================>-------------------------------------------------] 2/3 ( 67%)
#> 
#> Registering parallel backend using 4 cores.
#> 
#> CV fold: Fold3
#> CV progress [===================================================================================================================================================] 3/3 (100%)
#> 
#> Registering parallel backend using 4 cores.

head(validator_results)
#>     fold performance subsample colsample_bytree min_child_weight learning_rate max_depth  nrounds    objective eval_metric
#> 1: Fold1   0.3480615 0.6000000        0.8000000                5     0.2000000         1 44.33333 survival:aft aft-nloglik
#> 2: Fold2   0.3699332 0.6000000        1.0000000                1     0.2000000         1 36.66667 survival:aft aft-nloglik
#> 3: Fold3   0.3522341 0.7604887        0.7889484                1     0.1695828         1 31.00000 survival:aft aft-nloglik

Holdout Test Dataset Performance

Predict Outcome in Holdout Test Dataset

preds_xgboost <- mlexperiments::predictions(
  object = validator,
  newdata = test_x
)

Evaluate Performance on Holdout Test Dataset

perf_xgboost <- mlexperiments::performance(
  object = validator,
  prediction_results = preds_xgboost,
  y_ground_truth = test_y
)
perf_xgboost
#>    model performance
#> 1: Fold1   0.3401763
#> 2: Fold2   0.3213113
#> 3: Fold3   0.3136183
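
As a cross-check (not part of the original vignette), the concordance could also be computed directly with survival::concordance. This assumes that preds_xgboost holds one column of predicted survival times per fold model, in which case larger predictions indicate longer survival and no reversal is needed:

# hypothetical cross-check for the first fold model's predictions
cidx <- survival::concordance(test_y ~ preds_xgboost[[1]])
cidx$concordance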
