From 7ff1264f8c8488aed68d307ea91f8bb2e0c1c897 Mon Sep 17 00:00:00 2001 From: Sebastian Fischer Date: Mon, 11 Sep 2023 16:42:05 +0200 Subject: [PATCH] docs: correct usage of default / initial value (#291) * docs: correct usage of default / initial value * fix workflow * fix gh actions * typo * ... * try and fix workflow * typo * try to fix ci * don't use pip * ... * fix rfsrc learners * typo * fix typos * Update R/learner_lightgbm_regr_lightgbm.R * document * fix parameter tests * style --- .github/workflows/rcmdcheck.yml | 7 +-- R/learner_LiblineaR_regr_liblinear.R | 4 +- R/learner_dbarts_regr_bart.R | 11 ++-- R/learner_flexsurv_surv_flexible.R | 4 +- R/learner_gbm_classif_gbm.R | 13 ++--- R/learner_glmnet_surv_cv_glmnet.R | 4 +- R/learner_glmnet_surv_glmnet.R | 4 +- R/learner_lightgbm_classif_lightgbm.R | 20 ++++---- R/learner_lightgbm_regr_lightgbm.R | 15 +++--- R/learner_mboost_regr_gamboost.R | 2 +- R/learner_mboost_regr_glmboost.R | 2 +- R/learner_obliqueRSF_surv_obliqueRSF.R | 9 ++-- ...randomForestSRC_classif_imbalanced_rfsrc.R | 29 ++++------- R/learner_randomForestSRC_classif_rfsrc.R | 11 ++-- R/learner_randomForestSRC_regr_rfsrc.R | 4 +- R/learner_randomForestSRC_surv_rfsrc.R | 4 +- R/learner_randomForest_regr_randomForest.R | 2 +- R/learner_ranger_surv_ranger.R | 7 +-- R/learner_rsm_regr_rsm.R | 2 +- R/learner_survivalmodels_surv_dnnsurv.R | 7 +-- R/learner_xgboost_surv_xgboost.R | 24 ++------- inst/templates/learner_template.R | 5 +- man/mlr_learners_classif.gbm.Rd | 17 ++----- man/mlr_learners_classif.imbalanced_rfsrc.Rd | 50 ++++++++----------- man/mlr_learners_classif.lightgbm.Rd | 30 +++++------ man/mlr_learners_classif.rfsrc.Rd | 14 +----- man/mlr_learners_regr.bart.Rd | 11 ++-- man/mlr_learners_regr.liblinear.Rd | 4 +- man/mlr_learners_regr.lightgbm.Rd | 18 +++---- man/mlr_learners_regr.rfsrc.Rd | 4 +- man/mlr_learners_surv.cv_glmnet.Rd | 4 +- man/mlr_learners_surv.dnnsurv.Rd | 9 +--- man/mlr_learners_surv.flexible.Rd | 4 +- 
man/mlr_learners_surv.glmnet.Rd | 10 ++-- man/mlr_learners_surv.obliqueRSF.Rd | 11 ++-- man/mlr_learners_surv.ranger.Rd | 9 +--- man/mlr_learners_surv.rfsrc.Rd | 4 +- man/mlr_learners_surv.xgboost.Rd | 32 ++---------- ...test_paramtest_lightgbm_classif_lightgbm.R | 5 +- .../test_paramtest_lightgbm_regr_lightgbm.R | 4 +- 40 files changed, 155 insertions(+), 275 deletions(-) diff --git a/.github/workflows/rcmdcheck.yml b/.github/workflows/rcmdcheck.yml index 429596ee6..cbb46c094 100644 --- a/.github/workflows/rcmdcheck.yml +++ b/.github/workflows/rcmdcheck.yml @@ -56,11 +56,12 @@ jobs: - name: Install Python run: | - pak::pkg_install('rstudio/reticulate') + pak::pkg_install('reticulate') reticulate::install_miniconda() + reticulate::use_condaenv('r-reticulate') install.packages('keras') - keras::install_keras(extra_packages = c('IPython', 'requests', 'certifi', 'urllib3', 'tensorflow-hub', 'tabnet')) - reticulate::py_install(c('torch', 'pycox'), pip = TRUE) + keras::install_keras(extra_packages = c('IPython', 'requests', 'certifi', 'urllib3', 'tensorflow-hub', 'tabnet'), method = "conda") + reticulate::py_install(c('pytorch', 'pycox'), method = "conda") shell: Rscript {0} - uses: r-lib/actions/check-r-package@v2 diff --git a/R/learner_LiblineaR_regr_liblinear.R b/R/learner_LiblineaR_regr_liblinear.R index 6dd51debd..8a68f8ecb 100644 --- a/R/learner_LiblineaR_regr_liblinear.R +++ b/R/learner_LiblineaR_regr_liblinear.R @@ -15,10 +15,10 @@ #' * `type = 12` – L2-regularized L2-loss support vector regression (dual) #' * `type = 13` – L2-regularized L1-loss support vector regression (dual) #' -#' @section Custom mlr3 defaults: +#' @section Initial parameter values: #' - `svr_eps`: #' - Actual default: `NULL` -#' - Adjusted default: 0.001 +#' - Initial value: 0.001 #' - Reason for change: `svr_eps` is type dependent and the "type" is handled #' by the mlr3learner. The default value is set to th default of the respective #' "type". 
diff --git a/R/learner_dbarts_regr_bart.R b/R/learner_dbarts_regr_bart.R index e36bc8f8a..748be7083 100644 --- a/R/learner_dbarts_regr_bart.R +++ b/R/learner_dbarts_regr_bart.R @@ -9,7 +9,7 @@ #' @template learner #' @templateVar id regr.bart #' -#' @section Initial parameter values: +#' @section Custom mlr3 parameters: #' * Parameter: offset #' * The parameter is removed, because only `dbarts::bart2` allows an offset during training, #' and therefore the offset parameter in `dbarts:::predict.bart` is irrelevant for @@ -17,11 +17,8 @@ #' * Parameter: nchain, combineChains, combinechains #' * The parameters are removed as parallelization of multiple models is handled by future. #' -#' @section Custom mlr3 defaults: -#' * Parameter: keeptrees -#' * Original: FALSE -#' * New: TRUE -#' * Reason: Required for prediction +#' @section Initial parameter values: +#' * `keeptrees` is initialized to `TRUE` because it is required for prediction. #' #' @references #' `r format_bib("sparapani2021nonparametric", "chipman2010bart")` @@ -71,7 +68,7 @@ LearnerRegrBart = R6Class("LearnerRegrBart", packages = c("mlr3extralearners", "dbarts"), feature_types = c("integer", "numeric", "factor", "ordered"), # TODO: add "se" to the list of predict types. - predict_types = c("response"), + predict_types = "response", param_set = ps, properties = c("weights"), man = "mlr3extralearners::mlr_learners_regr.bart", diff --git a/R/learner_flexsurv_surv_flexible.R b/R/learner_flexsurv_surv_flexible.R index fa87ce3a6..a51527fe1 100644 --- a/R/learner_flexsurv_surv_flexible.R +++ b/R/learner_flexsurv_surv_flexible.R @@ -21,10 +21,10 @@ #' and covariates \eqn{X^T = (X_0,...,X_P)^T}{X^T = (X0,...,XP)^T}, where \eqn{X_0}{X0} is a column #' of \eqn{1}s: \eqn{lp = \beta X}{lp = \betaX}. 
#' -#' @section Custom mlr3 defaults: +#' @section Initial parameter values: #' - `k`: #' - Actual default: `0` -#' - Adjusted default: `1` +#' - Initial value: `1` #' - Reason for change: The default value of `0` is equivalent to, and a much less efficient #' implementation of, [LearnerSurvParametric]. #' diff --git a/R/learner_gbm_classif_gbm.R b/R/learner_gbm_classif_gbm.R index 22ee96138..9a53482ee 100644 --- a/R/learner_gbm_classif_gbm.R +++ b/R/learner_gbm_classif_gbm.R @@ -9,16 +9,9 @@ #' @template learner #' @templateVar id classif.gbm #' -#' @section Custom mlr3 defaults: -#' - `keep.data`: -#' - Actual default: TRUE -#' - Adjusted default: FALSE -#' - Reason for change: `keep.data = FALSE` saves memory during model fitting. -#' - `n.cores`: -#' - Actual default: NULL -#' - Adjusted default: 1 -#' - Reason for change: Suppressing the automatic internal parallelization if -#' `cv.folds` > 0. +#' @section Initial parameter values: +#' - `keep.data` is initialized to `FALSE` to save memory. +#' - `n.cores` is initialized to 1 to avoid conflicts with parallelization through future. #' #' @references #' `r format_bib("friedman2002stochastic")` diff --git a/R/learner_glmnet_surv_cv_glmnet.R b/R/learner_glmnet_surv_cv_glmnet.R index ce7b27b04..84b9fdbdb 100644 --- a/R/learner_glmnet_surv_cv_glmnet.R +++ b/R/learner_glmnet_surv_cv_glmnet.R @@ -6,8 +6,8 @@ #' Generalized linear models with elastic net regularization. #' Calls [glmnet::cv.glmnet()] from package \CRANpkg{glmnet}. #' -#' @section Custom mlr3 defaults: -#' - `family` The default is set to `"cox"`. +#' @section Custom mlr3 parameters: +#' - `family` is set to `"cox"` and cannot be changed. 
#' #' @templateVar id surv.cv_glmnet #' @template learner diff --git a/R/learner_glmnet_surv_glmnet.R b/R/learner_glmnet_surv_glmnet.R index c3e84baa0..cd4c8bf18 100644 --- a/R/learner_glmnet_surv_glmnet.R +++ b/R/learner_glmnet_surv_glmnet.R @@ -6,8 +6,8 @@ #' Generalized linear models with elastic net regularization. #' Calls [glmnet::glmnet()] from package \CRANpkg{glmnet}. #' -#' @section Custom mlr3 defaults: -#' - `family` The default is set to `"cox"`. +#' @section Custom mlr3 parameters: +#' - `family` is set to `"cox"` and cannot be changed. #' #' @details #' Caution: This learner is different to learners calling [glmnet::cv.glmnet()] diff --git a/R/learner_lightgbm_classif_lightgbm.R b/R/learner_lightgbm_classif_lightgbm.R index 4be4bc777..db6e7e9f7 100644 --- a/R/learner_lightgbm_classif_lightgbm.R +++ b/R/learner_lightgbm_classif_lightgbm.R @@ -14,27 +14,27 @@ #' @templateVar id classif.lightgbm #' #' @section Initial parameter values: -#' * `convert_categorical`: -#' Additional parameter. If this parameter is set to `TRUE` (default), all factor and logical -#' columns are converted to integers and the parameter categorical_feature of lightgbm is set to -#' those columns. -#' * `num_class`: -#' This parameter is automatically inferred for multiclass tasks and does not have to be set. -#' @section Custom mlr3 defaults: #' * `num_threads`: #' * Actual default: 0L -#' * Adjusted default: 1L +#' * Initial value: 1L #' * Reason for change: Prevents accidental conflicts with `future`. #' * `verbose`: #' * Actual default: 1L -#' * Adjusted default: -1L +#' * Initial value: -1L #' * Reason for change: Prevents accidental conflicts with mlr messaging system. +#' @section Custom mlr3 defaults: #' * `objective`: -#' Depending if the task is binary / multiclass, the default is set to `"binary"` or +#' Depending if the task is binary / multiclass, the default is `"binary"` or #' `"multiclasss"`. 
#' @section Custom mlr3 parameters: #' * `early_stopping` #' Whether to use the test set for early stopping. Default is `FALSE`. +#' * `convert_categorical`: +#' Additional parameter. If this parameter is set to `TRUE` (default), all factor and logical +#' columns are converted to integers and the parameter categorical_feature of lightgbm is set to +#' those columns. +#' * `num_class`: +#' This parameter is automatically inferred for multiclass tasks and does not have to be set. #' #' @references #' `r format_bib("ke2017lightgbm")` diff --git a/R/learner_lightgbm_regr_lightgbm.R b/R/learner_lightgbm_regr_lightgbm.R index 9d17e343b..ad865dccc 100644 --- a/R/learner_lightgbm_regr_lightgbm.R +++ b/R/learner_lightgbm_regr_lightgbm.R @@ -14,23 +14,22 @@ #' @templateVar id regr.lightgbm #' #' @section Initial parameter values: -#' * `convert_categorical`: -#' Additional parameter. If this parameter is set to `TRUE` (default), all factor and logical -#' columns are converted to integers and the parameter categorical_feature of lightgbm is set to -#' those columns. -#' @section Custom mlr3 defaults: #' * `num_threads`: #' * Actual default: 0L -#' * Adjusted default: 1L +#' * Initial value: 1L #' * Reason for change: Prevents accidental conflicts with `future`. #' * `verbose`: #' * Actual default: 1L -#' * Adjusted default: -1L +#' * Initial value: -1L #' * Reason for change: Prevents accidental conflicts with mlr messaging system. #' #' @section Custom mlr3 parameters: #' * `early_stopping` #' Whether to use the test set for early stopping. Default is `FALSE`. +#' * `convert_categorical`: +#' Additional parameter. If this parameter is set to `TRUE` (default), all factor and logical +#' columns are converted to integers and the parameter categorical_feature of lightgbm is set to +#' those columns. 
#' #' @references #' `r format_bib("ke2017lightgbm")` @@ -199,7 +198,7 @@ LearnerRegrLightGBM = R6Class("LearnerRegrLightGBM", id = "regr.lightgbm", packages = c("mlr3extralearners", "lightgbm"), feature_types = c("numeric", "integer", "logical", "factor"), - predict_types = c("response"), + predict_types = "response", param_set = ps, properties = c("weights", "missings", "importance", "hotstart_forward"), man = "mlr3extralearners::mlr_learners_regr.lightgbm", diff --git a/R/learner_mboost_regr_gamboost.R b/R/learner_mboost_regr_gamboost.R index 786b5ea65..dd937c3d0 100644 --- a/R/learner_mboost_regr_gamboost.R +++ b/R/learner_mboost_regr_gamboost.R @@ -52,7 +52,7 @@ LearnerRegrGAMBoost = R6Class("LearnerRegrGAMBoost", id = "regr.gamboost", packages = c("mlr3extralearners", "mboost"), feature_types = c("integer", "numeric", "factor", "ordered"), - predict_types = c("response"), + predict_types = "response", param_set = ps, properties = "weights", man = "mlr3extralearners::mlr_learners_regr.gamboost", diff --git a/R/learner_mboost_regr_glmboost.R b/R/learner_mboost_regr_glmboost.R index 6c3a33469..637db0a73 100644 --- a/R/learner_mboost_regr_glmboost.R +++ b/R/learner_mboost_regr_glmboost.R @@ -50,7 +50,7 @@ LearnerRegrGLMBoost = R6Class("LearnerRegrGLMBoost", id = "regr.glmboost", packages = c("mlr3extralearners", "mboost"), feature_types = c("integer", "numeric", "factor", "ordered"), - predict_types = c("response"), + predict_types = "response", param_set = ps, properties = "weights", man = "mlr3extralearners::mlr_learners_regr.glmboost", diff --git a/R/learner_obliqueRSF_surv_obliqueRSF.R b/R/learner_obliqueRSF_surv_obliqueRSF.R index 55656fa84..74d5fb474 100644 --- a/R/learner_obliqueRSF_surv_obliqueRSF.R +++ b/R/learner_obliqueRSF_surv_obliqueRSF.R @@ -10,17 +10,14 @@ #' @template learner #' @templateVar id surv.obliqueRSF #' -#' @section Initial parameter values: +#' @section Custom mlr3 parameters: #' - `mtry`: #' - This hyperparameter can alternatively be 
set via the added hyperparameter `mtry_ratio` #' as `mtry = max(ceiling(mtry_ratio * n_features), 1)`. #' Note that `mtry` and `mtry_ratio` are mutually exclusive. #' -#' @section Custom mlr3 defaults: -#' - `verbose`: -#' - Actual default: `TRUE` -#' - Adjusted default: `FALSE` -#' - Reason for change: mlr3 already has it's own verbose set to `TRUE` by default +#' @section Initial parameter values: +#' - `verbose` is initialized to `FALSE` #' #' @references #' `r format_bib("jaeger_2019")` diff --git a/R/learner_randomForestSRC_classif_imbalanced_rfsrc.R b/R/learner_randomForestSRC_classif_imbalanced_rfsrc.R index 64dc11634..e2816f73b 100644 --- a/R/learner_randomForestSRC_classif_imbalanced_rfsrc.R +++ b/R/learner_randomForestSRC_classif_imbalanced_rfsrc.R @@ -6,21 +6,8 @@ #' Imbalanced Random forest for classification between two classes. #' Calls [randomForestSRC::imbalanced.rfsrc()] from from \CRANpkg{randomForestSRC}. #' -#' @section Custom mlr3 parameters: -#' - `mtry`: -#' - This hyperparameter can alternatively be set via the added hyperparameter `mtry.ratio` -#' as `mtry = max(ceiling(mtry.ratio * n_features), 1)`. -#' Note that `mtry` and `mtry.ratio` are mutually exclusive. -#' - `sampsize`: -#' - This hyperparameter can alternatively be set via the added hyperparameter `sampsize.ratio` -#' as `sampsize = max(ceiling(sampsize.ratio * n_obs), 1)`. -#' Note that `sampsize` and `sampsize.ratio` are mutually exclusive. #' -#' @section Custom mlr3 defaults: -#' - `cores`: -#' - Actual default: Auto-detecting the number of cores -#' - Adjusted default: 1 -#' - Reason for change: Threading conflicts with explicit parallelization via \CRANpkg{future}. 
+#' @inheritSection mlr_learners_classif.rfsrc Custom mlr3 parameters #' #' @templateVar id classif.imbalanced_rfsrc #' @template learner @@ -101,12 +88,12 @@ LearnerClassifImbalancedRandomForestSRC = R6Class("LearnerClassifImbalancedRando do.trace = p_lgl(default = FALSE, tags = c("train", "predict")), statistics = p_lgl(default = FALSE, tags = c("train", "predict")), get.tree = p_uty(tags = "predict"), - outcome = p_fct( - default = "train", levels = c("train", "test"), - tags = "predict"), - ptn.count = p_int(default = 0L, lower = 0L, tags = "predict"), - cores = p_int(default = 1L, lower = 1L, tags = c("train", "predict", "threads")), - save.memory = p_lgl(default = FALSE, tags = "train"), + outcome = p_fct( + default = "train", levels = c("train", "test"), + tags = "predict"), + ptn.count = p_int(default = 0L, lower = 0L, tags = "predict"), + cores = p_int(default = 1L, lower = 1L, tags = c("train", "predict", "threads")), + save.memory = p_lgl(default = FALSE, tags = "train"), perf.type = p_fct(levels = c("gmean", "misclass", "brier", "none"), tags = "train") # nolint ) @@ -155,6 +142,7 @@ LearnerClassifImbalancedRandomForestSRC = R6Class("LearnerClassifImbalancedRando pv = convert_ratio(pv, "mtry", "mtry.ratio", length(task$feature_names)) pv = convert_ratio(pv, "sampsize", "sampsize.ratio", task$nrow) cores = pv$cores %??% 1L + pv$cores = NULL if ("weights" %in% task$properties) { pv$case.wt = as.numeric(task$weights$weight) # nolint @@ -168,6 +156,7 @@ LearnerClassifImbalancedRandomForestSRC = R6Class("LearnerClassifImbalancedRando newdata = data.table::setDF(ordered_features(task, self)) pars = self$param_set$get_values(tags = "predict") cores = pars$cores %??% 1L + pars$cores = NULL pred = invoke(predict, object = self$model, newdata = newdata, diff --git a/R/learner_randomForestSRC_classif_rfsrc.R b/R/learner_randomForestSRC_classif_rfsrc.R index 399e2b13e..7ca052c2d 100644 --- a/R/learner_randomForestSRC_classif_rfsrc.R +++ 
b/R/learner_randomForestSRC_classif_rfsrc.R @@ -9,7 +9,7 @@ #' @template learner #' @templateVar id classif.rfsrc #' -#' @section Initial parameter values: +#' @section Custom mlr3 parameters: #' - `mtry`: #' - This hyperparameter can alternatively be set via the added hyperparameter `mtry.ratio` #' as `mtry = max(ceiling(mtry.ratio * n_features), 1)`. @@ -18,12 +18,8 @@ #' - This hyperparameter can alternatively be set via the added hyperparameter `sampsize.ratio` #' as `sampsize = max(ceiling(sampsize.ratio * n_obs), 1)`. #' Note that `sampsize` and `sampsize.ratio` are mutually exclusive. -#' -#' @section Custom mlr3 defaults: #' - `cores`: -#' - Actual default: Auto-detecting the number of cores -#' - Adjusted default: 1 -#' - Reason for change: Threading conflicts with explicit parallelization via \CRANpkg{future}. +#' This value is set as the option `rf.cores` during training and is set to 1 by default. #' #' @references #' `r format_bib("breiman_2001")` @@ -157,6 +153,7 @@ LearnerClassifRandomForestSRC = R6Class("LearnerClassifRandomForestSRC", pv = convert_ratio(pv, "mtry", "mtry.ratio", length(task$feature_names)) pv = convert_ratio(pv, "sampsize", "sampsize.ratio", task$nrow) cores = pv$cores %??% 1L + pv$cores = NULL if ("weights" %in% task$properties) { pv$case.wt = as.numeric(task$weights$weight) # nolint @@ -171,6 +168,8 @@ LearnerClassifRandomForestSRC = R6Class("LearnerClassifRandomForestSRC", newdata = data.table::setDF(ordered_features(task, self)) pars = self$param_set$get_values(tags = "predict") cores = pars$cores %??% 1L + pars$cores = NULL + pred = invoke(predict, object = self$model, newdata = newdata, diff --git a/R/learner_randomForestSRC_regr_rfsrc.R b/R/learner_randomForestSRC_regr_rfsrc.R index 3945e506b..14b178f4c 100644 --- a/R/learner_randomForestSRC_regr_rfsrc.R +++ b/R/learner_randomForestSRC_regr_rfsrc.R @@ -9,7 +9,7 @@ #' @template learner #' @templateVar id regr.rfsrc #' -#' @inheritSection mlr_learners_classif.rfsrc Initial 
parameter values +#' @inheritSection mlr_learners_classif.rfsrc Custom mlr3 parameters #' #' @references #' `r format_bib("breiman_2001")` @@ -140,6 +140,7 @@ LearnerRegrRandomForestSRC = R6Class("LearnerRegrRandomForestSRC", pv = convert_ratio(pv, "mtry", "mtry.ratio", length(task$feature_names)) pv = convert_ratio(pv, "sampsize", "sampsize.ratio", task$nrow) cores = pv$cores %??% 1L + pv$cores = NULL if ("weights" %in% task$properties) { pv$case.wt = as.numeric(task$weights$weight) # nolint @@ -154,6 +155,7 @@ LearnerRegrRandomForestSRC = R6Class("LearnerRegrRandomForestSRC", newdata = ordered_features(task, self) pars = self$param_set$get_values(tags = "predict") cores = pars$cores %??% 1L + pars$cores = NULL list( response = invoke(predict, diff --git a/R/learner_randomForestSRC_surv_rfsrc.R b/R/learner_randomForestSRC_surv_rfsrc.R index e72b1002e..9044eb5bd 100644 --- a/R/learner_randomForestSRC_surv_rfsrc.R +++ b/R/learner_randomForestSRC_surv_rfsrc.R @@ -9,7 +9,7 @@ #' @template learner #' @templateVar id surv.rfsrc #' -#' @inheritSection mlr_learners_classif.rfsrc Initial parameter values +#' @inheritSection mlr_learners_classif.rfsrc Custom mlr3 parameters #' #' @details #' [randomForestSRC::predict.rfsrc()] returns both cumulative hazard function (chf) and @@ -150,6 +150,7 @@ delayedAssign( pv = convert_ratio(pv, "mtry", "mtry.ratio", length(task$feature_names)) pv = convert_ratio(pv, "sampsize", "sampsize.ratio", task$nrow) cores = pv$cores %??% 1L + pv$cores = NULL if ("weights" %in% task$properties) { pv$case.wt = as.numeric(task$weights$weight) # nolint @@ -168,6 +169,7 @@ delayedAssign( pars_predict$estimator = NULL pars_predict$var.used = "FALSE" cores = pars_predict$cores %??% 1L # additionaly implemented by author + pars_predict$cores = NULL p = invoke(predict, object = self$model, newdata = newdata, .args = pars_predict, .opts = list(rf.cores = cores)) diff --git a/R/learner_randomForest_regr_randomForest.R 
b/R/learner_randomForest_regr_randomForest.R index 558043ab3..d6fa2455e 100644 --- a/R/learner_randomForest_regr_randomForest.R +++ b/R/learner_randomForest_regr_randomForest.R @@ -52,7 +52,7 @@ LearnerRegrRandomForest = R6Class("LearnerRegrRandomForest", id = "regr.randomForest", packages = c("mlr3extralearners", "randomForest"), feature_types = c("integer", "numeric", "factor", "ordered", "logical"), - predict_types = c("response"), + predict_types = "response", param_set = ps, properties = c("weights", "importance", "oob_error"), man = "mlr3extralearners::mlr_learners_regr.randomForest", diff --git a/R/learner_ranger_surv_ranger.R b/R/learner_ranger_surv_ranger.R index 7694317d9..7f68e6fcf 100644 --- a/R/learner_ranger_surv_ranger.R +++ b/R/learner_ranger_surv_ranger.R @@ -12,11 +12,8 @@ #' as `mtry = max(ceiling(mtry.ratio * n_features), 1)`. #' Note that `mtry` and `mtry.ratio` are mutually exclusive. #' -#' @section Custom mlr3 defaults: -#' - `num.threads`: -#' - Actual default: `NULL`, triggering auto-detection of the number of CPUs. -#' - Adjusted value: 1. -#' - Reason for change: Conflicting with parallelization via \CRANpkg{future}. +#' @section Initial parameter values: +#' - `num.threads` is initialized to 1 to avoid conflicts with parallelization via \CRANpkg{future}. 
#' #' @templateVar id surv.ranger #' @template learner diff --git a/R/learner_rsm_regr_rsm.R b/R/learner_rsm_regr_rsm.R index 6f9e7d5a3..cd7f5898b 100644 --- a/R/learner_rsm_regr_rsm.R +++ b/R/learner_rsm_regr_rsm.R @@ -36,7 +36,7 @@ LearnerRegrRSM = R6Class("LearnerRegrRSM", id = "regr.rsm", packages = "rsm", feature_types = c("integer", "numeric", "factor", "ordered"), - predict_types = c("response"), + predict_types = "response", param_set = param_set, properties = character(0), man = "mlr3extralearners::mlr_learners_regr.rsm", diff --git a/R/learner_survivalmodels_surv_dnnsurv.R b/R/learner_survivalmodels_surv_dnnsurv.R index 949ebfa06..131902689 100644 --- a/R/learner_survivalmodels_surv_dnnsurv.R +++ b/R/learner_survivalmodels_surv_dnnsurv.R @@ -17,11 +17,8 @@ #' The number of output channels should be of length `1` and number of input channels is #' the number of features plus number of cuts. #' -#' @section Custom mlr3 defaults: -#' - `verbose`: -#' - Actual default: `1L` -#' - Adjusted default: `0L` -#' - Reason for change: Prevents plotting. +#' @section Initial parameter values: +#' - `verbose` is initialized to 0. #' #' @references #' `r format_bib("zhao2019dnnsurv")` diff --git a/R/learner_xgboost_surv_xgboost.R b/R/learner_xgboost_surv_xgboost.R index 25680c02e..4a3d0687a 100644 --- a/R/learner_xgboost_surv_xgboost.R +++ b/R/learner_xgboost_surv_xgboost.R @@ -8,25 +8,11 @@ #' #' @template note_xgboost #' -#' @section Custom mlr3 defaults: -#' - `nrounds`: -#' - Actual default: no default. -#' - Adjusted default: 1. -#' - Reason for change: Without a default construction of the learner -#' would error. Just setting a nonsense default to workaround this. -#' `nrounds` needs to be tuned by the user. -#' - `nthread`: -#' - Actual value: Undefined, triggering auto-detection of the number of CPUs. -#' - Adjusted value: 1. -#' - Reason for change: Conflicting with parallelization via \CRANpkg{future}. -#' - `verbose`: -#' - Actual default: 1. 
-#' - Adjusted default: 0. -#' - Reason for change: Reduce verbosity. -#' - `objective`: -#' - Actual default: `reg:squarederror`. -#' - Adjusted default: `survival:cox`. -#' - Reason for change: Changed to a survival objective. +#' @section Initial parameter values: +#' - `nrounds` is initialized to 1. +#' - `nthread` is initialized to 1 to avoid conflicts with parallelization via \CRANpkg{future}. +#' - `verbose` is initialized to 0. +#' - `objective` is initialized to `survival:cox` for survival analysis. #' @section Early stopping: #' Early stopping can be used to find the optimal number of boosting rounds. #' The `early_stopping_set` parameter controls which set is used to monitor the performance. diff --git a/inst/templates/learner_template.R b/inst/templates/learner_template.R index 8b3f59435..9a04cde40 100644 --- a/inst/templates/learner_template.R +++ b/inst/templates/learner_template.R @@ -9,12 +9,9 @@ #' @section Initial parameter values: #' FIXME: DEVIATIONS FROM UPSTREAM PARAMETERS. DELETE IF NOT APPLICABLE. #' -#' @section Custom mlr3 defaults: +#' @section Custom mlr3 parameters: #' FIXME: DEVIATIONS FROM UPSTREAM DEFAULTS. DELETE IF NOT APPLICABLE. #' -#' @section Installation: -#' FIXME: CUSTOM INSTALLATION INSTRUCTIONS. DELETE IF NOT APPLICABLE. -#' #' @templateVar id . #' @template learner #' diff --git a/man/mlr_learners_classif.gbm.Rd b/man/mlr_learners_classif.gbm.Rd index 7525167ee..b8e12ae65 100644 --- a/man/mlr_learners_classif.gbm.Rd +++ b/man/mlr_learners_classif.gbm.Rd @@ -45,22 +45,11 @@ lrn("classif.gbm") } } -\section{Custom mlr3 defaults}{ +\section{Initial parameter values}{ \itemize{ -\item \code{keep.data}: -\itemize{ -\item Actual default: TRUE -\item Adjusted default: FALSE -\item Reason for change: \code{keep.data = FALSE} saves memory during model fitting. 
-} -\item \code{n.cores}: -\itemize{ -\item Actual default: NULL -\item Adjusted default: 1 -\item Reason for change: Suppressing the automatic internal parallelization if -\code{cv.folds} > 0. -} +\item \code{keep.data} is initialized to \code{FALSE} to save memory. +\item \code{n.cores} is initialized to 1 to avoid conflicts with parallelization through future. } } diff --git a/man/mlr_learners_classif.imbalanced_rfsrc.Rd b/man/mlr_learners_classif.imbalanced_rfsrc.Rd index 02c1ea9c8..5ec107b29 100644 --- a/man/mlr_learners_classif.imbalanced_rfsrc.Rd +++ b/man/mlr_learners_classif.imbalanced_rfsrc.Rd @@ -9,36 +9,6 @@ Imbalanced Random forest for classification between two classes. Calls \code{\link[randomForestSRC:imbalanced.rfsrc]{randomForestSRC::imbalanced.rfsrc()}} from from \CRANpkg{randomForestSRC}. } -\section{Custom mlr3 parameters}{ - -\itemize{ -\item \code{mtry}: -\itemize{ -\item This hyperparameter can alternatively be set via the added hyperparameter \code{mtry.ratio} -as \code{mtry = max(ceiling(mtry.ratio * n_features), 1)}. -Note that \code{mtry} and \code{mtry.ratio} are mutually exclusive. -} -\item \code{sampsize}: -\itemize{ -\item This hyperparameter can alternatively be set via the added hyperparameter \code{sampsize.ratio} -as \code{sampsize = max(ceiling(sampsize.ratio * n_obs), 1)}. -Note that \code{sampsize} and \code{sampsize.ratio} are mutually exclusive. -} -} -} - -\section{Custom mlr3 defaults}{ - -\itemize{ -\item \code{cores}: -\itemize{ -\item Actual default: Auto-detecting the number of cores -\item Adjusted default: 1 -\item Reason for change: Threading conflicts with explicit parallelization via \CRANpkg{future}. 
-} -} -} - \section{Dictionary}{ This \link{Learner} can be instantiated via the \link[mlr3misc:Dictionary]{dictionary} \link{mlr_learners} or with the associated sugar function \code{\link[=lrn]{lrn()}}: @@ -103,6 +73,26 @@ lrn("classif.imbalanced_rfsrc") } } +\section{Custom mlr3 parameters}{ + +\itemize{ +\item \code{mtry}: +\itemize{ +\item This hyperparameter can alternatively be set via the added hyperparameter \code{mtry.ratio} +as \code{mtry = max(ceiling(mtry.ratio * n_features), 1)}. +Note that \code{mtry} and \code{mtry.ratio} are mutually exclusive. +} +\item \code{sampsize}: +\itemize{ +\item This hyperparameter can alternatively be set via the added hyperparameter \code{sampsize.ratio} +as \code{sampsize = max(ceiling(sampsize.ratio * n_obs), 1)}. +Note that \code{sampsize} and \code{sampsize.ratio} are mutually exclusive. +} +\item \code{cores}: +This value is set as the option \code{rf.cores} during training and is set to 1 by default. +} +} + \examples{ learner = mlr3::lrn("classif.imbalanced_rfsrc") print(learner) diff --git a/man/mlr_learners_classif.lightgbm.Rd b/man/mlr_learners_classif.lightgbm.Rd index ff7d02281..1ecf1cd81 100644 --- a/man/mlr_learners_classif.lightgbm.Rd +++ b/man/mlr_learners_classif.lightgbm.Rd @@ -146,33 +146,27 @@ lrn("classif.lightgbm") \section{Initial parameter values}{ -\itemize{ -\item \code{convert_categorical}: -Additional parameter. If this parameter is set to \code{TRUE} (default), all factor and logical -columns are converted to integers and the parameter categorical_feature of lightgbm is set to -those columns. -\item \code{num_class}: -This parameter is automatically inferred for multiclass tasks and does not have to be set. -} -} - -\section{Custom mlr3 defaults}{ - \itemize{ \item \code{num_threads}: \itemize{ \item Actual default: 0L -\item Adjusted default: 1L +\item Initial value: 1L \item Reason for change: Prevents accidental conflicts with \code{future}. 
} \item \code{verbose}: \itemize{ \item Actual default: 1L -\item Adjusted default: -1L +\item Initial value: -1L \item Reason for change: Prevents accidental conflicts with mlr messaging system. } +} +} + +\section{Custom mlr3 defaults}{ + +\itemize{ \item \code{objective}: -Depending if the task is binary / multiclass, the default is set to \code{"binary"} or +Depending if the task is binary / multiclass, the default is \code{"binary"} or \code{"multiclasss"}. } } @@ -182,6 +176,12 @@ Depending if the task is binary / multiclass, the default is set to \code{"binar \itemize{ \item \code{early_stopping} Whether to use the test set for early stopping. Default is \code{FALSE}. +\item \code{convert_categorical}: +Additional parameter. If this parameter is set to \code{TRUE} (default), all factor and logical +columns are converted to integers and the parameter categorical_feature of lightgbm is set to +those columns. +\item \code{num_class}: +This parameter is automatically inferred for multiclass tasks and does not have to be set. } } diff --git a/man/mlr_learners_classif.rfsrc.Rd b/man/mlr_learners_classif.rfsrc.Rd index 042d8e287..cec2d7808 100644 --- a/man/mlr_learners_classif.rfsrc.Rd +++ b/man/mlr_learners_classif.rfsrc.Rd @@ -69,7 +69,7 @@ lrn("classif.rfsrc") } } -\section{Initial parameter values}{ +\section{Custom mlr3 parameters}{ \itemize{ \item \code{mtry}: @@ -84,18 +84,8 @@ Note that \code{mtry} and \code{mtry.ratio} are mutually exclusive. as \code{sampsize = max(ceiling(sampsize.ratio * n_obs), 1)}. Note that \code{sampsize} and \code{sampsize.ratio} are mutually exclusive. } -} -} - -\section{Custom mlr3 defaults}{ - -\itemize{ \item \code{cores}: -\itemize{ -\item Actual default: Auto-detecting the number of cores -\item Adjusted default: 1 -\item Reason for change: Threading conflicts with explicit parallelization via \CRANpkg{future}. -} +This value is set as the option \code{rf.cores} during training and is set to 1 by default. 
} } diff --git a/man/mlr_learners_regr.bart.Rd b/man/mlr_learners_regr.bart.Rd index d9377748a..e267f7cb6 100644 --- a/man/mlr_learners_regr.bart.Rd +++ b/man/mlr_learners_regr.bart.Rd @@ -57,7 +57,7 @@ lrn("regr.bart") } } -\section{Initial parameter values}{ +\section{Custom mlr3 parameters}{ \itemize{ \item Parameter: offset @@ -73,15 +73,10 @@ and therefore the offset parameter in \code{dbarts:::predict.bart} is irrelevant } } -\section{Custom mlr3 defaults}{ +\section{Initial parameter values}{ \itemize{ -\item Parameter: keeptrees -\itemize{ -\item Original: FALSE -\item New: TRUE -\item Reason: Required for prediction -} +\item \code{keeptrees} is initialized to \code{TRUE} because it is required for prediction. } } diff --git a/man/mlr_learners_regr.liblinear.Rd b/man/mlr_learners_regr.liblinear.Rd index 27f7760b9..06b926ac0 100644 --- a/man/mlr_learners_regr.liblinear.Rd +++ b/man/mlr_learners_regr.liblinear.Rd @@ -49,13 +49,13 @@ lrn("regr.liblinear") } } -\section{Custom mlr3 defaults}{ +\section{Initial parameter values}{ \itemize{ \item \code{svr_eps}: \itemize{ \item Actual default: \code{NULL} -\item Adjusted default: 0.001 +\item Initial value: 0.001 \item Reason for change: \code{svr_eps} is type dependent and the "type" is handled by the mlr3learner. The default value is set to th default of the respective "type". diff --git a/man/mlr_learners_regr.lightgbm.Rd b/man/mlr_learners_regr.lightgbm.Rd index a5b42ff0f..adda315f0 100644 --- a/man/mlr_learners_regr.lightgbm.Rd +++ b/man/mlr_learners_regr.lightgbm.Rd @@ -144,27 +144,17 @@ lrn("regr.lightgbm") \section{Initial parameter values}{ -\itemize{ -\item \code{convert_categorical}: -Additional parameter. If this parameter is set to \code{TRUE} (default), all factor and logical -columns are converted to integers and the parameter categorical_feature of lightgbm is set to -those columns. 
-}
-}
-
-\section{Custom mlr3 defaults}{
-
 \itemize{
 \item \code{num_threads}:
 \itemize{
 \item Actual default: 0L
-\item Adjusted default: 1L
+\item Initial value: 1L
 \item Reason for change: Prevents accidental conflicts with \code{future}.
 }
 \item \code{verbose}:
 \itemize{
 \item Actual default: 1L
-\item Adjusted default: -1L
+\item Initial value: -1L
 \item Reason for change: Prevents accidental conflicts with mlr messaging system.
 }
 }
@@ -175,6 +165,10 @@ those columns.
 \itemize{
 \item \code{early_stopping}
 Whether to use the test set for early stopping. Default is \code{FALSE}.
+\item \code{convert_categorical}:
+Additional parameter. If this parameter is set to \code{TRUE} (default), all factor and logical
+columns are converted to integers and the parameter categorical_feature of lightgbm is set to
+those columns.
 }
 }
 
diff --git a/man/mlr_learners_regr.rfsrc.Rd b/man/mlr_learners_regr.rfsrc.Rd
index da969843e..e82f785bb 100644
--- a/man/mlr_learners_regr.rfsrc.Rd
+++ b/man/mlr_learners_regr.rfsrc.Rd
@@ -69,7 +69,7 @@ lrn("regr.rfsrc")
 }
 }
 
-\section{Initial parameter values}{
+\section{Custom mlr3 parameters}{
 
 \itemize{
 \item \code{mtry}:
@@ -84,6 +84,8 @@ Note that \code{mtry} and \code{mtry.ratio} are mutually exclusive.
 as \code{sampsize = max(ceiling(sampsize.ratio * n_obs), 1)}.
 Note that \code{sampsize} and \code{sampsize.ratio} are mutually exclusive.
 }
+\item \code{cores}:
+This value is set as the option \code{rf.cores} during training and is set to 1 by default.
 }
 }
 
diff --git a/man/mlr_learners_surv.cv_glmnet.Rd b/man/mlr_learners_surv.cv_glmnet.Rd
index 9f4ec5814..8e8b2857e 100644
--- a/man/mlr_learners_surv.cv_glmnet.Rd
+++ b/man/mlr_learners_surv.cv_glmnet.Rd
@@ -8,10 +8,10 @@
 Generalized linear models with elastic net regularization.
 Calls \code{\link[glmnet:cv.glmnet]{glmnet::cv.glmnet()}} from package \CRANpkg{glmnet}.
} -\section{Custom mlr3 defaults}{ +\section{Custom mlr3 parameters}{ \itemize{ -\item \code{family} The default is set to \code{"cox"}. +\item \code{family} is set to \code{"cox"} and cannot be changed. } } diff --git a/man/mlr_learners_surv.dnnsurv.Rd b/man/mlr_learners_surv.dnnsurv.Rd index d17065603..b4905d870 100644 --- a/man/mlr_learners_surv.dnnsurv.Rd +++ b/man/mlr_learners_surv.dnnsurv.Rd @@ -90,15 +90,10 @@ Package 'survivalmodels' is not on CRAN and has to be install from GitHub via \code{remotes::install_github("RaphaelS1/survivalmodels")}. } -\section{Custom mlr3 defaults}{ +\section{Initial parameter values}{ \itemize{ -\item \code{verbose}: -\itemize{ -\item Actual default: \code{1L} -\item Adjusted default: \code{0L} -\item Reason for change: Prevents plotting. -} +\item \code{verbose} is initialized to 0. } } diff --git a/man/mlr_learners_surv.flexible.Rd b/man/mlr_learners_surv.flexible.Rd index 068a9b373..eeedf3a9e 100644 --- a/man/mlr_learners_surv.flexible.Rd +++ b/man/mlr_learners_surv.flexible.Rd @@ -60,13 +60,13 @@ lrn("surv.flexible") } } -\section{Custom mlr3 defaults}{ +\section{Initial parameter values}{ \itemize{ \item \code{k}: \itemize{ \item Actual default: \code{0} -\item Adjusted default: \code{1} +\item Initial value: \code{1} \item Reason for change: The default value of \code{0} is equivalent to, and a much less efficient implementation of, \link{LearnerSurvParametric}. } diff --git a/man/mlr_learners_surv.glmnet.Rd b/man/mlr_learners_surv.glmnet.Rd index 35b579b44..4504e4416 100644 --- a/man/mlr_learners_surv.glmnet.Rd +++ b/man/mlr_learners_surv.glmnet.Rd @@ -7,6 +7,9 @@ \description{ Generalized linear models with elastic net regularization. Calls \code{\link[glmnet:glmnet]{glmnet::glmnet()}} from package \CRANpkg{glmnet}. +\itemize{ +\item \code{family} is set to \code{"cox"} and cannot be changed. 
+} } \details{ Caution: This learner is different to learners calling \code{\link[glmnet:cv.glmnet]{glmnet::cv.glmnet()}} @@ -25,13 +28,6 @@ However, in some situations this is not applicable, usually when data are imbalanced or not i.i.d. (longitudinal, time-series) and tuning requires custom resampling strategies (blocked design, stratification). } -\section{Custom mlr3 defaults}{ - -\itemize{ -\item \code{family} The default is set to \code{"cox"}. -} -} - \section{Dictionary}{ This \link{Learner} can be instantiated via the \link[mlr3misc:Dictionary]{dictionary} \link{mlr_learners} or with the associated sugar function \code{\link[=lrn]{lrn()}}: diff --git a/man/mlr_learners_surv.obliqueRSF.Rd b/man/mlr_learners_surv.obliqueRSF.Rd index ea974c7b1..24c45e68e 100644 --- a/man/mlr_learners_surv.obliqueRSF.Rd +++ b/man/mlr_learners_surv.obliqueRSF.Rd @@ -52,7 +52,7 @@ lrn("surv.obliqueRSF") } } -\section{Initial parameter values}{ +\section{Custom mlr3 parameters}{ \itemize{ \item \code{mtry}: @@ -64,15 +64,10 @@ Note that \code{mtry} and \code{mtry_ratio} are mutually exclusive. } } -\section{Custom mlr3 defaults}{ +\section{Initial parameter values}{ \itemize{ -\item \code{verbose}: -\itemize{ -\item Actual default: \code{TRUE} -\item Adjusted default: \code{FALSE} -\item Reason for change: mlr3 already has it's own verbose set to \code{TRUE} by default -} +\item \code{verbose} is initialized to \code{FALSE} } } diff --git a/man/mlr_learners_surv.ranger.Rd b/man/mlr_learners_surv.ranger.Rd index 84a941e35..91392bc16 100644 --- a/man/mlr_learners_surv.ranger.Rd +++ b/man/mlr_learners_surv.ranger.Rd @@ -18,17 +18,10 @@ as \code{mtry = max(ceiling(mtry.ratio * n_features), 1)}. Note that \code{mtry} and \code{mtry.ratio} are mutually exclusive. } } -} -\section{Custom mlr3 defaults}{ \itemize{ -\item \code{num.threads}: -\itemize{ -\item Actual default: \code{NULL}, triggering auto-detection of the number of CPUs. -\item Adjusted value: 1. 
-\item Reason for change: Conflicting with parallelization via \CRANpkg{future}. -} +\item \code{num.threads} is initialized to 1 to avoid conflicts with parallelization via \CRANpkg{future}. } } diff --git a/man/mlr_learners_surv.rfsrc.Rd b/man/mlr_learners_surv.rfsrc.Rd index 2f093af16..6223aa554 100644 --- a/man/mlr_learners_surv.rfsrc.Rd +++ b/man/mlr_learners_surv.rfsrc.Rd @@ -77,7 +77,7 @@ lrn("surv.rfsrc") } } -\section{Initial parameter values}{ +\section{Custom mlr3 parameters}{ \itemize{ \item \code{mtry}: @@ -92,6 +92,8 @@ Note that \code{mtry} and \code{mtry.ratio} are mutually exclusive. as \code{sampsize = max(ceiling(sampsize.ratio * n_obs), 1)}. Note that \code{sampsize} and \code{sampsize.ratio} are mutually exclusive. } +\item \code{cores}: +This value is set as the option \code{rf.cores} during training and is set to 1 by default. } } diff --git a/man/mlr_learners_surv.xgboost.Rd b/man/mlr_learners_surv.xgboost.Rd index 2d845e640..1953e93d4 100644 --- a/man/mlr_learners_surv.xgboost.Rd +++ b/man/mlr_learners_surv.xgboost.Rd @@ -13,35 +13,13 @@ To compute on GPUs, you first need to compile \CRANpkg{xgboost} yourself and lin against CUDA. See \url{https://xgboost.readthedocs.io/en/stable/build.html#building-with-gpu-support}. } -\section{Custom mlr3 defaults}{ +\section{Initial parameter values}{ \itemize{ -\item \code{nrounds}: -\itemize{ -\item Actual default: no default. -\item Adjusted default: 1. -\item Reason for change: Without a default construction of the learner -would error. Just setting a nonsense default to workaround this. -\code{nrounds} needs to be tuned by the user. -} -\item \code{nthread}: -\itemize{ -\item Actual value: Undefined, triggering auto-detection of the number of CPUs. -\item Adjusted value: 1. -\item Reason for change: Conflicting with parallelization via \CRANpkg{future}. -} -\item \code{verbose}: -\itemize{ -\item Actual default: 1. -\item Adjusted default: 0. -\item Reason for change: Reduce verbosity. 
-} -\item \code{objective}: -\itemize{ -\item Actual default: \code{reg:squarederror}. -\item Adjusted default: \code{survival:cox}. -\item Reason for change: Changed to a survival objective. -} +\item \code{nrounds} is initialized to 1. +\item \code{nthread} is initialized to 1 to avoid conflicts with parallelization via \CRANpkg{future}. +\item \code{verbose} is initialized to 0. +\item \code{objective} is initialized to \code{survival:cox} for survival analysis. } } diff --git a/tests/testthat/test_paramtest_lightgbm_classif_lightgbm.R b/tests/testthat/test_paramtest_lightgbm_classif_lightgbm.R index 0b41a4aa1..35e8dc43c 100644 --- a/tests/testthat/test_paramtest_lightgbm_classif_lightgbm.R +++ b/tests/testthat/test_paramtest_lightgbm_classif_lightgbm.R @@ -80,7 +80,10 @@ test_that("paramtest classif.lightgbm train", { "valids", # handled internally "obj", # alias for objective "colnames", # we don't want to allow overwriting any names - "init_model" # handled internally via hotstarting + "init_model", # handled internally via hotstarting + + + "lambdarank_position_bias_regularization" # only for lambdarank ) diff --git a/tests/testthat/test_paramtest_lightgbm_regr_lightgbm.R b/tests/testthat/test_paramtest_lightgbm_regr_lightgbm.R index 8d6987717..16855f199 100644 --- a/tests/testthat/test_paramtest_lightgbm_regr_lightgbm.R +++ b/tests/testthat/test_paramtest_lightgbm_regr_lightgbm.R @@ -85,7 +85,9 @@ test_that("paramtest regr.lightgbm train", { "valids", # handled internally "obj", # alias for objective "colnames", # we don't want to allow overwriting any names - "init_model" # handled internally via hotstarting + "init_model", # handled internally via hotstarting + + "lambdarank_position_bias_regularization" # only for lambdarank )