diff --git a/dev/pkgdown.yml b/dev/pkgdown.yml index fa6f5c03..d5e0ac7c 100644 --- a/dev/pkgdown.yml +++ b/dev/pkgdown.yml @@ -3,7 +3,7 @@ pkgdown: 2.1.1 pkgdown_sha: ~ articles: extending: extending.html -last_built: 2024-11-25T08:44Z +last_built: 2024-11-26T14:00Z urls: reference: https://mlr3tuning.mlr-org.com/reference article: https://mlr3tuning.mlr-org.com/articles diff --git a/dev/reference/TuningInstanceAsyncMultiCrit.html b/dev/reference/TuningInstanceAsyncMultiCrit.html index e6f30cdb..d49765ea 100644 --- a/dev/reference/TuningInstanceAsyncMultiCrit.html +++ b/dev/reference/TuningInstanceAsyncMultiCrit.html @@ -228,7 +228,6 @@

Usage ydt, learner_param_vals = NULL, extra = NULL, - xydt = NULL, ... )

@@ -257,11 +256,6 @@

Argumentsdata.table::data.table())
-Point, outcome, and additional information.

- -
...

(any)
ignored.

diff --git a/dev/reference/TuningInstanceAsyncSingleCrit.html b/dev/reference/TuningInstanceAsyncSingleCrit.html index 1d471c74..45287bb5 100644 --- a/dev/reference/TuningInstanceAsyncSingleCrit.html +++ b/dev/reference/TuningInstanceAsyncSingleCrit.html @@ -248,7 +248,6 @@

Usage y, learner_param_vals = NULL, extra = NULL, - xydt = NULL, ... )

@@ -277,11 +276,6 @@

Argumentsdata.table::data.table())
-Point, outcome, and additional information (Deprecated).

- -
...

(any)
ignored.

diff --git a/dev/reference/TuningInstanceBatchMultiCrit.html b/dev/reference/TuningInstanceBatchMultiCrit.html index 81f3fa4c..d2350abf 100644 --- a/dev/reference/TuningInstanceBatchMultiCrit.html +++ b/dev/reference/TuningInstanceBatchMultiCrit.html @@ -234,7 +234,6 @@

Usage ydt, learner_param_vals = NULL, extra = NULL, - xydt = NULL, ... )

@@ -263,11 +262,6 @@

Argumentsdata.table::data.table())
-Point, outcome, and additional information (Deprecated).

- -
...

(any)
ignored.

@@ -320,24 +314,30 @@

Examples # Run tuning tuner$optimize(instance) -#> cp learner_param_vals x_domain classif.ce time_train -#> <num> <list> <list> <num> <num> -#> 1: -3.759791 <list[2]> <list[1]> 0.09583016 0.002666667 +#> cp learner_param_vals x_domain classif.ce time_train +#> <num> <list> <list> <num> <num> +#> 1: -3.259804 <list[2]> <list[1]> 0.09583016 0.003 +#> 2: -3.759791 <list[2]> <list[1]> 0.09583016 0.003 +#> 3: -2.565382 <list[2]> <list[1]> 0.09583016 0.003 +#> 4: -3.080830 <list[2]> <list[1]> 0.09583016 0.003 # Optimal hyperparameter configurations instance$result -#> cp learner_param_vals x_domain classif.ce time_train -#> <num> <list> <list> <num> <num> -#> 1: -3.759791 <list[2]> <list[1]> 0.09583016 0.002666667 +#> cp learner_param_vals x_domain classif.ce time_train +#> <num> <list> <list> <num> <num> +#> 1: -3.259804 <list[2]> <list[1]> 0.09583016 0.003 +#> 2: -3.759791 <list[2]> <list[1]> 0.09583016 0.003 +#> 3: -2.565382 <list[2]> <list[1]> 0.09583016 0.003 +#> 4: -3.080830 <list[2]> <list[1]> 0.09583016 0.003 # Inspect all evaluated configurations as.data.table(instance$archive) -#> cp classif.ce time_train runtime_learners timestamp -#> <num> <num> <num> <num> <POSc> -#> 1: -3.259804 0.09583016 0.003333333 0.017 2024-11-25 08:44:13 -#> 2: -3.759791 0.09583016 0.002666667 0.014 2024-11-25 08:44:13 -#> 3: -2.565382 0.09583016 0.003000000 0.015 2024-11-25 08:44:13 -#> 4: -3.080830 0.09583016 0.003000000 0.015 2024-11-25 08:44:13 +#> cp classif.ce time_train runtime_learners timestamp +#> <num> <num> <num> <num> <POSc> +#> 1: -3.259804 0.09583016 0.003 0.016 2024-11-26 14:00:42 +#> 2: -3.759791 0.09583016 0.003 0.016 2024-11-26 14:00:42 +#> 3: -2.565382 0.09583016 0.003 0.017 2024-11-26 14:00:42 +#> 4: -3.080830 0.09583016 0.003 0.016 2024-11-26 14:00:42 #> warnings errors x_domain batch_nr resample_result #> <int> <int> <list> <int> <list> #> 1: 0 0 <list[1]> 1 <ResampleResult> diff --git a/dev/reference/TuningInstanceBatchSingleCrit.html 
b/dev/reference/TuningInstanceBatchSingleCrit.html index 780751e6..e7a9cbbc 100644 --- a/dev/reference/TuningInstanceBatchSingleCrit.html +++ b/dev/reference/TuningInstanceBatchSingleCrit.html @@ -244,7 +244,6 @@

Usage y, learner_param_vals = NULL, extra = NULL, - xydt = NULL, ... )

@@ -273,11 +272,6 @@

Argumentsdata.table::data.table())
-Point, outcome, and additional information (Deprecated).

- -
...

(any)
ignored.

@@ -344,10 +338,10 @@

Examplesas.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> <num> <num> <num> <POSc> <int> <int> -#> 1: -3.036646 0.06392067 0.015 2024-11-25 08:44:14 0 0 -#> 2: -5.238604 0.06392067 0.034 2024-11-25 08:44:14 0 0 -#> 3: -7.255326 0.06392067 0.016 2024-11-25 08:44:14 0 0 -#> 4: -6.314690 0.06392067 0.015 2024-11-25 08:44:14 0 0 +#> 1: -3.036646 0.06392067 0.017 2024-11-26 14:00:43 0 0 +#> 2: -5.238604 0.06392067 0.037 2024-11-26 14:00:43 0 0 +#> 3: -7.255326 0.06392067 0.017 2024-11-26 14:00:43 0 0 +#> 4: -6.314690 0.06392067 0.016 2024-11-26 14:00:43 0 0 #> x_domain batch_nr resample_result #> <list> <int> <list> #> 1: <list[1]> 1 <ResampleResult> diff --git a/dev/reference/extract_inner_tuning_archives.html b/dev/reference/extract_inner_tuning_archives.html index 73865879..bb5c5df4 100644 --- a/dev/reference/extract_inner_tuning_archives.html +++ b/dev/reference/extract_inner_tuning_archives.html @@ -142,24 +142,24 @@

Examplesextract_inner_tuning_archives(rr) #> iteration cp classif.ce x_domain_cp runtime_learners #> <int> <num> <num> <num> <num> -#> 1: 1 -6.819407 0.04 0.001092369 0.004 +#> 1: 1 -6.819407 0.04 0.001092369 0.005 #> 2: 1 -6.361894 0.04 0.001726095 0.005 -#> 3: 1 -5.017906 0.04 0.006618373 0.004 -#> 4: 1 -4.487537 0.04 0.011248315 0.004 +#> 3: 1 -5.017906 0.04 0.006618373 0.005 +#> 4: 1 -4.487537 0.04 0.011248315 0.005 #> 5: 2 -2.771268 0.08 0.062582599 0.005 #> 6: 2 -5.852816 0.08 0.002871801 0.005 #> 7: 2 -6.365882 0.08 0.001719224 0.005 #> 8: 2 -3.185002 0.08 0.041378177 0.004 #> timestamp warnings errors batch_nr resample_result task_id #> <POSc> <int> <int> <int> <list> <char> -#> 1: 2024-11-25 08:44:18 0 0 1 <ResampleResult> iris -#> 2: 2024-11-25 08:44:18 0 0 2 <ResampleResult> iris -#> 3: 2024-11-25 08:44:18 0 0 3 <ResampleResult> iris -#> 4: 2024-11-25 08:44:18 0 0 4 <ResampleResult> iris -#> 5: 2024-11-25 08:44:18 0 0 1 <ResampleResult> iris -#> 6: 2024-11-25 08:44:18 0 0 2 <ResampleResult> iris -#> 7: 2024-11-25 08:44:18 0 0 3 <ResampleResult> iris -#> 8: 2024-11-25 08:44:18 0 0 4 <ResampleResult> iris +#> 1: 2024-11-26 14:00:47 0 0 1 <ResampleResult> iris +#> 2: 2024-11-26 14:00:47 0 0 2 <ResampleResult> iris +#> 3: 2024-11-26 14:00:47 0 0 3 <ResampleResult> iris +#> 4: 2024-11-26 14:00:47 0 0 4 <ResampleResult> iris +#> 5: 2024-11-26 14:00:47 0 0 1 <ResampleResult> iris +#> 6: 2024-11-26 14:00:47 0 0 2 <ResampleResult> iris +#> 7: 2024-11-26 14:00:47 0 0 3 <ResampleResult> iris +#> 8: 2024-11-26 14:00:47 0 0 4 <ResampleResult> iris #> learner_id resampling_id #> <char> <char> #> 1: classif.rpart.tuned cv diff --git a/dev/reference/mlr_tuners_cmaes.html b/dev/reference/mlr_tuners_cmaes.html index d47f4384..ac8a17d7 100644 --- a/dev/reference/mlr_tuners_cmaes.html +++ b/dev/reference/mlr_tuners_cmaes.html @@ -205,28 +205,28 @@

Examplesas.data.table(instance$archive) #> cp minbucket minsplit classif.ce runtime_learners #> <num> <num> <num> <num> <num> -#> 1: -7.336334 15.209063 107.23382 0.07826087 0.006 -#> 2: -9.210340 64.000000 22.89758 0.12173913 0.005 -#> 3: -2.621780 31.763900 128.00000 0.07826087 0.005 +#> 1: -7.336334 15.209063 107.23382 0.07826087 0.005 +#> 2: -9.210340 64.000000 22.89758 0.12173913 0.025 +#> 3: -2.621780 31.763900 128.00000 0.07826087 0.006 #> 4: -2.302585 1.000000 106.26335 0.07826087 0.006 -#> 5: -2.302585 62.039211 128.00000 0.12173913 0.005 +#> 5: -2.302585 62.039211 128.00000 0.12173913 0.006 #> 6: -4.416664 54.268412 108.94055 0.07826087 0.006 -#> 7: -2.302585 4.755131 72.28910 0.07826087 0.023 +#> 7: -2.302585 4.755131 72.28910 0.07826087 0.006 #> 8: -4.734599 30.835601 24.51517 0.07826087 0.005 #> 9: -9.210340 39.906483 97.63893 0.07826087 0.006 #> 10: -6.242816 18.946310 96.50841 0.07826087 0.006 #> timestamp warnings errors x_domain batch_nr resample_result #> <POSc> <int> <int> <list> <int> <list> -#> 1: 2024-11-25 08:44:27 0 0 <list[3]> 1 <ResampleResult> -#> 2: 2024-11-25 08:44:27 0 0 <list[3]> 2 <ResampleResult> -#> 3: 2024-11-25 08:44:27 0 0 <list[3]> 3 <ResampleResult> -#> 4: 2024-11-25 08:44:27 0 0 <list[3]> 4 <ResampleResult> -#> 5: 2024-11-25 08:44:27 0 0 <list[3]> 5 <ResampleResult> -#> 6: 2024-11-25 08:44:27 0 0 <list[3]> 6 <ResampleResult> -#> 7: 2024-11-25 08:44:27 0 0 <list[3]> 7 <ResampleResult> -#> 8: 2024-11-25 08:44:27 0 0 <list[3]> 8 <ResampleResult> -#> 9: 2024-11-25 08:44:27 0 0 <list[3]> 9 <ResampleResult> -#> 10: 2024-11-25 08:44:27 0 0 <list[3]> 10 <ResampleResult> +#> 1: 2024-11-26 14:00:57 0 0 <list[3]> 1 <ResampleResult> +#> 2: 2024-11-26 14:00:57 0 0 <list[3]> 2 <ResampleResult> +#> 3: 2024-11-26 14:00:57 0 0 <list[3]> 3 <ResampleResult> +#> 4: 2024-11-26 14:00:57 0 0 <list[3]> 4 <ResampleResult> +#> 5: 2024-11-26 14:00:57 0 0 <list[3]> 5 <ResampleResult> +#> 6: 2024-11-26 14:00:57 0 0 <list[3]> 6 <ResampleResult> +#> 7: 
2024-11-26 14:00:57 0 0 <list[3]> 7 <ResampleResult> +#> 8: 2024-11-26 14:00:57 0 0 <list[3]> 8 <ResampleResult> +#> 9: 2024-11-26 14:00:57 0 0 <list[3]> 9 <ResampleResult> +#> 10: 2024-11-26 14:00:57 0 0 <list[3]> 10 <ResampleResult> # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals diff --git a/dev/reference/mlr_tuners_design_points.html b/dev/reference/mlr_tuners_design_points.html index 48cb4013..a1c471df 100644 --- a/dev/reference/mlr_tuners_design_points.html +++ b/dev/reference/mlr_tuners_design_points.html @@ -227,9 +227,9 @@

Examplesas.data.table(instance$archive) #> cp minbucket minsplit classif.ce runtime_learners timestamp #> <num> <num> <num> <num> <num> <POSc> -#> 1: 0.100 64 2 0.09565217 0.006 2024-11-25 08:44:28 -#> 2: 0.010 32 64 0.07826087 0.005 2024-11-25 08:44:28 -#> 3: 0.001 1 128 0.07826087 0.005 2024-11-25 08:44:28 +#> 1: 0.100 64 2 0.09565217 0.006 2024-11-26 14:00:58 +#> 2: 0.010 32 64 0.07826087 0.006 2024-11-26 14:00:58 +#> 3: 0.001 1 128 0.07826087 0.006 2024-11-26 14:00:58 #> warnings errors x_domain batch_nr resample_result #> <int> <int> <list> <int> <list> #> 1: 0 0 <list[3]> 1 <ResampleResult> diff --git a/dev/reference/mlr_tuners_gensa.html b/dev/reference/mlr_tuners_gensa.html index 2590ee06..1ad785be 100644 --- a/dev/reference/mlr_tuners_gensa.html +++ b/dev/reference/mlr_tuners_gensa.html @@ -247,16 +247,16 @@

Examplesas.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> <num> <num> <num> <POSc> <int> <int> -#> 1: -5.721042 0.04347826 0.006 2024-11-25 08:44:29 0 0 -#> 2: -2.850714 0.06086957 0.005 2024-11-25 08:44:29 0 0 -#> 3: -7.568995 0.04347826 0.005 2024-11-25 08:44:29 0 0 -#> 4: -5.721042 0.04347826 0.006 2024-11-25 08:44:29 0 0 -#> 5: -5.721042 0.04347826 0.005 2024-11-25 08:44:29 0 0 -#> 6: -5.721042 0.04347826 0.005 2024-11-25 08:44:29 0 0 -#> 7: -5.148938 0.04347826 0.006 2024-11-25 08:44:30 0 0 -#> 8: -6.293146 0.04347826 0.005 2024-11-25 08:44:30 0 0 -#> 9: -6.007094 0.04347826 0.005 2024-11-25 08:44:30 0 0 -#> 10: -5.434990 0.04347826 0.006 2024-11-25 08:44:30 0 0 +#> 1: -5.721042 0.04347826 0.006 2024-11-26 14:00:59 0 0 +#> 2: -2.850714 0.06086957 0.006 2024-11-26 14:00:59 0 0 +#> 3: -7.568995 0.04347826 0.006 2024-11-26 14:00:59 0 0 +#> 4: -5.721042 0.04347826 0.006 2024-11-26 14:00:59 0 0 +#> 5: -5.721042 0.04347826 0.005 2024-11-26 14:00:59 0 0 +#> 6: -5.721042 0.04347826 0.006 2024-11-26 14:00:59 0 0 +#> 7: -5.148938 0.04347826 0.006 2024-11-26 14:01:00 0 0 +#> 8: -6.293146 0.04347826 0.006 2024-11-26 14:01:00 0 0 +#> 9: -6.007094 0.04347826 0.006 2024-11-26 14:01:00 0 0 +#> 10: -5.434990 0.04347826 0.006 2024-11-26 14:01:00 0 0 #> x_domain batch_nr resample_result #> <list> <int> <list> #> 1: <list[1]> 1 <ResampleResult> diff --git a/dev/reference/mlr_tuners_grid_search.html b/dev/reference/mlr_tuners_grid_search.html index 8df8581f..1818c904 100644 --- a/dev/reference/mlr_tuners_grid_search.html +++ b/dev/reference/mlr_tuners_grid_search.html @@ -219,16 +219,16 @@

Examplesas.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> <num> <num> <num> <POSc> <int> <int> -#> 1: -3.070113 0.06956522 0.024 2024-11-25 08:44:31 0 0 -#> 2: -4.605170 0.06956522 0.005 2024-11-25 08:44:31 0 0 -#> 3: -8.442812 0.06956522 0.006 2024-11-25 08:44:31 0 0 -#> 4: -3.837642 0.06956522 0.006 2024-11-25 08:44:31 0 0 -#> 5: -5.372699 0.06956522 0.005 2024-11-25 08:44:31 0 0 -#> 6: -2.302585 0.06956522 0.005 2024-11-25 08:44:31 0 0 -#> 7: -7.675284 0.06956522 0.005 2024-11-25 08:44:31 0 0 -#> 8: -6.907755 0.06956522 0.005 2024-11-25 08:44:31 0 0 -#> 9: -9.210340 0.06956522 0.005 2024-11-25 08:44:31 0 0 -#> 10: -6.140227 0.06956522 0.006 2024-11-25 08:44:31 0 0 +#> 1: -3.070113 0.06956522 0.008 2024-11-26 14:01:01 0 0 +#> 2: -4.605170 0.06956522 0.006 2024-11-26 14:01:01 0 0 +#> 3: -8.442812 0.06956522 0.007 2024-11-26 14:01:01 0 0 +#> 4: -3.837642 0.06956522 0.007 2024-11-26 14:01:01 0 0 +#> 5: -5.372699 0.06956522 0.007 2024-11-26 14:01:01 0 0 +#> 6: -2.302585 0.06956522 0.006 2024-11-26 14:01:01 0 0 +#> 7: -7.675284 0.06956522 0.007 2024-11-26 14:01:01 0 0 +#> 8: -6.907755 0.06956522 0.006 2024-11-26 14:01:01 0 0 +#> 9: -9.210340 0.06956522 0.005 2024-11-26 14:01:01 0 0 +#> 10: -6.140227 0.06956522 0.006 2024-11-26 14:01:01 0 0 #> x_domain batch_nr resample_result #> <list> <int> <list> #> 1: <list[1]> 1 <ResampleResult> diff --git a/dev/reference/mlr_tuners_irace.html b/dev/reference/mlr_tuners_irace.html index 041c4d7c..e9043b5c 100644 --- a/dev/reference/mlr_tuners_irace.html +++ b/dev/reference/mlr_tuners_irace.html @@ -238,7 +238,7 @@

Examples measure = msr("classif.ce"), term_evals = 42 ) -#> # 2024-11-25 08:44:34 UTC: Initialization +#> # 2024-11-26 14:01:04 UTC: Initialization #> # Elitist race #> # Elitist new instances: 1 #> # Elitist limit: 2 @@ -251,7 +251,7 @@

Examples#> # mu: 5 #> # deterministic: FALSE #> -#> # 2024-11-25 08:44:34 UTC: Iteration 1 of 2 +#> # 2024-11-26 14:01:04 UTC: Iteration 1 of 2 #> # experimentsUsedSoFar: 0 #> # remainingBudget: 42 #> # currentBudget: 21 @@ -278,10 +278,10 @@

Examples#> .ID. cp .PARENT. #> 3 3 -2.7229877489945 NA #> -#> # 2024-11-25 08:44:34 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks): +#> # 2024-11-26 14:01:05 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks): #> cp #> 3 -2.7229877489945 -#> # 2024-11-25 08:44:34 UTC: Iteration 2 of 2 +#> # 2024-11-26 14:01:05 UTC: Iteration 2 of 2 #> # experimentsUsedSoFar: 15 #> # remainingBudget: 27 #> # currentBudget: 27 @@ -310,11 +310,11 @@

Examples#> .ID. cp .PARENT. #> 5 5 -3.17982221206359 3 #> -#> # 2024-11-25 08:44:35 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks): +#> # 2024-11-26 14:01:06 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks): #> cp #> 5 -3.17982221206359 #> 6 -3.29396533989700 -#> # 2024-11-25 08:44:35 UTC: Stopped because there is not enough budget left to race more than the minimum (2) +#> # 2024-11-26 14:01:06 UTC: Stopped because there is not enough budget left to race more than the minimum (2) #> # You may either increase the budget or set 'minNbSurvival' to a lower value #> # Iteration: 3 #> # nbIterations: 3 @@ -324,7 +324,7 @@

Examples#> # currentBudget: 4 #> # number of elites: 2 #> # nbConfigurations: 2 -#> # Total CPU user time: 1.379, CPU sys time: 0.024, Wall-clock time: 1.409 +#> # Total CPU user time: 1.491, CPU sys time: 0.016, Wall-clock time: 1.511 # best performing hyperparameter configuration instance$result @@ -336,44 +336,44 @@

Examplesas.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp race step #> <num> <num> <num> <POSc> <num> <int> -#> 1: -8.192526 0.3085938 0.007 2024-11-25 08:44:34 1 1 -#> 2: -8.612223 0.3085938 0.007 2024-11-25 08:44:34 1 1 -#> 3: -2.722988 0.2812500 0.007 2024-11-25 08:44:34 1 1 -#> 4: -8.192526 0.3359375 0.007 2024-11-25 08:44:34 1 1 -#> 5: -8.612223 0.3359375 0.007 2024-11-25 08:44:34 1 1 -#> 6: -2.722988 0.2539062 0.007 2024-11-25 08:44:34 1 1 -#> 7: -8.192526 0.2851562 0.007 2024-11-25 08:44:34 1 1 -#> 8: -8.612223 0.2851562 0.010 2024-11-25 08:44:34 1 1 -#> 9: -2.722988 0.2460938 0.007 2024-11-25 08:44:34 1 1 -#> 10: -8.192526 0.2617188 0.007 2024-11-25 08:44:34 1 1 -#> 11: -8.612223 0.2617188 0.007 2024-11-25 08:44:34 1 1 -#> 12: -2.722988 0.2148438 0.007 2024-11-25 08:44:34 1 1 -#> 13: -8.192526 0.2382812 0.007 2024-11-25 08:44:34 1 1 -#> 14: -8.612223 0.2382812 0.006 2024-11-25 08:44:34 1 1 -#> 15: -2.722988 0.2304688 0.006 2024-11-25 08:44:34 1 1 -#> 16: -2.722988 0.2890625 0.006 2024-11-25 08:44:34 2 1 -#> 17: -3.680872 0.2734375 0.006 2024-11-25 08:44:34 2 1 -#> 18: -3.179822 0.2734375 0.006 2024-11-25 08:44:34 2 1 -#> 19: -3.293965 0.2734375 0.006 2024-11-25 08:44:34 2 1 -#> 20: -3.680872 0.2460938 0.007 2024-11-25 08:44:35 2 1 -#> 21: -3.179822 0.2539062 0.007 2024-11-25 08:44:35 2 1 -#> 22: -3.293965 0.2539062 0.006 2024-11-25 08:44:35 2 1 -#> 23: -3.680872 0.2148438 0.007 2024-11-25 08:44:35 2 1 -#> 24: -3.179822 0.2148438 0.006 2024-11-25 08:44:35 2 1 -#> 25: -3.293965 0.2148438 0.007 2024-11-25 08:44:35 2 1 -#> 26: -3.680872 0.2460938 0.006 2024-11-25 08:44:35 2 1 -#> 27: -3.179822 0.2460938 0.007 2024-11-25 08:44:35 2 1 -#> 28: -3.293965 0.2460938 0.006 2024-11-25 08:44:35 2 1 -#> 29: -3.680872 0.2304688 0.006 2024-11-25 08:44:35 2 1 -#> 30: -3.179822 0.2304688 0.006 2024-11-25 08:44:35 2 1 -#> 31: -3.293965 0.2304688 0.006 2024-11-25 08:44:35 2 1 -#> 32: -3.680872 0.2968750 0.007 2024-11-25 08:44:35 2 1 -#> 33: -3.179822 
0.2968750 0.006 2024-11-25 08:44:35 2 1 -#> 34: -3.293965 0.2968750 0.006 2024-11-25 08:44:35 2 1 -#> 35: -2.722988 0.2500000 0.007 2024-11-25 08:44:35 2 1 -#> 36: -3.680872 0.2617188 0.006 2024-11-25 08:44:35 2 1 -#> 37: -3.179822 0.2500000 0.010 2024-11-25 08:44:35 2 1 -#> 38: -3.293965 0.2500000 0.006 2024-11-25 08:44:35 2 1 +#> 1: -8.192526 0.3085938 0.007 2024-11-26 14:01:04 1 1 +#> 2: -8.612223 0.3085938 0.007 2024-11-26 14:01:04 1 1 +#> 3: -2.722988 0.2812500 0.007 2024-11-26 14:01:04 1 1 +#> 4: -8.192526 0.3359375 0.007 2024-11-26 14:01:04 1 1 +#> 5: -8.612223 0.3359375 0.007 2024-11-26 14:01:04 1 1 +#> 6: -2.722988 0.2539062 0.006 2024-11-26 14:01:04 1 1 +#> 7: -8.192526 0.2851562 0.008 2024-11-26 14:01:04 1 1 +#> 8: -8.612223 0.2851562 0.008 2024-11-26 14:01:04 1 1 +#> 9: -2.722988 0.2460938 0.007 2024-11-26 14:01:04 1 1 +#> 10: -8.192526 0.2617188 0.006 2024-11-26 14:01:04 1 1 +#> 11: -8.612223 0.2617188 0.007 2024-11-26 14:01:04 1 1 +#> 12: -2.722988 0.2148438 0.007 2024-11-26 14:01:04 1 1 +#> 13: -8.192526 0.2382812 0.007 2024-11-26 14:01:05 1 1 +#> 14: -8.612223 0.2382812 0.006 2024-11-26 14:01:05 1 1 +#> 15: -2.722988 0.2304688 0.006 2024-11-26 14:01:05 1 1 +#> 16: -2.722988 0.2890625 0.007 2024-11-26 14:01:05 2 1 +#> 17: -3.680872 0.2734375 0.006 2024-11-26 14:01:05 2 1 +#> 18: -3.179822 0.2734375 0.007 2024-11-26 14:01:05 2 1 +#> 19: -3.293965 0.2734375 0.007 2024-11-26 14:01:05 2 1 +#> 20: -3.680872 0.2460938 0.007 2024-11-26 14:01:05 2 1 +#> 21: -3.179822 0.2539062 0.006 2024-11-26 14:01:05 2 1 +#> 22: -3.293965 0.2539062 0.007 2024-11-26 14:01:05 2 1 +#> 23: -3.680872 0.2148438 0.007 2024-11-26 14:01:05 2 1 +#> 24: -3.179822 0.2148438 0.007 2024-11-26 14:01:05 2 1 +#> 25: -3.293965 0.2148438 0.007 2024-11-26 14:01:05 2 1 +#> 26: -3.680872 0.2460938 0.007 2024-11-26 14:01:05 2 1 +#> 27: -3.179822 0.2460938 0.007 2024-11-26 14:01:05 2 1 +#> 28: -3.293965 0.2460938 0.007 2024-11-26 14:01:05 2 1 +#> 29: -3.680872 0.2304688 0.006 2024-11-26 14:01:05 
2 1 +#> 30: -3.179822 0.2304688 0.006 2024-11-26 14:01:05 2 1 +#> 31: -3.293965 0.2304688 0.007 2024-11-26 14:01:05 2 1 +#> 32: -3.680872 0.2968750 0.007 2024-11-26 14:01:05 2 1 +#> 33: -3.179822 0.2968750 0.007 2024-11-26 14:01:05 2 1 +#> 34: -3.293965 0.2968750 0.007 2024-11-26 14:01:05 2 1 +#> 35: -2.722988 0.2500000 0.007 2024-11-26 14:01:06 2 1 +#> 36: -3.680872 0.2617188 0.008 2024-11-26 14:01:06 2 1 +#> 37: -3.179822 0.2500000 0.006 2024-11-26 14:01:06 2 1 +#> 38: -3.293965 0.2500000 0.006 2024-11-26 14:01:06 2 1 #> cp classif.ce runtime_learners timestamp race step #> instance configuration warnings errors x_domain batch_nr resample_result #> <int> <num> <int> <int> <list> <int> <list> diff --git a/dev/reference/mlr_tuners_nloptr.html b/dev/reference/mlr_tuners_nloptr.html index dff388c8..c5682d0a 100644 --- a/dev/reference/mlr_tuners_nloptr.html +++ b/dev/reference/mlr_tuners_nloptr.html @@ -236,16 +236,16 @@

Examplesas.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> <num> <num> <num> <POSc> <int> <int> -#> 1: -5.081957 0.07826087 0.006 2024-11-25 08:44:36 0 0 -#> 2: -5.081957 0.07826087 0.006 2024-11-25 08:44:36 0 0 -#> 3: -5.081957 0.07826087 0.005 2024-11-25 08:44:36 0 0 -#> 4: -3.355018 0.07826087 0.006 2024-11-25 08:44:36 0 0 -#> 5: -6.808896 0.07826087 0.005 2024-11-25 08:44:36 0 0 -#> 6: -5.064688 0.07826087 0.005 2024-11-25 08:44:37 0 0 -#> 7: -5.099226 0.07826087 0.005 2024-11-25 08:44:37 0 0 -#> 8: -5.080230 0.07826087 0.007 2024-11-25 08:44:37 0 0 -#> 9: -5.083684 0.07826087 0.005 2024-11-25 08:44:37 0 0 -#> 10: -5.081957 0.07826087 0.006 2024-11-25 08:44:37 0 0 +#> 1: -5.081957 0.07826087 0.006 2024-11-26 14:01:07 0 0 +#> 2: -5.081957 0.07826087 0.005 2024-11-26 14:01:07 0 0 +#> 3: -5.081957 0.07826087 0.006 2024-11-26 14:01:07 0 0 +#> 4: -3.355018 0.07826087 0.005 2024-11-26 14:01:07 0 0 +#> 5: -6.808896 0.07826087 0.006 2024-11-26 14:01:07 0 0 +#> 6: -5.064688 0.07826087 0.006 2024-11-26 14:01:07 0 0 +#> 7: -5.099226 0.07826087 0.006 2024-11-26 14:01:07 0 0 +#> 8: -5.080230 0.07826087 0.007 2024-11-26 14:01:07 0 0 +#> 9: -5.083684 0.07826087 0.005 2024-11-26 14:01:07 0 0 +#> 10: -5.081957 0.07826087 0.006 2024-11-26 14:01:07 0 0 #> x_domain batch_nr resample_result #> <list> <int> <list> #> 1: <list[1]> 1 <ResampleResult> diff --git a/dev/reference/mlr_tuners_random_search.html b/dev/reference/mlr_tuners_random_search.html index 137d4865..79e02620 100644 --- a/dev/reference/mlr_tuners_random_search.html +++ b/dev/reference/mlr_tuners_random_search.html @@ -219,16 +219,16 @@

Examplesas.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> <num> <num> <num> <POSc> <int> <int> -#> 1: -4.711280 0.05217391 0.006 2024-11-25 08:44:38 0 0 -#> 2: -3.034222 0.05217391 0.005 2024-11-25 08:44:38 0 0 -#> 3: -2.403159 0.05217391 0.005 2024-11-25 08:44:38 0 0 -#> 4: -9.025467 0.03478261 0.005 2024-11-25 08:44:38 0 0 -#> 5: -7.209532 0.03478261 0.006 2024-11-25 08:44:38 0 0 -#> 6: -6.858402 0.03478261 0.006 2024-11-25 08:44:38 0 0 -#> 7: -6.311528 0.03478261 0.006 2024-11-25 08:44:38 0 0 -#> 8: -3.598009 0.05217391 0.006 2024-11-25 08:44:38 0 0 -#> 9: -3.967858 0.05217391 0.006 2024-11-25 08:44:38 0 0 -#> 10: -6.004689 0.03478261 0.006 2024-11-25 08:44:38 0 0 +#> 1: -4.711280 0.05217391 0.006 2024-11-26 14:01:08 0 0 +#> 2: -3.034222 0.05217391 0.006 2024-11-26 14:01:08 0 0 +#> 3: -2.403159 0.05217391 0.007 2024-11-26 14:01:08 0 0 +#> 4: -9.025467 0.03478261 0.007 2024-11-26 14:01:08 0 0 +#> 5: -7.209532 0.03478261 0.005 2024-11-26 14:01:09 0 0 +#> 6: -6.858402 0.03478261 0.006 2024-11-26 14:01:09 0 0 +#> 7: -6.311528 0.03478261 0.006 2024-11-26 14:01:09 0 0 +#> 8: -3.598009 0.05217391 0.006 2024-11-26 14:01:09 0 0 +#> 9: -3.967858 0.05217391 0.007 2024-11-26 14:01:09 0 0 +#> 10: -6.004689 0.03478261 0.007 2024-11-26 14:01:09 0 0 #> x_domain batch_nr resample_result #> <list> <int> <list> #> 1: <list[1]> 1 <ResampleResult> diff --git a/dev/reference/ti.html b/dev/reference/ti.html index ac58e1b9..102b137a 100644 --- a/dev/reference/ti.html +++ b/dev/reference/ti.html @@ -198,10 +198,10 @@

Examplesas.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> <num> <num> <num> <POSc> <int> <int> -#> 1: -3.372665 0.05814900 0.015 2024-11-25 08:44:39 0 0 -#> 2: -3.054465 0.05814900 0.015 2024-11-25 08:44:39 0 0 -#> 3: -8.460007 0.04942792 0.016 2024-11-25 08:44:40 0 0 -#> 4: -4.158236 0.05814900 0.015 2024-11-25 08:44:40 0 0 +#> 1: -3.372665 0.05814900 0.017 2024-11-26 14:01:10 0 0 +#> 2: -3.054465 0.05814900 0.016 2024-11-26 14:01:10 0 0 +#> 3: -8.460007 0.04942792 0.018 2024-11-26 14:01:10 0 0 +#> 4: -4.158236 0.05814900 0.017 2024-11-26 14:01:10 0 0 #> x_domain batch_nr resample_result #> <list> <int> <list> #> 1: <list[1]> 1 <ResampleResult> diff --git a/dev/reference/ti_async.html b/dev/reference/ti_async.html index 0aa893f2..c3987b11 100644 --- a/dev/reference/ti_async.html +++ b/dev/reference/ti_async.html @@ -204,10 +204,10 @@

Examplesas.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> <num> <num> <num> <POSc> <int> <int> -#> 1: -8.481745 0.05817442 0.018 2024-11-25 08:44:40 0 0 -#> 2: -9.007729 0.05817442 0.016 2024-11-25 08:44:40 0 0 -#> 3: -2.474325 0.05817442 0.016 2024-11-25 08:44:40 0 0 -#> 4: -8.015548 0.05817442 0.015 2024-11-25 08:44:40 0 0 +#> 1: -8.481745 0.05817442 0.017 2024-11-26 14:01:11 0 0 +#> 2: -9.007729 0.05817442 0.016 2024-11-26 14:01:11 0 0 +#> 3: -2.474325 0.05817442 0.017 2024-11-26 14:01:11 0 0 +#> 4: -8.015548 0.05817442 0.017 2024-11-26 14:01:11 0 0 #> x_domain batch_nr resample_result #> <list> <int> <list> #> 1: <list[1]> 1 <ResampleResult> diff --git a/dev/reference/tune.html b/dev/reference/tune.html index 82a5b942..c9aab361 100644 --- a/dev/reference/tune.html +++ b/dev/reference/tune.html @@ -255,10 +255,10 @@

Examplesas.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> <num> <num> <num> <POSc> <int> <int> -#> 1: -7.397281 0.2460938 0.007 2024-11-25 08:44:41 0 0 -#> 2: -4.648966 0.2382812 0.006 2024-11-25 08:44:41 0 0 -#> 3: -9.116329 0.2460938 0.026 2024-11-25 08:44:41 0 0 -#> 4: -5.519208 0.2460938 0.007 2024-11-25 08:44:41 0 0 +#> 1: -7.397281 0.2460938 0.007 2024-11-26 14:01:12 0 0 +#> 2: -4.648966 0.2382812 0.008 2024-11-26 14:01:12 0 0 +#> 3: -9.116329 0.2460938 0.031 2024-11-26 14:01:12 0 0 +#> 4: -5.519208 0.2460938 0.008 2024-11-26 14:01:12 0 0 #> x_domain batch_nr resample_result #> <list> <int> <list> #> 1: <list[1]> 1 <ResampleResult> diff --git a/dev/search.json b/dev/search.json index 101091a2..2320710a 100644 --- a/dev/search.json +++ b/dev/search.json @@ -1 +1 @@ -[{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"adding-new-tuners","dir":"Articles","previous_headings":"","what":"Adding new Tuners","title":"Add a new Tuner","text":"vignette, show implement custom tuner mlr3tuning. main task tuner iteratively propose new hyperparameter configurations want evaluate given task, learner validation strategy. second task decide configuration returned tuning result - usually configuration led best observed performance value. want implement tuner, implement R6-Object offers .optimize method implements iterative proposal free implement .assign_result differ -mentioned default process determining result. start implementation make familiar main R6-Objects bbotk (Black-Box Optimization Toolkit). package provide basic black box optimization algorithms also objects represent optimization problem (OptimInstance) log evaluated configurations (Archive). d two ways implement new tuner: ) new tuner can applied kind optimization problem implemented Optimizer. Optimizer can easily transformed Tuner. 
b) new custom tuner usable hyperparameter tuning, example needs access task, learner resampling objects directly implemented mlr3tuning Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"adding-a-new-tuner","dir":"Articles","previous_headings":"Adding new Tuners","what":"Adding a new Tuner","title":"Add a new Tuner","text":"summary steps adding new tuner. fifth step required new tuner added via bbotk. Check tuner already exist Optimizer Tuner GitHub repositories. Use one existing optimizers / tuners template. Overwrite .optimize private method optimizer / tuner. Optionally, overwrite default .assign_result private method. Use mlr3tuning::TunerBatchFromOptimizerBatch class transform Optimizer Tuner. Add unit tests tuner optionally optimizer. Open new pull request Tuner optionally second one `Optimizer.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"tuner-template","dir":"Articles","previous_headings":"Adding new Tuners","what":"Template","title":"Add a new Tuner","text":"new custom tuner implemented via bbotk, use one existing optimizer template e.g. bbotk::OptimizerRandomSearch. currently two tuners based Optimizer: mlr3hyperband::TunerHyperband mlr3tuning::TunerIrace. rather complex can still use documentation class structure template. following steps identical optimizers tuners. Rewrite meta information documentation create new class name. Scientific sources can added R/bibentries.R added @source documentation. example dictionary sections documentation auto-generated based @templateVar id . Change parameter set optimizer / tuner document @section Parameters. 
forget change mlr_optimizers$add() / mlr_tuners$add() last line adds optimizer / tuner dictionary.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"optimize-method","dir":"Articles","previous_headings":"Adding new Tuners","what":"Optimize method","title":"Add a new Tuner","text":"$.optimize() private method main part tuner. takes instance, proposes new points calls $eval_batch() method instance evaluate . can go two ways: Implement iterative process call external optimization function resides another package.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"writing-a-custom-iteration","dir":"Articles","previous_headings":"Adding new Tuners > Optimize method","what":"Writing a custom iteration","title":"Add a new Tuner","text":"Usually, proposal evaluation done repeat-loop implement. Please consider following points: can evaluate one multiple points per iteration don’t care termination, $eval_batch() won’t allow evaluations allowed bbotk::Terminator. implies, code repeat-loop executed. don’t care keeping track evaluations every evaluation automatically stored inst$archive. want log additional information evaluation Objective Archive can simply add columns data.table object passed $eval_batch().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"calling-an-external-optimization-function","dir":"Articles","previous_headings":"Adding new Tuners > Optimize method","what":"Calling an external optimization function","title":"Add a new Tuner","text":"Optimization functions external packages usually take objective function argument. case, can pass inst$objective_function internally calls $eval_batch(). 
Check OptimizerGenSA example.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"assign-result-method","dir":"Articles","previous_headings":"Adding new Tuners","what":"Assign result method","title":"Add a new Tuner","text":"default $.assign_result() private method simply obtains best performing result archive. default method can overwritten new tuner determines result optimization different way. new function must call $assign_result() method instance write final result instance. See mlr3tuning::TunerIrace implementation $.assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"transform-optimizer-to-tuner","dir":"Articles","previous_headings":"Adding new Tuners","what":"Transform optimizer to tuner","title":"Add a new Tuner","text":"step needed implement via bbotk. mlr3tuning::TunerBatchFromOptimizerBatch class transforms Optimizer Tuner. Just add Optimizer optimizer field. See mlr3tuning::TunerRandomSearch example.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"add-unit-tests","dir":"Articles","previous_headings":"Adding new Tuners","what":"Add unit tests","title":"Add a new Tuner","text":"new custom tuner thoroughly tested unit tests. Tuners can tested test_tuner() helper function. added Tuner via Optimizer, additionally test Optimizer test_optimizer() helper function.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Marc Becker. Maintainer, author. Michel Lang. Author. Jakob Richter. Author. Bernd Bischl. Author. Daniel Schalk. Author.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Becker M, Lang M, Richter J, Bischl B, Schalk D (2024). mlr3tuning: Hyperparameter Optimization 'mlr3'. 
R package version 1.2.0.9000, https://github.com/mlr-org/mlr3tuning, https://mlr3tuning.mlr-org.com.","code":"@Manual{, title = {mlr3tuning: Hyperparameter Optimization for 'mlr3'}, author = {Marc Becker and Michel Lang and Jakob Richter and Bernd Bischl and Daniel Schalk}, year = {2024}, note = {R package version 1.2.0.9000, https://github.com/mlr-org/mlr3tuning}, url = {https://mlr3tuning.mlr-org.com}, }"},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"mlr3tuning-","dir":"","previous_headings":"","what":"Hyperparameter Optimization for mlr3","title":"Hyperparameter Optimization for mlr3","text":"Package website: release | dev mlr3tuning hyperparameter optimization package mlr3 ecosystem. features highly configurable search spaces via paradox package finds optimal hyperparameter configurations mlr3 learner. mlr3tuning works several optimization algorithms e.g. Random Search, Iterated Racing, Bayesian Optimization (mlr3mbo) Hyperband (mlr3hyperband). Moreover, can automatically optimize learners estimate performance optimized models nested resampling. package built optimization framework bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"extension-packages","dir":"","previous_headings":"","what":"Extension packages","title":"Hyperparameter Optimization for mlr3","text":"mlr3tuning extended following packages. mlr3tuningspaces collection search spaces scientific articles commonly used learners. mlr3hyperband adds Hyperband Successive Halving algorithm. mlr3mbo adds Bayesian Optimization methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"resources","dir":"","previous_headings":"","what":"Resources","title":"Hyperparameter Optimization for mlr3","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. 
Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"installation","dir":"","previous_headings":"","what":"Installation","title":"Hyperparameter Optimization for mlr3","text":"Install last release CRAN: Install development version GitHub:","code":"install.packages(\"mlr3tuning\") remotes::install_github(\"mlr-org/mlr3tuning\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"examples","dir":"","previous_headings":"","what":"Examples","title":"Hyperparameter Optimization for mlr3","text":"optimize cost gamma hyperparameters support vector machine Sonar data set. construct tuning instance ti() function. tuning instance describes tuning problem. select simple grid search optimization algorithm. start tuning, simply pass tuning instance tuner. tuner returns best hyperparameter configuration corresponding measured performance. archive contains evaluated hyperparameter configurations. mlr3viz package visualizes tuning results. 
fit final model optimized hyperparameters make predictions new data.","code":"library(\"mlr3learners\") library(\"mlr3tuning\") learner = lrn(\"classif.svm\", cost = to_tune(1e-5, 1e5, logscale = TRUE), gamma = to_tune(1e-5, 1e5, logscale = TRUE), kernel = \"radial\", type = \"C-classification\" ) instance = ti( task = tsk(\"sonar\"), learner = learner, resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), terminator = trm(\"none\") ) instance ## ## * State: Not optimized ## * Objective: ## * Search Space: ## id class lower upper nlevels ## 1: cost ParamDbl -11.51293 11.51293 Inf ## 2: gamma ParamDbl -11.51293 11.51293 Inf ## * Terminator: tuner = tnr(\"grid_search\", resolution = 5) tuner ## : Grid Search ## * Parameters: batch_size=1, resolution=5 ## * Parameter classes: ParamLgl, ParamInt, ParamDbl, ParamFct ## * Properties: dependencies, single-crit, multi-crit ## * Packages: mlr3tuning, bbotk tuner$optimize(instance) ## cost gamma learner_param_vals x_domain classif.ce ## 1: 5.756463 -5.756463 0.1828847 as.data.table(instance$archive)[, .(cost, gamma, classif.ce, batch_nr, resample_result)] ## cost gamma classif.ce batch_nr resample_result ## 1: -5.756463 5.756463 0.4663216 1 ## 2: 5.756463 -5.756463 0.1828847 2 ## 3: 11.512925 5.756463 0.4663216 3 ## 4: 5.756463 11.512925 0.4663216 4 ## 5: -11.512925 -11.512925 0.4663216 5 ## --- ## 21: -5.756463 -5.756463 0.4663216 21 ## 22: 11.512925 11.512925 0.4663216 22 ## 23: -11.512925 11.512925 0.4663216 23 ## 24: 11.512925 -5.756463 0.1828847 24 ## 25: 0.000000 -5.756463 0.2402346 25 library(mlr3viz) autoplot(instance, type = \"surface\") learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"sonar\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Rush Data Storage — ArchiveAsyncTuning","title":"Rush Data Storage — ArchiveAsyncTuning","text":"`ArchiveAsyncTuning“ stores evaluated 
hyperparameter configurations performance scores rush::Rush database.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Rush Data Storage — ArchiveAsyncTuning","text":"ArchiveAsyncTuning connector rush::Rush database.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"data-structure","dir":"Reference","previous_headings":"","what":"Data Structure","title":"Rush Data Storage — ArchiveAsyncTuning","text":"table ($data) following columns: One column hyperparameter search space ($search_space). One (list-)column internal_tuned_values One column performance measure ($codomain). x_domain (list()) Lists (transformed) hyperparameter values passed learner. runtime_learners (numeric(1)) Sum training predict times logged learners per mlr3::ResampleResult / evaluation. include potential overhead time. timestamp (POSIXct) Time stamp evaluation logged archive. batch_nr (integer(1)) Hyperparameters evaluated batches. batch unique batch number.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Rush Data Storage — ArchiveAsyncTuning","text":"analyzing tuning results, recommended pass ArchiveAsyncTuning .data.table(). returned data table contains mlr3::ResampleResult hyperparameter evaluation.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"s-methods","dir":"Reference","previous_headings":"","what":"S3 Methods","title":"Rush Data Storage — ArchiveAsyncTuning","text":".data.table.ArchiveTuning(x, unnest = \"x_domain\", exclude_columns = \"uhash\", measures = NULL) Returns tabular view evaluated hyperparameter configurations. 
ArchiveAsyncTuning -> data.table::data.table() x (ArchiveAsyncTuning) unnest (character()) Transforms list columns separate columns. Set NULL column unnested. exclude_columns (character()) Exclude columns table. Set NULL column excluded. measures (List mlr3::Measure) Score hyperparameter configurations additional measures.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Rush Data Storage — ArchiveAsyncTuning","text":"bbotk::Archive -> bbotk::ArchiveAsync -> ArchiveAsyncTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Rush Data Storage — ArchiveAsyncTuning","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner. benchmark_result (mlr3::BenchmarkResult) Benchmark result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Rush Data Storage — ArchiveAsyncTuning","text":"bbotk::Archive$format() bbotk::Archive$help() bbotk::ArchiveAsync$best() bbotk::ArchiveAsync$clear() bbotk::ArchiveAsync$data_with_state() bbotk::ArchiveAsync$nds_selection() bbotk::ArchiveAsync$pop_point() bbotk::ArchiveAsync$push_failed_point() bbotk::ArchiveAsync$push_points() bbotk::ArchiveAsync$push_result() bbotk::ArchiveAsync$push_running_point()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Rush Data Storage — ArchiveAsyncTuning","text":"ArchiveAsyncTuning$new() ArchiveAsyncTuning$learner() ArchiveAsyncTuning$learners() ArchiveAsyncTuning$learner_param_vals() ArchiveAsyncTuning$predictions() 
ArchiveAsyncTuning$resample_result() ArchiveAsyncTuning$print() ArchiveAsyncTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$new( search_space, codomain, rush, internal_search_space = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). codomain (bbotk::Codomain) Specifies codomain objective function .e. set performance measures. Internally created provided mlr3::Measures. rush (Rush) rush instance supplied, tuning runs without batches. internal_search_space (paradox::ParamSet NULL) internal search space. check_values (logical(1)) TRUE (default), hyperparameter configurations check validity.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-learner-","dir":"Reference","previous_headings":"","what":"Method learner()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve mlr3::Learner -th evaluation, position unique hash uhash. uhash mutually exclusive. Learner contain model. 
Use $learners() get learners models.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$learner(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-learners-","dir":"Reference","previous_headings":"","what":"Method learners()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve list trained mlr3::Learner objects -th evaluation, position unique hash uhash. uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$learners(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-learner-param-vals-","dir":"Reference","previous_headings":"","what":"Method learner_param_vals()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve param values -th evaluation, position unique hash uhash. 
uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$learner_param_vals(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-3","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-predictions-","dir":"Reference","previous_headings":"","what":"Method predictions()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve list mlr3::Prediction objects -th evaluation, position unique hash uhash. uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$predictions(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-4","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-resample-result-","dir":"Reference","previous_headings":"","what":"Method resample_result()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve mlr3::ResampleResult -th evaluation, position unique hash uhash. 
uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-5","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$resample_result(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-5","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-print-","dir":"Reference","previous_headings":"","what":"Method print()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Printer.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-6","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$print()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-6","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"... 
(ignored).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-7","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-7","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"ArchiveBatchTuning stores evaluated hyperparameter configurations performance scores data.table::data.table().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"ArchiveBatchTuning container around data.table::data.table(). row corresponds single evaluation hyperparameter configuration. See section Data Structure information. archive stores additionally mlr3::BenchmarkResult ($benchmark_result) records resampling experiments. experiment corresponds single evaluation hyperparameter configuration. table ($data) benchmark result ($benchmark_result) linked uhash column. 
archive passed .data.table(), joined automatically.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"data-structure","dir":"Reference","previous_headings":"","what":"Data Structure","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"table ($data) following columns: One column hyperparameter search space ($search_space). One (list-)column internal_tuned_values One column performance measure ($codomain). x_domain (list()) Lists (transformed) hyperparameter values passed learner. runtime_learners (numeric(1)) Sum training predict times logged learners per mlr3::ResampleResult / evaluation. include potential overhead time. timestamp (POSIXct) Time stamp evaluation logged archive. batch_nr (integer(1)) Hyperparameters evaluated batches. batch unique batch number. uhash (character(1)) Connects hyperparameter configuration resampling experiment stored mlr3::BenchmarkResult.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"analyzing tuning results, recommended pass ArchiveBatchTuning .data.table(). returned data table joined benchmark result adds mlr3::ResampleResult hyperparameter evaluation. archive provides various getters (e.g. $learners()) ease access. getters extract position () unique hash (uhash). complete list getters see methods section. benchmark result ($benchmark_result) allows score hyperparameter configurations different measure. Alternatively, measures can supplied .data.table(). 
mlr3viz package provides visualizations tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"s-methods","dir":"Reference","previous_headings":"","what":"S3 Methods","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":".data.table.ArchiveTuning(x, unnest = \"x_domain\", exclude_columns = \"uhash\", measures = NULL) Returns tabular view evaluated hyperparameter configurations. ArchiveBatchTuning -> data.table::data.table() x (ArchiveBatchTuning) unnest (character()) Transforms list columns separate columns. Set NULL column unnested. exclude_columns (character()) Exclude columns table. Set NULL column excluded. measures (List mlr3::Measure) Score hyperparameter configurations additional measures.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"bbotk::Archive -> bbotk::ArchiveBatch -> ArchiveBatchTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"benchmark_result (mlr3::BenchmarkResult) Benchmark result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized 
mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"bbotk::Archive$format() bbotk::Archive$help() bbotk::ArchiveBatch$add_evals() bbotk::ArchiveBatch$best() bbotk::ArchiveBatch$clear() bbotk::ArchiveBatch$nds_selection()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"ArchiveBatchTuning$new() ArchiveBatchTuning$learner() ArchiveBatchTuning$learners() ArchiveBatchTuning$learner_param_vals() ArchiveBatchTuning$predictions() ArchiveBatchTuning$resample_result() ArchiveBatchTuning$print() ArchiveBatchTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$new( search_space, codomain, check_values = FALSE, internal_search_space = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"search_space (paradox::ParamSet) Hyperparameter search space. 
NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). codomain (bbotk::Codomain) Specifies codomain objective function .e. set performance measures. Internally created provided mlr3::Measures. check_values (logical(1)) TRUE (default), hyperparameter configurations check validity. internal_search_space (paradox::ParamSet NULL) internal search space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-learner-","dir":"Reference","previous_headings":"","what":"Method learner()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve mlr3::Learner -th evaluation, position unique hash uhash. uhash mutually exclusive. Learner contain model. Use $learners() get learners models.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$learner(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-learners-","dir":"Reference","previous_headings":"","what":"Method learners()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve list trained mlr3::Learner objects -th evaluation, position unique hash uhash. 
uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$learners(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-learner-param-vals-","dir":"Reference","previous_headings":"","what":"Method learner_param_vals()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve param values -th evaluation, position unique hash uhash. uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$learner_param_vals(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-3","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . 
uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-predictions-","dir":"Reference","previous_headings":"","what":"Method predictions()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve list mlr3::Prediction objects -th evaluation, position unique hash uhash. uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$predictions(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-4","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-resample-result-","dir":"Reference","previous_headings":"","what":"Method resample_result()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve mlr3::ResampleResult -th evaluation, position unique hash uhash. 
uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-5","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$resample_result(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-5","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-print-","dir":"Reference","previous_headings":"","what":"Method print()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Printer.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-6","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$print()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-6","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"... 
(ignored).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-7","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-7","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Automatic Tuning — AutoTuner","title":"Class for Automatic Tuning — AutoTuner","text":"AutoTuner wraps mlr3::Learner augments automatic tuning process given set hyperparameters. auto_tuner() function creates AutoTuner object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Automatic Tuning — AutoTuner","text":"AutoTuner mlr3::Learner wraps another mlr3::Learner performs following steps $train(): hyperparameters wrapped (inner) learner trained training data via resampling. tuning can specified providing Tuner, bbotk::Terminator, search space paradox::ParamSet, mlr3::Resampling mlr3::Measure. best found hyperparameter configuration set hyperparameters wrapped (inner) learner stored $learner. Access tuned hyperparameters via $tuning_result. 
final model fit complete training data using now parametrized wrapped learner. respective model available via field $learner$model. $predict() AutoTuner just calls predict method wrapped (inner) learner. set timeout disabled fitting final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"validation","dir":"Reference","previous_headings":"","what":"Validation","title":"Class for Automatic Tuning — AutoTuner","text":"AutoTuner \"validation\" property. enable validation tuning, set $validate field tuned learner. also possible via set_validate().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"nested-resampling","dir":"Reference","previous_headings":"","what":"Nested Resampling","title":"Class for Automatic Tuning — AutoTuner","text":"Nested resampling performed passing AutoTuner mlr3::resample() mlr3::benchmark(). access inner resampling results, set store_tuning_instance = TRUE execute mlr3::resample() mlr3::benchmark() store_models = TRUE (see examples). mlr3::Resampling passed AutoTuner meant inner resampling, operating training set arbitrary outer resampling. reason, inner resampling instantiated. instantiated resampling passed, AutoTuner fails row id inner resampling present training set outer resampling.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Class for Automatic Tuning — AutoTuner","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Automatic Tuning — AutoTuner","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. 
Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"super-class","dir":"Reference","previous_headings":"","what":"Super class","title":"Class for Automatic Tuning — AutoTuner","text":"mlr3::Learner -> AutoTuner","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Automatic Tuning — AutoTuner","text":"instance_args (list()) arguments construction create TuningInstanceBatchSingleCrit. tuner (Tuner) Optimization algorithm.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Class for Automatic Tuning — AutoTuner","text":"internal_valid_scores Retrieves inner validation scores named list(). Returns NULL learner trained yet. archive ArchiveBatchTuning Archive TuningInstanceBatchSingleCrit. learner (mlr3::Learner) Trained learner tuning_instance (TuningInstanceAsyncSingleCrit | TuningInstanceBatchSingleCrit) Internally created tuning instance intermediate results. tuning_result (data.table::data.table) Short-cut result tuning instance. predict_type (character(1)) Stores currently active predict type, e.g. \"response\". Must element $predict_types. hash (character(1)) Hash (unique identifier) object. 
phash (character(1)) Hash (unique identifier) partial object, excluding components varied systematically tuning (parameter values) feature selection (feature names).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Automatic Tuning — AutoTuner","text":"mlr3::Learner$encapsulate() mlr3::Learner$format() mlr3::Learner$help() mlr3::Learner$predict() mlr3::Learner$predict_newdata() mlr3::Learner$reset() mlr3::Learner$train()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Automatic Tuning — AutoTuner","text":"AutoTuner$new() AutoTuner$base_learner() AutoTuner$importance() AutoTuner$selected_features() AutoTuner$oob_error() AutoTuner$loglik() AutoTuner$print() AutoTuner$marshal() AutoTuner$unmarshal() AutoTuner$marshaled() AutoTuner$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Automatic Tuning — AutoTuner","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$new( tuner, learner, resampling, measure = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_tuning_instance = TRUE, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL, id = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"tuner (Tuner) Optimization algorithm. 
learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. store_tuning_instance (logical(1)) TRUE (default), stores internally created TuningInstanceBatchSingleCrit intermediate results slot $tuning_instance. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches. id (character(1)) Identifier new instance.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-base-learner-","dir":"Reference","previous_headings":"","what":"Method base_learner()","title":"Class for Automatic Tuning — AutoTuner","text":"Extracts base learner nested learner objects like GraphLearner mlr3pipelines. 
recursive = 0, (tuned) learner returned.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$base_learner(recursive = Inf)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"recursive (integer(1)) Depth recursion multiple nested objects.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-importance-","dir":"Reference","previous_headings":"","what":"Method importance()","title":"Class for Automatic Tuning — AutoTuner","text":"importance scores final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$importance()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-1","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"Named numeric().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-selected-features-","dir":"Reference","previous_headings":"","what":"Method selected_features()","title":"Class for Automatic Tuning — AutoTuner","text":"selected features final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning 
— AutoTuner","text":"","code":"AutoTuner$selected_features()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-2","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"character().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-oob-error-","dir":"Reference","previous_headings":"","what":"Method oob_error()","title":"Class for Automatic Tuning — AutoTuner","text":"--bag error final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$oob_error()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-3","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"numeric(1).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-loglik-","dir":"Reference","previous_headings":"","what":"Method loglik()","title":"Class for Automatic Tuning — AutoTuner","text":"log-likelihood final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-5","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$loglik()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-4","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"logLik. 
Printer.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-6","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$print()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"... (ignored).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-marshal-","dir":"Reference","previous_headings":"","what":"Method marshal()","title":"Class for Automatic Tuning — AutoTuner","text":"Marshal learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-7","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$marshal(...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-3","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"... 
() Additional parameters.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-5","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"self","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-unmarshal-","dir":"Reference","previous_headings":"","what":"Method unmarshal()","title":"Class for Automatic Tuning — AutoTuner","text":"Unmarshal learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-8","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$unmarshal(...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-4","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"... () Additional parameters.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-6","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"self","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-marshaled-","dir":"Reference","previous_headings":"","what":"Method marshaled()","title":"Class for Automatic Tuning — AutoTuner","text":"Whether learner marshaled.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-9","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$marshaled()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Automatic Tuning — AutoTuner","text":"objects class cloneable 
method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-10","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-5","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"# Automatic Tuning # split to train and external set task = tsk(\"penguins\") split = partition(task, ratio = 0.8) # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # create auto tuner at = auto_tuner( tuner = tnr(\"random_search\"), learner = learner, resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) # tune hyperparameters and fit final model at$train(task, row_ids = split$train) # predict with final model at$predict(task, row_ids = split$test) #> for 69 observations: #> row_ids truth response #> 1 Adelie Adelie #> 2 Adelie Adelie #> 9 Adelie Adelie #> --- --- --- #> 318 Chinstrap Chinstrap #> 334 Chinstrap Chinstrap #> 338 Chinstrap Chinstrap # show tuning result at$tuning_result #> cp learner_param_vals x_domain classif.ce #> #> 1: -4.797088 0.05434783 # model slot contains trained learner and tuning instance at$model #> $learner #> : Classification Tree #> * Model: rpart #> * Parameters: cp=0.008254, xval=0 #> * Packages: mlr3, rpart #> * Predict Types: [response], prob #> * Feature Types: logical, integer, numeric, factor, ordered #> * Properties: importance, missings, multiclass, selected_features, #> twoclass, weights #> #> $tuning_instance #> #> * State: 
Optimized #> * Objective: #> * Search Space: #> id class lower upper nlevels #> #> 1: cp ParamDbl -9.21034 -2.302585 Inf #> * Terminator: #> * Result: #> cp classif.ce #> #> 1: -4.797088 0.05434783 #> * Archive: #> cp classif.ce #> #> 1: -2.529580 0.08695652 #> 2: -4.797088 0.05434783 #> 3: -2.447415 0.08695652 #> 4: -4.854704 0.05434783 #> #> attr(,\"class\") #> [1] \"auto_tuner_model\" \"list\" # shortcut trained learner at$learner #> : Classification Tree #> * Model: rpart #> * Parameters: cp=0.008254, xval=0 #> * Packages: mlr3, rpart #> * Predict Types: [response], prob #> * Feature Types: logical, integer, numeric, factor, ordered #> * Properties: importance, missings, multiclass, selected_features, #> twoclass, weights # shortcut tuning instance at$tuning_instance #> #> * State: Optimized #> * Objective: #> * Search Space: #> id class lower upper nlevels #> #> 1: cp ParamDbl -9.21034 -2.302585 Inf #> * Terminator: #> * Result: #> cp classif.ce #> #> 1: -4.797088 0.05434783 #> * Archive: #> cp classif.ce #> #> 1: -2.529580 0.08695652 #> 2: -4.797088 0.05434783 #> 3: -2.447415 0.08695652 #> 4: -4.854704 0.05434783 # Nested Resampling at = auto_tuner( tuner = tnr(\"random_search\"), learner = learner, resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) resampling_outer = rsmp(\"cv\", folds = 3) rr = resample(task, at, resampling_outer, store_models = TRUE) # retrieve inner tuning results. 
extract_inner_tuning_results(rr) #> iteration cp classif.ce learner_param_vals x_domain task_id #> #> 1: 1 -8.698664 0.03947368 penguins #> 2: 2 -2.421343 0.06578947 penguins #> 3: 3 -7.917442 0.06493506 penguins #> learner_id resampling_id #> #> 1: classif.rpart.tuned cv #> 2: classif.rpart.tuned cv #> 3: classif.rpart.tuned cv # performance scores estimated on the outer resampling rr$score() #> task_id learner_id resampling_id iteration classif.ce #> #> 1: penguins classif.rpart.tuned cv 1 0.06956522 #> 2: penguins classif.rpart.tuned cv 2 0.06956522 #> 3: penguins classif.rpart.tuned cv 3 0.01754386 #> Hidden columns: task, learner, resampling, prediction_test # unbiased performance of the final model trained on the full data set rr$aggregate() #> classif.ce #> 0.05222476"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"Specialized bbotk::CallbackAsync asynchronous tuning. Callbacks allow customize behavior processes mlr3tuning. callback_async_tuning() function creates CallbackAsyncTuning. Predefined callbacks stored dictionary mlr_callbacks can retrieved clbk(). information tuning callbacks see callback_async_tuning().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"mlr3misc::Callback -> bbotk::CallbackAsync -> CallbackAsyncTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"on_eval_after_xs (function()) Stage called xs passed. 
Called ObjectiveTuningAsync$eval(). on_eval_after_resample (function()) Stage called hyperparameter configurations evaluated. Called ObjectiveTuningAsync$eval(). on_eval_before_archive (function()) Stage called performance values written archive. Called ObjectiveTuningAsync$eval(). on_tuning_result_begin (function()) Stage called results written. Called TuningInstance*$assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"mlr3misc::Callback$call() mlr3misc::Callback$format() mlr3misc::Callback$help() mlr3misc::Callback$initialize() mlr3misc::Callback$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"CallbackAsyncTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"","code":"CallbackAsyncTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"deep Whether make deep 
clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Create Batch Tuning Callback — CallbackBatchTuning","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"Specialized bbotk::CallbackBatch batch tuning. Callbacks allow customize behavior processes mlr3tuning. callback_batch_tuning() function creates CallbackBatchTuning. Predefined callbacks stored dictionary mlr_callbacks can retrieved clbk(). information tuning callbacks see callback_batch_tuning().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"mlr3misc::Callback -> bbotk::CallbackBatch -> CallbackBatchTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"on_eval_after_design (function()) Stage called design created. Called ObjectiveTuningBatch$eval_many(). on_eval_after_benchmark (function()) Stage called hyperparameter configurations evaluated. Called ObjectiveTuningBatch$eval_many(). on_eval_before_archive (function()) Stage called performance values written archive. Called ObjectiveTuningBatch$eval_many(). on_tuning_result_begin (function()) Stage called results written. 
Called TuningInstance*$assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"mlr3misc::Callback$call() mlr3misc::Callback$format() mlr3misc::Callback$help() mlr3misc::Callback$initialize() mlr3misc::Callback$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"CallbackBatchTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"","code":"CallbackBatchTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"","code":"# write archive to disk callback_batch_tuning(\"mlr3tuning.backup\", on_optimization_end = function(callback, context) { saveRDS(context$instance$archive, \"archive.rds\") } ) #> #> * Active Stages: 
on_optimization_end"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Asynchronous Tuning Context — ContextAsyncTuning","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"CallbackAsyncTuning accesses modifies data optimization via ContextAsyncTuning. See section active bindings list modifiable objects. See callback_async_tuning() list stages access ContextAsyncTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"Changes $instance $optimizer stages executed workers reflected main process.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"mlr3misc::Context -> bbotk::ContextAsync -> ContextAsyncTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"xs_learner (list()) hyperparameter configuration currently evaluated. Contains values learner scale .e. transformations applied. resample_result (mlr3::BenchmarkResult) resample result hyperparameter configuration currently evaluated. aggregated_performance (list()) Aggregated performance scores training time evaluated hyperparameter configuration. list passed archive. callback can add additional elements also written archive. 
result_learner_param_vals (list()) learner parameter values passed instance$assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"mlr3misc::Context$format() mlr3misc::Context$print() bbotk::ContextAsync$initialize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"ContextAsyncTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"","code":"ContextAsyncTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Batch Tuning Context — ContextBatchTuning","title":"Batch Tuning Context — ContextBatchTuning","text":"CallbackBatchTuning accesses modifies data optimization via ContextBatchTuning. See section active bindings list modifiable objects. 
See callback_batch_tuning() list stages access ContextBatchTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Batch Tuning Context — ContextBatchTuning","text":"mlr3misc::Context -> bbotk::ContextBatch -> ContextBatchTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Batch Tuning Context — ContextBatchTuning","text":"xss (list()) hyperparameter configurations latest batch. Contains values learner scale .e. transformations applied. See $xdt untransformed values. design (data.table::data.table) benchmark design latest batch. benchmark_result (mlr3::BenchmarkResult) benchmark result latest batch. aggregated_performance (data.table::data.table) Aggregated performance scores training time latest batch. data table passed archive. callback can add additional columns also written archive. 
result_learner_param_vals (list()) learner parameter values passed instance$assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Batch Tuning Context — ContextBatchTuning","text":"mlr3misc::Context$format() mlr3misc::Context$print() bbotk::ContextBatch$initialize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Batch Tuning Context — ContextBatchTuning","text":"ContextBatchTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Batch Tuning Context — ContextBatchTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Batch Tuning Context — ContextBatchTuning","text":"","code":"ContextBatchTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Batch Tuning Context — ContextBatchTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Tuning Objective — ObjectiveTuning","title":"Class for Tuning Objective — ObjectiveTuning","text":"Stores objective function estimates performance hyperparameter configurations. 
class usually constructed internally TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"super-class","dir":"Reference","previous_headings":"","what":"Super class","title":"Class for Tuning Objective — ObjectiveTuning","text":"bbotk::Objective -> ObjectiveTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Tuning Objective — ObjectiveTuning","text":"task (mlr3::Task). learner (mlr3::Learner). resampling (mlr3::Resampling). measures (list mlr3::Measure). store_models (logical(1)). store_benchmark_result (logical(1)). callbacks (List mlr3misc::Callback). default_values (named list()). internal_search_space (paradox::ParamSet). Internal search space internal tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Tuning Objective — ObjectiveTuning","text":"bbotk::Objective$eval() bbotk::Objective$eval_dt() bbotk::Objective$eval_many() bbotk::Objective$format() bbotk::Objective$help() bbotk::Objective$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Tuning Objective — ObjectiveTuning","text":"ObjectiveTuning$new() ObjectiveTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Tuning Objective — ObjectiveTuning","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuning","text":"","code":"ObjectiveTuning$new( task, learner, resampling, measures, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, internal_search_space = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuning","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. 
internal_search_space (paradox::ParamSet NULL) internal search space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Tuning Objective — ObjectiveTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuning","text":"","code":"ObjectiveTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Tuning Objective — ObjectiveTuningAsync","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"Stores objective function estimates performance hyperparameter configurations. 
class usually constructed internally TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"bbotk::Objective -> mlr3tuning::ObjectiveTuning -> ObjectiveTuningAsync","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"bbotk::Objective$eval() bbotk::Objective$eval_dt() bbotk::Objective$eval_many() bbotk::Objective$format() bbotk::Objective$help() bbotk::Objective$print() mlr3tuning::ObjectiveTuning$initialize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"ObjectiveTuningAsync$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"","code":"ObjectiveTuningAsync$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"deep Whether make deep 
clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Tuning Objective — ObjectiveTuningBatch","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"Stores objective function estimates performance hyperparameter configurations. class usually constructed internally TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"bbotk::Objective -> mlr3tuning::ObjectiveTuning -> ObjectiveTuningBatch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"archive (ArchiveBatchTuning).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"bbotk::Objective$eval() bbotk::Objective$eval_dt() bbotk::Objective$eval_many() bbotk::Objective$format() bbotk::Objective$help() bbotk::Objective$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"ObjectiveTuningBatch$new() ObjectiveTuningBatch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"Creates new 
instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"","code":"ObjectiveTuningBatch$new( task, learner, resampling, measures, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, archive = NULL, callbacks = NULL, internal_search_space = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. archive (ArchiveBatchTuning) Reference archive TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit. NULL (default), benchmark result models stored. callbacks (list mlr3misc::Callback) List callbacks. 
internal_search_space (paradox::ParamSet NULL) internal search space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"","code":"ObjectiveTuningBatch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Tuner — Tuner","title":"Tuner — Tuner","text":"Tuner implements optimization algorithm.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Tuner — Tuner","text":"Tuner abstract base class implements base functionality tuner must provide.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"extension-packages","dir":"Reference","previous_headings":"","what":"Extension Packages","title":"Tuner — Tuner","text":"Additional tuners provided following packages. mlr3hyperband adds Hyperband Successive Halving algorithm. mlr3mbo adds Bayesian optimization methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Tuner — Tuner","text":"several sections hyperparameter optimization mlr3book. 
Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Tuner — Tuner","text":"id (character(1)) Identifier object. Used tables, plot text output.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Tuner — Tuner","text":"param_set (paradox::ParamSet) Set control parameters. param_classes (character()) Supported parameter classes learner hyperparameters tuner can optimize, given paradox::ParamSet $class field. properties (character()) Set properties tuner. Must subset mlr_reflections$tuner_properties. packages (character()) Set required packages. Note packages loaded via requireNamespace(), attached. label (character(1)) Label object. Can used tables, plot text output instead ID. man (character(1)) String format [pkg]::[topic] pointing manual page object. 
referenced help package can opened via method $help().","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Tuner — Tuner","text":"Tuner$new() Tuner$format() Tuner$print() Tuner$help() Tuner$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Tuner — Tuner","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — Tuner","text":"","code":"Tuner$new( id = \"tuner\", param_set, param_classes, properties, packages = character(), label = NA_character_, man = NA_character_ )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Tuner — Tuner","text":"id (character(1)) Identifier new instance. param_set (paradox::ParamSet) Set control parameters. param_classes (character()) Supported parameter classes learner hyperparameters tuner can optimize, given paradox::ParamSet $class field. properties (character()) Set properties tuner. Must subset mlr_reflections$tuner_properties. packages (character()) Set required packages. Note packages loaded via requireNamespace(), attached. label (character(1)) Label object. Can used tables, plot text output instead ID. man (character(1)) String format [pkg]::[topic] pointing manual page object. 
referenced help package can opened via method $help().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-format-","dir":"Reference","previous_headings":"","what":"Method format()","title":"Tuner — Tuner","text":"Helper print outputs.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — Tuner","text":"","code":"Tuner$format(...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Tuner — Tuner","text":"... (ignored).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Tuner — Tuner","text":"(character()).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-print-","dir":"Reference","previous_headings":"","what":"Method print()","title":"Tuner — Tuner","text":"Print method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — Tuner","text":"","code":"Tuner$print()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"returns-1","dir":"Reference","previous_headings":"","what":"Returns","title":"Tuner — Tuner","text":"(character()).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-help-","dir":"Reference","previous_headings":"","what":"Method help()","title":"Tuner — Tuner","text":"Opens corresponding help page referenced field $man.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — 
Tuner","text":"","code":"Tuner$help()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Tuner — Tuner","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — Tuner","text":"","code":"Tuner$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Tuner — Tuner","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Asynchronous Tuning Algorithms — TunerAsync","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"TunerAsync implements asynchronous optimization algorithm.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"TunerAsync abstract base class implements base functionality asynchronous tuner must provide.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. 
Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"super-class","dir":"Reference","previous_headings":"","what":"Super class","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"mlr3tuning::Tuner -> TunerAsync","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$initialize() mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"TunerAsync$optimize() TunerAsync$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"Performs tuning TuningInstanceAsyncSingleCrit TuningInstanceAsyncMultiCrit termination. single evaluations written ArchiveAsyncTuning resides TuningInstanceAsyncSingleCrit/TuningInstanceAsyncMultiCrit. 
result written instance object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"","code":"TunerAsync$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"inst (TuningInstanceAsyncSingleCrit | TuningInstanceAsyncMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"data.table::data.table()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"","code":"TunerAsync$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":null,"dir":"Reference","previous_headings":"","what":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"Internally used transform bbotk::Optimizer 
Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"mlr3tuning::Tuner -> mlr3tuning::TunerAsync -> TunerAsyncFromOptimizerAsync","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"param_set (paradox::ParamSet) Set control parameters.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"TunerAsyncFromOptimizerAsync$new() TunerAsyncFromOptimizerAsync$optimize() TunerAsyncFromOptimizerAsync$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"","code":"TunerAsyncFromOptimizerAsync$new(optimizer, man = 
NA_character_)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"optimizer bbotk::Optimizer Optimizer called. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"Performs tuning TuningInstanceBatchSingleCrit / TuningInstanceBatchMultiCrit termination. single evaluations final results written ArchiveAsyncTuning resides TuningInstanceBatchSingleCrit/TuningInstanceBatchMultiCrit. final result returned.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"","code":"TunerAsyncFromOptimizerAsync$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"inst (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"TunerAsyncFromOptimizerAsync — 
TunerAsyncFromOptimizerAsync","text":"data.table::data.table.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"","code":"TunerAsyncFromOptimizerAsync$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Batch Tuning Algorithms — TunerBatch","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"TunerBatch implements optimization algorithm.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"TunerBatch abstract base class implements base functionality tuner must provide. subclass implemented following way: Inherit Tuner. Specify private abstract method $.optimize() use call optimizer. need call instance$eval_batch() evaluate design points. batch evaluation requested TuningInstanceBatchSingleCrit/TuningInstanceBatchMultiCrit object instance, batch possibly executed parallel via mlr3::benchmark(), evaluations stored inside instance$archive. 
batch evaluation, bbotk::Terminator checked, positive, exception class \"terminated_error\" generated. later case current batch evaluations still stored instance, numeric scores sent back handling optimizer lost execution control. exception caught select best configuration instance$archive return . Note therefore points specified bbotk::Terminator may evaluated, Terminator checked batch evaluation, -evaluation batch. many depends setting batch size. Overwrite private super-method .assign_result() want decide estimate final configuration instance estimated performance. default behavior : pick best resample-experiment, regarding given measure, assign configuration aggregated performance instance.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"private-methods","dir":"Reference","previous_headings":"","what":"Private Methods","title":"Class for Batch Tuning Algorithms — TunerBatch","text":".optimize(instance) -> NULL Abstract base method. Implement specify tuning subclass. See details sections. .assign_result(instance) -> NULL Abstract base method. Implement specify final configuration selected. See details sections.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. 
Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"super-class","dir":"Reference","previous_headings":"","what":"Super class","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"mlr3tuning::Tuner -> TunerBatch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"TunerBatch$new() TunerBatch$optimize() TunerBatch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"","code":"TunerBatch$new( id = \"tuner_batch\", param_set, param_classes, properties, packages = character(), label = NA_character_, man = NA_character_ )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"id (character(1)) Identifier new instance. param_set (paradox::ParamSet) Set control parameters. 
param_classes (character()) Supported parameter classes learner hyperparameters tuner can optimize, given paradox::ParamSet $class field. properties (character()) Set properties tuner. Must subset mlr_reflections$tuner_properties. packages (character()) Set required packages. Note packages loaded via requireNamespace(), attached. label (character(1)) Label object. Can used tables, plot text output instead ID. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"Performs tuning TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit termination. single evaluations written ArchiveBatchTuning resides TuningInstanceBatchSingleCrit/TuningInstanceBatchMultiCrit. result written instance object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"","code":"TunerBatch$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"inst (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"data.table::data.table()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Batch 
Tuning Algorithms — TunerBatch","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"","code":"TunerBatch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":null,"dir":"Reference","previous_headings":"","what":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"Internally used transform bbotk::Optimizer Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> TunerBatchFromOptimizerBatch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"param_set (paradox::ParamSet) Set control parameters.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() 
mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"TunerBatchFromOptimizerBatch$new() TunerBatchFromOptimizerBatch$optimize() TunerBatchFromOptimizerBatch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"","code":"TunerBatchFromOptimizerBatch$new(optimizer, man = NA_character_)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"optimizer bbotk::Optimizer Optimizer called. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"Performs tuning TuningInstanceBatchSingleCrit / TuningInstanceBatchMultiCrit termination. single evaluations final results written ArchiveBatchTuning resides TuningInstanceBatchSingleCrit/TuningInstanceBatchMultiCrit. 
final result returned.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"","code":"TunerBatchFromOptimizerBatch$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"inst (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"data.table::data.table.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"","code":"TunerBatchFromOptimizerBatch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Multi-Criteria Tuning with Rush — 
TuningInstanceAsyncMultiCrit","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"TuningInstanceAsyncMultiCrit specifies tuning problem Tuner. function ti_async() creates TuningInstanceAsyncMultiCrit function tune() creates instance internally.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"instance contains ObjectiveTuningAsync object encodes black box objective function Tuner optimize. instance allows basic operations querying objective design points ($eval_async()). operation usually done Tuner. Hyperparameter configurations asynchronously sent workers evaluated calling mlr3::resample(). evaluated hyperparameter configurations stored ArchiveAsyncTuning ($archive). batch evaluated, bbotk::Terminator queried remaining budget. available budget exhausted, exception raised, evaluations can performed point . tuner also supposed store final result, consisting selected hyperparameter configuration associated estimated performance values, calling method instance$.assign_result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. 
Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"analyzing tuning results, recommended pass ArchiveAsyncTuning .data.table(). returned data table contains mlr3::ResampleResult hyperparameter evaluation.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceAsync -> bbotk::OptimInstanceAsyncMultiCrit -> TuningInstanceAsyncMultiCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"result_learner_param_vals (list()) List param values optimal learner call.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"bbotk::OptimInstance$format() 
bbotk::OptimInstanceAsync$clear() bbotk::OptimInstanceAsync$print() bbotk::OptimInstanceAsync$reconnect()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"TuningInstanceAsyncMultiCrit$new() TuningInstanceAsyncMultiCrit$assign_result() TuningInstanceAsyncMultiCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"","code":"TuningInstanceAsyncMultiCrit$new( task, learner, resampling, measures, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. 
field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"method-assign-result-","dir":"Reference","previous_headings":"","what":"Method assign_result()","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"TunerAsync writes best found points estimated performance values (probably Pareto set / front). internal use.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"","code":"TuningInstanceAsyncMultiCrit$assign_result( xdt, ydt, learner_param_vals = NULL, extra = NULL, xydt = NULL, ... 
)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"xdt (data.table::data.table()) Hyperparameter values data.table::data.table(). row one configuration. Contains values search space. Can contain additional columns extra information. ydt (numeric(1)) Optimal outcomes, e.g. Pareto front. learner_param_vals (List named list()s) Fixed parameter values learner neither part extra (data.table::data.table()) Additional information. xydt (data.table::data.table()) Point, outcome, additional information. ... () ignored.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"","code":"TuningInstanceAsyncMultiCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"TuningInstanceAsyncSingleCrit specifies tuning problem TunerAsync. 
function ti_async() creates TuningInstanceAsyncSingleCrit function tune() creates instance internally.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"instance contains ObjectiveTuningAsync object encodes black box objective function Tuner optimize. instance allows basic operations querying objective design points ($eval_async()). operation usually done Tuner. Hyperparameter configurations asynchronously sent workers evaluated calling mlr3::resample(). evaluated hyperparameter configurations stored ArchiveAsyncTuning ($archive). batch evaluated, bbotk::Terminator queried remaining budget. available budget exhausted, exception raised, evaluations can performed point . tuner also supposed store final result, consisting selected hyperparameter configuration associated estimated performance values, calling method instance$.assign_result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"analyzing tuning results, recommended pass ArchiveAsyncTuning .data.table(). 
returned data table contains mlr3::ResampleResult hyperparameter evaluation.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"extension-packages","dir":"Reference","previous_headings":"","what":"Extension Packages","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"mlr3tuning extended following packages. mlr3tuningspaces collection search spaces scientific articles commonly used learners. mlr3hyperband adds Hyperband Successive Halving algorithm. 
mlr3mbo adds Bayesian optimization methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceAsync -> bbotk::OptimInstanceAsyncSingleCrit -> TuningInstanceAsyncSingleCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"result_learner_param_vals (list()) Param values optimal learner call.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"bbotk::OptimInstance$format() bbotk::OptimInstanceAsync$clear() bbotk::OptimInstanceAsync$print() bbotk::OptimInstanceAsync$reconnect()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"TuningInstanceAsyncSingleCrit$new() TuningInstanceAsyncSingleCrit$assign_result() 
TuningInstanceAsyncSingleCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"","code":"TuningInstanceAsyncSingleCrit$new( task, learner, resampling, measure = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. 
internal_search_space (paradox::ParamSet NULL) internal search space. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"method-assign-result-","dir":"Reference","previous_headings":"","what":"Method assign_result()","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"TunerAsync object writes best found point estimated performance value . internal use.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"","code":"TuningInstanceAsyncSingleCrit$assign_result( xdt, y, learner_param_vals = NULL, extra = NULL, xydt = NULL, ... )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"xdt (data.table::data.table()) Hyperparameter values data.table::data.table(). row one configuration. Contains values search space. Can contain additional columns extra information. y (numeric(1)) Optimal outcome. 
learner_param_vals (List named list()s) Fixed parameter values learner neither part extra (data.table::data.table()) Additional information. xydt (data.table::data.table()) Point, outcome, additional information (Deprecated). ... () ignored.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"","code":"TuningInstanceAsyncSingleCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"TuningInstanceBatchMultiCrit specifies tuning problem Tuner. function ti() creates TuningInstanceBatchMultiCrit function tune() creates instance internally.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"instance contains ObjectiveTuningBatch object encodes black box objective function Tuner optimize. 
instance allows basic operations querying objective design points ($eval_batch()). operation usually done Tuner. Evaluations hyperparameter configurations performed batches calling mlr3::benchmark() internally. evaluated hyperparameter configurations stored ArchiveBatchTuning ($archive). batch evaluated, bbotk::Terminator queried remaining budget. available budget exhausted, exception raised, evaluations can performed point . tuner also supposed store final result, consisting selected hyperparameter configuration associated estimated performance values, calling method instance$assign_result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"analyzing tuning results, recommended pass ArchiveBatchTuning .data.table(). returned data table joined benchmark result adds mlr3::ResampleResult hyperparameter evaluation. archive provides various getters (e.g. 
$learners()) ease access. getters extract position () unique hash (uhash). complete list getters see methods section. benchmark result ($benchmark_result) allows score hyperparameter configurations different measure. Alternatively, measures can supplied .data.table(). mlr3viz package provides visualizations tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceBatch -> bbotk::OptimInstanceBatchMultiCrit -> TuningInstanceBatchMultiCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"result_learner_param_vals (list()) List param values optimal learner call.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"bbotk::OptimInstance$clear() bbotk::OptimInstance$format() bbotk::OptimInstance$print() bbotk::OptimInstanceBatch$eval_batch() 
bbotk::OptimInstanceBatch$objective_function()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"TuningInstanceBatchMultiCrit$new() TuningInstanceBatchMultiCrit$assign_result() TuningInstanceBatchMultiCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"","code":"TuningInstanceBatchMultiCrit$new( task, learner, resampling, measures, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. 
internal_search_space (paradox::ParamSet NULL) internal search space.
extra (data.table::data.table()) Additional information. ... () ignored.
Load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # Construct tuning instance instance = ti( task = task, learner = learner, resampling = rsmp(\"cv\", folds = 3), measures = msrs(c(\"classif.ce\", \"time_train\")), terminator = trm(\"evals\", n_evals = 4) ) # Choose optimization algorithm tuner = tnr(\"random_search\", batch_size = 2) # Run tuning tuner$optimize(instance) #> cp learner_param_vals x_domain classif.ce time_train #> #> 1: -3.759791 0.09583016 0.002666667 # Optimal hyperparameter configurations instance$result #> cp learner_param_vals x_domain classif.ce time_train #> #> 1: -3.759791 0.09583016 0.002666667 # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce time_train runtime_learners timestamp #> #> 1: -3.259804 0.09583016 0.003333333 0.017 2024-11-25 08:44:13 #> 2: -3.759791 0.09583016 0.002666667 0.014 2024-11-25 08:44:13 #> 3: -2.565382 0.09583016 0.003000000 0.015 2024-11-25 08:44:13 #> 4: -3.080830 0.09583016 0.003000000 0.015 2024-11-25 08:44:13 #> warnings errors x_domain batch_nr resample_result #> #> 1: 0 0 1 #> 2: 0 0 1 #> 3: 0 0 2 #> 4: 0 0 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"TuningInstanceBatchSingleCrit specifies tuning problem Tuner. 
function ti() creates TuningInstanceBatchSingleCrit function tune() creates instance internally.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"instance contains ObjectiveTuningBatch object encodes black box objective function Tuner optimize. instance allows basic operations querying objective design points ($eval_batch()). operation usually done Tuner. Evaluations hyperparameter configurations performed batches calling mlr3::benchmark() internally. evaluated hyperparameter configurations stored ArchiveBatchTuning ($archive). batch evaluated, bbotk::Terminator queried remaining budget. available budget exhausted, exception raised, evaluations can performed point . tuner also supposed store final result, consisting selected hyperparameter configuration associated estimated performance values, calling method instance$assign_result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. 
Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"extension-packages","dir":"Reference","previous_headings":"","what":"Extension Packages","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"mlr3tuning extended following packages. mlr3tuningspaces collection search spaces scientific articles commonly used learners. mlr3hyperband adds Hyperband Successive Halving algorithm. mlr3mbo adds Bayesian optimization methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"analyzing tuning results, recommended pass ArchiveBatchTuning .data.table(). returned data table joined benchmark result adds mlr3::ResampleResult hyperparameter evaluation. archive provides various getters (e.g. $learners()) ease access. getters extract position () unique hash (uhash). complete list getters see methods section. benchmark result ($benchmark_result) allows score hyperparameter configurations different measure. Alternatively, measures can supplied .data.table(). 
mlr3viz package provides visualizations tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceBatch -> bbotk::OptimInstanceBatchSingleCrit -> TuningInstanceBatchSingleCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"result_learner_param_vals (list()) Param values optimal learner call.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"bbotk::OptimInstance$clear() bbotk::OptimInstance$format() bbotk::OptimInstance$print() bbotk::OptimInstanceBatch$eval_batch() bbotk::OptimInstanceBatch$objective_function()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"TuningInstanceBatchSingleCrit$new() TuningInstanceBatchSingleCrit$assign_result() 
TuningInstanceBatchSingleCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"","code":"TuningInstanceBatchSingleCrit$new( task, learner, resampling, measure = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. 
TuningInstanceBatchSingleCrit$assign_result( xdt, y, learner_param_vals = NULL, extra = NULL, ...
... () ignored.
on the full data set learner$train(task) # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -3.036646 0.06392067 0.015 2024-11-25 08:44:14 0 0 #> 2: -5.238604 0.06392067 0.034 2024-11-25 08:44:14 0 0 #> 3: -7.255326 0.06392067 0.016 2024-11-25 08:44:14 0 0 #> 4: -6.314690 0.06392067 0.015 2024-11-25 08:44:14 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 1 #> 3: 2 #> 4: 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"TuningInstanceMultiCrit deprecated class now wrapper around TuningInstanceBatchMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceBatch -> bbotk::OptimInstanceBatchMultiCrit -> mlr3tuning::TuningInstanceBatchMultiCrit -> TuningInstanceMultiCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"bbotk::OptimInstance$clear() bbotk::OptimInstance$format() bbotk::OptimInstance$print() bbotk::OptimInstanceBatch$eval_batch() bbotk::OptimInstanceBatch$objective_function() mlr3tuning::TuningInstanceBatchMultiCrit$assign_result()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Multi 
Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"TuningInstanceMultiCrit$new() TuningInstanceMultiCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"","code":"TuningInstanceMultiCrit$new( task, learner, resampling, measures, terminator, search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). 
store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"","code":"TuningInstanceMultiCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"TuningInstanceSingleCrit deprecated class now wrapper around 
TuningInstanceBatchSingleCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceBatch -> bbotk::OptimInstanceBatchSingleCrit -> mlr3tuning::TuningInstanceBatchSingleCrit -> TuningInstanceSingleCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"bbotk::OptimInstance$clear() bbotk::OptimInstance$format() bbotk::OptimInstance$print() bbotk::OptimInstanceBatch$eval_batch() bbotk::OptimInstanceBatch$objective_function() mlr3tuning::TuningInstanceBatchSingleCrit$assign_result()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"TuningInstanceSingleCrit$new() TuningInstanceSingleCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"","code":"TuningInstanceSingleCrit$new( task, learner, resampling, measure = NULL, terminator, search_space = NULL, 
store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. 
callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"","code":"TuningInstanceSingleCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_search_space.html","id":null,"dir":"Reference","previous_headings":"","what":"Convert to a Search Space — as_search_space","title":"Convert to a Search Space — as_search_space","text":"Convert object search space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_search_space.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convert to a Search Space — as_search_space","text":"","code":"as_search_space(x, ...) # S3 method for class 'Learner' as_search_space(x, ...) # S3 method for class 'ParamSet' as_search_space(x, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_search_space.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convert to a Search Space — as_search_space","text":"x () Object convert search space. ... 
() Additional arguments.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_search_space.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Convert to a Search Space — as_search_space","text":"paradox::ParamSet.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_tuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Convert to a Tuner — as_tuner","title":"Convert to a Tuner — as_tuner","text":"Convert object Tuner list Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_tuner.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convert to a Tuner — as_tuner","text":"","code":"as_tuner(x, ...) # S3 method for class 'Tuner' as_tuner(x, clone = FALSE, ...) as_tuners(x, ...) # Default S3 method as_tuners(x, ...) # S3 method for class 'list' as_tuners(x, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_tuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convert to a Tuner — as_tuner","text":"x () Object convert. ... () Additional arguments. 
clone (logical(1)) Whether clone object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_async_tuning_callback.html","id":null,"dir":"Reference","previous_headings":"","what":"Assertions for Callbacks — assert_async_tuning_callback","title":"Assertions for Callbacks — assert_async_tuning_callback","text":"Assertions CallbackAsyncTuning class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_async_tuning_callback.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Assertions for Callbacks — assert_async_tuning_callback","text":"","code":"assert_async_tuning_callback(callback, null_ok = FALSE) assert_async_tuning_callbacks(callbacks)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_async_tuning_callback.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Assertions for Callbacks — assert_async_tuning_callback","text":"callback (CallbackAsyncTuning). null_ok (logical(1)) TRUE, NULL allowed. 
callbacks (list CallbackAsyncTuning).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_async_tuning_callback.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Assertions for Callbacks — assert_async_tuning_callback","text":"[CallbackAsyncTuning | List CallbackAsyncTunings.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_batch_tuning_callback.html","id":null,"dir":"Reference","previous_headings":"","what":"Assertions for Callbacks — assert_batch_tuning_callback","title":"Assertions for Callbacks — assert_batch_tuning_callback","text":"Assertions CallbackBatchTuning class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_batch_tuning_callback.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Assertions for Callbacks — assert_batch_tuning_callback","text":"","code":"assert_batch_tuning_callback(callback, null_ok = FALSE) assert_batch_tuning_callbacks(callbacks)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_batch_tuning_callback.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Assertions for Callbacks — assert_batch_tuning_callback","text":"callback (CallbackBatchTuning). null_ok (logical(1)) TRUE, NULL allowed. 
callbacks (list CallbackBatchTuning).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_batch_tuning_callback.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Assertions for Callbacks — assert_batch_tuning_callback","text":"[CallbackBatchTuning | List CallbackBatchTunings.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Function for Automatic Tuning — auto_tuner","title":"Function for Automatic Tuning — auto_tuner","text":"AutoTuner wraps mlr3::Learner augments automatic tuning process given set hyperparameters. auto_tuner() function creates AutoTuner object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Function for Automatic Tuning — auto_tuner","text":"","code":"auto_tuner( tuner, learner, resampling, measure = NULL, term_evals = NULL, term_time = NULL, terminator = NULL, search_space = NULL, internal_search_space = NULL, store_tuning_instance = TRUE, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL, id = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Function for Automatic Tuning — auto_tuner","text":"tuner (Tuner) Optimization algorithm. learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. 
NULL, default measure used. term_evals (integer(1)) Number allowed evaluations. Ignored terminator passed. term_time (integer(1)) Maximum allowed time seconds. Ignored terminator passed. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. store_tuning_instance (logical(1)) TRUE (default), stores internally created TuningInstanceBatchSingleCrit intermediate results slot $tuning_instance. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches. id (character(1)) Identifier new instance.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Function for Automatic Tuning — auto_tuner","text":"AutoTuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Function for Automatic Tuning — auto_tuner","text":"AutoTuner mlr3::Learner wraps another mlr3::Learner performs following steps $train(): hyperparameters wrapped (inner) learner trained training data via resampling. 
tuning can specified providing Tuner, bbotk::Terminator, search space paradox::ParamSet, mlr3::Resampling mlr3::Measure. best found hyperparameter configuration set hyperparameters wrapped (inner) learner stored $learner. Access tuned hyperparameters via $tuning_result. final model fit complete training data using now parametrized wrapped learner. respective model available via field $learner$model. $predict() AutoTuner just calls predict method wrapped (inner) learner. set timeout disabled fitting final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Function for Automatic Tuning — auto_tuner","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Function for Automatic Tuning — auto_tuner","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"nested-resampling","dir":"Reference","previous_headings":"","what":"Nested Resampling","title":"Function for Automatic Tuning — auto_tuner","text":"Nested resampling performed passing AutoTuner mlr3::resample() mlr3::benchmark(). access inner resampling results, set store_tuning_instance = TRUE execute mlr3::resample() mlr3::benchmark() store_models = TRUE (see examples). mlr3::Resampling passed AutoTuner meant inner resampling, operating training set arbitrary outer resampling. reason, inner resampling instantiated. instantiated resampling passed, AutoTuner fails row id inner resampling present training set outer resampling.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Function for Automatic Tuning — auto_tuner","text":"","code":"at = auto_tuner( tuner = tnr(\"random_search\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) at$train(tsk(\"pima\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_async_tuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Create Asynchronous Tuning Callback — callback_async_tuning","title":"Create Asynchronous Tuning Callback — callback_async_tuning","text":"Function create CallbackAsyncTuning. Predefined callbacks stored dictionary mlr_callbacks can retrieved clbk(). Tuning callbacks can called different stages tuning process. stages prefixed on_*. See also section parameters information stages. 
tuning callback works ContextAsyncTuning.","code":"Start Tuning - on_optimization_begin Start Worker - on_worker_begin Start Optimization on Worker - on_optimizer_before_eval Start Evaluation - on_eval_after_xs - on_eval_after_resample - on_eval_before_archive End Evaluation - on_optimizer_after_eval End Optimization on Worker - on_worker_end End Worker - on_tuning_result_begin - on_result_begin - on_result_end - on_optimization_end End Tuning"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_async_tuning.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create Asynchronous Tuning Callback — callback_async_tuning","text":"","code":"callback_async_tuning( id, label = NA_character_, man = NA_character_, on_optimization_begin = NULL, on_worker_begin = NULL, on_optimizer_before_eval = NULL, on_eval_after_xs = NULL, on_eval_after_resample = NULL, on_eval_before_archive = NULL, on_optimizer_after_eval = NULL, on_worker_end = NULL, on_tuning_result_begin = NULL, on_result_begin = NULL, on_result_end = NULL, on_result = NULL, on_optimization_end = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_async_tuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create Asynchronous Tuning Callback — callback_async_tuning","text":"id (character(1)) Identifier new instance. label (character(1)) Label new instance. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help(). on_optimization_begin (function()) Stage called beginning optimization. Called Optimizer$optimize(). functions must two arguments named callback context. on_worker_begin (function()) Stage called beginning optimization worker. Called worker loop. functions must two arguments named callback context. on_optimizer_before_eval (function()) Stage called optimizer proposes points. Called OptimInstance$.eval_point(). 
functions must two arguments named callback context. argument instance$.eval_point(xs) xs_trafoed extra available context. xs xs_trafoed instance$.eval_queue() available context. on_eval_after_xs (function()) Stage called xs passed objective. Called ObjectiveTuningAsync$eval(). functions must two arguments named callback context. argument $.eval(xs) available context. on_eval_after_resample (function()) Stage called hyperparameter configuration evaluated. Called ObjectiveTuningAsync$eval(). functions must two arguments named callback context. resample_result available `context on_eval_before_archive (function()) Stage called performance values written archive. Called ObjectiveTuningAsync$eval(). functions must two arguments named callback context. aggregated_performance available context. on_optimizer_after_eval (function()) Stage called points evaluated. Called OptimInstance$.eval_point(). functions must two arguments named callback context. on_worker_end (function()) Stage called end optimization worker. Called worker loop. functions must two arguments named callback context. on_tuning_result_begin (function()) Stage called beginning result writing. Called TuningInstance*$assign_result(). functions must two arguments named callback context. arguments $assign_result(xdt, y, learner_param_vals, extra) available context. on_result_begin (function()) Stage called beginning result writing. Called OptimInstance$assign_result(). functions must two arguments named callback context. arguments $.assign_result(xdt, y, extra) available context. on_result_end (function()) Stage called result written. Called OptimInstance$assign_result(). functions must two arguments named callback context. final result instance$result available context. on_result (function()) Deprecated. Use on_result_end instead. Stage called result written. Called OptimInstance$assign_result(). on_optimization_end (function()) Stage called end optimization. 
Called Optimizer$optimize().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_async_tuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Create Asynchronous Tuning Callback — callback_async_tuning","text":"implementing callback, function must two arguments named callback context. callback can write data state ($state), e.g. settings affect callback . Tuning callbacks access ContextAsyncTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Create Batch Tuning Callback — callback_batch_tuning","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"Function create CallbackBatchTuning. Predefined callbacks stored dictionary mlr_callbacks can retrieved clbk(). Tuning callbacks can called different stages tuning process. stages prefixed on_*. See also section parameters information stages. tuning callback works ContextBatchTuning.","code":"Start Tuning - on_optimization_begin Start Tuner Batch - on_optimizer_before_eval Start Evaluation - on_eval_after_design - on_eval_after_benchmark - on_eval_before_archive End Evaluation - on_optimizer_after_eval End Tuner Batch - on_tuning_result_begin - on_result_begin - on_result_end - on_optimization_end End Tuning"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"","code":"callback_batch_tuning( id, label = NA_character_, man = NA_character_, on_optimization_begin = NULL, on_optimizer_before_eval = NULL, on_eval_after_design = NULL, on_eval_after_benchmark = NULL, on_eval_before_archive = NULL, on_optimizer_after_eval = NULL, on_tuning_result_begin = NULL, on_result_begin = NULL, on_result_end = NULL, on_result = NULL, on_optimization_end = NULL 
)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"id (character(1)) Identifier new instance. label (character(1)) Label new instance. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help(). on_optimization_begin (function()) Stage called beginning optimization. Called Optimizer$optimize(). functions must two arguments named callback context. on_optimizer_before_eval (function()) Stage called optimizer proposes points. Called OptimInstance$eval_batch(). functions must two arguments named callback context. argument $eval_batch(xdt) available context. on_eval_after_design (function()) Stage called design created. Called ObjectiveTuningBatch$eval_many(). functions must two arguments named callback context. arguments $eval_many(xss, resampling) available context. Additionally, design available context. on_eval_after_benchmark (function()) Stage called hyperparameter configurations evaluated. Called ObjectiveTuningBatch$eval_many(). functions must two arguments named callback context. benchmark_result available context. on_eval_before_archive (function()) Stage called performance values written archive. Called ObjectiveTuningBatch$eval_many(). functions must two arguments named callback context. aggregated_performance available context. on_optimizer_after_eval (function()) Stage called points evaluated. Called OptimInstance$eval_batch(). functions must two arguments named callback context. new configurations performances instance$archive available context. on_tuning_result_begin (function()) Stage called beginning result writing. Called TuningInstanceBatch$assign_result(). functions must two arguments named callback context. arguments $assign_result(xdt, y, learner_param_vals, extra) available context. 
on_result_begin (function()) Stage called beginning result writing. Called OptimInstance$assign_result(). functions must two arguments named callback context. arguments $assign_result(xdt, y, extra) available context. on_result_end (function()) Stage called result written. Called OptimInstance$assign_result(). functions must two arguments named callback context. final result instance$result available context. on_result (function()) Deprecated. Use on_result_end instead. Stage called result written. Called OptimInstance$assign_result(). functions must two arguments named callback context. on_optimization_end (function()) Stage called end optimization. Called Optimizer$optimize(). functions must two arguments named callback context.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"implementing callback, function must two arguments named callback context. callback can write data state ($state), e.g. settings affect callback . Tuning callbacks access ContextBatchTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"","code":"# write archive to disk callback_batch_tuning(\"mlr3tuning.backup\", on_optimization_end = function(callback, context) { saveRDS(context$instance$archive, \"archive.rds\") } ) #> #> * Active Stages: on_optimization_end"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":null,"dir":"Reference","previous_headings":"","what":"Extract Inner Tuning Archives — extract_inner_tuning_archives","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"Extract inner tuning archives nested resampling. 
Implemented mlr3::ResampleResult mlr3::BenchmarkResult. function iterates AutoTuner objects binds tuning archives data.table::data.table(). AutoTuner must initialized store_tuning_instance = TRUE mlr3::resample() mlr3::benchmark() must called store_models = TRUE.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"","code":"extract_inner_tuning_archives( x, unnest = \"x_domain\", exclude_columns = \"uhash\" )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"x (mlr3::ResampleResult | mlr3::BenchmarkResult). unnest (character()) Transforms list columns separate columns. default, x_domain unnested. Set NULL column unnested. exclude_columns (character()) Exclude columns result table. Set NULL column excluded.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"data.table::data.table().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"data-structure","dir":"Reference","previous_headings":"","what":"Data structure","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"returned data table following columns: experiment (integer(1)) Index, giving according row number original benchmark grid. iteration (integer(1)) Iteration outer resampling. One column hyperparameter search spaces. One column performance measure. 
runtime_learners (numeric(1)) Sum training predict times logged learners per mlr3::ResampleResult / evaluation. include potential overhead time. timestamp (POSIXct) Time stamp evaluation logged archive. batch_nr (integer(1)) Hyperparameters evaluated batches. batch unique batch number. x_domain (list()) List transformed hyperparameter values. default column unnested. x_domain_* () Separate column transformed hyperparameter. resample_result (mlr3::ResampleResult) Resample result inner resampling. task_id (character(1)). learner_id (character(1)). resampling_id (character(1)).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"","code":"# Nested Resampling on Palmer Penguins Data Set learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)) # create auto tuner at = auto_tuner( tuner = tnr(\"random_search\"), learner = learner, resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) resampling_outer = rsmp(\"cv\", folds = 2) rr = resample(tsk(\"iris\"), at, resampling_outer, store_models = TRUE) # extract inner archives extract_inner_tuning_archives(rr) #> iteration cp classif.ce x_domain_cp runtime_learners #> #> 1: 1 -6.819407 0.04 0.001092369 0.004 #> 2: 1 -6.361894 0.04 0.001726095 0.005 #> 3: 1 -5.017906 0.04 0.006618373 0.004 #> 4: 1 -4.487537 0.04 0.011248315 0.004 #> 5: 2 -2.771268 0.08 0.062582599 0.005 #> 6: 2 -5.852816 0.08 0.002871801 0.005 #> 7: 2 -6.365882 0.08 0.001719224 0.005 #> 8: 2 -3.185002 0.08 0.041378177 0.004 #> timestamp warnings errors batch_nr resample_result task_id #> #> 1: 2024-11-25 08:44:18 0 0 1 iris #> 2: 2024-11-25 08:44:18 0 0 2 iris #> 3: 2024-11-25 08:44:18 0 0 3 iris #> 4: 2024-11-25 08:44:18 0 0 4 iris #> 5: 2024-11-25 08:44:18 0 0 1 iris #> 6: 2024-11-25 08:44:18 0 0 2 iris #> 7: 
2024-11-25 08:44:18 0 0 3 iris #> 8: 2024-11-25 08:44:18 0 0 4 iris #> learner_id resampling_id #> #> 1: classif.rpart.tuned cv #> 2: classif.rpart.tuned cv #> 3: classif.rpart.tuned cv #> 4: classif.rpart.tuned cv #> 5: classif.rpart.tuned cv #> 6: classif.rpart.tuned cv #> 7: classif.rpart.tuned cv #> 8: classif.rpart.tuned cv"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":null,"dir":"Reference","previous_headings":"","what":"Extract Inner Tuning Results — extract_inner_tuning_results","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"Extract inner tuning results nested resampling. Implemented mlr3::ResampleResult mlr3::BenchmarkResult.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"","code":"extract_inner_tuning_results(x, tuning_instance, ...) # S3 method for class 'ResampleResult' extract_inner_tuning_results(x, tuning_instance = FALSE, ...) # S3 method for class 'BenchmarkResult' extract_inner_tuning_results(x, tuning_instance = FALSE, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"x (mlr3::ResampleResult | mlr3::BenchmarkResult). tuning_instance (logical(1)) TRUE, tuning instances added table. ... 
() Additional arguments.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"data.table::data.table().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"function iterates AutoTuner objects binds tuning results data.table::data.table(). AutoTuner must initialized store_tuning_instance = TRUE mlr3::resample() mlr3::benchmark() must called store_models = TRUE. Optionally, tuning instance can added iteration.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"data-structure","dir":"Reference","previous_headings":"","what":"Data structure","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"returned data table following columns: experiment (integer(1)) Index, giving according row number original benchmark grid. iteration (integer(1)) Iteration outer resampling. One column hyperparameter search spaces. One column performance measure. learner_param_vals (list()) Hyperparameter values used learner. Includes fixed proposed hyperparameter values. x_domain (list()) List transformed hyperparameter values. tuning_instance (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit) Optionally, tuning instances. task_id (character(1)). learner_id (character(1)). 
resampling_id (character(1)).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"","code":"# Nested Resampling on Palmer Penguins Data Set learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)) # create auto tuner at = auto_tuner( tuner = tnr(\"random_search\"), learner = learner, resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) resampling_outer = rsmp(\"cv\", folds = 2) rr = resample(tsk(\"iris\"), at, resampling_outer, store_models = TRUE) # extract inner results extract_inner_tuning_results(rr) #> iteration cp classif.ce learner_param_vals x_domain task_id #> #> 1: 1 -8.235780 0.00 iris #> 2: 2 -3.983786 0.04 iris #> learner_id resampling_id #> #> 1: classif.rpart.tuned cv #> 2: classif.rpart.tuned cv"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning-package.html","id":null,"dir":"Reference","previous_headings":"","what":"mlr3tuning: Hyperparameter Optimization for 'mlr3' — mlr3tuning-package","title":"mlr3tuning: Hyperparameter Optimization for 'mlr3' — mlr3tuning-package","text":"Hyperparameter optimization package 'mlr3' ecosystem. features highly configurable search spaces via 'paradox' package finds optimal hyperparameter configurations 'mlr3' learner. 'mlr3tuning' works several optimization algorithms e.g. Random Search, Iterated Racing, Bayesian Optimization ('mlr3mbo') Hyperband ('mlr3hyperband'). 
Moreover, can automatically optimize learners estimate performance optimized models nested resampling.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning-package.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"mlr3tuning: Hyperparameter Optimization for 'mlr3' — mlr3tuning-package","text":"Maintainer: Marc Becker marcbecker@posteo.de (ORCID) Authors: Michel Lang michellang@gmail.com (ORCID) Jakob Richter jakob1richter@gmail.com (ORCID) Bernd Bischl bernd_bischl@gmx.net (ORCID) Daniel Schalk daniel.schalk@stat.uni-muenchen.de (ORCID)","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.asnyc_mlflow.html","id":null,"dir":"Reference","previous_headings":"","what":"MLflow Connector Callback — mlr3tuning.asnyc_mlflow","title":"MLflow Connector Callback — mlr3tuning.asnyc_mlflow","text":"mlr3misc::Callback logs hyperparameter configurations performance configurations MLflow.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.asnyc_mlflow.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"MLflow Connector Callback — mlr3tuning.asnyc_mlflow","text":"","code":"clbk(\"mlr3tuning.async_mlflow\", tracking_uri = \"http://localhost:5000\") #> : MLflow Connector #> * Active Stages: on_eval_before_archive, on_eval_after_xs, #> on_optimization_begin if (FALSE) { # \\dontrun{ rush::rush_plan(n_workers = 4) learner = lrn(\"classif.rpart\", minsplit = to_tune(2, 128), cp = to_tune(1e-04, 1e-1)) instance = TuningInstanceAsyncSingleCrit$new( task = tsk(\"pima\"), learner = learner, resampling = rsmp(\"cv\", folds = 3), measure = msr(\"classif.ce\"), terminator = trm(\"evals\", n_evals = 20), store_benchmark_result = FALSE, callbacks = clbk(\"mlr3tuning.rush_mlflow\", tracking_uri = \"http://localhost:8080\") ) tuner = tnr(\"random_search_v2\") tuner$optimize(instance) } # 
}"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.async_default_configuration.html","id":null,"dir":"Reference","previous_headings":"","what":"Default Configuration Callback — mlr3tuning.async_default_configuration","title":"Default Configuration Callback — mlr3tuning.async_default_configuration","text":"CallbackAsyncTuning CallbackBatchTuning evaluate default hyperparameter values learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.async_save_logs.html","id":null,"dir":"Reference","previous_headings":"","what":"Save Logs Callback — mlr3tuning.async_save_logs","title":"Save Logs Callback — mlr3tuning.async_save_logs","text":"CallbackAsyncTuning saves logs learners archive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.backup.html","id":null,"dir":"Reference","previous_headings":"","what":"Backup Benchmark Result Callback — mlr3tuning.backup","title":"Backup Benchmark Result Callback — mlr3tuning.backup","text":"mlr3misc::Callback writes mlr3::BenchmarkResult batch disk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.backup.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Backup Benchmark Result Callback — mlr3tuning.backup","text":"","code":"clbk(\"mlr3tuning.backup\", path = \"backup.rds\") #> : Backup Benchmark Result Callback #> * Active Stages: on_optimizer_after_eval, on_optimization_begin # tune classification tree on the pima data set instance = tune( tuner = tnr(\"random_search\", batch_size = 2), task = tsk(\"pima\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), term_evals = 4, callbacks = clbk(\"mlr3tuning.backup\", path = tempfile(fileext = \".rds\")) 
)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.measures.html","id":null,"dir":"Reference","previous_headings":"","what":"Measure Callback — mlr3tuning.measures","title":"Measure Callback — mlr3tuning.measures","text":"mlr3misc::Callback scores hyperparameter configurations additional measures tuning. Usually, configurations can scored additional measures tuning (see ArchiveBatchTuning). However, memory sufficient store mlr3::BenchmarkResult, necessary score additional measures tuning. measures taken account tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.measures.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Measure Callback — mlr3tuning.measures","text":"","code":"clbk(\"mlr3tuning.measures\") #> : Additional Measures Callback #> * Active Stages: on_eval_before_archive, on_optimization_begin # additionally score the configurations on the accuracy measure instance = tune( tuner = tnr(\"random_search\", batch_size = 2), task = tsk(\"pima\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), term_evals = 4, callbacks = clbk(\"mlr3tuning.measures\", measures = msr(\"classif.acc\")) )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.one_se_rule.html","id":null,"dir":"Reference","previous_headings":"","what":"One Standard Error Rule Callback — mlr3tuning.one_se_rule","title":"One Standard Error Rule Callback — mlr3tuning.one_se_rule","text":"one standard error rule takes number features account selecting best hyperparameter configuration. Many learners support internal feature selection, can accessed via $selected_features(). callback selects hyperparameter configuration smallest feature set within one standard error best performing configuration. 
multiple hyperparameter configurations number features, first one selected.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.one_se_rule.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"One Standard Error Rule Callback — mlr3tuning.one_se_rule","text":"Kuhn, Max, Johnson, Kjell (2013). “Applied Predictive Modeling.” chapter -Fitting Model Tuning, 61–92. Springer New York, New York, NY. ISBN 978-1-4614-6849-3.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.one_se_rule.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"One Standard Error Rule Callback — mlr3tuning.one_se_rule","text":"","code":"clbk(\"mlr3tuning.one_se_rule\") #> : One Standard Error Rule Callback #> * Active Stages: on_tuning_result_begin, on_eval_before_archive, #> on_optimization_begin # Run optimization on the pima data set with the callback instance = tune( tuner = tnr(\"random_search\", batch_size = 15), task = tsk(\"pima\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), term_evals = 30, callbacks = clbk(\"mlr3tuning.one_se_rule\") ) # Hyperparameter configuration with the smallest feature set within one standard error of the best instance$result #> cp n_features learner_param_vals x_domain classif.ce #> #> 1: -4.216525 4 0.2317708"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning_assertions.html","id":null,"dir":"Reference","previous_headings":"","what":"Assertion for mlr3tuning objects — mlr3tuning_assertions","title":"Assertion for mlr3tuning objects — mlr3tuning_assertions","text":"assertion functions ensure right class attribute, optionally additional 
properties.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning_assertions.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Assertion for mlr3tuning objects — mlr3tuning_assertions","text":"","code":"assert_tuner(tuner) assert_tuners(tuners) assert_tuner_async(tuner) assert_tuner_batch(tuner) assert_tuning_instance(inst) assert_tuning_instance_async(inst) assert_tuning_instance_batch(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning_assertions.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Assertion for mlr3tuning objects — mlr3tuning_assertions","text":"tuner (TunerBatch). tuners (list Tuner). inst (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":null,"dir":"Reference","previous_headings":"","what":"Dictionary of Tuners — mlr_tuners","title":"Dictionary of Tuners — mlr_tuners","text":"simple mlr3misc::Dictionary storing objects class Tuner. tuner associated help page, see mlr_tuners_[id]. dictionary can get populated additional tuners add-packages. 
convenient way retrieve construct tuner, see tnr()/tnrs().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Dictionary of Tuners — mlr_tuners","text":"R6::R6Class object inheriting mlr3misc::Dictionary.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Dictionary of Tuners — mlr_tuners","text":"See mlr3misc::Dictionary.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":"s-methods","dir":"Reference","previous_headings":"","what":"S3 methods","title":"Dictionary of Tuners — mlr_tuners","text":".data.table(dict, ..., objects = FALSE)mlr3misc::Dictionary -> data.table::data.table() Returns data.table::data.table() fields \"key\", \"label\", \"param_classes\", \"properties\" \"packages\" columns. objects set TRUE, constructed objects returned list column named object.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Dictionary of Tuners — mlr_tuners","text":"","code":"as.data.table(mlr_tuners) #> Key: #> key label #> #> 1: async_design_points Asynchronous Design Points #> 2: async_grid_search Asynchronous Grid Search #> 3: async_random_search Asynchronous Random Search #> 4: cmaes Covariance Matrix Adaptation Evolution Strategy #> 5: design_points Design Points #> 6: gensa Generalized Simulated Annealing #> 7: grid_search Grid Search #> 8: internal Internal Optimizer #> 9: irace Iterated Racing #> 10: nloptr Non-linear Optimization #> 11: random_search Random Search #> param_classes #> #> 1: ParamLgl,ParamInt,ParamDbl,ParamFct,ParamUty #> 2: ParamLgl,ParamInt,ParamDbl,ParamFct #> 3: ParamLgl,ParamInt,ParamDbl,ParamFct #> 4: ParamDbl #> 5: 
ParamLgl,ParamInt,ParamDbl,ParamFct,ParamUty #> 6: ParamDbl #> 7: ParamLgl,ParamInt,ParamDbl,ParamFct #> 8: ParamLgl,ParamInt,ParamDbl,ParamFct #> 9: ParamDbl,ParamInt,ParamFct,ParamLgl #> 10: ParamDbl #> 11: ParamLgl,ParamInt,ParamDbl,ParamFct #> properties packages #> #> 1: dependencies,single-crit,multi-crit,async mlr3tuning,bbotk,rush #> 2: dependencies,single-crit,multi-crit,async mlr3tuning,bbotk,rush #> 3: dependencies,single-crit,multi-crit,async mlr3tuning,bbotk,rush #> 4: single-crit mlr3tuning,bbotk,adagio #> 5: dependencies,single-crit,multi-crit mlr3tuning,bbotk #> 6: single-crit mlr3tuning,bbotk,GenSA #> 7: dependencies,single-crit,multi-crit mlr3tuning,bbotk #> 8: dependencies,single-crit mlr3tuning #> 9: dependencies,single-crit mlr3tuning,bbotk,irace #> 10: single-crit mlr3tuning,bbotk,nloptr #> 11: dependencies,single-crit,multi-crit mlr3tuning,bbotk mlr_tuners$get(\"random_search\") #> : Random Search #> * Parameters: batch_size=1 #> * Parameter classes: ParamLgl, ParamInt, ParamDbl, ParamFct #> * Properties: dependencies, single-crit, multi-crit #> * Packages: mlr3tuning, bbotk tnr(\"random_search\") #> : Random Search #> * Parameters: batch_size=1 #> * Parameter classes: ParamLgl, ParamInt, ParamDbl, ParamFct #> * Properties: dependencies, single-crit, multi-crit #> * Packages: mlr3tuning, bbotk"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"Subclass asynchronous design points tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Asynchronous Design Points — 
mlr_tuners_async_design_points","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"async_design_points\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"design data.table::data.table Design points try search, one per row.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"mlr3tuning::Tuner -> mlr3tuning::TunerAsync -> mlr3tuning::TunerAsyncFromOptimizerAsync -> TunerAsyncDesignPoints","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerAsyncFromOptimizerAsync$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"TunerAsyncDesignPoints$new() TunerAsyncDesignPoints$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"","code":"TunerAsyncDesignPoints$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"","code":"TunerAsyncDesignPoints$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"Subclass asynchronous grid search tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"Tuner can instantiated associated sugar function 
tnr():","code":"tnr(\"async_design_points\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"batch_size integer(1) Maximum number points try batch.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"mlr3tuning::Tuner -> mlr3tuning::TunerAsync -> mlr3tuning::TunerAsyncFromOptimizerAsync -> TunerAsyncGridSearch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerAsyncFromOptimizerAsync$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"TunerAsyncGridSearch$new() TunerAsyncGridSearch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"","code":"TunerAsyncGridSearch$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"","code":"TunerAsyncGridSearch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"Subclass asynchronous random search tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"Bergstra J, Bengio Y (2012). 
“Random Search Hyper-Parameter Optimization.” Journal Machine Learning Research, 13(10), 281–305. https://jmlr.csail.mit.edu/papers/v13/bergstra12a.html.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"random points sampled paradox::generate_design_random().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"async_random_search\")"},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"mlr3tuning::Tuner -> mlr3tuning::TunerAsync -> mlr3tuning::TunerAsyncFromOptimizerAsync -> TunerAsyncRandomSearch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerAsyncFromOptimizerAsync$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Asynchronous Random Search — 
mlr_tuners_async_random_search","text":"TunerAsyncRandomSearch$new() TunerAsyncRandomSearch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"","code":"TunerAsyncRandomSearch$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"","code":"TunerAsyncRandomSearch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","title":"Hyperparameter Tuning with Covariance Matrix Adaptation 
Evolution Strategy — mlr_tuners_cmaes","text":"Subclass Covariance Matrix Adaptation Evolution Strategy (CMA-ES). Calls adagio::pureCMAES() package adagio.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Hansen N (2016). “CMA Evolution Strategy: Tutorial.” 1604.00772.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"cmaes\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"control-parameters","dir":"Reference","previous_headings":"","what":"Control Parameters","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"start_values character(1) Create random start values based center search space? latter case, center parameters trafo applied. meaning control parameters, see adagio::pureCMAES(). Note removed control parameters refer termination algorithm terminators allow obtain behavior.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"$optimize() supports progress bars via package progressr combined bbotk::Terminator. Simply wrap function progressr::with_progress() enable . 
recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Tuner based bbotk::OptimizerBatchCmaes can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchCmaes","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"TunerBatchCmaes$new() TunerBatchCmaes$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"","code":"TunerBatchCmaes$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method 
clone()","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"","code":"TunerBatchCmaes$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE), minsplit = to_tune(p_dbl(2, 128, trafo = as.integer)), minbucket = to_tune(p_dbl(1, 64, trafo = as.integer)) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"cmaes\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 10) # best performing hyperparameter configuration instance$result #> cp minbucket minsplit learner_param_vals x_domain classif.ce #> #> 1: -7.336334 15.20906 107.2338 0.07826087 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp minbucket minsplit classif.ce runtime_learners #> #> 1: -7.336334 15.209063 107.23382 0.07826087 0.006 #> 2: -9.210340 64.000000 22.89758 0.12173913 0.005 #> 3: -2.621780 31.763900 128.00000 0.07826087 
0.005 #> 4: -2.302585 1.000000 106.26335 0.07826087 0.006 #> 5: -2.302585 62.039211 128.00000 0.12173913 0.005 #> 6: -4.416664 54.268412 108.94055 0.07826087 0.006 #> 7: -2.302585 4.755131 72.28910 0.07826087 0.023 #> 8: -4.734599 30.835601 24.51517 0.07826087 0.005 #> 9: -9.210340 39.906483 97.63893 0.07826087 0.006 #> 10: -6.242816 18.946310 96.50841 0.07826087 0.006 #> timestamp warnings errors x_domain batch_nr resample_result #> #> 1: 2024-11-25 08:44:27 0 0 1 #> 2: 2024-11-25 08:44:27 0 0 2 #> 3: 2024-11-25 08:44:27 0 0 3 #> 4: 2024-11-25 08:44:27 0 0 4 #> 5: 2024-11-25 08:44:27 0 0 5 #> 6: 2024-11-25 08:44:27 0 0 6 #> 7: 2024-11-25 08:44:27 0 0 7 #> 8: 2024-11-25 08:44:27 0 0 8 #> 9: 2024-11-25 08:44:27 0 0 9 #> 10: 2024-11-25 08:44:27 0 0 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Subclass tuning w.r.t. fixed design points. simply search set points fully specified user. 
points design evaluated order given.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"design_points\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"parallelization","dir":"Reference","previous_headings":"","what":"Parallelization","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"order support general termination criteria parallelization, evaluate points batch-fashion size batch_size. Larger batches mean can parallelize , smaller batches imply fine-grained checking termination criteria. batch contains batch_size times resampling$iters jobs. E.g., set batch size 10 points 5-fold cross validation, can utilize 50 cores. Parallelization supported via package future (see mlr3::benchmark()'s section parallelization details).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Tuner based bbotk::OptimizerBatchDesignPoints can applied black box optimization problem. 
See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"batch_size integer(1) Maximum number configurations try batch. design data.table::data.table Design points try search, one per row.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"$optimize() supports progress bars via package progressr combined Terminator. Simply wrap function progressr::with_progress() enable . 
recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchDesignPoints","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"TunerBatchDesignPoints$new() TunerBatchDesignPoints$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"","code":"TunerBatchDesignPoints$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Design 
Points — mlr_tuners_design_points","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"","code":"TunerBatchDesignPoints$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1), minsplit = to_tune(2, 128), minbucket = to_tune(1, 64) ) # create design design = mlr3misc::rowwise_table( ~cp, ~minsplit, ~minbucket, 0.1, 2, 64, 0.01, 64, 32, 0.001, 128, 1 ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"design_points\", design = design), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\") ) # best performing hyperparameter configuration instance$result #> cp minbucket minsplit learner_param_vals x_domain classif.ce #> #> 1: 0.01 32 64 0.07826087 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp minbucket minsplit classif.ce runtime_learners timestamp #> #> 1: 0.100 64 2 0.09565217 0.006 2024-11-25 08:44:28 #> 2: 0.010 32 64 0.07826087 0.005 2024-11-25 08:44:28 #> 3: 0.001 1 128 0.07826087 0.005 2024-11-25 08:44:28 #> warnings errors x_domain batch_nr resample_result #> #> 1: 
0 0 1 #> 2: 0 0 2 #> 3: 0 0 3 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Subclass generalized simulated annealing tuning. Calls GenSA::GenSA() package GenSA.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Tsallis C, Stariolo DA (1996). “Generalized simulated annealing.” Physica : Statistical Mechanics Applications, 233(1-2), 395–406. doi:10.1016/s0378-4371(96)00271-3 . Xiang Y, Gubian S, Suomela B, Hoeng J (2013). “Generalized Simulated Annealing Global Optimization: GenSA Package.” R Journal, 5(1), 13. 
doi:10.32614/rj-2013-002 .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"contrast GenSA::GenSA() defaults, set smooth = FALSE default.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"gensa\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"parallelization","dir":"Reference","previous_headings":"","what":"Parallelization","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"order support general termination criteria parallelization, evaluate points batch-fashion size batch_size. Larger batches mean can parallelize , smaller batches imply fine-grained checking termination criteria. batch contains batch_size times resampling$iters jobs. E.g., set batch size 10 points 5-fold cross validation, can utilize 50 cores. Parallelization supported via package future (see mlr3::benchmark()'s section parallelization details).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Tuners use logger (implemented lgr) package bbotk. 
Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Tuner based bbotk::OptimizerBatchGenSA can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"smooth logical(1) temperature numeric(1) acceptance.param numeric(1) verbose logical(1) trace.mat logical(1) meaning control parameters, see GenSA::GenSA(). Note removed control parameters refer termination algorithm terminators allow obtain behavior. contrast GenSA::GenSA() defaults, set trace.mat = FALSE. Note GenSA::GenSA() uses smooth = TRUE default. case using optimizer Hyperparameter Optimization may want set smooth = FALSE.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. 
Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"$optimize() supports progress bars via package progressr combined Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchGenSA","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"TunerBatchGenSA$new() TunerBatchGenSA$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"","code":"TunerBatchGenSA$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"","code":"TunerBatchGenSA$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"gensa\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 10 ) #> Warning: one-dimensional optimization by Nelder-Mead is unreliable: #> use \"Brent\" or optimize() directly # best performing hyperparameter configuration instance$result #> cp 
learner_param_vals x_domain classif.ce #> #> 1: -5.721042 0.04347826 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -5.721042 0.04347826 0.006 2024-11-25 08:44:29 0 0 #> 2: -2.850714 0.06086957 0.005 2024-11-25 08:44:29 0 0 #> 3: -7.568995 0.04347826 0.005 2024-11-25 08:44:29 0 0 #> 4: -5.721042 0.04347826 0.006 2024-11-25 08:44:29 0 0 #> 5: -5.721042 0.04347826 0.005 2024-11-25 08:44:29 0 0 #> 6: -5.721042 0.04347826 0.005 2024-11-25 08:44:29 0 0 #> 7: -5.148938 0.04347826 0.006 2024-11-25 08:44:30 0 0 #> 8: -6.293146 0.04347826 0.005 2024-11-25 08:44:30 0 0 #> 9: -6.007094 0.04347826 0.005 2024-11-25 08:44:30 0 0 #> 10: -5.434990 0.04347826 0.006 2024-11-25 08:44:30 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 2 #> 3: 3 #> 4: 4 #> 5: 5 #> 6: 6 #> 7: 7 #> 8: 8 #> 9: 9 #> 10: 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Subclass grid search tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"grid constructed Cartesian product discretized values per parameter, see paradox::generate_design_grid(). learner supports hotstarting, grid sorted hotstart parameter (see also mlr3::HotstartStack). 
, points grid evaluated random order.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"grid_search\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"control-parameters","dir":"Reference","previous_headings":"","what":"Control Parameters","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"resolution integer(1) Resolution grid, see paradox::generate_design_grid(). param_resolutions named integer() Resolution per parameter, named parameter ID, see paradox::generate_design_grid(). batch_size integer(1) Maximum number points try batch.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"$optimize() supports progress bars via package progressr combined bbotk::Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"parallelization","dir":"Reference","previous_headings":"","what":"Parallelization","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"order support general termination criteria parallelization, evaluate points batch-fashion size batch_size. Larger batches mean can parallelize , smaller batches imply fine-grained checking termination criteria. batch contains batch_size times resampling$iters jobs. E.g., set batch size 10 points 5-fold cross validation, can utilize 50 cores. 
Parallelization supported via package future (see mlr3::benchmark()'s section parallelization details).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Tuner based bbotk::OptimizerBatchGridSearch can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchGridSearch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"TunerBatchGridSearch$new() TunerBatchGridSearch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"","code":"TunerBatchGridSearch$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"objects class cloneable 
method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"","code":"TunerBatchGridSearch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"grid_search\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 10 ) # best performing hyperparameter configuration instance$result #> cp learner_param_vals x_domain classif.ce #> #> 1: -3.070113 0.06956522 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -3.070113 0.06956522 0.024 2024-11-25 08:44:31 0 0 #> 2: -4.605170 0.06956522 0.005 2024-11-25 08:44:31 0 0 #> 3: -8.442812 0.06956522 0.006 2024-11-25 08:44:31 0 0 #> 4: -3.837642 0.06956522 0.006 2024-11-25 08:44:31 0 0 #> 5: -5.372699 0.06956522 0.005 2024-11-25 08:44:31 0 0 #> 6: -2.302585 0.06956522 0.005 2024-11-25 08:44:31 0 0 #> 7: -7.675284 0.06956522 0.005 2024-11-25 08:44:31 0 0 #> 8: -6.907755 0.06956522 0.005 2024-11-25 08:44:31 0 0 #> 9: -9.210340 0.06956522 0.005 
2024-11-25 08:44:31 0 0 #> 10: -6.140227 0.06956522 0.006 2024-11-25 08:44:31 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 2 #> 3: 3 #> 4: 4 #> 5: 5 #> 6: 6 #> 7: 7 #> 8: 8 #> 9: 9 #> 10: 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"Subclass conduct internal hyperparameter tuning mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"note","dir":"Reference","previous_headings":"","what":"Note","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"selected mlr3::Measure influence tuning result. change loss-function internal tuning, consult hyperparameter documentation tuned mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"internal\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"$optimize() supports progress bars via package progressr combined bbotk::Terminator. Simply wrap function progressr::with_progress() enable . 
recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> TunerBatchInternal","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"TunerBatchInternal$new() TunerBatchInternal$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"","code":"TunerBatchInternal$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"objects class cloneable 
method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"","code":"TunerBatchInternal$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"","code":"library(mlr3learners) # Retrieve task task = tsk(\"pima\") # Load learner and set search space learner = lrn(\"classif.xgboost\", nrounds = to_tune(upper = 1000, internal = TRUE), early_stopping_rounds = 10, validate = \"test\", eval_metric = \"merror\" ) # Internal hyperparameter tuning on the pima indians diabetes data set instance = tune( tnr(\"internal\"), tsk(\"iris\"), learner, rsmp(\"cv\", folds = 3), msr(\"internal_valid_score\", minimize = TRUE, select = \"merror\") ) # best performing hyperparameter configuration instance$result_learner_param_vals #> $eval_metric #> [1] \"merror\" #> #> $nrounds #> [1] 3 #> #> $nthread #> [1] 1 #> #> $verbose #> [1] 0 #> instance$result_learner_param_vals$internal_tuned_values #> NULL"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Subclass iterated racing. 
Calls irace::irace() package irace.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Lopez-Ibanez M, Dubois-Lacoste J, Caceres LP, Birattari M, Stuetzle T (2016). “irace package: Iterated racing automatic algorithm configuration.” Operations Research Perspectives, 3, 43–58. doi:10.1016/j.orp.2016.09.002 .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"irace\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"control-parameters","dir":"Reference","previous_headings":"","what":"Control Parameters","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"n_instances integer(1) Number resampling instances. meaning parameters, see irace::defaultScenario(). Note removed control parameters refer termination algorithm. Use bbotk::TerminatorEvals instead. terminators work TunerIrace.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"archive","dir":"Reference","previous_headings":"","what":"Archive","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"ArchiveBatchTuning holds following additional columns: \"race\" (integer(1)) Race iteration. \"step\" (integer(1)) Step number race. \"instance\" (integer(1)) Identifies resampling instances across races steps. 
\"configuration\" (integer(1)) Identifies configurations across races steps.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"result","dir":"Reference","previous_headings":"","what":"Result","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"tuning result (instance$result) best-performing elite final race. reported performance average performance estimated used instances.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"$optimize() supports progress bars via package progressr combined bbotk::Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Tuner based bbotk::OptimizerBatchIrace can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. 
overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchIrace","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"TunerBatchIrace$new() TunerBatchIrace$optimize() TunerBatchIrace$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Iterated Racing. 
— mlr_tuners_irace","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"","code":"TunerBatchIrace$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Performs tuning TuningInstanceBatchSingleCrit termination. single evaluations final results written ArchiveBatchTuning resides TuningInstanceBatchSingleCrit. final result returned.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"","code":"TunerBatchIrace$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"inst (TuningInstanceBatchSingleCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"data.table::data.table.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Iterated Racing. 
— mlr_tuners_irace","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"","code":"TunerBatchIrace$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"","code":"# retrieve task task = tsk(\"pima\") # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)) # \\donttest{ # hyperparameter tuning on the pima indians diabetes data set instance = tune( tuner = tnr(\"irace\"), task = task, learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 42 ) #> # 2024-11-25 08:44:34 UTC: Initialization #> # Elitist race #> # Elitist new instances: 1 #> # Elitist limit: 2 #> # nbIterations: 2 #> # minNbSurvival: 2 #> # nbParameters: 1 #> # seed: 1855097766 #> # confidence level: 0.95 #> # budget: 42 #> # mu: 5 #> # deterministic: FALSE #> #> # 2024-11-25 08:44:34 UTC: Iteration 1 of 2 #> # experimentsUsedSoFar: 0 #> # remainingBudget: 42 #> # currentBudget: 21 #> # nbConfigurations: 3 #> # Markers: #> x No test is performed. #> c Configurations are discarded only due to capping. #> - The test is performed and some configurations are discarded. #> = The test is performed but no configuration is discarded. #> ! 
The test is performed and configurations could be discarded but elite configurations are preserved. #> . All alive configurations are elite and nothing is discarded #> #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> | | Instance| Alive| Best| Mean best| Exp so far| W time| rho|KenW| Qvar| #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> |x| 1| 3| 3| 0.2812500000| 3|00:00:00| NA| NA| NA| #> |x| 2| 3| 3| 0.2675781250| 6|00:00:00|+1.00|1.00|0.0000| #> |x| 3| 3| 3| 0.2604166667| 9|00:00:00|+1.00|1.00|0.0000| #> |x| 4| 3| 3| 0.2490234375| 12|00:00:00|+1.00|1.00|0.0000| #> |-| 5| 1| 3| 0.2453125000| 15|00:00:00| NA| NA| NA| #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> Best-so-far configuration: 3 mean value: 0.2453125000 #> Description of the best-so-far configuration: #> .ID. cp .PARENT. #> 3 3 -2.7229877489945 NA #> #> # 2024-11-25 08:44:34 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks): #> cp #> 3 -2.7229877489945 #> # 2024-11-25 08:44:34 UTC: Iteration 2 of 2 #> # experimentsUsedSoFar: 15 #> # remainingBudget: 27 #> # currentBudget: 27 #> # nbConfigurations: 4 #> # Markers: #> x No test is performed. #> c Configurations are discarded only due to capping. #> - The test is performed and some configurations are discarded. #> = The test is performed but no configuration is discarded. #> ! The test is performed and configurations could be discarded but elite configurations are preserved. #> . 
All alive configurations are elite and nothing is discarded #> #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> | | Instance| Alive| Best| Mean best| Exp so far| W time| rho|KenW| Qvar| #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> |x| 6| 4| 4| 0.2734375000| 4|00:00:00| NA| NA| NA| #> |x| 1| 4| 4| 0.2597656250| 7|00:00:00|+0.80|0.90|0.0217| #> |x| 4| 4| 4| 0.2447916667| 10|00:00:00|+0.40|0.60|0.2572| #> |x| 3| 4| 4| 0.2451171875| 13|00:00:00|+0.27|0.45|0.2536| #> |=| 5| 4| 4| 0.2421875000| 16|00:00:00|+0.20|0.36|0.2272| #> |=| 2| 4| 4| 0.2513020833| 19|00:00:00|-0.11|0.07|0.4250| #> |=| 7| 4| 5| 0.2522321429| 23|00:00:00|-0.15|0.02|0.4839| #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> Best-so-far configuration: 5 mean value: 0.2522321429 #> Description of the best-so-far configuration: #> .ID. cp .PARENT. #> 5 5 -3.17982221206359 3 #> #> # 2024-11-25 08:44:35 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks): #> cp #> 5 -3.17982221206359 #> 6 -3.29396533989700 #> # 2024-11-25 08:44:35 UTC: Stopped because there is not enough budget left to race more than the minimum (2) #> # You may either increase the budget or set 'minNbSurvival' to a lower value #> # Iteration: 3 #> # nbIterations: 3 #> # experimentsUsedSoFar: 38 #> # timeUsed: 0 #> # remainingBudget: 4 #> # currentBudget: 4 #> # number of elites: 2 #> # nbConfigurations: 2 #> # Total CPU user time: 1.379, CPU sys time: 0.024, Wall-clock time: 1.409 # best performing hyperparameter configuration instance$result #> cp configuration learner_param_vals x_domain classif.ce #> #> 1: -3.179822 5 0.2522321 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp race step #> #> 1: -8.192526 0.3085938 0.007 2024-11-25 
08:44:34 1 1 #> 2: -8.612223 0.3085938 0.007 2024-11-25 08:44:34 1 1 #> 3: -2.722988 0.2812500 0.007 2024-11-25 08:44:34 1 1 #> 4: -8.192526 0.3359375 0.007 2024-11-25 08:44:34 1 1 #> 5: -8.612223 0.3359375 0.007 2024-11-25 08:44:34 1 1 #> 6: -2.722988 0.2539062 0.007 2024-11-25 08:44:34 1 1 #> 7: -8.192526 0.2851562 0.007 2024-11-25 08:44:34 1 1 #> 8: -8.612223 0.2851562 0.010 2024-11-25 08:44:34 1 1 #> 9: -2.722988 0.2460938 0.007 2024-11-25 08:44:34 1 1 #> 10: -8.192526 0.2617188 0.007 2024-11-25 08:44:34 1 1 #> 11: -8.612223 0.2617188 0.007 2024-11-25 08:44:34 1 1 #> 12: -2.722988 0.2148438 0.007 2024-11-25 08:44:34 1 1 #> 13: -8.192526 0.2382812 0.007 2024-11-25 08:44:34 1 1 #> 14: -8.612223 0.2382812 0.006 2024-11-25 08:44:34 1 1 #> 15: -2.722988 0.2304688 0.006 2024-11-25 08:44:34 1 1 #> 16: -2.722988 0.2890625 0.006 2024-11-25 08:44:34 2 1 #> 17: -3.680872 0.2734375 0.006 2024-11-25 08:44:34 2 1 #> 18: -3.179822 0.2734375 0.006 2024-11-25 08:44:34 2 1 #> 19: -3.293965 0.2734375 0.006 2024-11-25 08:44:34 2 1 #> 20: -3.680872 0.2460938 0.007 2024-11-25 08:44:35 2 1 #> 21: -3.179822 0.2539062 0.007 2024-11-25 08:44:35 2 1 #> 22: -3.293965 0.2539062 0.006 2024-11-25 08:44:35 2 1 #> 23: -3.680872 0.2148438 0.007 2024-11-25 08:44:35 2 1 #> 24: -3.179822 0.2148438 0.006 2024-11-25 08:44:35 2 1 #> 25: -3.293965 0.2148438 0.007 2024-11-25 08:44:35 2 1 #> 26: -3.680872 0.2460938 0.006 2024-11-25 08:44:35 2 1 #> 27: -3.179822 0.2460938 0.007 2024-11-25 08:44:35 2 1 #> 28: -3.293965 0.2460938 0.006 2024-11-25 08:44:35 2 1 #> 29: -3.680872 0.2304688 0.006 2024-11-25 08:44:35 2 1 #> 30: -3.179822 0.2304688 0.006 2024-11-25 08:44:35 2 1 #> 31: -3.293965 0.2304688 0.006 2024-11-25 08:44:35 2 1 #> 32: -3.680872 0.2968750 0.007 2024-11-25 08:44:35 2 1 #> 33: -3.179822 0.2968750 0.006 2024-11-25 08:44:35 2 1 #> 34: -3.293965 0.2968750 0.006 2024-11-25 08:44:35 2 1 #> 35: -2.722988 0.2500000 0.007 2024-11-25 08:44:35 2 1 #> 36: -3.680872 0.2617188 0.006 2024-11-25 08:44:35 2 1 
#> 37: -3.179822 0.2500000 0.010 2024-11-25 08:44:35 2 1 #> 38: -3.293965 0.2500000 0.006 2024-11-25 08:44:35 2 1 #> cp classif.ce runtime_learners timestamp race step #> instance configuration warnings errors x_domain batch_nr resample_result #> #> 1: 10 1 0 0 1 #> 2: 10 2 0 0 1 #> 3: 10 3 0 0 1 #> 4: 4 1 0 0 2 #> 5: 4 2 0 0 2 #> 6: 4 3 0 0 2 #> 7: 1 1 0 0 3 #> 8: 1 2 0 0 3 #> 9: 1 3 0 0 3 #> 10: 8 1 0 0 4 #> 11: 8 2 0 0 4 #> 12: 8 3 0 0 4 #> 13: 5 1 0 0 5 #> 14: 5 2 0 0 5 #> 15: 5 3 0 0 5 #> 16: 7 3 0 0 6 #> 17: 7 4 0 0 6 #> 18: 7 5 0 0 6 #> 19: 7 6 0 0 6 #> 20: 10 4 0 0 7 #> 21: 10 5 0 0 7 #> 22: 10 6 0 0 7 #> 23: 8 4 0 0 8 #> 24: 8 5 0 0 8 #> 25: 8 6 0 0 8 #> 26: 1 4 0 0 9 #> 27: 1 5 0 0 9 #> 28: 1 6 0 0 9 #> 29: 5 4 0 0 10 #> 30: 5 5 0 0 10 #> 31: 5 6 0 0 10 #> 32: 4 4 0 0 11 #> 33: 4 5 0 0 11 #> 34: 4 6 0 0 11 #> 35: 9 3 0 0 12 #> 36: 9 4 0 0 12 #> 37: 9 5 0 0 12 #> 38: 9 6 0 0 12 #> instance configuration warnings errors x_domain batch_nr resample_result # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(task) # }"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Subclass non-linear optimization (NLopt). Calls nloptr::nloptr package nloptr.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Johnson, G S (2020). 
“NLopt nonlinear-optimization package.” https://github.com/stevengj/nlopt.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"termination conditions stopval, maxtime maxeval nloptr::nloptr() deactivated replaced bbotk::Terminator subclasses. x function value tolerance termination conditions (xtol_rel = 10^-4, xtol_abs = rep(0.0, length(x0)), ftol_rel = 0.0 ftol_abs = 0.0) still available implemented package defaults. deactivate conditions, set -1.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"nloptr\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Tuner based bbotk::OptimizerBatchNLoptr can applied black box optimization problem. 
See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"algorithm character(1) eval_g_ineq function() xtol_rel numeric(1) xtol_abs numeric(1) ftol_rel numeric(1) ftol_abs numeric(1) start_values character(1) Create random start values based center search space? latter case, center parameters trafo applied. meaning control parameters, see nloptr::nloptr() nloptr::nloptr.print.options(). termination conditions stopval, maxtime maxeval nloptr::nloptr() deactivated replaced Terminator subclasses. x function value tolerance termination conditions (xtol_rel = 10^-4, xtol_abs = rep(0.0, length(x0)), ftol_rel = 0.0 ftol_abs = 0.0) still available implemented package defaults. deactivate conditions, set -1.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"$optimize() supports progress bars via package progressr combined Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchNLoptr","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"TunerBatchNLoptr$new() TunerBatchNLoptr$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"","code":"TunerBatchNLoptr$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"","code":"TunerBatchNLoptr$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"","code":"# Hyperparameter Optimization # \\donttest{ # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"nloptr\", algorithm = \"NLOPT_LN_BOBYQA\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\") ) # best performing hyperparameter configuration instance$result #> cp learner_param_vals x_domain classif.ce #> #> 1: -5.081957 0.07826087 # all evaluated hyperparameter 
configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -5.081957 0.07826087 0.006 2024-11-25 08:44:36 0 0 #> 2: -5.081957 0.07826087 0.006 2024-11-25 08:44:36 0 0 #> 3: -5.081957 0.07826087 0.005 2024-11-25 08:44:36 0 0 #> 4: -3.355018 0.07826087 0.006 2024-11-25 08:44:36 0 0 #> 5: -6.808896 0.07826087 0.005 2024-11-25 08:44:36 0 0 #> 6: -5.064688 0.07826087 0.005 2024-11-25 08:44:37 0 0 #> 7: -5.099226 0.07826087 0.005 2024-11-25 08:44:37 0 0 #> 8: -5.080230 0.07826087 0.007 2024-11-25 08:44:37 0 0 #> 9: -5.083684 0.07826087 0.005 2024-11-25 08:44:37 0 0 #> 10: -5.081957 0.07826087 0.006 2024-11-25 08:44:37 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 2 #> 3: 3 #> 4: 4 #> 5: 5 #> 6: 6 #> 7: 7 #> 8: 8 #> 9: 9 #> 10: 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\")) # }"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Subclass random search tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Bergstra J, Bengio Y (2012). “Random Search Hyper-Parameter Optimization.” Journal Machine Learning Research, 13(10), 281–305. 
https://jmlr.csail.mit.edu/papers/v13/bergstra12a.html.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"random points sampled paradox::generate_design_random().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"random_search\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"parallelization","dir":"Reference","previous_headings":"","what":"Parallelization","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"order support general termination criteria parallelization, evaluate points batch-fashion size batch_size. Larger batches mean can parallelize , smaller batches imply fine-grained checking termination criteria. batch contains batch_size times resampling$iters jobs. E.g., set batch size 10 points 5-fold cross validation, can utilize 50 cores. Parallelization supported via package future (see mlr3::benchmark()'s section parallelization details).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Tuners use logger (implemented lgr) package bbotk. 
Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Tuner based bbotk::OptimizerBatchRandomSearch can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"batch_size integer(1) Maximum number points try batch.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"$optimize() supports progress bars via package progressr combined Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchRandomSearch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"TunerBatchRandomSearch$new() TunerBatchRandomSearch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"","code":"TunerBatchRandomSearch$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"","code":"TunerBatchRandomSearch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"random_search\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 10 ) # best performing hyperparameter configuration instance$result #> cp learner_param_vals x_domain classif.ce #> #> 1: -9.025467 0.03478261 # all evaluated hyperparameter 
configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -4.711280 0.05217391 0.006 2024-11-25 08:44:38 0 0 #> 2: -3.034222 0.05217391 0.005 2024-11-25 08:44:38 0 0 #> 3: -2.403159 0.05217391 0.005 2024-11-25 08:44:38 0 0 #> 4: -9.025467 0.03478261 0.005 2024-11-25 08:44:38 0 0 #> 5: -7.209532 0.03478261 0.006 2024-11-25 08:44:38 0 0 #> 6: -6.858402 0.03478261 0.006 2024-11-25 08:44:38 0 0 #> 7: -6.311528 0.03478261 0.006 2024-11-25 08:44:38 0 0 #> 8: -3.598009 0.05217391 0.006 2024-11-25 08:44:38 0 0 #> 9: -3.967858 0.05217391 0.006 2024-11-25 08:44:38 0 0 #> 10: -6.004689 0.03478261 0.006 2024-11-25 08:44:38 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 2 #> 3: 3 #> 4: 4 #> 5: 5 #> 6: 6 #> 7: 7 #> 8: 8 #> 9: 9 #> 10: 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/reexports.html","id":null,"dir":"Reference","previous_headings":"","what":"Objects exported from other packages — reexports","title":"Objects exported from other packages — reexports","text":"objects imported packages. Follow links see documentation. 
bbotk mlr_terminators, trm, trms mlr3misc clbk, clbks, mlr_callbacks","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/set_validate.AutoTuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Configure Validation for AutoTuner — set_validate.AutoTuner","title":"Configure Validation for AutoTuner — set_validate.AutoTuner","text":"Configure validation data learner tuned AutoTuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/set_validate.AutoTuner.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Configure Validation for AutoTuner — set_validate.AutoTuner","text":"","code":"# S3 method for class 'AutoTuner' set_validate(learner, validate, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/set_validate.AutoTuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Configure Validation for AutoTuner — set_validate.AutoTuner","text":"learner (AutoTuner) autotuner enable validation. validate (numeric(1), \"predefined\", \"test\", NULL) configure validation hyperparameter tuning. ... 
() Passed calling set_validate() wrapped leaerner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/set_validate.AutoTuner.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Configure Validation for AutoTuner — set_validate.AutoTuner","text":"","code":"at = auto_tuner( tuner = tnr(\"random_search\"), learner = lrn(\"classif.debug\", early_stopping = TRUE, iter = to_tune(upper = 1000L, internal = TRUE), validate = 0.2), resampling = rsmp(\"holdout\") ) # use the test set as validation data during tuning set_validate(at, validate = \"test\") at$learner$validate #> [1] \"test\""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":null,"dir":"Reference","previous_headings":"","what":"Syntactic Sugar for Tuning Instance Construction — ti","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"Function construct TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"","code":"ti( task, learner, resampling, measures = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. 
evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (mlr3::Measure list mlr3::Measure) single measure creates TuningInstanceBatchSingleCrit multiple measures TuningInstanceBatchMultiCrit. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. 
Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"","code":"# Hyperparameter optimization on the Palmer Penguins data set task = tsk(\"penguins\") # Load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # Construct tuning instance instance = ti( task = task, learner = learner, resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), terminator = trm(\"evals\", n_evals = 4) ) # Choose optimization algorithm tuner = tnr(\"random_search\", batch_size = 2) # Run tuning tuner$optimize(instance) #> cp learner_param_vals x_domain classif.ce #> #> 1: -8.460007 0.04942792 # Set optimal hyperparameter configuration to learner learner$param_set$values = instance$result_learner_param_vals # Train the learner on the full data set learner$train(task) # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -3.372665 0.05814900 0.015 2024-11-25 08:44:39 0 0 #> 2: -3.054465 0.05814900 0.015 2024-11-25 08:44:39 0 0 #> 3: -8.460007 0.04942792 0.016 2024-11-25 08:44:40 0 0 #> 4: -4.158236 0.05814900 0.015 2024-11-25 08:44:40 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 1 #> 3: 2 #> 4: 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":null,"dir":"Reference","previous_headings":"","what":"Syntactic Sugar for 
Asynchronous Tuning Instance Construction — ti_async","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"Function construct TuningInstanceAsyncSingleCrit TuningInstanceAsyncMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"","code":"ti_async( task, learner, resampling, measures = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (mlr3::Measure list mlr3::Measure) single measure creates TuningInstanceAsyncSingleCrit multiple measures TuningInstanceAsyncMultiCrit. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. 
store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"measure passed, default measure used. 
default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"","code":"# Hyperparameter optimization on the Palmer Penguins data set task = tsk(\"penguins\") # Load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # Construct tuning instance instance = ti( task = task, learner = learner, resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), terminator = trm(\"evals\", n_evals = 4) ) # Choose optimization algorithm tuner = tnr(\"random_search\", batch_size = 2) # Run tuning tuner$optimize(instance) #> cp learner_param_vals x_domain classif.ce #> #> 1: -8.481745 0.05817442 # Set optimal hyperparameter configuration to learner learner$param_set$values = instance$result_learner_param_vals # Train the learner on the full data set learner$train(task) # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -8.481745 0.05817442 0.018 2024-11-25 08:44:40 0 0 #> 2: -9.007729 0.05817442 0.016 2024-11-25 08:44:40 0 0 #> 3: -2.474325 0.05817442 0.016 2024-11-25 08:44:40 0 0 #> 4: -8.015548 0.05817442 0.015 2024-11-25 08:44:40 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 1 #> 3: 2 #> 4: 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":null,"dir":"Reference","previous_headings":"","what":"Syntactic Sugar for Tuning Objects Construction — tnr","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":"Functions retrieve objects, set parameters assign fields one go. Relies mlr3misc::dictionary_sugar_get() extract objects respective mlr3misc::Dictionary: tnr() Tuner mlr_tuners. tnrs() list Tuners mlr_tuners. trm() bbotk::Terminator mlr_terminators. 
trms() list Terminators mlr_terminators.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":"","code":"tnr(.key, ...) tnrs(.keys, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":".key (character(1)) Key passed respective dictionary retrieve object. ... () Additional arguments. .keys (character()) Keys passed respective dictionary retrieve multiple objects.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":"R6::R6Class object respective type, list R6::R6Class objects plural versions.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":"","code":"# random search tuner with batch size of 5 tnr(\"random_search\", batch_size = 5) #> : Random Search #> * Parameters: batch_size=5 #> * Parameter classes: ParamLgl, ParamInt, ParamDbl, ParamFct #> * Properties: dependencies, single-crit, multi-crit #> * Packages: mlr3tuning, bbotk # run time terminator with 20 seconds trm(\"run_time\", secs = 20) #> : Run Time #> * Parameters: secs=20"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":null,"dir":"Reference","previous_headings":"","what":"Function for Tuning a Learner — tune","title":"Function for Tuning a Learner — tune","text":"Function tune mlr3::Learner. function internally creates TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit describes tuning problem. 
executes tuning Tuner (tuner) returns result tuning instance ($result). ArchiveBatchTuning ArchiveAsyncTuning ($archive) stores evaluated hyperparameter configurations performance scores. can find overview tuners website.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Function for Tuning a Learner — tune","text":"","code":"tune( tuner, task, learner, resampling, measures = NULL, term_evals = NULL, term_time = NULL, terminator = NULL, search_space = NULL, store_benchmark_result = TRUE, internal_search_space = NULL, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Function for Tuning a Learner — tune","text":"tuner (Tuner) Optimization algorithm. task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (mlr3::Measure list mlr3::Measure) single measure creates TuningInstanceBatchSingleCrit multiple measures TuningInstanceBatchMultiCrit. NULL, default measure used. term_evals (integer(1)) Number allowed evaluations. Ignored terminator passed. term_time (integer(1)) Maximum allowed time seconds. Ignored terminator passed. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). 
store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. internal_search_space (paradox::ParamSet NULL) internal search space. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Function for Tuning a Learner — tune","text":"TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Function for Tuning a Learner — tune","text":"mlr3::Task, mlr3::Learner, mlr3::Resampling, mlr3::Measure bbotk::Terminator used construct TuningInstanceBatchSingleCrit. multiple performance mlr3::Measures supplied, TuningInstanceBatchMultiCrit created. parameter term_evals term_time shortcuts create bbotk::Terminator. parameters passed, bbotk::TerminatorCombo constructed. Terminators, pass one terminator. termination criterion needed, set term_evals, term_time terminator NULL. search space created paradox::TuneToken supplied search_space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Function for Tuning a Learner — tune","text":"measure passed, default measure used. 
default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Function for Tuning a Learner — tune","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Function for Tuning a Learner — tune","text":"analyzing tuning results, recommended pass ArchiveBatchTuning .data.table(). returned data table joined benchmark result adds mlr3::ResampleResult hyperparameter evaluation. archive provides various getters (e.g. $learners()) ease access. getters extract position () unique hash (uhash). complete list getters see methods section. benchmark result ($benchmark_result) allows score hyperparameter configurations different measure. Alternatively, measures can supplied .data.table(). 
mlr3viz package provides visualizations tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Function for Tuning a Learner — tune","text":"","code":"# Hyperparameter optimization on the Palmer Penguins data set task = tsk(\"pima\") # Load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # Run tuning instance = tune( tuner = tnr(\"random_search\", batch_size = 2), task = tsk(\"pima\"), learner = learner, resampling = rsmp (\"holdout\"), measures = msr(\"classif.ce\"), terminator = trm(\"evals\", n_evals = 4) ) # Set optimal hyperparameter configuration to learner learner$param_set$values = instance$result_learner_param_vals # Train the learner on the full data set learner$train(task) # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -7.397281 0.2460938 0.007 2024-11-25 08:44:41 0 0 #> 2: -4.648966 0.2382812 0.006 2024-11-25 08:44:41 0 0 #> 3: -9.116329 0.2460938 0.026 2024-11-25 08:44:41 0 0 #> 4: -5.519208 0.2460938 0.007 2024-11-25 08:44:41 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 1 #> 3: 2 #> 4: 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":null,"dir":"Reference","previous_headings":"","what":"Function for Nested Resampling — tune_nested","title":"Function for Nested Resampling — tune_nested","text":"Function conduct nested resampling.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Function for Nested Resampling — tune_nested","text":"","code":"tune_nested( tuner, task, learner, inner_resampling, outer_resampling, measure = NULL, term_evals = NULL, term_time = NULL, terminator = NULL, search_space = NULL, 
store_tuning_instance = TRUE, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Function for Nested Resampling — tune_nested","text":"tuner (Tuner) Optimization algorithm. task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. inner_resampling (mlr3::Resampling) Resampling used inner loop. outer_resampling mlr3::Resampling) Resampling used outer loop. measure (mlr3::Measure) Measure optimize. NULL, default measure used. term_evals (integer(1)) Number allowed evaluations. Ignored terminator passed. term_time (integer(1)) Maximum allowed time seconds. Ignored terminator passed. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). store_tuning_instance (logical(1)) TRUE (default), stores internally created TuningInstanceBatchSingleCrit intermediate results slot $tuning_instance. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. 
callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Function for Nested Resampling — tune_nested","text":"mlr3::ResampleResult","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Function for Nested Resampling — tune_nested","text":"","code":"# Nested resampling on Palmer Penguins data set rr = tune_nested( tuner = tnr(\"random_search\", batch_size = 2), task = tsk(\"penguins\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), inner_resampling = rsmp (\"holdout\"), outer_resampling = rsmp(\"cv\", folds = 2), measure = msr(\"classif.ce\"), term_evals = 2) # Performance scores estimated on the outer resampling rr$score() #> task_id learner_id resampling_id iteration classif.ce #> #> 1: penguins classif.rpart.tuned cv 1 0.06976744 #> 2: penguins classif.rpart.tuned cv 2 0.08139535 #> Hidden columns: task, learner, resampling, prediction_test # Unbiased performance of the final model trained on the full data set rr$aggregate() #> classif.ce #> 0.0755814"},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-120","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.2.0","title":"mlr3tuning 1.2.0","text":"CRAN release: 2024-11-08 feat: Add new callback clbk(\"mlr3tuning.one_se_rule\") selects hyperparameter configuration smallest feature set within one standard error best. feat: Add new stages on_tuning_result_begin on_result_begin CallbackAsyncTuning CallbackBatchTuning. refactor: Rename stage on_result on_result_end CallbackAsyncTuning CallbackBatchTuning. docs: Extend CallbackAsyncTuning CallbackBatchTuning documentation. 
compatibility: mlr3 0.22.0","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-110","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.1.0","title":"mlr3tuning 1.1.0","text":"CRAN release: 2024-10-27 fix: as_data_table() functions unnest x_domain colum anymore default. fix: to_tune(internal = TRUE) now also works non-internal tuning parameters require .extra_trafo. feat: now possible pass internal_search_space manually. allows use parameter transformations primary search space combination internal hyperparameter tuning. refactor: Tuner pass extra information result extra parameter now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-102","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.0.2","title":"mlr3tuning 1.0.2","text":"CRAN release: 2024-10-14 refactor: Extract internal tuned values instance.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-101","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.0.1","title":"mlr3tuning 1.0.1","text":"CRAN release: 2024-09-10 refactor: Replace internal tuning callback. perf: Delete intermediate BenchmarkResult ObjectiveTuningBatch optimization.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-100","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.0.0","title":"mlr3tuning 1.0.0","text":"CRAN release: 2024-06-29 feat: Introduce asynchronous optimization TunerAsync TuningInstanceAsync* classes. BREAKING CHANGE: Tuner class TunerBatch now. BREAKING CHANGE: TuningInstanceSingleCrit TuningInstanceMultiCrit classes TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit now. BREAKING CHANGE: CallbackTuning class CallbackBatchTuning now. BREAKING CHANGE: ContextEval class ContextBatchTuning now. refactor: Remove hotstarting batch optimization due low performance. 
refactor: option evaluate_default callback now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0200","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.20.0","title":"mlr3tuning 0.20.0","text":"CRAN release: 2024-03-05 compatibility: Work new paradox version 1.0.0 fix: TunerIrace failed logical parameters dependencies. Added marshaling support AutoTuner","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0192","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.19.2","title":"mlr3tuning 0.19.2","text":"CRAN release: 2023-11-28 refactor: Change thread limits.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0191","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.19.1","title":"mlr3tuning 0.19.1","text":"CRAN release: 2023-11-20 refactor: Speed tuning process minimizing number deep clones parameter checks. fix: Set store_benchmark_result = TRUE store_models = TRUE creating tuning instance. fix: Passing terminator tune_nested() work.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0190","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.19.0","title":"mlr3tuning 0.19.0","text":"CRAN release: 2023-06-26 fix: Add $phash() method AutoTuner. fix: Include Tuner hash AutoTuner. feat: Add new callback scores configurations additional measures tuning. feat: Add vignette adding new tuners previously part mlr3book.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0180","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.18.0","title":"mlr3tuning 0.18.0","text":"CRAN release: 2023-03-08 BREAKING CHANGE: method parameter tune(), tune_nested() auto_tuner() renamed tuner. Tuner objects accepted now. Arguments tuner passed ... anymore. BREAKING CHANGE: tuner parameter AutoTuner moved first position achieve consistency functions. 
docs: Update resources sections. docs: Add list default measures. fix: Add allow_hotstarting, keep_hotstart_stack keep_models flags AutoTuner auto_tuner().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0172","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.17.2","title":"mlr3tuning 0.17.2","text":"CRAN release: 2022-12-22 feat: AutoTuner accepts instantiated resamplings now. AutoTuner checks row ids inner resampling present outer resampling train set nested resampling performed. fix: Standalone Tuner create ContextOptimization.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0171","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.17.1","title":"mlr3tuning 0.17.1","text":"CRAN release: 2022-12-07 fix: ti() function accept callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0170","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.17.0","title":"mlr3tuning 0.17.0","text":"CRAN release: 2022-11-18 feat: methods $importance(), $selected_features(), $oob_error() $loglik() forwarded final model AutoTuner now. refactor: AutoTuner stores instance benchmark result store_models = TRUE. refactor: AutoTuner stores instance store_benchmark_result = TRUE.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0160","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.16.0","title":"mlr3tuning 0.16.0","text":"CRAN release: 2022-11-08 feat: Add new callback enables early stopping tuning mlr_callbacks. feat: Add new callback backups benchmark result disk batch. 
feat: Create custom callbacks callback_batch_tuning() function.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0150","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.15.0","title":"mlr3tuning 0.15.0","text":"CRAN release: 2022-10-21 fix: AutoTuner accept TuningSpace objects search spaces. feat: Add ti() function create TuningInstanceSingleCrit TuningInstanceMultiCrit. docs: Documentation technical details section now. feat: New option extract_inner_tuning_results() return tuning instances.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0140","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.14.0","title":"mlr3tuning 0.14.0","text":"CRAN release: 2022-08-25 feat: Add option evaluate_default evaluate learners hyperparameters set default values. refactor: now , default smooth FALSE TunerGenSA.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0131","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.13.1","title":"mlr3tuning 0.13.1","text":"CRAN release: 2022-05-03 feat: Tuner objects field $id now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0130","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.13.0","title":"mlr3tuning 0.13.0","text":"CRAN release: 2022-04-06 feat: Allow pass Tuner objects method tune() auto_tuner(). docs: Link Tuner help page bbotk::Optimizer. feat: Tuner objects optional field $label now. feat: .data.table() functions objects class Dictionary extended additional columns.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0121","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.12.1","title":"mlr3tuning 0.12.1","text":"CRAN release: 2022-02-25 feat: Add .data.table.DictionaryTuner function. 
feat: New $help() method opens manual page Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0120","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.12.0","title":"mlr3tuning 0.12.0","text":"CRAN release: 2022-02-17 feat: as_search_space() function create search spaces Learner ParamSet objects. Allow pass TuningSpace objects search_space TuningInstanceSingleCrit TuningInstanceMultiCrit. feat: mlr3::HotstartStack can now removed tuning keep_hotstart_stack flag. feat: Archive stores errors warnings learners. feat: measure provided, default measure used auto_tuner() tune_nested().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0110","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.11.0","title":"mlr3tuning 0.11.0","text":"CRAN release: 2022-02-02 fix: $assign_result() method TuningInstanceSingleCrit search space empty. feat: Default measure used measure supplied TuningInstanceSingleCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0100","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.10.0","title":"mlr3tuning 0.10.0","text":"CRAN release: 2022-01-20 Fixes bug TuningInstanceMultiCrit$assign_result(). Hotstarting learners previously fitted models. Remove deep clones speed tuning. Add store_models flag auto_tuner(). Add \"noisy\" property ObjectiveTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-090","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.9.0","title":"mlr3tuning 0.9.0","text":"CRAN release: 2021-09-14 Adds AutoTuner$base_learner() method extract base learner nested learner objects. tune() supports multi-criteria tuning. Allows empty search space. Adds TunerIrace irace package. extract_inner_tuning_archives() helper function extract inner tuning archives. Removes ArchiveTuning$extended_archive() method. 
mlr3::ResampleResults joined automatically .data.table.TuningArchive() extract_inner_tuning_archives().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-080","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.8.0","title":"mlr3tuning 0.8.0","text":"CRAN release: 2021-03-12 Adds tune(), auto_tuner() tune_nested() sugar functions. TuningInstanceSingleCrit, TuningInstanceMultiCrit AutoTuner can initialized store_benchmark_result = FALSE store_models = TRUE allow measures access models. Prettier printing methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-070","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.7.0","title":"mlr3tuning 0.7.0","text":"CRAN release: 2021-02-11 Fix TuningInstance*$assign_result() errors required parameter bug. Shortcuts access $learner(), $learners(), $learner_param_vals(), $predictions() $resample_result() benchmark result archive. extract_inner_tuning_results() helper function extract inner tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-060","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.6.0","title":"mlr3tuning 0.6.0","text":"CRAN release: 2021-01-24 ArchiveTuning$data public field now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-050","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.5.0","title":"mlr3tuning 0.5.0","text":"CRAN release: 2020-12-07 Adds TunerCmaes adagio package. Fix predict_type AutoTuner. Support set TuneToken Learner$param_set create search space . 
order parameters TuningInstanceSingleCrit TuningInstanceSingleCrit changed.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-040","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.4.0","title":"mlr3tuning 0.4.0","text":"CRAN release: 2020-10-07 Option control store_benchmark_result, store_models check_values AutoTuner. store_tuning_instance must set parameter initialization. Fixes check_values flag TuningInstanceSingleCrit TuningInstanceMultiCrit. Removed dependency orphaned package bibtex.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-030","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.3.0","title":"mlr3tuning 0.3.0","text":"CRAN release: 2020-09-08 Compact -memory representation R6 objects save space saving mlr3 objects via saveRDS(), serialize() etc. Archive ArchiveTuning now stores benchmark result $benchmark_result. change removed resample results archive can still accessed via benchmark result. Warning message external package tuning installed. retrieve inner tuning results nested resampling, .data.table(rr)$learner[[1]]$tuning_result must used now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-020","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.2.0","title":"mlr3tuning 0.2.0","text":"CRAN release: 2020-07-28 TuningInstance now TuningInstanceSingleCrit. TuningInstanceMultiCrit still available multi-criteria tuning. Terminators now accessible trm() trms() instead term() terms(). Storing resample results optional now using store_resample_result flag TuningInstanceSingleCrit TuningInstanceMultiCrit TunerNLoptr adds non-linear optimization nloptr package. Logging controlled bbotk logger now. 
Proposed points performance values can checked validity activating check_values flag TuningInstanceSingleCrit TuningInstanceMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-013","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.1.3","title":"mlr3tuning 0.1.3","text":"mlr3tuning now depends bbotk package basic tuning objects. Terminator classes now live bbotk. consequence ObjectiveTuning inherits bbotk::Objective, TuningInstance bbotk::OptimInstance Tuner bbotk::Optimizer TuningInstance$param_set becomes TuningInstance$search_space avoid confusion param_set usually contains parameters change behavior object. Tuning triggered $optimize() instead $tune()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-012","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.1.2","title":"mlr3tuning 0.1.2","text":"CRAN release: 2020-01-31 Fixed bug AutoTuner $clone() missing. Tuning results unaffected, stored models contained wrong hyperparameter values (#223). Improved output log (#218).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-011","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.1.1","title":"mlr3tuning 0.1.1","text":"CRAN release: 2019-12-06 Maintenance release.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-010","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.1.0","title":"mlr3tuning 0.1.0","text":"CRAN release: 2019-09-30 Initial prototype.","code":""}] +[{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"adding-new-tuners","dir":"Articles","previous_headings":"","what":"Adding new Tuners","title":"Add a new Tuner","text":"vignette, show implement custom tuner mlr3tuning. main task tuner iteratively propose new hyperparameter configurations want evaluate given task, learner validation strategy. 
second task decide configuration returned tuning result - usually configuration led best observed performance value. want implement tuner, implement R6-Object offers .optimize method implements iterative proposal free implement .assign_result differ -mentioned default process determining result. start implementation make familiar main R6-Objects bbotk (Black-Box Optimization Toolkit). package provide basic black box optimization algorithms also objects represent optimization problem (OptimInstance) log evaluated configurations (Archive). d two ways implement new tuner: ) new tuner can applied kind optimization problem implemented Optimizer. Optimizer can easily transformed Tuner. b) new custom tuner usable hyperparameter tuning, example needs access task, learner resampling objects directly implemented mlr3tuning Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"adding-a-new-tuner","dir":"Articles","previous_headings":"Adding new Tuners","what":"Adding a new Tuner","title":"Add a new Tuner","text":"summary steps adding new tuner. fifth step required new tuner added via bbotk. Check tuner already exist Optimizer Tuner GitHub repositories. Use one existing optimizers / tuners template. Overwrite .optimize private method optimizer / tuner. Optionally, overwrite default .assign_result private method. Use mlr3tuning::TunerBatchFromOptimizerBatch class transform Optimizer Tuner. Add unit tests tuner optionally optimizer. Open new pull request Tuner optionally second one `Optimizer.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"tuner-template","dir":"Articles","previous_headings":"Adding new Tuners","what":"Template","title":"Add a new Tuner","text":"new custom tuner implemented via bbotk, use one existing optimizer template e.g. bbotk::OptimizerRandomSearch. currently two tuners based Optimizer: mlr3hyperband::TunerHyperband mlr3tuning::TunerIrace. 
rather complex can still use documentation class structure template. following steps identical optimizers tuners. Rewrite meta information documentation create new class name. Scientific sources can added R/bibentries.R added @source documentation. example dictionary sections documentation auto-generated based @templateVar id . Change parameter set optimizer / tuner document @section Parameters. forget change mlr_optimizers$add() / mlr_tuners$add() last line adds optimizer / tuner dictionary.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"optimize-method","dir":"Articles","previous_headings":"Adding new Tuners","what":"Optimize method","title":"Add a new Tuner","text":"$.optimize() private method main part tuner. takes instance, proposes new points calls $eval_batch() method instance evaluate . can go two ways: Implement iterative process call external optimization function resides another package.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"writing-a-custom-iteration","dir":"Articles","previous_headings":"Adding new Tuners > Optimize method","what":"Writing a custom iteration","title":"Add a new Tuner","text":"Usually, proposal evaluation done repeat-loop implement. Please consider following points: can evaluate one multiple points per iteration don’t care termination, $eval_batch() won’t allow evaluations allowed bbotk::Terminator. implies, code repeat-loop executed. don’t care keeping track evaluations every evaluation automatically stored inst$archive. 
want log additional information evaluation Objective Archive can simply add columns data.table object passed $eval_batch().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"calling-an-external-optimization-function","dir":"Articles","previous_headings":"Adding new Tuners > Optimize method","what":"Calling an external optimization function","title":"Add a new Tuner","text":"Optimization functions external packages usually take objective function argument. case, can pass inst$objective_function internally calls $eval_batch(). Check OptimizerGenSA example.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"assign-result-method","dir":"Articles","previous_headings":"Adding new Tuners","what":"Assign result method","title":"Add a new Tuner","text":"default $.assign_result() private method simply obtains best performing result archive. default method can overwritten new tuner determines result optimization different way. new function must call $assign_result() method instance write final result instance. See mlr3tuning::TunerIrace implementation $.assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"transform-optimizer-to-tuner","dir":"Articles","previous_headings":"Adding new Tuners","what":"Transform optimizer to tuner","title":"Add a new Tuner","text":"step needed implement via bbotk. mlr3tuning::TunerBatchFromOptimizerBatch class transforms Optimizer Tuner. Just add Optimizer optimizer field. See mlr3tuning::TunerRandomSearch example.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/articles/extending.html","id":"add-unit-tests","dir":"Articles","previous_headings":"Adding new Tuners","what":"Add unit tests","title":"Add a new Tuner","text":"new custom tuner thoroughly tested unit tests. Tuners can tested test_tuner() helper function. 
added Tuner via Optimizer, additionally test Optimizer test_optimizer() helper function.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Marc Becker. Maintainer, author. Michel Lang. Author. Jakob Richter. Author. Bernd Bischl. Author. Daniel Schalk. Author.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Becker M, Lang M, Richter J, Bischl B, Schalk D (2024). mlr3tuning: Hyperparameter Optimization 'mlr3'. R package version 1.2.0.9000, https://github.com/mlr-org/mlr3tuning, https://mlr3tuning.mlr-org.com.","code":"@Manual{, title = {mlr3tuning: Hyperparameter Optimization for 'mlr3'}, author = {Marc Becker and Michel Lang and Jakob Richter and Bernd Bischl and Daniel Schalk}, year = {2024}, note = {R package version 1.2.0.9000, https://github.com/mlr-org/mlr3tuning}, url = {https://mlr3tuning.mlr-org.com}, }"},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"mlr3tuning-","dir":"","previous_headings":"","what":"Hyperparameter Optimization for mlr3","title":"Hyperparameter Optimization for mlr3","text":"Package website: release | dev mlr3tuning hyperparameter optimization package mlr3 ecosystem. features highly configurable search spaces via paradox package finds optimal hyperparameter configurations mlr3 learner. mlr3tuning works several optimization algorithms e.g. Random Search, Iterated Racing, Bayesian Optimization (mlr3mbo) Hyperband (mlr3hyperband). Moreover, can automatically optimize learners estimate performance optimized models nested resampling. 
package built optimization framework bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"extension-packages","dir":"","previous_headings":"","what":"Extension packages","title":"Hyperparameter Optimization for mlr3","text":"mlr3tuning extended following packages. mlr3tuningspaces collection search spaces scientific articles commonly used learners. mlr3hyperband adds Hyperband Successive Halving algorithm. mlr3mbo adds Bayesian Optimization methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"resources","dir":"","previous_headings":"","what":"Resources","title":"Hyperparameter Optimization for mlr3","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"installation","dir":"","previous_headings":"","what":"Installation","title":"Hyperparameter Optimization for mlr3","text":"Install last release CRAN: Install development version GitHub:","code":"install.packages(\"mlr3tuning\") remotes::install_github(\"mlr-org/mlr3tuning\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/index.html","id":"examples","dir":"","previous_headings":"","what":"Examples","title":"Hyperparameter Optimization for mlr3","text":"optimize cost gamma hyperparameters support vector machine Sonar data set. construct tuning instance ti() function. 
tuning instance describes tuning problem. select simple grid search optimization algorithm. start tuning, simply pass tuning instance tuner. tuner returns best hyperparameter configuration corresponding measured performance. archive contains evaluated hyperparameter configurations. mlr3viz package visualizes tuning results. fit final model optimized hyperparameters make predictions new data.","code":"library(\"mlr3learners\") library(\"mlr3tuning\") learner = lrn(\"classif.svm\", cost = to_tune(1e-5, 1e5, logscale = TRUE), gamma = to_tune(1e-5, 1e5, logscale = TRUE), kernel = \"radial\", type = \"C-classification\" ) instance = ti( task = tsk(\"sonar\"), learner = learner, resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), terminator = trm(\"none\") ) instance ## ## * State: Not optimized ## * Objective: ## * Search Space: ## id class lower upper nlevels ## 1: cost ParamDbl -11.51293 11.51293 Inf ## 2: gamma ParamDbl -11.51293 11.51293 Inf ## * Terminator: tuner = tnr(\"grid_search\", resolution = 5) tuner ## : Grid Search ## * Parameters: batch_size=1, resolution=5 ## * Parameter classes: ParamLgl, ParamInt, ParamDbl, ParamFct ## * Properties: dependencies, single-crit, multi-crit ## * Packages: mlr3tuning, bbotk tuner$optimize(instance) ## cost gamma learner_param_vals x_domain classif.ce ## 1: 5.756463 -5.756463 0.1828847 as.data.table(instance$archive)[, .(cost, gamma, classif.ce, batch_nr, resample_result)] ## cost gamma classif.ce batch_nr resample_result ## 1: -5.756463 5.756463 0.4663216 1 ## 2: 5.756463 -5.756463 0.1828847 2 ## 3: 11.512925 5.756463 0.4663216 3 ## 4: 5.756463 11.512925 0.4663216 4 ## 5: -11.512925 -11.512925 0.4663216 5 ## --- ## 21: -5.756463 -5.756463 0.4663216 21 ## 22: 11.512925 11.512925 0.4663216 22 ## 23: -11.512925 11.512925 0.4663216 23 ## 24: 11.512925 -5.756463 0.1828847 24 ## 25: 0.000000 -5.756463 0.2402346 25 library(mlr3viz) autoplot(instance, type = \"surface\") learner$param_set$values = 
instance$result_learner_param_vals learner$train(tsk(\"sonar\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Rush Data Storage — ArchiveAsyncTuning","title":"Rush Data Storage — ArchiveAsyncTuning","text":"`ArchiveAsyncTuning“ stores evaluated hyperparameter configurations performance scores rush::Rush database.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Rush Data Storage — ArchiveAsyncTuning","text":"ArchiveAsyncTuning connector rush::Rush database.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"data-structure","dir":"Reference","previous_headings":"","what":"Data Structure","title":"Rush Data Storage — ArchiveAsyncTuning","text":"table ($data) following columns: One column hyperparameter search space ($search_space). One (list-)column internal_tuned_values One column performance measure ($codomain). x_domain (list()) Lists (transformed) hyperparameter values passed learner. runtime_learners (numeric(1)) Sum training predict times logged learners per mlr3::ResampleResult / evaluation. include potential overhead time. timestamp (POSIXct) Time stamp evaluation logged archive. batch_nr (integer(1)) Hyperparameters evaluated batches. batch unique batch number.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Rush Data Storage — ArchiveAsyncTuning","text":"analyzing tuning results, recommended pass ArchiveAsyncTuning .data.table(). 
returned data table contains mlr3::ResampleResult hyperparameter evaluation.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"s-methods","dir":"Reference","previous_headings":"","what":"S3 Methods","title":"Rush Data Storage — ArchiveAsyncTuning","text":".data.table.ArchiveTuning(x, unnest = \"x_domain\", exclude_columns = \"uhash\", measures = NULL) Returns tabular view evaluated hyperparameter configurations. ArchiveAsyncTuning -> data.table::data.table() x (ArchiveAsyncTuning) unnest (character()) Transforms list columns separate columns. Set NULL column unnested. exclude_columns (character()) Exclude columns table. Set NULL column excluded. measures (List mlr3::Measure) Score hyperparameter configurations additional measures.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Rush Data Storage — ArchiveAsyncTuning","text":"bbotk::Archive -> bbotk::ArchiveAsync -> ArchiveAsyncTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Rush Data Storage — ArchiveAsyncTuning","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner. 
benchmark_result (mlr3::BenchmarkResult) Benchmark result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Rush Data Storage — ArchiveAsyncTuning","text":"bbotk::Archive$format() bbotk::Archive$help() bbotk::ArchiveAsync$best() bbotk::ArchiveAsync$clear() bbotk::ArchiveAsync$data_with_state() bbotk::ArchiveAsync$nds_selection() bbotk::ArchiveAsync$pop_point() bbotk::ArchiveAsync$push_failed_point() bbotk::ArchiveAsync$push_points() bbotk::ArchiveAsync$push_result() bbotk::ArchiveAsync$push_running_point()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Rush Data Storage — ArchiveAsyncTuning","text":"ArchiveAsyncTuning$new() ArchiveAsyncTuning$learner() ArchiveAsyncTuning$learners() ArchiveAsyncTuning$learner_param_vals() ArchiveAsyncTuning$predictions() ArchiveAsyncTuning$resample_result() ArchiveAsyncTuning$print() ArchiveAsyncTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$new( search_space, codomain, rush, internal_search_space = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"search_space (paradox::ParamSet) Hyperparameter search space. 
NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). codomain (bbotk::Codomain) Specifies codomain objective function .e. set performance measures. Internally created provided mlr3::Measures. rush (Rush) rush instance supplied, tuning runs without batches. internal_search_space (paradox::ParamSet NULL) internal search space. check_values (logical(1)) TRUE (default), hyperparameter configurations check validity.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-learner-","dir":"Reference","previous_headings":"","what":"Method learner()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve mlr3::Learner -th evaluation, position unique hash uhash. uhash mutually exclusive. Learner contain model. Use $learners() get learners models.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$learner(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-learners-","dir":"Reference","previous_headings":"","what":"Method learners()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve list trained mlr3::Learner objects -th evaluation, position unique hash uhash. 
uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$learners(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-learner-param-vals-","dir":"Reference","previous_headings":"","what":"Method learner_param_vals()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve param values -th evaluation, position unique hash uhash. uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$learner_param_vals(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-3","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-predictions-","dir":"Reference","previous_headings":"","what":"Method predictions()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve list mlr3::Prediction objects -th evaluation, position unique hash uhash. 
uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$predictions(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-4","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-resample-result-","dir":"Reference","previous_headings":"","what":"Method resample_result()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Retrieve mlr3::ResampleResult -th evaluation, position unique hash uhash. uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-5","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$resample_result(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-5","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"(integer(1)) iteration value filter . 
uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-print-","dir":"Reference","previous_headings":"","what":"Method print()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"Printer.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-6","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$print()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-6","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"... (ignored).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Rush Data Storage — ArchiveAsyncTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"usage-7","dir":"Reference","previous_headings":"","what":"Usage","title":"Rush Data Storage — ArchiveAsyncTuning","text":"","code":"ArchiveAsyncTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveAsyncTuning.html","id":"arguments-7","dir":"Reference","previous_headings":"","what":"Arguments","title":"Rush Data Storage — ArchiveAsyncTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"ArchiveBatchTuning stores evaluated hyperparameter configurations performance scores 
data.table::data.table().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"ArchiveBatchTuning container around data.table::data.table(). row corresponds single evaluation hyperparameter configuration. See section Data Structure information. archive stores additionally mlr3::BenchmarkResult ($benchmark_result) records resampling experiments. experiment corresponds single evaluation hyperparameter configuration. table ($data) benchmark result ($benchmark_result) linked uhash column. archive passed .data.table(), joined automatically.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"data-structure","dir":"Reference","previous_headings":"","what":"Data Structure","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"table ($data) following columns: One column hyperparameter search space ($search_space). One (list-)column internal_tuned_values One column performance measure ($codomain). x_domain (list()) Lists (transformed) hyperparameter values passed learner. runtime_learners (numeric(1)) Sum training predict times logged learners per mlr3::ResampleResult / evaluation. include potential overhead time. timestamp (POSIXct) Time stamp evaluation logged archive. batch_nr (integer(1)) Hyperparameters evaluated batches. batch unique batch number. 
uhash (character(1)) Connects hyperparameter configuration resampling experiment stored mlr3::BenchmarkResult.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"analyzing tuning results, recommended pass ArchiveBatchTuning .data.table(). returned data table joined benchmark result adds mlr3::ResampleResult hyperparameter evaluation. archive provides various getters (e.g. $learners()) ease access. getters extract position () unique hash (uhash). complete list getters see methods section. benchmark result ($benchmark_result) allows score hyperparameter configurations different measure. Alternatively, measures can supplied .data.table(). mlr3viz package provides visualizations tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"s-methods","dir":"Reference","previous_headings":"","what":"S3 Methods","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":".data.table.ArchiveTuning(x, unnest = \"x_domain\", exclude_columns = \"uhash\", measures = NULL) Returns tabular view evaluated hyperparameter configurations. ArchiveBatchTuning -> data.table::data.table() x (ArchiveBatchTuning) unnest (character()) Transforms list columns separate columns. Set NULL column unnested. exclude_columns (character()) Exclude columns table. Set NULL column excluded. 
measures (List mlr3::Measure) Score hyperparameter configurations additional measures.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"bbotk::Archive -> bbotk::ArchiveBatch -> ArchiveBatchTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"benchmark_result (mlr3::BenchmarkResult) Benchmark result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"bbotk::Archive$format() bbotk::Archive$help() bbotk::ArchiveBatch$add_evals() bbotk::ArchiveBatch$best() bbotk::ArchiveBatch$clear() bbotk::ArchiveBatch$nds_selection()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"ArchiveBatchTuning$new() ArchiveBatchTuning$learner() ArchiveBatchTuning$learners() ArchiveBatchTuning$learner_param_vals() 
ArchiveBatchTuning$predictions() ArchiveBatchTuning$resample_result() ArchiveBatchTuning$print() ArchiveBatchTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$new( search_space, codomain, check_values = FALSE, internal_search_space = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). codomain (bbotk::Codomain) Specifies codomain objective function .e. set performance measures. Internally created provided mlr3::Measures. check_values (logical(1)) TRUE (default), hyperparameter configurations check validity. internal_search_space (paradox::ParamSet NULL) internal search space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-learner-","dir":"Reference","previous_headings":"","what":"Method learner()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve mlr3::Learner -th evaluation, position unique hash uhash. uhash mutually exclusive. Learner contain model. 
Use $learners() get learners models.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$learner(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-learners-","dir":"Reference","previous_headings":"","what":"Method learners()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve list trained mlr3::Learner objects -th evaluation, position unique hash uhash. uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$learners(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . 
uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-learner-param-vals-","dir":"Reference","previous_headings":"","what":"Method learner_param_vals()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve param values -th evaluation, position unique hash uhash. uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$learner_param_vals(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-3","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-predictions-","dir":"Reference","previous_headings":"","what":"Method predictions()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve list mlr3::Prediction objects -th evaluation, position unique hash uhash. 
uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$predictions(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-4","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-resample-result-","dir":"Reference","previous_headings":"","what":"Method resample_result()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Retrieve mlr3::ResampleResult -th evaluation, position unique hash uhash. uhash mutually exclusive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-5","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$resample_result(i = NULL, uhash = NULL)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-5","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"(integer(1)) iteration value filter . 
uhash (logical(1)) uhash value filter .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-print-","dir":"Reference","previous_headings":"","what":"Method print()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"Printer.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-6","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$print()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-6","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"... (ignored).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"usage-7","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"","code":"ArchiveBatchTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ArchiveBatchTuning.html","id":"arguments-7","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Logging Evaluated Hyperparameter Configurations — ArchiveBatchTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Automatic Tuning — AutoTuner","title":"Class for 
Automatic Tuning — AutoTuner","text":"AutoTuner wraps mlr3::Learner augments automatic tuning process given set hyperparameters. auto_tuner() function creates AutoTuner object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Automatic Tuning — AutoTuner","text":"AutoTuner mlr3::Learner wraps another mlr3::Learner performs following steps $train(): hyperparameters wrapped (inner) learner trained training data via resampling. tuning can specified providing Tuner, bbotk::Terminator, search space paradox::ParamSet, mlr3::Resampling mlr3::Measure. best found hyperparameter configuration set hyperparameters wrapped (inner) learner stored $learner. Access tuned hyperparameters via $tuning_result. final model fit complete training data using now parametrized wrapped learner. respective model available via field $learner$model. $predict() AutoTuner just calls predict method wrapped (inner) learner. set timeout disabled fitting final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"validation","dir":"Reference","previous_headings":"","what":"Validation","title":"Class for Automatic Tuning — AutoTuner","text":"AutoTuner \"validation\" property. enable validation tuning, set $validate field tuned learner. also possible via set_validate().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"nested-resampling","dir":"Reference","previous_headings":"","what":"Nested Resampling","title":"Class for Automatic Tuning — AutoTuner","text":"Nested resampling performed passing AutoTuner mlr3::resample() mlr3::benchmark(). access inner resampling results, set store_tuning_instance = TRUE execute mlr3::resample() mlr3::benchmark() store_models = TRUE (see examples). mlr3::Resampling passed AutoTuner meant inner resampling, operating training set arbitrary outer resampling. 
reason, inner resampling instantiated. instantiated resampling passed, AutoTuner fails row id inner resampling present training set outer resampling.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Class for Automatic Tuning — AutoTuner","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Automatic Tuning — AutoTuner","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"super-class","dir":"Reference","previous_headings":"","what":"Super class","title":"Class for Automatic Tuning — AutoTuner","text":"mlr3::Learner -> AutoTuner","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Automatic Tuning — AutoTuner","text":"instance_args (list()) arguments construction create TuningInstanceBatchSingleCrit. 
tuner (Tuner) Optimization algorithm.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Class for Automatic Tuning — AutoTuner","text":"internal_valid_scores Retrieves inner validation scores named list(). Returns NULL learner trained yet. archive ArchiveBatchTuning Archive TuningInstanceBatchSingleCrit. learner (mlr3::Learner) Trained learner tuning_instance (TuningInstanceAsyncSingleCrit | TuningInstanceBatchSingleCrit) Internally created tuning instance intermediate results. tuning_result (data.table::data.table) Short-cut result tuning instance. predict_type (character(1)) Stores currently active predict type, e.g. \"response\". Must element $predict_types. hash (character(1)) Hash (unique identifier) object. phash (character(1)) Hash (unique identifier) partial object, excluding components varied systematically tuning (parameter values) feature selection (feature names).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Automatic Tuning — AutoTuner","text":"mlr3::Learner$encapsulate() mlr3::Learner$format() mlr3::Learner$help() mlr3::Learner$predict() mlr3::Learner$predict_newdata() mlr3::Learner$reset() mlr3::Learner$train()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Automatic Tuning — AutoTuner","text":"AutoTuner$new() AutoTuner$base_learner() AutoTuner$importance() AutoTuner$selected_features() AutoTuner$oob_error() AutoTuner$loglik() AutoTuner$print() AutoTuner$marshal() AutoTuner$unmarshal() AutoTuner$marshaled() 
AutoTuner$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Automatic Tuning — AutoTuner","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$new( tuner, learner, resampling, measure = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_tuning_instance = TRUE, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL, id = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"tuner (Tuner) Optimization algorithm. learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. store_tuning_instance (logical(1)) TRUE (default), stores internally created TuningInstanceBatchSingleCrit intermediate results slot $tuning_instance. 
store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches. id (character(1)) Identifier new instance.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-base-learner-","dir":"Reference","previous_headings":"","what":"Method base_learner()","title":"Class for Automatic Tuning — AutoTuner","text":"Extracts base learner nested learner objects like GraphLearner mlr3pipelines. recursive = 0, (tuned) learner returned.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$base_learner(recursive = Inf)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"recursive (integer(1)) Depth recursion multiple nested objects.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-importance-","dir":"Reference","previous_headings":"","what":"Method importance()","title":"Class for 
Automatic Tuning — AutoTuner","text":"importance scores final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$importance()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-1","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"Named numeric().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-selected-features-","dir":"Reference","previous_headings":"","what":"Method selected_features()","title":"Class for Automatic Tuning — AutoTuner","text":"selected features final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$selected_features()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-2","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"character().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-oob-error-","dir":"Reference","previous_headings":"","what":"Method oob_error()","title":"Class for Automatic Tuning — AutoTuner","text":"--bag error final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$oob_error()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-3","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — 
AutoTuner","text":"numeric(1).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-loglik-","dir":"Reference","previous_headings":"","what":"Method loglik()","title":"Class for Automatic Tuning — AutoTuner","text":"log-likelihood final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-5","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$loglik()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-4","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"logLik. Printer.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-6","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$print()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"... (ignored).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-marshal-","dir":"Reference","previous_headings":"","what":"Method marshal()","title":"Class for Automatic Tuning — AutoTuner","text":"Marshal learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-7","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$marshal(...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-3","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"... 
() Additional parameters.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-5","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"self","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-unmarshal-","dir":"Reference","previous_headings":"","what":"Method unmarshal()","title":"Class for Automatic Tuning — AutoTuner","text":"Unmarshal learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-8","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$unmarshal(...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-4","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"... () Additional parameters.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"returns-6","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Automatic Tuning — AutoTuner","text":"self","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-marshaled-","dir":"Reference","previous_headings":"","what":"Method marshaled()","title":"Class for Automatic Tuning — AutoTuner","text":"Whether learner marshaled.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-9","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$marshaled()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Automatic Tuning — AutoTuner","text":"objects class cloneable 
method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"usage-10","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"AutoTuner$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"arguments-5","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Automatic Tuning — AutoTuner","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/AutoTuner.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Class for Automatic Tuning — AutoTuner","text":"","code":"# Automatic Tuning # split to train and external set task = tsk(\"penguins\") split = partition(task, ratio = 0.8) # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # create auto tuner at = auto_tuner( tuner = tnr(\"random_search\"), learner = learner, resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) # tune hyperparameters and fit final model at$train(task, row_ids = split$train) # predict with final model at$predict(task, row_ids = split$test) #> for 69 observations: #> row_ids truth response #> 1 Adelie Adelie #> 2 Adelie Adelie #> 9 Adelie Adelie #> --- --- --- #> 318 Chinstrap Chinstrap #> 334 Chinstrap Chinstrap #> 338 Chinstrap Chinstrap # show tuning result at$tuning_result #> cp learner_param_vals x_domain classif.ce #> #> 1: -4.797088 0.05434783 # model slot contains trained learner and tuning instance at$model #> $learner #> : Classification Tree #> * Model: rpart #> * Parameters: cp=0.008254, xval=0 #> * Packages: mlr3, rpart #> * Predict Types: [response], prob #> * Feature Types: logical, integer, numeric, factor, ordered #> * Properties: importance, missings, multiclass, selected_features, #> twoclass, weights #> #> $tuning_instance #> #> * State: 
Optimized #> * Objective: #> * Search Space: #> id class lower upper nlevels #> #> 1: cp ParamDbl -9.21034 -2.302585 Inf #> * Terminator: #> * Result: #> cp classif.ce #> #> 1: -4.797088 0.05434783 #> * Archive: #> cp classif.ce #> #> 1: -2.529580 0.08695652 #> 2: -4.797088 0.05434783 #> 3: -2.447415 0.08695652 #> 4: -4.854704 0.05434783 #> #> attr(,\"class\") #> [1] \"auto_tuner_model\" \"list\" # shortcut trained learner at$learner #> : Classification Tree #> * Model: rpart #> * Parameters: cp=0.008254, xval=0 #> * Packages: mlr3, rpart #> * Predict Types: [response], prob #> * Feature Types: logical, integer, numeric, factor, ordered #> * Properties: importance, missings, multiclass, selected_features, #> twoclass, weights # shortcut tuning instance at$tuning_instance #> #> * State: Optimized #> * Objective: #> * Search Space: #> id class lower upper nlevels #> #> 1: cp ParamDbl -9.21034 -2.302585 Inf #> * Terminator: #> * Result: #> cp classif.ce #> #> 1: -4.797088 0.05434783 #> * Archive: #> cp classif.ce #> #> 1: -2.529580 0.08695652 #> 2: -4.797088 0.05434783 #> 3: -2.447415 0.08695652 #> 4: -4.854704 0.05434783 # Nested Resampling at = auto_tuner( tuner = tnr(\"random_search\"), learner = learner, resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) resampling_outer = rsmp(\"cv\", folds = 3) rr = resample(task, at, resampling_outer, store_models = TRUE) # retrieve inner tuning results. 
extract_inner_tuning_results(rr) #> iteration cp classif.ce learner_param_vals x_domain task_id #> #> 1: 1 -8.698664 0.03947368 penguins #> 2: 2 -2.421343 0.06578947 penguins #> 3: 3 -7.917442 0.06493506 penguins #> learner_id resampling_id #> #> 1: classif.rpart.tuned cv #> 2: classif.rpart.tuned cv #> 3: classif.rpart.tuned cv # performance scores estimated on the outer resampling rr$score() #> task_id learner_id resampling_id iteration classif.ce #> #> 1: penguins classif.rpart.tuned cv 1 0.06956522 #> 2: penguins classif.rpart.tuned cv 2 0.06956522 #> 3: penguins classif.rpart.tuned cv 3 0.01754386 #> Hidden columns: task, learner, resampling, prediction_test # unbiased performance of the final model trained on the full data set rr$aggregate() #> classif.ce #> 0.05222476"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"Specialized bbotk::CallbackAsync asynchronous tuning. Callbacks allow customize behavior processes mlr3tuning. callback_async_tuning() function creates CallbackAsyncTuning. Predefined callbacks stored dictionary mlr_callbacks can retrieved clbk(). information tuning callbacks see callback_async_tuning().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"mlr3misc::Callback -> bbotk::CallbackAsync -> CallbackAsyncTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"on_eval_after_xs (function()) Stage called xs passed. 
Called ObjectiveTuningAsync$eval(). on_eval_after_resample (function()) Stage called hyperparameter configurations evaluated. Called ObjectiveTuningAsync$eval(). on_eval_before_archive (function()) Stage called performance values written archive. Called ObjectiveTuningAsync$eval(). on_tuning_result_begin (function()) Stage called results written. Called TuningInstance*$assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"mlr3misc::Callback$call() mlr3misc::Callback$format() mlr3misc::Callback$help() mlr3misc::Callback$initialize() mlr3misc::Callback$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"CallbackAsyncTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"","code":"CallbackAsyncTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackAsyncTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create Asynchronous Tuning Callback — CallbackAsyncTuning","text":"deep Whether make deep 
clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Create Batch Tuning Callback — CallbackBatchTuning","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"Specialized bbotk::CallbackBatch batch tuning. Callbacks allow customize behavior processes mlr3tuning. callback_batch_tuning() function creates CallbackBatchTuning. Predefined callbacks stored dictionary mlr_callbacks can retrieved clbk(). information tuning callbacks see callback_batch_tuning().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"mlr3misc::Callback -> bbotk::CallbackBatch -> CallbackBatchTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"on_eval_after_design (function()) Stage called design created. Called ObjectiveTuningBatch$eval_many(). on_eval_after_benchmark (function()) Stage called hyperparameter configurations evaluated. Called ObjectiveTuningBatch$eval_many(). on_eval_before_archive (function()) Stage called performance values written archive. Called ObjectiveTuningBatch$eval_many(). on_tuning_result_begin (function()) Stage called results written. 
Called TuningInstance*$assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"mlr3misc::Callback$call() mlr3misc::Callback$format() mlr3misc::Callback$help() mlr3misc::Callback$initialize() mlr3misc::Callback$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"CallbackBatchTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"","code":"CallbackBatchTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/CallbackBatchTuning.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create Batch Tuning Callback — CallbackBatchTuning","text":"","code":"# write archive to disk callback_batch_tuning(\"mlr3tuning.backup\", on_optimization_end = function(callback, context) { saveRDS(context$instance$archive, \"archive.rds\") } ) #> #> * Active Stages: 
on_optimization_end"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Asynchronous Tuning Context — ContextAsyncTuning","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"CallbackAsyncTuning accesses modifies data optimization via ContextAsyncTuning. See section active bindings list modifiable objects. See callback_async_tuning() list stages access ContextAsyncTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"Changes $instance $optimizer stages executed workers reflected main process.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"mlr3misc::Context -> bbotk::ContextAsync -> ContextAsyncTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"xs_learner (list()) hyperparameter configuration currently evaluated. Contains values learner scale .e. transformations applied. resample_result (mlr3::BenchmarkResult) resample result hyperparameter configuration currently evaluated. aggregated_performance (list()) Aggregated performance scores training time evaluated hyperparameter configuration. list passed archive. callback can add additional elements also written archive. 
result_learner_param_vals (list()) learner parameter values passed instance$assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"mlr3misc::Context$format() mlr3misc::Context$print() bbotk::ContextAsync$initialize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"ContextAsyncTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"","code":"ContextAsyncTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextAsyncTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Asynchronous Tuning Context — ContextAsyncTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Batch Tuning Context — ContextBatchTuning","title":"Batch Tuning Context — ContextBatchTuning","text":"CallbackBatchTuning accesses modifies data optimization via ContextBatchTuning. See section active bindings list modifiable objects. 
See callback_batch_tuning() list stages access ContextBatchTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Batch Tuning Context — ContextBatchTuning","text":"mlr3misc::Context -> bbotk::ContextBatch -> ContextBatchTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Batch Tuning Context — ContextBatchTuning","text":"xss (list()) hyperparameter configurations latest batch. Contains values learner scale .e. transformations applied. See $xdt untransformed values. design (data.table::data.table) benchmark design latest batch. benchmark_result (mlr3::BenchmarkResult) benchmark result latest batch. aggregated_performance (data.table::data.table) Aggregated performance scores training time latest batch. data table passed archive. callback can add additional columns also written archive. 
result_learner_param_vals (list()) learner parameter values passed instance$assign_result().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Batch Tuning Context — ContextBatchTuning","text":"mlr3misc::Context$format() mlr3misc::Context$print() bbotk::ContextBatch$initialize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Batch Tuning Context — ContextBatchTuning","text":"ContextBatchTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Batch Tuning Context — ContextBatchTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Batch Tuning Context — ContextBatchTuning","text":"","code":"ContextBatchTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ContextBatchTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Batch Tuning Context — ContextBatchTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Tuning Objective — ObjectiveTuning","title":"Class for Tuning Objective — ObjectiveTuning","text":"Stores objective function estimates performance hyperparameter configurations. 
class usually constructed internally TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"super-class","dir":"Reference","previous_headings":"","what":"Super class","title":"Class for Tuning Objective — ObjectiveTuning","text":"bbotk::Objective -> ObjectiveTuning","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Tuning Objective — ObjectiveTuning","text":"task (mlr3::Task). learner (mlr3::Learner). resampling (mlr3::Resampling). measures (list mlr3::Measure). store_models (logical(1)). store_benchmark_result (logical(1)). callbacks (List mlr3misc::Callback). default_values (named list()). internal_search_space (paradox::ParamSet). Internal search space internal tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Tuning Objective — ObjectiveTuning","text":"bbotk::Objective$eval() bbotk::Objective$eval_dt() bbotk::Objective$eval_many() bbotk::Objective$format() bbotk::Objective$help() bbotk::Objective$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Tuning Objective — ObjectiveTuning","text":"ObjectiveTuning$new() ObjectiveTuning$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Tuning Objective — ObjectiveTuning","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuning","text":"","code":"ObjectiveTuning$new( task, learner, resampling, measures, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, internal_search_space = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuning","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. 
internal_search_space (paradox::ParamSet NULL) internal search space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Tuning Objective — ObjectiveTuning","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuning","text":"","code":"ObjectiveTuning$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuning.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuning","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Tuning Objective — ObjectiveTuningAsync","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"Stores objective function estimates performance hyperparameter configurations. 
class usually constructed internally TuningInstanceAsyncSingleCrit TuningInstanceAsyncMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"bbotk::Objective -> mlr3tuning::ObjectiveTuning -> ObjectiveTuningAsync","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"bbotk::Objective$eval() bbotk::Objective$eval_dt() bbotk::Objective$eval_many() bbotk::Objective$format() bbotk::Objective$help() bbotk::Objective$print() mlr3tuning::ObjectiveTuning$initialize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"ObjectiveTuningAsync$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"","code":"ObjectiveTuningAsync$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningAsync.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuningAsync","text":"deep Whether make deep 
clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Tuning Objective — ObjectiveTuningBatch","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"Stores objective function estimates performance hyperparameter configurations. class usually constructed internally TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"bbotk::Objective -> mlr3tuning::ObjectiveTuning -> ObjectiveTuningBatch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"archive (ArchiveBatchTuning).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"bbotk::Objective$eval() bbotk::Objective$eval_dt() bbotk::Objective$eval_many() bbotk::Objective$format() bbotk::Objective$help() bbotk::Objective$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"ObjectiveTuningBatch$new() ObjectiveTuningBatch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"Creates new 
instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"","code":"ObjectiveTuningBatch$new( task, learner, resampling, measures, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, archive = NULL, callbacks = NULL, internal_search_space = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. archive (ArchiveBatchTuning) Reference archive TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit. NULL (default), benchmark result models stored. callbacks (list mlr3misc::Callback) List callbacks. 
internal_search_space (paradox::ParamSet NULL) internal search space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"","code":"ObjectiveTuningBatch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ObjectiveTuningBatch.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Tuning Objective — ObjectiveTuningBatch","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Tuner — Tuner","title":"Tuner — Tuner","text":"Tuner implements optimization algorithm.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Tuner — Tuner","text":"Tuner abstract base class implements base functionality tuner must provide.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"extension-packages","dir":"Reference","previous_headings":"","what":"Extension Packages","title":"Tuner — Tuner","text":"Additional tuners provided following packages. mlr3hyperband adds Hyperband Successive Halving algorithm. mlr3mbo adds Bayesian optimization methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Tuner — Tuner","text":"several sections hyperparameter optimization mlr3book. 
Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Tuner — Tuner","text":"id (character(1)) Identifier object. Used tables, plot text output.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Tuner — Tuner","text":"param_set (paradox::ParamSet) Set control parameters. param_classes (character()) Supported parameter classes learner hyperparameters tuner can optimize, given paradox::ParamSet $class field. properties (character()) Set properties tuner. Must subset mlr_reflections$tuner_properties. packages (character()) Set required packages. Note packages loaded via requireNamespace(), attached. label (character(1)) Label object. Can used tables, plot text output instead ID. man (character(1)) String format [pkg]::[topic] pointing manual page object. 
referenced help package can opened via method $help().","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Tuner — Tuner","text":"Tuner$new() Tuner$format() Tuner$print() Tuner$help() Tuner$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Tuner — Tuner","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — Tuner","text":"","code":"Tuner$new( id = \"tuner\", param_set, param_classes, properties, packages = character(), label = NA_character_, man = NA_character_ )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Tuner — Tuner","text":"id (character(1)) Identifier new instance. param_set (paradox::ParamSet) Set control parameters. param_classes (character()) Supported parameter classes learner hyperparameters tuner can optimize, given paradox::ParamSet $class field. properties (character()) Set properties tuner. Must subset mlr_reflections$tuner_properties. packages (character()) Set required packages. Note packages loaded via requireNamespace(), attached. label (character(1)) Label object. Can used tables, plot text output instead ID. man (character(1)) String format [pkg]::[topic] pointing manual page object. 
referenced help package can opened via method $help().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-format-","dir":"Reference","previous_headings":"","what":"Method format()","title":"Tuner — Tuner","text":"Helper print outputs.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — Tuner","text":"","code":"Tuner$format(...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Tuner — Tuner","text":"... (ignored).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Tuner — Tuner","text":"(character()).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-print-","dir":"Reference","previous_headings":"","what":"Method print()","title":"Tuner — Tuner","text":"Print method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — Tuner","text":"","code":"Tuner$print()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"returns-1","dir":"Reference","previous_headings":"","what":"Returns","title":"Tuner — Tuner","text":"(character()).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-help-","dir":"Reference","previous_headings":"","what":"Method help()","title":"Tuner — Tuner","text":"Opens corresponding help page referenced field $man.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — 
Tuner","text":"","code":"Tuner$help()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Tuner — Tuner","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Tuner — Tuner","text":"","code":"Tuner$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/Tuner.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Tuner — Tuner","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Asynchronous Tuning Algorithms — TunerAsync","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"TunerAsync implements asynchronous optimization algorithm.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"TunerAsync abstract base class implements base functionality asynchronous tuner must provide.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. 
Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"super-class","dir":"Reference","previous_headings":"","what":"Super class","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"mlr3tuning::Tuner -> TunerAsync","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$initialize() mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"TunerAsync$optimize() TunerAsync$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"Performs tuning TuningInstanceAsyncSingleCrit TuningInstanceAsyncMultiCrit termination. single evaluations written ArchiveAsyncTuning resides TuningInstanceAsyncSingleCrit/TuningInstanceAsyncMultiCrit. 
result written instance object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"","code":"TunerAsync$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"inst (TuningInstanceAsyncSingleCrit | TuningInstanceAsyncMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"data.table::data.table()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"","code":"TunerAsync$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsync.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Asynchronous Tuning Algorithms — TunerAsync","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":null,"dir":"Reference","previous_headings":"","what":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"Internally used transform bbotk::Optimizer 
Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"mlr3tuning::Tuner -> mlr3tuning::TunerAsync -> TunerAsyncFromOptimizerAsync","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"param_set (paradox::ParamSet) Set control parameters.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"TunerAsyncFromOptimizerAsync$new() TunerAsyncFromOptimizerAsync$optimize() TunerAsyncFromOptimizerAsync$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"","code":"TunerAsyncFromOptimizerAsync$new(optimizer, man = 
NA_character_)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"optimizer bbotk::Optimizer Optimizer called. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"Performs tuning TuningInstanceBatchSingleCrit / TuningInstanceBatchMultiCrit termination. single evaluations final results written ArchiveAsyncTuning resides TuningInstanceBatchSingleCrit/TuningInstanceBatchMultiCrit. final result returned.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"","code":"TunerAsyncFromOptimizerAsync$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"inst (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"TunerAsyncFromOptimizerAsync — 
TunerAsyncFromOptimizerAsync","text":"data.table::data.table.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"","code":"TunerAsyncFromOptimizerAsync$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerAsyncFromOptimizerAsync.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerAsyncFromOptimizerAsync — TunerAsyncFromOptimizerAsync","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Batch Tuning Algorithms — TunerBatch","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"TunerBatch implements optimization algorithm.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"TunerBatch abstract base class implements base functionality tuner must provide. subclass implemented following way: Inherit Tuner. Specify private abstract method $.optimize() use call optimizer. need call instance$eval_batch() evaluate design points. batch evaluation requested TuningInstanceBatchSingleCrit/TuningInstanceBatchMultiCrit object instance, batch possibly executed parallel via mlr3::benchmark(), evaluations stored inside instance$archive. 
batch evaluation, bbotk::Terminator checked, positive, exception class \"terminated_error\" generated. later case current batch evaluations still stored instance, numeric scores sent back handling optimizer lost execution control. exception caught select best configuration instance$archive return . Note therefore points specified bbotk::Terminator may evaluated, Terminator checked batch evaluation, -evaluation batch. many depends setting batch size. Overwrite private super-method .assign_result() want decide estimate final configuration instance estimated performance. default behavior : pick best resample-experiment, regarding given measure, assign configuration aggregated performance instance.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"private-methods","dir":"Reference","previous_headings":"","what":"Private Methods","title":"Class for Batch Tuning Algorithms — TunerBatch","text":".optimize(instance) -> NULL Abstract base method. Implement specify tuning subclass. See details sections. .assign_result(instance) -> NULL Abstract base method. Implement specify final configuration selected. See details sections.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. 
Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"super-class","dir":"Reference","previous_headings":"","what":"Super class","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"mlr3tuning::Tuner -> TunerBatch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"TunerBatch$new() TunerBatch$optimize() TunerBatch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"","code":"TunerBatch$new( id = \"tuner_batch\", param_set, param_classes, properties, packages = character(), label = NA_character_, man = NA_character_ )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"id (character(1)) Identifier new instance. param_set (paradox::ParamSet) Set control parameters. 
param_classes (character()) Supported parameter classes learner hyperparameters tuner can optimize, given paradox::ParamSet $class field. properties (character()) Set properties tuner. Must subset mlr_reflections$tuner_properties. packages (character()) Set required packages. Note packages loaded via requireNamespace(), attached. label (character(1)) Label object. Can used tables, plot text output instead ID. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"Performs tuning TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit termination. single evaluations written ArchiveBatchTuning resides TuningInstanceBatchSingleCrit/TuningInstanceBatchMultiCrit. result written instance object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"","code":"TunerBatch$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"inst (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"data.table::data.table()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Batch 
Tuning Algorithms — TunerBatch","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"","code":"TunerBatch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatch.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Batch Tuning Algorithms — TunerBatch","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":null,"dir":"Reference","previous_headings":"","what":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"Internally used transform bbotk::Optimizer Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> TunerBatchFromOptimizerBatch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"param_set (paradox::ParamSet) Set control parameters.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() 
mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"TunerBatchFromOptimizerBatch$new() TunerBatchFromOptimizerBatch$optimize() TunerBatchFromOptimizerBatch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"","code":"TunerBatchFromOptimizerBatch$new(optimizer, man = NA_character_)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"optimizer bbotk::Optimizer Optimizer called. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"Performs tuning TuningInstanceBatchSingleCrit / TuningInstanceBatchMultiCrit termination. single evaluations final results written ArchiveBatchTuning resides TuningInstanceBatchSingleCrit/TuningInstanceBatchMultiCrit. 
final result returned.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"","code":"TunerBatchFromOptimizerBatch$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"inst (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"data.table::data.table.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"","code":"TunerBatchFromOptimizerBatch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TunerBatchFromOptimizerBatch.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"TunerBatchFromOptimizerBatch — TunerBatchFromOptimizerBatch","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Multi-Criteria Tuning with Rush — 
TuningInstanceAsyncMultiCrit","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"TuningInstanceAsyncMultiCrit specifies tuning problem Tuner. function ti_async() creates TuningInstanceAsyncMultiCrit function tune() creates instance internally.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"instance contains ObjectiveTuningAsync object encodes black box objective function Tuner optimize. instance allows basic operations querying objective design points ($eval_async()). operation usually done Tuner. Hyperparameter configurations asynchronously sent workers evaluated calling mlr3::resample(). evaluated hyperparameter configurations stored ArchiveAsyncTuning ($archive). batch evaluated, bbotk::Terminator queried remaining budget. available budget exhausted, exception raised, evaluations can performed point . tuner also supposed store final result, consisting selected hyperparameter configuration associated estimated performance values, calling method instance$.assign_result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. 
Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"analyzing tuning results, recommended pass ArchiveAsyncTuning .data.table(). returned data table contains mlr3::ResampleResult hyperparameter evaluation.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceAsync -> bbotk::OptimInstanceAsyncMultiCrit -> TuningInstanceAsyncMultiCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"result_learner_param_vals (list()) List param values optimal learner call.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"bbotk::OptimInstance$format() 
bbotk::OptimInstanceAsync$clear() bbotk::OptimInstanceAsync$print() bbotk::OptimInstanceAsync$reconnect()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"TuningInstanceAsyncMultiCrit$new() TuningInstanceAsyncMultiCrit$assign_result() TuningInstanceAsyncMultiCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"","code":"TuningInstanceAsyncMultiCrit$new( task, learner, resampling, measures, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. 
field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"method-assign-result-","dir":"Reference","previous_headings":"","what":"Method assign_result()","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"TunerAsync writes best found points estimated performance values (probably Pareto set / front). internal use.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"","code":"TuningInstanceAsyncMultiCrit$assign_result( xdt, ydt, learner_param_vals = NULL, extra = NULL, ... 
)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"xdt (data.table::data.table()) Hyperparameter values data.table::data.table(). row one configuration. Contains values search space. Can contain additional columns extra information. ydt (numeric(1)) Optimal outcomes, e.g. Pareto front. learner_param_vals (List named list()s) Fixed parameter values learner neither part extra (data.table::data.table()) Additional information. ... () ignored.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"","code":"TuningInstanceAsyncMultiCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncMultiCrit.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi-Criteria Tuning with Rush — TuningInstanceAsyncMultiCrit","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"TuningInstanceAsyncSingleCrit specifies tuning problem TunerAsync. 
function ti_async() creates TuningInstanceAsyncSingleCrit function tune() creates instance internally.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"instance contains ObjectiveTuningAsync object encodes black box objective function Tuner optimize. instance allows basic operations querying objective design points ($eval_async()). operation usually done Tuner. Hyperparameter configurations asynchronously sent workers evaluated calling mlr3::resample(). evaluated hyperparameter configurations stored ArchiveAsyncTuning ($archive). batch evaluated, bbotk::Terminator queried remaining budget. available budget exhausted, exception raised, evaluations can performed point . tuner also supposed store final result, consisting selected hyperparameter configuration associated estimated performance values, calling method instance$.assign_result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"analyzing tuning results, recommended pass ArchiveAsyncTuning .data.table(). 
returned data table contains mlr3::ResampleResult hyperparameter evaluation.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"extension-packages","dir":"Reference","previous_headings":"","what":"Extension Packages","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"mlr3tuning extended following packages. mlr3tuningspaces collection search spaces scientific articles commonly used learners. mlr3hyperband adds Hyperband Successive Halving algorithm. 
mlr3mbo adds Bayesian optimization methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceAsync -> bbotk::OptimInstanceAsyncSingleCrit -> TuningInstanceAsyncSingleCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"result_learner_param_vals (list()) Param values optimal learner call.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"bbotk::OptimInstance$format() bbotk::OptimInstanceAsync$clear() bbotk::OptimInstanceAsync$print() bbotk::OptimInstanceAsync$reconnect()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"TuningInstanceAsyncSingleCrit$new() TuningInstanceAsyncSingleCrit$assign_result() 
TuningInstanceAsyncSingleCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"","code":"TuningInstanceAsyncSingleCrit$new( task, learner, resampling, measure = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. 
internal_search_space (paradox::ParamSet NULL) internal search space. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"method-assign-result-","dir":"Reference","previous_headings":"","what":"Method assign_result()","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"TunerAsync object writes best found point estimated performance value . internal use.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"","code":"TuningInstanceAsyncSingleCrit$assign_result( xdt, y, learner_param_vals = NULL, extra = NULL, ... )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"xdt (data.table::data.table()) Hyperparameter values data.table::data.table(). row one configuration. Contains values search space. Can contain additional columns extra information. y (numeric(1)) Optimal outcome. 
learner_param_vals (List named list()s) Fixed parameter values learner neither part extra (data.table::data.table()) Additional information. ... () ignored.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"","code":"TuningInstanceAsyncSingleCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceAsyncSingleCrit.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning with Rush — TuningInstanceAsyncSingleCrit","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"TuningInstanceBatchMultiCrit specifies tuning problem Tuner. function ti() creates TuningInstanceBatchMultiCrit function tune() creates instance internally.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"instance contains ObjectiveTuningBatch object encodes black box objective function Tuner optimize. instance allows basic operations querying objective design points ($eval_batch()). operation usually done Tuner. 
Evaluations hyperparameter configurations performed batches calling mlr3::benchmark() internally. evaluated hyperparameter configurations stored ArchiveBatchTuning ($archive). batch evaluated, bbotk::Terminator queried remaining budget. available budget exhausted, exception raised, evaluations can performed point . tuner also supposed store final result, consisting selected hyperparameter configuration associated estimated performance values, calling method instance$assign_result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"analyzing tuning results, recommended pass ArchiveBatchTuning .data.table(). returned data table joined benchmark result adds mlr3::ResampleResult hyperparameter evaluation. archive provides various getters (e.g. $learners()) ease access. getters extract position () unique hash (uhash). 
complete list getters see methods section. benchmark result ($benchmark_result) allows score hyperparameter configurations different measure. Alternatively, measures can supplied .data.table(). mlr3viz package provides visualizations tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceBatch -> bbotk::OptimInstanceBatchMultiCrit -> TuningInstanceBatchMultiCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"result_learner_param_vals (list()) List param values optimal learner call.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"bbotk::OptimInstance$clear() bbotk::OptimInstance$format() bbotk::OptimInstance$print() bbotk::OptimInstanceBatch$eval_batch() bbotk::OptimInstanceBatch$objective_function()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for 
Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"TuningInstanceBatchMultiCrit$new() TuningInstanceBatchMultiCrit$assign_result() TuningInstanceBatchMultiCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"","code":"TuningInstanceBatchMultiCrit$new( task, learner, resampling, measures, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). 
internal_search_space (paradox::ParamSet NULL) internal search space. internal_search_space (paradox::ParamSet NULL) internal search space. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"method-assign-result-","dir":"Reference","previous_headings":"","what":"Method assign_result()","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"Tuner object writes best found points estimated performance values . internal use.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"","code":"TuningInstanceBatchMultiCrit$assign_result( xdt, ydt, learner_param_vals = NULL, extra = NULL, ... )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"xdt (data.table::data.table()) Hyperparameter values data.table::data.table(). row one configuration. Contains values search space. Can contain additional columns extra information. ydt (data.table::data.table()) Optimal outcomes, e.g. Pareto front. 
learner_param_vals (List named list()s) Fixed parameter values learner neither part extra (data.table::data.table()) Additional information. ... () ignored.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"","code":"TuningInstanceBatchMultiCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchMultiCrit.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Class for Multi Criteria Tuning — TuningInstanceBatchMultiCrit","text":"","code":"# Hyperparameter optimization on the Palmer Penguins data set task = tsk(\"penguins\") # Load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # Construct tuning instance instance = ti( task = task, learner = learner, resampling = rsmp(\"cv\", folds = 3), measures = msrs(c(\"classif.ce\", \"time_train\")), terminator = trm(\"evals\", n_evals = 4) ) # Choose optimization algorithm tuner = tnr(\"random_search\", batch_size = 2) # Run tuning tuner$optimize(instance) #> cp learner_param_vals x_domain classif.ce time_train #> #> 1: -3.259804 0.09583016 0.003 #> 2: -3.759791 0.09583016 0.003 #> 3: -2.565382 0.09583016 
0.003 #> 4: -3.080830 0.09583016 0.003 # Optimal hyperparameter configurations instance$result #> cp learner_param_vals x_domain classif.ce time_train #> #> 1: -3.259804 0.09583016 0.003 #> 2: -3.759791 0.09583016 0.003 #> 3: -2.565382 0.09583016 0.003 #> 4: -3.080830 0.09583016 0.003 # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce time_train runtime_learners timestamp #> #> 1: -3.259804 0.09583016 0.003 0.016 2024-11-26 14:00:42 #> 2: -3.759791 0.09583016 0.003 0.016 2024-11-26 14:00:42 #> 3: -2.565382 0.09583016 0.003 0.017 2024-11-26 14:00:42 #> 4: -3.080830 0.09583016 0.003 0.016 2024-11-26 14:00:42 #> warnings errors x_domain batch_nr resample_result #> #> 1: 0 0 1 #> 2: 0 0 1 #> 3: 0 0 2 #> 4: 0 0 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"TuningInstanceBatchSingleCrit specifies tuning problem Tuner. function ti() creates TuningInstanceBatchSingleCrit function tune() creates instance internally.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"instance contains ObjectiveTuningBatch object encodes black box objective function Tuner optimize. instance allows basic operations querying objective design points ($eval_batch()). operation usually done Tuner. Evaluations hyperparameter configurations performed batches calling mlr3::benchmark() internally. evaluated hyperparameter configurations stored ArchiveBatchTuning ($archive). batch evaluated, bbotk::Terminator queried remaining budget. 
available budget exhausted, exception raised, evaluations can performed point . tuner also supposed store final result, consisting selected hyperparameter configuration associated estimated performance values, calling method instance$assign_result.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"extension-packages","dir":"Reference","previous_headings":"","what":"Extension Packages","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"mlr3tuning extended following packages. mlr3tuningspaces collection search spaces scientific articles commonly used learners. mlr3hyperband adds Hyperband Successive Halving algorithm. 
mlr3mbo adds Bayesian optimization methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"analyzing tuning results, recommended pass ArchiveBatchTuning .data.table(). returned data table joined benchmark result adds mlr3::ResampleResult hyperparameter evaluation. archive provides various getters (e.g. $learners()) ease access. getters extract position () unique hash (uhash). complete list getters see methods section. benchmark result ($benchmark_result) allows score hyperparameter configurations different measure. Alternatively, measures can supplied .data.table(). mlr3viz package provides visualizations tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceBatch -> bbotk::OptimInstanceBatchSingleCrit -> TuningInstanceBatchSingleCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"public-fields","dir":"Reference","previous_headings":"","what":"Public fields","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"internal_search_space (paradox::ParamSet) search space containing parameters internally optimized mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"result_learner_param_vals (list()) Param values optimal learner 
call.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"bbotk::OptimInstance$clear() bbotk::OptimInstance$format() bbotk::OptimInstance$print() bbotk::OptimInstanceBatch$eval_batch() bbotk::OptimInstanceBatch$objective_function()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"TuningInstanceBatchSingleCrit$new() TuningInstanceBatchSingleCrit$assign_result() TuningInstanceBatchSingleCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"","code":"TuningInstanceBatchSingleCrit$new( task, learner, resampling, measure = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. 
resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"method-assign-result-","dir":"Reference","previous_headings":"","what":"Method assign_result()","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"Tuner object writes best found point estimated performance value . 
internal use.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"","code":"TuningInstanceBatchSingleCrit$assign_result( xdt, y, learner_param_vals = NULL, extra = NULL, ... )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"xdt (data.table::data.table()) Hyperparameter values data.table::data.table(). row one configuration. Contains values search space. Can contain additional columns extra information. y (numeric(1)) Optimal outcome. learner_param_vals (List named list()s) Fixed parameter values learner neither part extra (data.table::data.table()) Additional information. ... () ignored.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"","code":"TuningInstanceBatchSingleCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"deep Whether make deep 
clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceBatchSingleCrit.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Class for Single Criterion Tuning — TuningInstanceBatchSingleCrit","text":"","code":"# Hyperparameter optimization on the Palmer Penguins data set task = tsk(\"penguins\") # Load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # Construct tuning instance instance = ti( task = task, learner = learner, resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), terminator = trm(\"evals\", n_evals = 4) ) # Choose optimization algorithm tuner = tnr(\"random_search\", batch_size = 2) # Run tuning tuner$optimize(instance) #> cp learner_param_vals x_domain classif.ce #> #> 1: -3.036646 0.06392067 # Set optimal hyperparameter configuration to learner learner$param_set$values = instance$result_learner_param_vals # Train the learner on the full data set learner$train(task) # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -3.036646 0.06392067 0.017 2024-11-26 14:00:43 0 0 #> 2: -5.238604 0.06392067 0.037 2024-11-26 14:00:43 0 0 #> 3: -7.255326 0.06392067 0.017 2024-11-26 14:00:43 0 0 #> 4: -6.314690 0.06392067 0.016 2024-11-26 14:00:43 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 1 #> 3: 2 #> 4: 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"TuningInstanceMultiCrit deprecated class now wrapper around 
TuningInstanceBatchMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceBatch -> bbotk::OptimInstanceBatchMultiCrit -> mlr3tuning::TuningInstanceBatchMultiCrit -> TuningInstanceMultiCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"bbotk::OptimInstance$clear() bbotk::OptimInstance$format() bbotk::OptimInstance$print() bbotk::OptimInstanceBatch$eval_batch() bbotk::OptimInstanceBatch$objective_function() mlr3tuning::TuningInstanceBatchMultiCrit$assign_result()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"TuningInstanceMultiCrit$new() TuningInstanceMultiCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"","code":"TuningInstanceMultiCrit$new( task, learner, resampling, measures, terminator, search_space = NULL, store_benchmark_result = TRUE, 
store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (list mlr3::Measure) Measures optimize. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. 
callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"","code":"TuningInstanceMultiCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceMultiCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Multi Criteria Tuning Instance for Batch Tuning — TuningInstanceMultiCrit","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":null,"dir":"Reference","previous_headings":"","what":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"TuningInstanceSingleCrit deprecated class now wrapper around TuningInstanceBatchSingleCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"bbotk::OptimInstance -> bbotk::OptimInstanceBatch -> bbotk::OptimInstanceBatchSingleCrit -> mlr3tuning::TuningInstanceBatchSingleCrit -> 
TuningInstanceSingleCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"bbotk::OptimInstance$clear() bbotk::OptimInstance$format() bbotk::OptimInstance$print() bbotk::OptimInstanceBatch$eval_batch() bbotk::OptimInstanceBatch$objective_function() mlr3tuning::TuningInstanceBatchSingleCrit$assign_result()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"TuningInstanceSingleCrit$new() TuningInstanceSingleCrit$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"","code":"TuningInstanceSingleCrit$new( task, learner, resampling, measure = NULL, terminator, search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. 
resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. 
callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"","code":"TuningInstanceSingleCrit$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/TuningInstanceSingleCrit.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Single Criterion Tuning Instance for Batch Tuning — TuningInstanceSingleCrit","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_search_space.html","id":null,"dir":"Reference","previous_headings":"","what":"Convert to a Search Space — as_search_space","title":"Convert to a Search Space — as_search_space","text":"Convert object search space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_search_space.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convert to a Search Space — as_search_space","text":"","code":"as_search_space(x, ...) # S3 method for class 'Learner' as_search_space(x, ...) # S3 method for class 'ParamSet' as_search_space(x, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_search_space.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convert to a Search Space — as_search_space","text":"x () Object convert search space. ... 
() Additional arguments.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_search_space.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Convert to a Search Space — as_search_space","text":"paradox::ParamSet.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_tuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Convert to a Tuner — as_tuner","title":"Convert to a Tuner — as_tuner","text":"Convert object Tuner list Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_tuner.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convert to a Tuner — as_tuner","text":"","code":"as_tuner(x, ...) # S3 method for class 'Tuner' as_tuner(x, clone = FALSE, ...) as_tuners(x, ...) # Default S3 method as_tuners(x, ...) # S3 method for class 'list' as_tuners(x, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/as_tuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convert to a Tuner — as_tuner","text":"x () Object convert. ... () Additional arguments. 
clone (logical(1)) Whether clone object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_async_tuning_callback.html","id":null,"dir":"Reference","previous_headings":"","what":"Assertions for Callbacks — assert_async_tuning_callback","title":"Assertions for Callbacks — assert_async_tuning_callback","text":"Assertions CallbackAsyncTuning class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_async_tuning_callback.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Assertions for Callbacks — assert_async_tuning_callback","text":"","code":"assert_async_tuning_callback(callback, null_ok = FALSE) assert_async_tuning_callbacks(callbacks)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_async_tuning_callback.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Assertions for Callbacks — assert_async_tuning_callback","text":"callback (CallbackAsyncTuning). null_ok (logical(1)) TRUE, NULL allowed. 
callbacks (list CallbackAsyncTuning).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_async_tuning_callback.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Assertions for Callbacks — assert_async_tuning_callback","text":"[CallbackAsyncTuning | List CallbackAsyncTunings.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_batch_tuning_callback.html","id":null,"dir":"Reference","previous_headings":"","what":"Assertions for Callbacks — assert_batch_tuning_callback","title":"Assertions for Callbacks — assert_batch_tuning_callback","text":"Assertions CallbackBatchTuning class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_batch_tuning_callback.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Assertions for Callbacks — assert_batch_tuning_callback","text":"","code":"assert_batch_tuning_callback(callback, null_ok = FALSE) assert_batch_tuning_callbacks(callbacks)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_batch_tuning_callback.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Assertions for Callbacks — assert_batch_tuning_callback","text":"callback (CallbackBatchTuning). null_ok (logical(1)) TRUE, NULL allowed. 
callbacks (list CallbackBatchTuning).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/assert_batch_tuning_callback.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Assertions for Callbacks — assert_batch_tuning_callback","text":"[CallbackBatchTuning | List CallbackBatchTunings.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Function for Automatic Tuning — auto_tuner","title":"Function for Automatic Tuning — auto_tuner","text":"AutoTuner wraps mlr3::Learner augments automatic tuning process given set hyperparameters. auto_tuner() function creates AutoTuner object.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Function for Automatic Tuning — auto_tuner","text":"","code":"auto_tuner( tuner, learner, resampling, measure = NULL, term_evals = NULL, term_time = NULL, terminator = NULL, search_space = NULL, internal_search_space = NULL, store_tuning_instance = TRUE, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL, id = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Function for Automatic Tuning — auto_tuner","text":"tuner (Tuner) Optimization algorithm. learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measure (mlr3::Measure) Measure optimize. 
NULL, default measure used. term_evals (integer(1)) Number allowed evaluations. Ignored terminator passed. term_time (integer(1)) Maximum allowed time seconds. Ignored terminator passed. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. store_tuning_instance (logical(1)) TRUE (default), stores internally created TuningInstanceBatchSingleCrit intermediate results slot $tuning_instance. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches. id (character(1)) Identifier new instance.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Function for Automatic Tuning — auto_tuner","text":"AutoTuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Function for Automatic Tuning — auto_tuner","text":"AutoTuner mlr3::Learner wraps another mlr3::Learner performs following steps $train(): hyperparameters wrapped (inner) learner trained training data via resampling. 
tuning can specified providing Tuner, bbotk::Terminator, search space paradox::ParamSet, mlr3::Resampling mlr3::Measure. best found hyperparameter configuration set hyperparameters wrapped (inner) learner stored $learner. Access tuned hyperparameters via $tuning_result. final model fit complete training data using now parametrized wrapped learner. respective model available via field $learner$model. $predict() AutoTuner just calls predict method wrapped (inner) learner. set timeout disabled fitting final model.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Function for Automatic Tuning — auto_tuner","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Function for Automatic Tuning — auto_tuner","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"nested-resampling","dir":"Reference","previous_headings":"","what":"Nested Resampling","title":"Function for Automatic Tuning — auto_tuner","text":"Nested resampling performed passing AutoTuner mlr3::resample() mlr3::benchmark(). access inner resampling results, set store_tuning_instance = TRUE execute mlr3::resample() mlr3::benchmark() store_models = TRUE (see examples). mlr3::Resampling passed AutoTuner meant inner resampling, operating training set arbitrary outer resampling. reason, inner resampling instantiated. instantiated resampling passed, AutoTuner fails row id inner resampling present training set outer resampling.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/auto_tuner.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Function for Automatic Tuning — auto_tuner","text":"","code":"at = auto_tuner( tuner = tnr(\"random_search\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) at$train(tsk(\"pima\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_async_tuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Create Asynchronous Tuning Callback — callback_async_tuning","title":"Create Asynchronous Tuning Callback — callback_async_tuning","text":"Function create CallbackAsyncTuning. Predefined callbacks stored dictionary mlr_callbacks can retrieved clbk(). Tuning callbacks can called different stages tuning process. stages prefixed on_*. See also section parameters information stages. 
tuning callback works ContextAsyncTuning.","code":"Start Tuning - on_optimization_begin Start Worker - on_worker_begin Start Optimization on Worker - on_optimizer_before_eval Start Evaluation - on_eval_after_xs - on_eval_after_resample - on_eval_before_archive End Evaluation - on_optimizer_after_eval End Optimization on Worker - on_worker_end End Worker - on_tuning_result_begin - on_result_begin - on_result_end - on_optimization_end End Tuning"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_async_tuning.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create Asynchronous Tuning Callback — callback_async_tuning","text":"","code":"callback_async_tuning( id, label = NA_character_, man = NA_character_, on_optimization_begin = NULL, on_worker_begin = NULL, on_optimizer_before_eval = NULL, on_eval_after_xs = NULL, on_eval_after_resample = NULL, on_eval_before_archive = NULL, on_optimizer_after_eval = NULL, on_worker_end = NULL, on_tuning_result_begin = NULL, on_result_begin = NULL, on_result_end = NULL, on_result = NULL, on_optimization_end = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_async_tuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create Asynchronous Tuning Callback — callback_async_tuning","text":"id (character(1)) Identifier new instance. label (character(1)) Label new instance. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help(). on_optimization_begin (function()) Stage called beginning optimization. Called Optimizer$optimize(). functions must two arguments named callback context. on_worker_begin (function()) Stage called beginning optimization worker. Called worker loop. functions must two arguments named callback context. on_optimizer_before_eval (function()) Stage called optimizer proposes points. Called OptimInstance$.eval_point(). 
functions must two arguments named callback context. argument instance$.eval_point(xs) xs_trafoed extra available context. xs xs_trafoed instance$.eval_queue() available context. on_eval_after_xs (function()) Stage called xs passed objective. Called ObjectiveTuningAsync$eval(). functions must two arguments named callback context. argument $.eval(xs) available context. on_eval_after_resample (function()) Stage called hyperparameter configuration evaluated. Called ObjectiveTuningAsync$eval(). functions must two arguments named callback context. resample_result available `context on_eval_before_archive (function()) Stage called performance values written archive. Called ObjectiveTuningAsync$eval(). functions must two arguments named callback context. aggregated_performance available context. on_optimizer_after_eval (function()) Stage called points evaluated. Called OptimInstance$.eval_point(). functions must two arguments named callback context. on_worker_end (function()) Stage called end optimization worker. Called worker loop. functions must two arguments named callback context. on_tuning_result_begin (function()) Stage called beginning result writing. Called TuningInstance*$assign_result(). functions must two arguments named callback context. arguments $assign_result(xdt, y, learner_param_vals, extra) available context. on_result_begin (function()) Stage called beginning result writing. Called OptimInstance$assign_result(). functions must two arguments named callback context. arguments $.assign_result(xdt, y, extra) available context. on_result_end (function()) Stage called result written. Called OptimInstance$assign_result(). functions must two arguments named callback context. final result instance$result available context. on_result (function()) Deprecated. Use on_result_end instead. Stage called result written. Called OptimInstance$assign_result(). on_optimization_end (function()) Stage called end optimization. 
Called Optimizer$optimize().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_async_tuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Create Asynchronous Tuning Callback — callback_async_tuning","text":"implementing callback, function must two arguments named callback context. callback can write data state ($state), e.g. settings affect callback . Tuning callbacks access ContextAsyncTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":null,"dir":"Reference","previous_headings":"","what":"Create Batch Tuning Callback — callback_batch_tuning","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"Function create CallbackBatchTuning. Predefined callbacks stored dictionary mlr_callbacks can retrieved clbk(). Tuning callbacks can called different stages tuning process. stages prefixed on_*. See also section parameters information stages. tuning callback works ContextBatchTuning.","code":"Start Tuning - on_optimization_begin Start Tuner Batch - on_optimizer_before_eval Start Evaluation - on_eval_after_design - on_eval_after_benchmark - on_eval_before_archive End Evaluation - on_optimizer_after_eval End Tuner Batch - on_tuning_result_begin - on_result_begin - on_result_end - on_optimization_end End Tuning"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"","code":"callback_batch_tuning( id, label = NA_character_, man = NA_character_, on_optimization_begin = NULL, on_optimizer_before_eval = NULL, on_eval_after_design = NULL, on_eval_after_benchmark = NULL, on_eval_before_archive = NULL, on_optimizer_after_eval = NULL, on_tuning_result_begin = NULL, on_result_begin = NULL, on_result_end = NULL, on_result = NULL, on_optimization_end = NULL 
)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"id (character(1)) Identifier new instance. label (character(1)) Label new instance. man (character(1)) String format [pkg]::[topic] pointing manual page object. referenced help package can opened via method $help(). on_optimization_begin (function()) Stage called beginning optimization. Called Optimizer$optimize(). functions must two arguments named callback context. on_optimizer_before_eval (function()) Stage called optimizer proposes points. Called OptimInstance$eval_batch(). functions must two arguments named callback context. argument $eval_batch(xdt) available context. on_eval_after_design (function()) Stage called design created. Called ObjectiveTuningBatch$eval_many(). functions must two arguments named callback context. arguments $eval_many(xss, resampling) available context. Additionally, design available context. on_eval_after_benchmark (function()) Stage called hyperparameter configurations evaluated. Called ObjectiveTuningBatch$eval_many(). functions must two arguments named callback context. benchmark_result available context. on_eval_before_archive (function()) Stage called performance values written archive. Called ObjectiveTuningBatch$eval_many(). functions must two arguments named callback context. aggregated_performance available context. on_optimizer_after_eval (function()) Stage called points evaluated. Called OptimInstance$eval_batch(). functions must two arguments named callback context. new configurations performances instance$archive available context. on_tuning_result_begin (function()) Stage called beginning result writing. Called TuningInstanceBatch$assign_result(). functions must two arguments named callback context. arguments $assign_result(xdt, y, learner_param_vals, extra) available context. 
on_result_begin (function()) Stage called beginning result writing. Called OptimInstance$assign_result(). functions must two arguments named callback context. arguments $assign_result(xdt, y, extra) available context. on_result_end (function()) Stage called result written. Called OptimInstance$assign_result(). functions must two arguments named callback context. final result instance$result available context. on_result (function()) Deprecated. Use on_result_end instead. Stage called result written. Called OptimInstance$assign_result(). functions must two arguments named callback context. on_optimization_end (function()) Stage called end optimization. Called Optimizer$optimize(). functions must two arguments named callback context.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"implementing callback, function must two arguments named callback context. callback can write data state ($state), e.g. settings affect callback . Tuning callbacks access ContextBatchTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/callback_batch_tuning.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Create Batch Tuning Callback — callback_batch_tuning","text":"","code":"# write archive to disk callback_batch_tuning(\"mlr3tuning.backup\", on_optimization_end = function(callback, context) { saveRDS(context$instance$archive, \"archive.rds\") } ) #> #> * Active Stages: on_optimization_end"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":null,"dir":"Reference","previous_headings":"","what":"Extract Inner Tuning Archives — extract_inner_tuning_archives","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"Extract inner tuning archives nested resampling. 
Implemented mlr3::ResampleResult mlr3::BenchmarkResult. function iterates AutoTuner objects binds tuning archives data.table::data.table(). AutoTuner must initialized store_tuning_instance = TRUE mlr3::resample() mlr3::benchmark() must called store_models = TRUE.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"","code":"extract_inner_tuning_archives( x, unnest = \"x_domain\", exclude_columns = \"uhash\" )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"x (mlr3::ResampleResult | mlr3::BenchmarkResult). unnest (character()) Transforms list columns separate columns. default, x_domain unnested. Set NULL column unnested. exclude_columns (character()) Exclude columns result table. Set NULL column excluded.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"data.table::data.table().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"data-structure","dir":"Reference","previous_headings":"","what":"Data structure","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"returned data table following columns: experiment (integer(1)) Index, giving according row number original benchmark grid. iteration (integer(1)) Iteration outer resampling. One column hyperparameter search spaces. One column performance measure. 
runtime_learners (numeric(1)) Sum training predict times logged learners per mlr3::ResampleResult / evaluation. include potential overhead time. timestamp (POSIXct) Time stamp evaluation logged archive. batch_nr (integer(1)) Hyperparameters evaluated batches. batch unique batch number. x_domain (list()) List transformed hyperparameter values. default column unnested. x_domain_* () Separate column transformed hyperparameter. resample_result (mlr3::ResampleResult) Resample result inner resampling. task_id (character(1)). learner_id (character(1)). resampling_id (character(1)).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_archives.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Extract Inner Tuning Archives — extract_inner_tuning_archives","text":"","code":"# Nested Resampling on Palmer Penguins Data Set learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)) # create auto tuner at = auto_tuner( tuner = tnr(\"random_search\"), learner = learner, resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) resampling_outer = rsmp(\"cv\", folds = 2) rr = resample(tsk(\"iris\"), at, resampling_outer, store_models = TRUE) # extract inner archives extract_inner_tuning_archives(rr) #> iteration cp classif.ce x_domain_cp runtime_learners #> #> 1: 1 -6.819407 0.04 0.001092369 0.005 #> 2: 1 -6.361894 0.04 0.001726095 0.005 #> 3: 1 -5.017906 0.04 0.006618373 0.005 #> 4: 1 -4.487537 0.04 0.011248315 0.005 #> 5: 2 -2.771268 0.08 0.062582599 0.005 #> 6: 2 -5.852816 0.08 0.002871801 0.005 #> 7: 2 -6.365882 0.08 0.001719224 0.005 #> 8: 2 -3.185002 0.08 0.041378177 0.004 #> timestamp warnings errors batch_nr resample_result task_id #> #> 1: 2024-11-26 14:00:47 0 0 1 iris #> 2: 2024-11-26 14:00:47 0 0 2 iris #> 3: 2024-11-26 14:00:47 0 0 3 iris #> 4: 2024-11-26 14:00:47 0 0 4 iris #> 5: 2024-11-26 14:00:47 0 0 1 iris #> 6: 2024-11-26 14:00:47 0 0 2 iris #> 7: 
2024-11-26 14:00:47 0 0 3 iris #> 8: 2024-11-26 14:00:47 0 0 4 iris #> learner_id resampling_id #> #> 1: classif.rpart.tuned cv #> 2: classif.rpart.tuned cv #> 3: classif.rpart.tuned cv #> 4: classif.rpart.tuned cv #> 5: classif.rpart.tuned cv #> 6: classif.rpart.tuned cv #> 7: classif.rpart.tuned cv #> 8: classif.rpart.tuned cv"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":null,"dir":"Reference","previous_headings":"","what":"Extract Inner Tuning Results — extract_inner_tuning_results","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"Extract inner tuning results nested resampling. Implemented mlr3::ResampleResult mlr3::BenchmarkResult.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"","code":"extract_inner_tuning_results(x, tuning_instance, ...) # S3 method for class 'ResampleResult' extract_inner_tuning_results(x, tuning_instance = FALSE, ...) # S3 method for class 'BenchmarkResult' extract_inner_tuning_results(x, tuning_instance = FALSE, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"x (mlr3::ResampleResult | mlr3::BenchmarkResult). tuning_instance (logical(1)) TRUE, tuning instances added table. ... 
() Additional arguments.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"data.table::data.table().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"function iterates AutoTuner objects binds tuning results data.table::data.table(). AutoTuner must initialized store_tuning_instance = TRUE mlr3::resample() mlr3::benchmark() must called store_models = TRUE. Optionally, tuning instance can added iteration.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"data-structure","dir":"Reference","previous_headings":"","what":"Data structure","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"returned data table following columns: experiment (integer(1)) Index, giving according row number original benchmark grid. iteration (integer(1)) Iteration outer resampling. One column hyperparameter search spaces. One column performance measure. learner_param_vals (list()) Hyperparameter values used learner. Includes fixed proposed hyperparameter values. x_domain (list()) List transformed hyperparameter values. tuning_instance (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit) Optionally, tuning instances. task_id (character(1)). learner_id (character(1)). 
resampling_id (character(1)).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/extract_inner_tuning_results.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Extract Inner Tuning Results — extract_inner_tuning_results","text":"","code":"# Nested Resampling on Palmer Penguins Data Set learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)) # create auto tuner at = auto_tuner( tuner = tnr(\"random_search\"), learner = learner, resampling = rsmp (\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 4) resampling_outer = rsmp(\"cv\", folds = 2) rr = resample(tsk(\"iris\"), at, resampling_outer, store_models = TRUE) # extract inner results extract_inner_tuning_results(rr) #> iteration cp classif.ce learner_param_vals x_domain task_id #> #> 1: 1 -8.235780 0.00 iris #> 2: 2 -3.983786 0.04 iris #> learner_id resampling_id #> #> 1: classif.rpart.tuned cv #> 2: classif.rpart.tuned cv"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning-package.html","id":null,"dir":"Reference","previous_headings":"","what":"mlr3tuning: Hyperparameter Optimization for 'mlr3' — mlr3tuning-package","title":"mlr3tuning: Hyperparameter Optimization for 'mlr3' — mlr3tuning-package","text":"Hyperparameter optimization package 'mlr3' ecosystem. features highly configurable search spaces via 'paradox' package finds optimal hyperparameter configurations 'mlr3' learner. 'mlr3tuning' works several optimization algorithms e.g. Random Search, Iterated Racing, Bayesian Optimization ('mlr3mbo') Hyperband ('mlr3hyperband'). 
Moreover, can automatically optimize learners estimate performance optimized models nested resampling.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning-package.html","id":"author","dir":"Reference","previous_headings":"","what":"Author","title":"mlr3tuning: Hyperparameter Optimization for 'mlr3' — mlr3tuning-package","text":"Maintainer: Marc Becker marcbecker@posteo.de (ORCID) Authors: Michel Lang michellang@gmail.com (ORCID) Jakob Richter jakob1richter@gmail.com (ORCID) Bernd Bischl bernd_bischl@gmx.net (ORCID) Daniel Schalk daniel.schalk@stat.uni-muenchen.de (ORCID)","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.asnyc_mlflow.html","id":null,"dir":"Reference","previous_headings":"","what":"MLflow Connector Callback — mlr3tuning.asnyc_mlflow","title":"MLflow Connector Callback — mlr3tuning.asnyc_mlflow","text":"mlr3misc::Callback logs hyperparameter configurations performance configurations MLflow.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.asnyc_mlflow.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"MLflow Connector Callback — mlr3tuning.asnyc_mlflow","text":"","code":"clbk(\"mlr3tuning.async_mlflow\", tracking_uri = \"http://localhost:5000\") #> : MLflow Connector #> * Active Stages: on_eval_before_archive, on_eval_after_xs, #> on_optimization_begin if (FALSE) { # \\dontrun{ rush::rush_plan(n_workers = 4) learner = lrn(\"classif.rpart\", minsplit = to_tune(2, 128), cp = to_tune(1e-04, 1e-1)) instance = TuningInstanceAsyncSingleCrit$new( task = tsk(\"pima\"), learner = learner, resampling = rsmp(\"cv\", folds = 3), measure = msr(\"classif.ce\"), terminator = trm(\"evals\", n_evals = 20), store_benchmark_result = FALSE, callbacks = clbk(\"mlr3tuning.rush_mlflow\", tracking_uri = \"http://localhost:8080\") ) tuner = tnr(\"random_search_v2\") tuner$optimize(instance) } # 
}"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.async_default_configuration.html","id":null,"dir":"Reference","previous_headings":"","what":"Default Configuration Callback — mlr3tuning.async_default_configuration","title":"Default Configuration Callback — mlr3tuning.async_default_configuration","text":"CallbackAsyncTuning CallbackBatchTuning evaluate default hyperparameter values learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.async_save_logs.html","id":null,"dir":"Reference","previous_headings":"","what":"Save Logs Callback — mlr3tuning.async_save_logs","title":"Save Logs Callback — mlr3tuning.async_save_logs","text":"CallbackAsyncTuning saves logs learners archive.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.backup.html","id":null,"dir":"Reference","previous_headings":"","what":"Backup Benchmark Result Callback — mlr3tuning.backup","title":"Backup Benchmark Result Callback — mlr3tuning.backup","text":"mlr3misc::Callback writes mlr3::BenchmarkResult batch disk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.backup.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Backup Benchmark Result Callback — mlr3tuning.backup","text":"","code":"clbk(\"mlr3tuning.backup\", path = \"backup.rds\") #> : Backup Benchmark Result Callback #> * Active Stages: on_optimizer_after_eval, on_optimization_begin # tune classification tree on the pima data set instance = tune( tuner = tnr(\"random_search\", batch_size = 2), task = tsk(\"pima\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), term_evals = 4, callbacks = clbk(\"mlr3tuning.backup\", path = tempfile(fileext = \".rds\")) 
)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.measures.html","id":null,"dir":"Reference","previous_headings":"","what":"Measure Callback — mlr3tuning.measures","title":"Measure Callback — mlr3tuning.measures","text":"mlr3misc::Callback scores hyperparameter configurations additional measures tuning. Usually, configurations can scored additional measures tuning (see ArchiveBatchTuning). However, memory sufficient store mlr3::BenchmarkResult, necessary score additional measures tuning. measures taken account tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.measures.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Measure Callback — mlr3tuning.measures","text":"","code":"clbk(\"mlr3tuning.measures\") #> : Additional Measures Callback #> * Active Stages: on_eval_before_archive, on_optimization_begin # additionally score the configurations on the accuracy measure instance = tune( tuner = tnr(\"random_search\", batch_size = 2), task = tsk(\"pima\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), term_evals = 4, callbacks = clbk(\"mlr3tuning.measures\", measures = msr(\"classif.acc\")) )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.one_se_rule.html","id":null,"dir":"Reference","previous_headings":"","what":"One Standard Error Rule Callback — mlr3tuning.one_se_rule","title":"One Standard Error Rule Callback — mlr3tuning.one_se_rule","text":"one standard error rule takes number features account selecting best hyperparameter configuration. Many learners support internal feature selection, can accessed via $selected_features(). callback selects hyperparameter configuration smallest feature set within one standard error best performing configuration. 
multiple hyperparameter configurations number features, first one selected.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.one_se_rule.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"One Standard Error Rule Callback — mlr3tuning.one_se_rule","text":"Kuhn, Max, Johnson, Kjell (2013). “Applied Predictive Modeling.” chapter -Fitting Model Tuning, 61–92. Springer New York, New York, NY. ISBN 978-1-4614-6849-3.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning.one_se_rule.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"One Standard Error Rule Callback — mlr3tuning.one_se_rule","text":"","code":"clbk(\"mlr3tuning.one_se_rule\") #> : One Standard Error Rule Callback #> * Active Stages: on_tuning_result_begin, on_eval_before_archive, #> on_optimization_begin # Run optimization on the pima data set with the callback instance = tune( tuner = tnr(\"random_search\", batch_size = 15), task = tsk(\"pima\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), term_evals = 30, callbacks = clbk(\"mlr3tuning.one_se_rule\") ) # Hyperparameter configuration with the smallest feature set within one standard error of the best instance$result #> cp n_features learner_param_vals x_domain classif.ce #> #> 1: -4.216525 4 0.2317708"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning_assertions.html","id":null,"dir":"Reference","previous_headings":"","what":"Assertion for mlr3tuning objects — mlr3tuning_assertions","title":"Assertion for mlr3tuning objects — mlr3tuning_assertions","text":"assertion functions ensure right class attribute, optionally additional 
properties.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning_assertions.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Assertion for mlr3tuning objects — mlr3tuning_assertions","text":"","code":"assert_tuner(tuner) assert_tuners(tuners) assert_tuner_async(tuner) assert_tuner_batch(tuner) assert_tuning_instance(inst) assert_tuning_instance_async(inst) assert_tuning_instance_batch(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr3tuning_assertions.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Assertion for mlr3tuning objects — mlr3tuning_assertions","text":"tuner (TunerBatch). tuners (list Tuner). inst (TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":null,"dir":"Reference","previous_headings":"","what":"Dictionary of Tuners — mlr_tuners","title":"Dictionary of Tuners — mlr_tuners","text":"simple mlr3misc::Dictionary storing objects class Tuner. tuner associated help page, see mlr_tuners_[id]. dictionary can get populated additional tuners add-packages. 
convenient way retrieve construct tuner, see tnr()/tnrs().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":"format","dir":"Reference","previous_headings":"","what":"Format","title":"Dictionary of Tuners — mlr_tuners","text":"R6::R6Class object inheriting mlr3misc::Dictionary.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Dictionary of Tuners — mlr_tuners","text":"See mlr3misc::Dictionary.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":"s-methods","dir":"Reference","previous_headings":"","what":"S3 methods","title":"Dictionary of Tuners — mlr_tuners","text":".data.table(dict, ..., objects = FALSE)mlr3misc::Dictionary -> data.table::data.table() Returns data.table::data.table() fields \"key\", \"label\", \"param_classes\", \"properties\" \"packages\" columns. objects set TRUE, constructed objects returned list column named object.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Dictionary of Tuners — mlr_tuners","text":"","code":"as.data.table(mlr_tuners) #> Key: #> key label #> #> 1: async_design_points Asynchronous Design Points #> 2: async_grid_search Asynchronous Grid Search #> 3: async_random_search Asynchronous Random Search #> 4: cmaes Covariance Matrix Adaptation Evolution Strategy #> 5: design_points Design Points #> 6: gensa Generalized Simulated Annealing #> 7: grid_search Grid Search #> 8: internal Internal Optimizer #> 9: irace Iterated Racing #> 10: nloptr Non-linear Optimization #> 11: random_search Random Search #> param_classes #> #> 1: ParamLgl,ParamInt,ParamDbl,ParamFct,ParamUty #> 2: ParamLgl,ParamInt,ParamDbl,ParamFct #> 3: ParamLgl,ParamInt,ParamDbl,ParamFct #> 4: ParamDbl #> 5: 
ParamLgl,ParamInt,ParamDbl,ParamFct,ParamUty #> 6: ParamDbl #> 7: ParamLgl,ParamInt,ParamDbl,ParamFct #> 8: ParamLgl,ParamInt,ParamDbl,ParamFct #> 9: ParamDbl,ParamInt,ParamFct,ParamLgl #> 10: ParamDbl #> 11: ParamLgl,ParamInt,ParamDbl,ParamFct #> properties packages #> #> 1: dependencies,single-crit,multi-crit,async mlr3tuning,bbotk,rush #> 2: dependencies,single-crit,multi-crit,async mlr3tuning,bbotk,rush #> 3: dependencies,single-crit,multi-crit,async mlr3tuning,bbotk,rush #> 4: single-crit mlr3tuning,bbotk,adagio #> 5: dependencies,single-crit,multi-crit mlr3tuning,bbotk #> 6: single-crit mlr3tuning,bbotk,GenSA #> 7: dependencies,single-crit,multi-crit mlr3tuning,bbotk #> 8: dependencies,single-crit mlr3tuning #> 9: dependencies,single-crit mlr3tuning,bbotk,irace #> 10: single-crit mlr3tuning,bbotk,nloptr #> 11: dependencies,single-crit,multi-crit mlr3tuning,bbotk mlr_tuners$get(\"random_search\") #> : Random Search #> * Parameters: batch_size=1 #> * Parameter classes: ParamLgl, ParamInt, ParamDbl, ParamFct #> * Properties: dependencies, single-crit, multi-crit #> * Packages: mlr3tuning, bbotk tnr(\"random_search\") #> : Random Search #> * Parameters: batch_size=1 #> * Parameter classes: ParamLgl, ParamInt, ParamDbl, ParamFct #> * Properties: dependencies, single-crit, multi-crit #> * Packages: mlr3tuning, bbotk"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"Subclass asynchronous design points tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Asynchronous Design Points — 
mlr_tuners_async_design_points","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"async_design_points\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"design data.table::data.table Design points try search, one per row.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"mlr3tuning::Tuner -> mlr3tuning::TunerAsync -> mlr3tuning::TunerAsyncFromOptimizerAsync -> TunerAsyncDesignPoints","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerAsyncFromOptimizerAsync$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"TunerAsyncDesignPoints$new() TunerAsyncDesignPoints$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"","code":"TunerAsyncDesignPoints$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"","code":"TunerAsyncDesignPoints$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_design_points.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Asynchronous Design Points — mlr_tuners_async_design_points","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"Subclass asynchronous grid search tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"Tuner can instantiated associated sugar function 
tnr():","code":"tnr(\"async_design_points\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"batch_size integer(1) Maximum number points try batch.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"mlr3tuning::Tuner -> mlr3tuning::TunerAsync -> mlr3tuning::TunerAsyncFromOptimizerAsync -> TunerAsyncGridSearch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerAsyncFromOptimizerAsync$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"TunerAsyncGridSearch$new() TunerAsyncGridSearch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"","code":"TunerAsyncGridSearch$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"","code":"TunerAsyncGridSearch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_grid_search.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Asynchronous Grid Search — mlr_tuners_async_grid_search","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"Subclass asynchronous random search tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"Bergstra J, Bengio Y (2012). 
“Random Search Hyper-Parameter Optimization.” Journal Machine Learning Research, 13(10), 281–305. https://jmlr.csail.mit.edu/papers/v13/bergstra12a.html.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"random points sampled paradox::generate_design_random().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"async_random_search\")"},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"mlr3tuning::Tuner -> mlr3tuning::TunerAsync -> mlr3tuning::TunerAsyncFromOptimizerAsync -> TunerAsyncRandomSearch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerAsyncFromOptimizerAsync$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Asynchronous Random Search — 
mlr_tuners_async_random_search","text":"TunerAsyncRandomSearch$new() TunerAsyncRandomSearch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"","code":"TunerAsyncRandomSearch$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"","code":"TunerAsyncRandomSearch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_async_random_search.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Asynchronous Random Search — mlr_tuners_async_random_search","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","title":"Hyperparameter Tuning with Covariance Matrix Adaptation 
Evolution Strategy — mlr_tuners_cmaes","text":"Subclass Covariance Matrix Adaptation Evolution Strategy (CMA-ES). Calls adagio::pureCMAES() package adagio.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Hansen N (2016). “CMA Evolution Strategy: Tutorial.” 1604.00772.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"cmaes\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"control-parameters","dir":"Reference","previous_headings":"","what":"Control Parameters","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"start_values character(1) Create random start values based center search space? latter case, center parameters trafo applied. meaning control parameters, see adagio::pureCMAES(). Note removed control parameters refer termination algorithm terminators allow obtain behavior.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"$optimize() supports progress bars via package progressr combined bbotk::Terminator. Simply wrap function progressr::with_progress() enable . 
recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Tuner based bbotk::OptimizerBatchCmaes can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchCmaes","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"TunerBatchCmaes$new() TunerBatchCmaes$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"","code":"TunerBatchCmaes$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method 
clone()","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"","code":"TunerBatchCmaes$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_cmaes.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Covariance Matrix Adaptation Evolution Strategy — mlr_tuners_cmaes","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE), minsplit = to_tune(p_dbl(2, 128, trafo = as.integer)), minbucket = to_tune(p_dbl(1, 64, trafo = as.integer)) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"cmaes\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 10) # best performing hyperparameter configuration instance$result #> cp minbucket minsplit learner_param_vals x_domain classif.ce #> #> 1: -7.336334 15.20906 107.2338 0.07826087 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp minbucket minsplit classif.ce runtime_learners #> #> 1: -7.336334 15.209063 107.23382 0.07826087 0.005 #> 2: -9.210340 64.000000 22.89758 0.12173913 0.025 #> 3: -2.621780 31.763900 128.00000 0.07826087 
0.006 #> 4: -2.302585 1.000000 106.26335 0.07826087 0.006 #> 5: -2.302585 62.039211 128.00000 0.12173913 0.006 #> 6: -4.416664 54.268412 108.94055 0.07826087 0.006 #> 7: -2.302585 4.755131 72.28910 0.07826087 0.006 #> 8: -4.734599 30.835601 24.51517 0.07826087 0.005 #> 9: -9.210340 39.906483 97.63893 0.07826087 0.006 #> 10: -6.242816 18.946310 96.50841 0.07826087 0.006 #> timestamp warnings errors x_domain batch_nr resample_result #> #> 1: 2024-11-26 14:00:57 0 0 1 #> 2: 2024-11-26 14:00:57 0 0 2 #> 3: 2024-11-26 14:00:57 0 0 3 #> 4: 2024-11-26 14:00:57 0 0 4 #> 5: 2024-11-26 14:00:57 0 0 5 #> 6: 2024-11-26 14:00:57 0 0 6 #> 7: 2024-11-26 14:00:57 0 0 7 #> 8: 2024-11-26 14:00:57 0 0 8 #> 9: 2024-11-26 14:00:57 0 0 9 #> 10: 2024-11-26 14:00:57 0 0 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Subclass tuning w.r.t. fixed design points. simply search set points fully specified user. 
points design evaluated order given.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"design_points\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"parallelization","dir":"Reference","previous_headings":"","what":"Parallelization","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"order support general termination criteria parallelization, evaluate points batch-fashion size batch_size. Larger batches mean can parallelize , smaller batches imply fine-grained checking termination criteria. batch contains batch_size times resampling$iters jobs. E.g., set batch size 10 points 5-fold cross validation, can utilize 50 cores. Parallelization supported via package future (see mlr3::benchmark()'s section parallelization details).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Tuner based bbotk::OptimizerBatchDesignPoints can applied black box optimization problem. 
See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"batch_size integer(1) Maximum number configurations try batch. design data.table::data.table Design points try search, one per row.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"$optimize() supports progress bars via package progressr combined Terminator. Simply wrap function progressr::with_progress() enable . 
recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchDesignPoints","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"TunerBatchDesignPoints$new() TunerBatchDesignPoints$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"","code":"TunerBatchDesignPoints$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Design 
Points — mlr_tuners_design_points","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"","code":"TunerBatchDesignPoints$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_design_points.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Design Points — mlr_tuners_design_points","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1), minsplit = to_tune(2, 128), minbucket = to_tune(1, 64) ) # create design design = mlr3misc::rowwise_table( ~cp, ~minsplit, ~minbucket, 0.1, 2, 64, 0.01, 64, 32, 0.001, 128, 1 ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"design_points\", design = design), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\") ) # best performing hyperparameter configuration instance$result #> cp minbucket minsplit learner_param_vals x_domain classif.ce #> #> 1: 0.01 32 64 0.07826087 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp minbucket minsplit classif.ce runtime_learners timestamp #> #> 1: 0.100 64 2 0.09565217 0.006 2024-11-26 14:00:58 #> 2: 0.010 32 64 0.07826087 0.006 2024-11-26 14:00:58 #> 3: 0.001 1 128 0.07826087 0.006 2024-11-26 14:00:58 #> warnings errors x_domain batch_nr resample_result #> #> 1: 
0 0 1 #> 2: 0 0 2 #> 3: 0 0 3 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Subclass generalized simulated annealing tuning. Calls GenSA::GenSA() package GenSA.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Tsallis C, Stariolo DA (1996). “Generalized simulated annealing.” Physica : Statistical Mechanics Applications, 233(1-2), 395–406. doi:10.1016/s0378-4371(96)00271-3 . Xiang Y, Gubian S, Suomela B, Hoeng J (2013). “Generalized Simulated Annealing Global Optimization: GenSA Package.” R Journal, 5(1), 13. 
doi:10.32614/rj-2013-002 .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"contrast GenSA::GenSA() defaults, set smooth = FALSE default.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"gensa\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"parallelization","dir":"Reference","previous_headings":"","what":"Parallelization","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"order support general termination criteria parallelization, evaluate points batch-fashion size batch_size. Larger batches mean can parallelize , smaller batches imply fine-grained checking termination criteria. batch contains batch_size times resampling$iters jobs. E.g., set batch size 10 points 5-fold cross validation, can utilize 50 cores. Parallelization supported via package future (see mlr3::benchmark()'s section parallelization details).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Tuners use logger (implemented lgr) package bbotk. 
Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Tuner based bbotk::OptimizerBatchGenSA can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"smooth logical(1) temperature numeric(1) acceptance.param numeric(1) verbose logical(1) trace.mat logical(1) meaning control parameters, see GenSA::GenSA(). Note removed control parameters refer termination algorithm terminators allow obtain behavior. contrast GenSA::GenSA() defaults, set trace.mat = FALSE. Note GenSA::GenSA() uses smooth = TRUE default. case using optimizer Hyperparameter Optimization may want set smooth = FALSE.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. 
Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"$optimize() supports progress bars via package progressr combined Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchGenSA","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"TunerBatchGenSA$new() TunerBatchGenSA$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"","code":"TunerBatchGenSA$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"","code":"TunerBatchGenSA$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_gensa.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Generalized Simulated Annealing — mlr_tuners_gensa","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"gensa\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 10 ) #> Warning: one-dimensional optimization by Nelder-Mead is unreliable: #> use \"Brent\" or optimize() directly # best performing hyperparameter configuration instance$result #> cp 
learner_param_vals x_domain classif.ce #> #> 1: -5.721042 0.04347826 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -5.721042 0.04347826 0.006 2024-11-26 14:00:59 0 0 #> 2: -2.850714 0.06086957 0.006 2024-11-26 14:00:59 0 0 #> 3: -7.568995 0.04347826 0.006 2024-11-26 14:00:59 0 0 #> 4: -5.721042 0.04347826 0.006 2024-11-26 14:00:59 0 0 #> 5: -5.721042 0.04347826 0.005 2024-11-26 14:00:59 0 0 #> 6: -5.721042 0.04347826 0.006 2024-11-26 14:00:59 0 0 #> 7: -5.148938 0.04347826 0.006 2024-11-26 14:01:00 0 0 #> 8: -6.293146 0.04347826 0.006 2024-11-26 14:01:00 0 0 #> 9: -6.007094 0.04347826 0.006 2024-11-26 14:01:00 0 0 #> 10: -5.434990 0.04347826 0.006 2024-11-26 14:01:00 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 2 #> 3: 3 #> 4: 4 #> 5: 5 #> 6: 6 #> 7: 7 #> 8: 8 #> 9: 9 #> 10: 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Subclass grid search tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"grid constructed Cartesian product discretized values per parameter, see paradox::generate_design_grid(). learner supports hotstarting, grid sorted hotstart parameter (see also mlr3::HotstartStack). 
, points grid evaluated random order.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"grid_search\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"control-parameters","dir":"Reference","previous_headings":"","what":"Control Parameters","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"resolution integer(1) Resolution grid, see paradox::generate_design_grid(). param_resolutions named integer() Resolution per parameter, named parameter ID, see paradox::generate_design_grid(). batch_size integer(1) Maximum number points try batch.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"$optimize() supports progress bars via package progressr combined bbotk::Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"parallelization","dir":"Reference","previous_headings":"","what":"Parallelization","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"order support general termination criteria parallelization, evaluate points batch-fashion size batch_size. Larger batches mean can parallelize , smaller batches imply fine-grained checking termination criteria. batch contains batch_size times resampling$iters jobs. E.g., set batch size 10 points 5-fold cross validation, can utilize 50 cores. 
Parallelization supported via package future (see mlr3::benchmark()'s section parallelization details).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Tuner based bbotk::OptimizerBatchGridSearch can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchGridSearch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"TunerBatchGridSearch$new() TunerBatchGridSearch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"","code":"TunerBatchGridSearch$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"objects class cloneable 
method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"","code":"TunerBatchGridSearch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_grid_search.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Grid Search — mlr_tuners_grid_search","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"grid_search\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 10 ) # best performing hyperparameter configuration instance$result #> cp learner_param_vals x_domain classif.ce #> #> 1: -3.070113 0.06956522 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -3.070113 0.06956522 0.008 2024-11-26 14:01:01 0 0 #> 2: -4.605170 0.06956522 0.006 2024-11-26 14:01:01 0 0 #> 3: -8.442812 0.06956522 0.007 2024-11-26 14:01:01 0 0 #> 4: -3.837642 0.06956522 0.007 2024-11-26 14:01:01 0 0 #> 5: -5.372699 0.06956522 0.007 2024-11-26 14:01:01 0 0 #> 6: -2.302585 0.06956522 0.006 2024-11-26 14:01:01 0 0 #> 7: -7.675284 0.06956522 0.007 2024-11-26 14:01:01 0 0 #> 8: -6.907755 0.06956522 0.006 2024-11-26 14:01:01 0 0 #> 9: -9.210340 0.06956522 0.005 
2024-11-26 14:01:01 0 0 #> 10: -6.140227 0.06956522 0.006 2024-11-26 14:01:01 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 2 #> 3: 3 #> 4: 4 #> 5: 5 #> 6: 6 #> 7: 7 #> 8: 8 #> 9: 9 #> 10: 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"Subclass conduct internal hyperparameter tuning mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"note","dir":"Reference","previous_headings":"","what":"Note","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"selected mlr3::Measure influence tuning result. change loss-function internal tuning, consult hyperparameter documentation tuned mlr3::Learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"internal\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"$optimize() supports progress bars via package progressr combined bbotk::Terminator. Simply wrap function progressr::with_progress() enable . 
recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> TunerBatchInternal","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"TunerBatchInternal$new() TunerBatchInternal$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"","code":"TunerBatchInternal$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"objects class cloneable 
method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"","code":"TunerBatchInternal$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_internal.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Internal Tuning — mlr_tuners_internal","text":"","code":"library(mlr3learners) # Retrieve task task = tsk(\"pima\") # Load learner and set search space learner = lrn(\"classif.xgboost\", nrounds = to_tune(upper = 1000, internal = TRUE), early_stopping_rounds = 10, validate = \"test\", eval_metric = \"merror\" ) # Internal hyperparameter tuning on the pima indians diabetes data set instance = tune( tnr(\"internal\"), tsk(\"iris\"), learner, rsmp(\"cv\", folds = 3), msr(\"internal_valid_score\", minimize = TRUE, select = \"merror\") ) # best performing hyperparameter configuration instance$result_learner_param_vals #> $eval_metric #> [1] \"merror\" #> #> $nrounds #> [1] 3 #> #> $nthread #> [1] 1 #> #> $verbose #> [1] 0 #> instance$result_learner_param_vals$internal_tuned_values #> NULL"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Subclass iterated racing. 
Calls irace::irace() package irace.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Lopez-Ibanez M, Dubois-Lacoste J, Caceres LP, Birattari M, Stuetzle T (2016). “irace package: Iterated racing automatic algorithm configuration.” Operations Research Perspectives, 3, 43–58. doi:10.1016/j.orp.2016.09.002 .","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"irace\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"control-parameters","dir":"Reference","previous_headings":"","what":"Control Parameters","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"n_instances integer(1) Number resampling instances. meaning parameters, see irace::defaultScenario(). Note removed control parameters refer termination algorithm. Use bbotk::TerminatorEvals instead. terminators work TunerIrace.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"archive","dir":"Reference","previous_headings":"","what":"Archive","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"ArchiveBatchTuning holds following additional columns: \"race\" (integer(1)) Race iteration. \"step\" (integer(1)) Step number race. \"instance\" (integer(1)) Identifies resampling instances across races steps. 
\"configuration\" (integer(1)) Identifies configurations across races steps.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"result","dir":"Reference","previous_headings":"","what":"Result","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"tuning result (instance$result) best-performing elite final race. reported performance average performance estimated used instances.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"$optimize() supports progress bars via package progressr combined bbotk::Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Tuner based bbotk::OptimizerBatchIrace can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. 
overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchIrace","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"TunerBatchIrace$new() TunerBatchIrace$optimize() TunerBatchIrace$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Iterated Racing. 
— mlr_tuners_irace","text":"Creates new instance R6 class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"","code":"TunerBatchIrace$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"method-optimize-","dir":"Reference","previous_headings":"","what":"Method optimize()","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"Performs tuning TuningInstanceBatchSingleCrit termination. single evaluations final results written ArchiveBatchTuning resides TuningInstanceBatchSingleCrit. final result returned.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"","code":"TunerBatchIrace$optimize(inst)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"inst (TuningInstanceBatchSingleCrit).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"data.table::data.table.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Iterated Racing. 
— mlr_tuners_irace","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"","code":"TunerBatchIrace$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_irace.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Iterated Racing. — mlr_tuners_irace","text":"","code":"# retrieve task task = tsk(\"pima\") # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)) # \\donttest{ # hyperparameter tuning on the pima indians diabetes data set instance = tune( tuner = tnr(\"irace\"), task = task, learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 42 ) #> # 2024-11-26 14:01:04 UTC: Initialization #> # Elitist race #> # Elitist new instances: 1 #> # Elitist limit: 2 #> # nbIterations: 2 #> # minNbSurvival: 2 #> # nbParameters: 1 #> # seed: 1855097766 #> # confidence level: 0.95 #> # budget: 42 #> # mu: 5 #> # deterministic: FALSE #> #> # 2024-11-26 14:01:04 UTC: Iteration 1 of 2 #> # experimentsUsedSoFar: 0 #> # remainingBudget: 42 #> # currentBudget: 21 #> # nbConfigurations: 3 #> # Markers: #> x No test is performed. #> c Configurations are discarded only due to capping. #> - The test is performed and some configurations are discarded. #> = The test is performed but no configuration is discarded. #> ! 
The test is performed and configurations could be discarded but elite configurations are preserved. #> . All alive configurations are elite and nothing is discarded #> #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> | | Instance| Alive| Best| Mean best| Exp so far| W time| rho|KenW| Qvar| #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> |x| 1| 3| 3| 0.2812500000| 3|00:00:00| NA| NA| NA| #> |x| 2| 3| 3| 0.2675781250| 6|00:00:00|+1.00|1.00|0.0000| #> |x| 3| 3| 3| 0.2604166667| 9|00:00:00|+1.00|1.00|0.0000| #> |x| 4| 3| 3| 0.2490234375| 12|00:00:00|+1.00|1.00|0.0000| #> |-| 5| 1| 3| 0.2453125000| 15|00:00:00| NA| NA| NA| #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> Best-so-far configuration: 3 mean value: 0.2453125000 #> Description of the best-so-far configuration: #> .ID. cp .PARENT. #> 3 3 -2.7229877489945 NA #> #> # 2024-11-26 14:01:05 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks): #> cp #> 3 -2.7229877489945 #> # 2024-11-26 14:01:05 UTC: Iteration 2 of 2 #> # experimentsUsedSoFar: 15 #> # remainingBudget: 27 #> # currentBudget: 27 #> # nbConfigurations: 4 #> # Markers: #> x No test is performed. #> c Configurations are discarded only due to capping. #> - The test is performed and some configurations are discarded. #> = The test is performed but no configuration is discarded. #> ! The test is performed and configurations could be discarded but elite configurations are preserved. #> . 
All alive configurations are elite and nothing is discarded #> #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> | | Instance| Alive| Best| Mean best| Exp so far| W time| rho|KenW| Qvar| #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> |x| 6| 4| 4| 0.2734375000| 4|00:00:00| NA| NA| NA| #> |x| 1| 4| 4| 0.2597656250| 7|00:00:00|+0.80|0.90|0.0217| #> |x| 4| 4| 4| 0.2447916667| 10|00:00:00|+0.40|0.60|0.2572| #> |x| 3| 4| 4| 0.2451171875| 13|00:00:00|+0.27|0.45|0.2536| #> |=| 5| 4| 4| 0.2421875000| 16|00:00:00|+0.20|0.36|0.2272| #> |=| 2| 4| 4| 0.2513020833| 19|00:00:00|-0.11|0.07|0.4250| #> |=| 7| 4| 5| 0.2522321429| 23|00:00:00|-0.15|0.02|0.4839| #> +-+-----------+-----------+-----------+----------------+-----------+--------+-----+----+------+ #> Best-so-far configuration: 5 mean value: 0.2522321429 #> Description of the best-so-far configuration: #> .ID. cp .PARENT. #> 5 5 -3.17982221206359 3 #> #> # 2024-11-26 14:01:06 UTC: Elite configurations (first number is the configuration ID; listed from best to worst according to the sum of ranks): #> cp #> 5 -3.17982221206359 #> 6 -3.29396533989700 #> # 2024-11-26 14:01:06 UTC: Stopped because there is not enough budget left to race more than the minimum (2) #> # You may either increase the budget or set 'minNbSurvival' to a lower value #> # Iteration: 3 #> # nbIterations: 3 #> # experimentsUsedSoFar: 38 #> # timeUsed: 0 #> # remainingBudget: 4 #> # currentBudget: 4 #> # number of elites: 2 #> # nbConfigurations: 2 #> # Total CPU user time: 1.491, CPU sys time: 0.016, Wall-clock time: 1.511 # best performing hyperparameter configuration instance$result #> cp configuration learner_param_vals x_domain classif.ce #> #> 1: -3.179822 5 0.2522321 # all evaluated hyperparameter configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp race step #> #> 1: -8.192526 0.3085938 0.007 2024-11-26 
14:01:04 1 1 #> 2: -8.612223 0.3085938 0.007 2024-11-26 14:01:04 1 1 #> 3: -2.722988 0.2812500 0.007 2024-11-26 14:01:04 1 1 #> 4: -8.192526 0.3359375 0.007 2024-11-26 14:01:04 1 1 #> 5: -8.612223 0.3359375 0.007 2024-11-26 14:01:04 1 1 #> 6: -2.722988 0.2539062 0.006 2024-11-26 14:01:04 1 1 #> 7: -8.192526 0.2851562 0.008 2024-11-26 14:01:04 1 1 #> 8: -8.612223 0.2851562 0.008 2024-11-26 14:01:04 1 1 #> 9: -2.722988 0.2460938 0.007 2024-11-26 14:01:04 1 1 #> 10: -8.192526 0.2617188 0.006 2024-11-26 14:01:04 1 1 #> 11: -8.612223 0.2617188 0.007 2024-11-26 14:01:04 1 1 #> 12: -2.722988 0.2148438 0.007 2024-11-26 14:01:04 1 1 #> 13: -8.192526 0.2382812 0.007 2024-11-26 14:01:05 1 1 #> 14: -8.612223 0.2382812 0.006 2024-11-26 14:01:05 1 1 #> 15: -2.722988 0.2304688 0.006 2024-11-26 14:01:05 1 1 #> 16: -2.722988 0.2890625 0.007 2024-11-26 14:01:05 2 1 #> 17: -3.680872 0.2734375 0.006 2024-11-26 14:01:05 2 1 #> 18: -3.179822 0.2734375 0.007 2024-11-26 14:01:05 2 1 #> 19: -3.293965 0.2734375 0.007 2024-11-26 14:01:05 2 1 #> 20: -3.680872 0.2460938 0.007 2024-11-26 14:01:05 2 1 #> 21: -3.179822 0.2539062 0.006 2024-11-26 14:01:05 2 1 #> 22: -3.293965 0.2539062 0.007 2024-11-26 14:01:05 2 1 #> 23: -3.680872 0.2148438 0.007 2024-11-26 14:01:05 2 1 #> 24: -3.179822 0.2148438 0.007 2024-11-26 14:01:05 2 1 #> 25: -3.293965 0.2148438 0.007 2024-11-26 14:01:05 2 1 #> 26: -3.680872 0.2460938 0.007 2024-11-26 14:01:05 2 1 #> 27: -3.179822 0.2460938 0.007 2024-11-26 14:01:05 2 1 #> 28: -3.293965 0.2460938 0.007 2024-11-26 14:01:05 2 1 #> 29: -3.680872 0.2304688 0.006 2024-11-26 14:01:05 2 1 #> 30: -3.179822 0.2304688 0.006 2024-11-26 14:01:05 2 1 #> 31: -3.293965 0.2304688 0.007 2024-11-26 14:01:05 2 1 #> 32: -3.680872 0.2968750 0.007 2024-11-26 14:01:05 2 1 #> 33: -3.179822 0.2968750 0.007 2024-11-26 14:01:05 2 1 #> 34: -3.293965 0.2968750 0.007 2024-11-26 14:01:05 2 1 #> 35: -2.722988 0.2500000 0.007 2024-11-26 14:01:06 2 1 #> 36: -3.680872 0.2617188 0.008 2024-11-26 14:01:06 2 1 
#> 37: -3.179822 0.2500000 0.006 2024-11-26 14:01:06 2 1 #> 38: -3.293965 0.2500000 0.006 2024-11-26 14:01:06 2 1 #> cp classif.ce runtime_learners timestamp race step #> instance configuration warnings errors x_domain batch_nr resample_result #> #> 1: 10 1 0 0 1 #> 2: 10 2 0 0 1 #> 3: 10 3 0 0 1 #> 4: 4 1 0 0 2 #> 5: 4 2 0 0 2 #> 6: 4 3 0 0 2 #> 7: 1 1 0 0 3 #> 8: 1 2 0 0 3 #> 9: 1 3 0 0 3 #> 10: 8 1 0 0 4 #> 11: 8 2 0 0 4 #> 12: 8 3 0 0 4 #> 13: 5 1 0 0 5 #> 14: 5 2 0 0 5 #> 15: 5 3 0 0 5 #> 16: 7 3 0 0 6 #> 17: 7 4 0 0 6 #> 18: 7 5 0 0 6 #> 19: 7 6 0 0 6 #> 20: 10 4 0 0 7 #> 21: 10 5 0 0 7 #> 22: 10 6 0 0 7 #> 23: 8 4 0 0 8 #> 24: 8 5 0 0 8 #> 25: 8 6 0 0 8 #> 26: 1 4 0 0 9 #> 27: 1 5 0 0 9 #> 28: 1 6 0 0 9 #> 29: 5 4 0 0 10 #> 30: 5 5 0 0 10 #> 31: 5 6 0 0 10 #> 32: 4 4 0 0 11 #> 33: 4 5 0 0 11 #> 34: 4 6 0 0 11 #> 35: 9 3 0 0 12 #> 36: 9 4 0 0 12 #> 37: 9 5 0 0 12 #> 38: 9 6 0 0 12 #> instance configuration warnings errors x_domain batch_nr resample_result # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(task) # }"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Subclass non-linear optimization (NLopt). Calls nloptr::nloptr package nloptr.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Johnson, G S (2020). 
“NLopt nonlinear-optimization package.” https://github.com/stevengj/nlopt.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"termination conditions stopval, maxtime maxeval nloptr::nloptr() deactivated replaced bbotk::Terminator subclasses. x function value tolerance termination conditions (xtol_rel = 10^-4, xtol_abs = rep(0.0, length(x0)), ftol_rel = 0.0 ftol_abs = 0.0) still available implemented package defaults. deactivate conditions, set -1.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"nloptr\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Tuners use logger (implemented lgr) package bbotk. Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Tuner based bbotk::OptimizerBatchNLoptr can applied black box optimization problem. 
See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"algorithm character(1) eval_g_ineq function() xtol_rel numeric(1) xtol_abs numeric(1) ftol_rel numeric(1) ftol_abs numeric(1) start_values character(1) Create random start values based center search space? latter case, center parameters trafo applied. meaning control parameters, see nloptr::nloptr() nloptr::nloptr.print.options(). termination conditions stopval, maxtime maxeval nloptr::nloptr() deactivated replaced Terminator subclasses. x function value tolerance termination conditions (xtol_rel = 10^-4, xtol_abs = rep(0.0, length(x0)), ftol_rel = 0.0 ftol_abs = 0.0) still available implemented package defaults. deactivate conditions, set -1.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"$optimize() supports progress bars via package progressr combined Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchNLoptr","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"TunerBatchNLoptr$new() TunerBatchNLoptr$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"","code":"TunerBatchNLoptr$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"","code":"TunerBatchNLoptr$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_nloptr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Non-linear Optimization — mlr_tuners_nloptr","text":"","code":"# Hyperparameter Optimization # \\donttest{ # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"nloptr\", algorithm = \"NLOPT_LN_BOBYQA\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\") ) # best performing hyperparameter configuration instance$result #> cp learner_param_vals x_domain classif.ce #> #> 1: -5.081957 0.07826087 # all evaluated hyperparameter 
configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -5.081957 0.07826087 0.006 2024-11-26 14:01:07 0 0 #> 2: -5.081957 0.07826087 0.005 2024-11-26 14:01:07 0 0 #> 3: -5.081957 0.07826087 0.006 2024-11-26 14:01:07 0 0 #> 4: -3.355018 0.07826087 0.005 2024-11-26 14:01:07 0 0 #> 5: -6.808896 0.07826087 0.006 2024-11-26 14:01:07 0 0 #> 6: -5.064688 0.07826087 0.006 2024-11-26 14:01:07 0 0 #> 7: -5.099226 0.07826087 0.006 2024-11-26 14:01:07 0 0 #> 8: -5.080230 0.07826087 0.007 2024-11-26 14:01:07 0 0 #> 9: -5.083684 0.07826087 0.005 2024-11-26 14:01:07 0 0 #> 10: -5.081957 0.07826087 0.006 2024-11-26 14:01:07 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 2 #> 3: 3 #> 4: 4 #> 5: 5 #> 6: 6 #> 7: 7 #> 8: 8 #> 9: 9 #> 10: 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\")) # }"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":null,"dir":"Reference","previous_headings":"","what":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Subclass random search tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"source","dir":"Reference","previous_headings":"","what":"Source","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Bergstra J, Bengio Y (2012). “Random Search Hyper-Parameter Optimization.” Journal Machine Learning Research, 13(10), 281–305. 
https://jmlr.csail.mit.edu/papers/v13/bergstra12a.html.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"random points sampled paradox::generate_design_random().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"dictionary","dir":"Reference","previous_headings":"","what":"Dictionary","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Tuner can instantiated associated sugar function tnr():","code":"tnr(\"random_search\")"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"parallelization","dir":"Reference","previous_headings":"","what":"Parallelization","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"order support general termination criteria parallelization, evaluate points batch-fashion size batch_size. Larger batches mean can parallelize , smaller batches imply fine-grained checking termination criteria. batch contains batch_size times resampling$iters jobs. E.g., set batch size 10 points 5-fold cross validation, can utilize 50 cores. Parallelization supported via package future (see mlr3::benchmark()'s section parallelization details).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"logging","dir":"Reference","previous_headings":"","what":"Logging","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Tuners use logger (implemented lgr) package bbotk. 
Use lgr::get_logger(\"bbotk\") access control logger.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"optimizer","dir":"Reference","previous_headings":"","what":"Optimizer","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Tuner based bbotk::OptimizerBatchRandomSearch can applied black box optimization problem. See also documentation bbotk.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"parameters","dir":"Reference","previous_headings":"","what":"Parameters","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"batch_size integer(1) Maximum number points try batch.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. 
cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"progress-bars","dir":"Reference","previous_headings":"","what":"Progress Bars","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"$optimize() supports progress bars via package progressr combined Terminator. Simply wrap function progressr::with_progress() enable . recommend use package progress backend; enable progressr::handlers(\"progress\").","code":""},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"super-classes","dir":"Reference","previous_headings":"","what":"Super classes","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"mlr3tuning::Tuner -> mlr3tuning::TunerBatch -> mlr3tuning::TunerBatchFromOptimizerBatch -> TunerBatchRandomSearch","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"methods","dir":"Reference","previous_headings":"","what":"Methods","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"mlr3tuning::Tuner$format() mlr3tuning::Tuner$help() mlr3tuning::Tuner$print() mlr3tuning::TunerBatchFromOptimizerBatch$optimize()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"TunerBatchRandomSearch$new() TunerBatchRandomSearch$clone()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"method-new-","dir":"Reference","previous_headings":"","what":"Method new()","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"Creates new instance R6 
class.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"","code":"TunerBatchRandomSearch$new()"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"objects class cloneable method.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"","code":"TunerBatchRandomSearch$clone(deep = FALSE)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"deep Whether make deep clone.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/mlr_tuners_random_search.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Hyperparameter Tuning with Random Search — mlr_tuners_random_search","text":"","code":"# Hyperparameter Optimization # load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # run hyperparameter tuning on the Palmer Penguins data set instance = tune( tuner = tnr(\"random_search\"), task = tsk(\"penguins\"), learner = learner, resampling = rsmp(\"holdout\"), measure = msr(\"classif.ce\"), term_evals = 10 ) # best performing hyperparameter configuration instance$result #> cp learner_param_vals x_domain classif.ce #> #> 1: -9.025467 0.03478261 # all evaluated hyperparameter 
configuration as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -4.711280 0.05217391 0.006 2024-11-26 14:01:08 0 0 #> 2: -3.034222 0.05217391 0.006 2024-11-26 14:01:08 0 0 #> 3: -2.403159 0.05217391 0.007 2024-11-26 14:01:08 0 0 #> 4: -9.025467 0.03478261 0.007 2024-11-26 14:01:08 0 0 #> 5: -7.209532 0.03478261 0.005 2024-11-26 14:01:09 0 0 #> 6: -6.858402 0.03478261 0.006 2024-11-26 14:01:09 0 0 #> 7: -6.311528 0.03478261 0.006 2024-11-26 14:01:09 0 0 #> 8: -3.598009 0.05217391 0.006 2024-11-26 14:01:09 0 0 #> 9: -3.967858 0.05217391 0.007 2024-11-26 14:01:09 0 0 #> 10: -6.004689 0.03478261 0.007 2024-11-26 14:01:09 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 2 #> 3: 3 #> 4: 4 #> 5: 5 #> 6: 6 #> 7: 7 #> 8: 8 #> 9: 9 #> 10: 10 # fit final model on complete data set learner$param_set$values = instance$result_learner_param_vals learner$train(tsk(\"penguins\"))"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/reexports.html","id":null,"dir":"Reference","previous_headings":"","what":"Objects exported from other packages — reexports","title":"Objects exported from other packages — reexports","text":"objects imported packages. Follow links see documentation. 
bbotk mlr_terminators, trm, trms mlr3misc clbk, clbks, mlr_callbacks","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/set_validate.AutoTuner.html","id":null,"dir":"Reference","previous_headings":"","what":"Configure Validation for AutoTuner — set_validate.AutoTuner","title":"Configure Validation for AutoTuner — set_validate.AutoTuner","text":"Configure validation data learner tuned AutoTuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/set_validate.AutoTuner.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Configure Validation for AutoTuner — set_validate.AutoTuner","text":"","code":"# S3 method for class 'AutoTuner' set_validate(learner, validate, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/set_validate.AutoTuner.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Configure Validation for AutoTuner — set_validate.AutoTuner","text":"learner (AutoTuner) autotuner enable validation. validate (numeric(1), \"predefined\", \"test\", NULL) configure validation hyperparameter tuning. ... 
() Passed calling set_validate() wrapped learner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/set_validate.AutoTuner.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Configure Validation for AutoTuner — set_validate.AutoTuner","text":"","code":"at = auto_tuner( tuner = tnr(\"random_search\"), learner = lrn(\"classif.debug\", early_stopping = TRUE, iter = to_tune(upper = 1000L, internal = TRUE), validate = 0.2), resampling = rsmp(\"holdout\") ) # use the test set as validation data during tuning set_validate(at, validate = \"test\") at$learner$validate #> [1] \"test\""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":null,"dir":"Reference","previous_headings":"","what":"Syntactic Sugar for Tuning Instance Construction — ti","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"Function construct TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"","code":"ti( task, learner, resampling, measures = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. 
evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (mlr3::Measure list mlr3::Measure) single measure creates TuningInstanceBatchSingleCrit multiple measures TuningInstanceBatchMultiCrit. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. 
Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"measure passed, default measure used. default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Syntactic Sugar for Tuning Instance Construction — ti","text":"","code":"# Hyperparameter optimization on the Palmer Penguins data set task = tsk(\"penguins\") # Load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # Construct tuning instance instance = ti( task = task, learner = learner, resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), terminator = trm(\"evals\", n_evals = 4) ) # Choose optimization algorithm tuner = tnr(\"random_search\", batch_size = 2) # Run tuning tuner$optimize(instance) #> cp learner_param_vals x_domain classif.ce #> #> 1: -8.460007 0.04942792 # Set optimal hyperparameter configuration to learner learner$param_set$values = instance$result_learner_param_vals # Train the learner on the full data set learner$train(task) # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -3.372665 0.05814900 0.017 2024-11-26 14:01:10 0 0 #> 2: -3.054465 0.05814900 0.016 2024-11-26 14:01:10 0 0 #> 3: -8.460007 0.04942792 0.018 2024-11-26 14:01:10 0 0 #> 4: -4.158236 0.05814900 0.017 2024-11-26 14:01:10 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 1 #> 3: 2 #> 4: 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":null,"dir":"Reference","previous_headings":"","what":"Syntactic Sugar for 
Asynchronous Tuning Instance Construction — ti_async","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"Function construct TuningInstanceAsyncSingleCrit TuningInstanceAsyncMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"","code":"ti_async( task, learner, resampling, measures = NULL, terminator, search_space = NULL, internal_search_space = NULL, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (mlr3::Measure list mlr3::Measure) single measure creates TuningInstanceAsyncSingleCrit multiple measures TuningInstanceAsyncMultiCrit. NULL, default measure used. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). internal_search_space (paradox::ParamSet NULL) internal search space. 
store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"measure passed, default measure used. 
default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/ti_async.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Syntactic Sugar for Asynchronous Tuning Instance Construction — ti_async","text":"","code":"# Hyperparameter optimization on the Palmer Penguins data set task = tsk(\"penguins\") # Load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # Construct tuning instance instance = ti( task = task, learner = learner, resampling = rsmp(\"cv\", folds = 3), measures = msr(\"classif.ce\"), terminator = trm(\"evals\", n_evals = 4) ) # Choose optimization algorithm tuner = tnr(\"random_search\", batch_size = 2) # Run tuning tuner$optimize(instance) #> cp learner_param_vals x_domain classif.ce #> #> 1: -8.481745 0.05817442 # Set optimal hyperparameter configuration to learner learner$param_set$values = instance$result_learner_param_vals # Train the learner on the full data set learner$train(task) # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -8.481745 0.05817442 0.017 2024-11-26 14:01:11 0 0 #> 2: -9.007729 0.05817442 0.016 2024-11-26 14:01:11 0 0 #> 3: -2.474325 0.05817442 0.017 2024-11-26 14:01:11 0 0 #> 4: -8.015548 0.05817442 0.017 2024-11-26 14:01:11 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 1 #> 3: 2 #> 4: 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":null,"dir":"Reference","previous_headings":"","what":"Syntactic Sugar for Tuning Objects Construction — tnr","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":"Functions retrieve objects, set parameters assign fields one go. Relies mlr3misc::dictionary_sugar_get() extract objects respective mlr3misc::Dictionary: tnr() Tuner mlr_tuners. tnrs() list Tuners mlr_tuners. trm() bbotk::Terminator mlr_terminators. 
trms() list Terminators mlr_terminators.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":"","code":"tnr(.key, ...) tnrs(.keys, ...)"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":".key (character(1)) Key passed respective dictionary retrieve object. ... () Additional arguments. .keys (character()) Keys passed respective dictionary retrieve multiple objects.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":"R6::R6Class object respective type, list R6::R6Class objects plural versions.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tnr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Syntactic Sugar for Tuning Objects Construction — tnr","text":"","code":"# random search tuner with batch size of 5 tnr(\"random_search\", batch_size = 5) #> : Random Search #> * Parameters: batch_size=5 #> * Parameter classes: ParamLgl, ParamInt, ParamDbl, ParamFct #> * Properties: dependencies, single-crit, multi-crit #> * Packages: mlr3tuning, bbotk # run time terminator with 20 seconds trm(\"run_time\", secs = 20) #> : Run Time #> * Parameters: secs=20"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":null,"dir":"Reference","previous_headings":"","what":"Function for Tuning a Learner — tune","title":"Function for Tuning a Learner — tune","text":"Function tune mlr3::Learner. function internally creates TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit describes tuning problem. 
executes tuning Tuner (tuner) returns result tuning instance ($result). ArchiveBatchTuning ArchiveAsyncTuning ($archive) stores evaluated hyperparameter configurations performance scores. can find overview tuners website.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Function for Tuning a Learner — tune","text":"","code":"tune( tuner, task, learner, resampling, measures = NULL, term_evals = NULL, term_time = NULL, terminator = NULL, search_space = NULL, store_benchmark_result = TRUE, internal_search_space = NULL, store_models = FALSE, check_values = FALSE, callbacks = NULL, rush = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Function for Tuning a Learner — tune","text":"tuner (Tuner) Optimization algorithm. task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. resampling (mlr3::Resampling) Resampling used evaluate performance hyperparameter configurations. Uninstantiated resamplings instantiated construction configurations evaluated data splits. Already instantiated resamplings kept unchanged. Specialized Tuner change resampling e.g. evaluate hyperparameter configuration different data splits. field, however, always returns resampling passed construction. measures (mlr3::Measure list mlr3::Measure) single measure creates TuningInstanceBatchSingleCrit multiple measures TuningInstanceBatchMultiCrit. NULL, default measure used. term_evals (integer(1)) Number allowed evaluations. Ignored terminator passed. term_time (integer(1)) Maximum allowed time seconds. Ignored terminator passed. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). 
store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. internal_search_space (paradox::ParamSet NULL) internal search space. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. callbacks (list mlr3misc::Callback) List callbacks. rush (Rush) rush instance supplied, tuning runs without batches.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Function for Tuning a Learner — tune","text":"TuningInstanceBatchSingleCrit | TuningInstanceBatchMultiCrit","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Function for Tuning a Learner — tune","text":"mlr3::Task, mlr3::Learner, mlr3::Resampling, mlr3::Measure bbotk::Terminator used construct TuningInstanceBatchSingleCrit. multiple performance mlr3::Measures supplied, TuningInstanceBatchMultiCrit created. parameter term_evals term_time shortcuts create bbotk::Terminator. parameters passed, bbotk::TerminatorCombo constructed. Terminators, pass one terminator. termination criterion needed, set term_evals, term_time terminator NULL. search space created paradox::TuneToken supplied search_space.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"default-measures","dir":"Reference","previous_headings":"","what":"Default Measures","title":"Function for Tuning a Learner — tune","text":"measure passed, default measure used. 
default measure depends task type.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"resources","dir":"Reference","previous_headings":"","what":"Resources","title":"Function for Tuning a Learner — tune","text":"several sections hyperparameter optimization mlr3book. Getting started hyperparameter optimization. overview tuners can found website. Tune support vector machine Sonar data set. Learn tuning spaces. Estimate model performance nested resampling. Learn multi-objective optimization. Simultaneously optimize hyperparameters use early stopping XGBoost. Automate tuning. gallery features collection case studies demos optimization. Learn advanced methods Practical Tuning Series. Learn hotstarting models. Run default hyperparameter configuration learners baseline. Use Hyperband optimizer different budget parameters. cheatsheet summarizes important functions mlr3tuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"analysis","dir":"Reference","previous_headings":"","what":"Analysis","title":"Function for Tuning a Learner — tune","text":"analyzing tuning results, recommended pass ArchiveBatchTuning .data.table(). returned data table joined benchmark result adds mlr3::ResampleResult hyperparameter evaluation. archive provides various getters (e.g. $learners()) ease access. getters extract position () unique hash (uhash). complete list getters see methods section. benchmark result ($benchmark_result) allows score hyperparameter configurations different measure. Alternatively, measures can supplied .data.table(). 
mlr3viz package provides visualizations tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Function for Tuning a Learner — tune","text":"","code":"# Hyperparameter optimization on the Palmer Penguins data set task = tsk(\"pima\") # Load learner and set search space learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE) ) # Run tuning instance = tune( tuner = tnr(\"random_search\", batch_size = 2), task = tsk(\"pima\"), learner = learner, resampling = rsmp (\"holdout\"), measures = msr(\"classif.ce\"), terminator = trm(\"evals\", n_evals = 4) ) # Set optimal hyperparameter configuration to learner learner$param_set$values = instance$result_learner_param_vals # Train the learner on the full data set learner$train(task) # Inspect all evaluated configurations as.data.table(instance$archive) #> cp classif.ce runtime_learners timestamp warnings errors #> #> 1: -7.397281 0.2460938 0.007 2024-11-26 14:01:12 0 0 #> 2: -4.648966 0.2382812 0.008 2024-11-26 14:01:12 0 0 #> 3: -9.116329 0.2460938 0.031 2024-11-26 14:01:12 0 0 #> 4: -5.519208 0.2460938 0.008 2024-11-26 14:01:12 0 0 #> x_domain batch_nr resample_result #> #> 1: 1 #> 2: 1 #> 3: 2 #> 4: 2 "},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":null,"dir":"Reference","previous_headings":"","what":"Function for Nested Resampling — tune_nested","title":"Function for Nested Resampling — tune_nested","text":"Function conduct nested resampling.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Function for Nested Resampling — tune_nested","text":"","code":"tune_nested( tuner, task, learner, inner_resampling, outer_resampling, measure = NULL, term_evals = NULL, term_time = NULL, terminator = NULL, search_space = NULL, 
store_tuning_instance = TRUE, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, callbacks = NULL )"},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Function for Nested Resampling — tune_nested","text":"tuner (Tuner) Optimization algorithm. task (mlr3::Task) Task operate . learner (mlr3::Learner) Learner tune. inner_resampling (mlr3::Resampling) Resampling used inner loop. outer_resampling mlr3::Resampling) Resampling used outer loop. measure (mlr3::Measure) Measure optimize. NULL, default measure used. term_evals (integer(1)) Number allowed evaluations. Ignored terminator passed. term_time (integer(1)) Maximum allowed time seconds. Ignored terminator passed. terminator (bbotk::Terminator) Stop criterion tuning process. search_space (paradox::ParamSet) Hyperparameter search space. NULL (default), search space constructed paradox::TuneToken learner's parameter set (learner$param_set). store_tuning_instance (logical(1)) TRUE (default), stores internally created TuningInstanceBatchSingleCrit intermediate results slot $tuning_instance. store_benchmark_result (logical(1)) TRUE (default), store resample result evaluated hyperparameter configurations archive mlr3::BenchmarkResult. store_models (logical(1)) TRUE, fitted models stored benchmark result (archive$benchmark_result). store_benchmark_result = FALSE, models stored temporarily accessible tuning. combination needed measures require model. check_values (logical(1)) TRUE, hyperparameter values checked evaluation performance scores . FALSE (default), values unchecked computational overhead reduced. 
callbacks (list mlr3misc::Callback) List callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Function for Nested Resampling — tune_nested","text":"mlr3::ResampleResult","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/reference/tune_nested.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Function for Nested Resampling — tune_nested","text":"","code":"# Nested resampling on Palmer Penguins data set rr = tune_nested( tuner = tnr(\"random_search\", batch_size = 2), task = tsk(\"penguins\"), learner = lrn(\"classif.rpart\", cp = to_tune(1e-04, 1e-1, logscale = TRUE)), inner_resampling = rsmp (\"holdout\"), outer_resampling = rsmp(\"cv\", folds = 2), measure = msr(\"classif.ce\"), term_evals = 2) # Performance scores estimated on the outer resampling rr$score() #> task_id learner_id resampling_id iteration classif.ce #> #> 1: penguins classif.rpart.tuned cv 1 0.06976744 #> 2: penguins classif.rpart.tuned cv 2 0.08139535 #> Hidden columns: task, learner, resampling, prediction_test # Unbiased performance of the final model trained on the full data set rr$aggregate() #> classif.ce #> 0.0755814"},{"path":[]},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-120","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.2.0","title":"mlr3tuning 1.2.0","text":"CRAN release: 2024-11-08 feat: Add new callback clbk(\"mlr3tuning.one_se_rule\") selects hyperparameter configuration smallest feature set within one standard error best. feat: Add new stages on_tuning_result_begin on_result_begin CallbackAsyncTuning CallbackBatchTuning. refactor: Rename stage on_result on_result_end CallbackAsyncTuning CallbackBatchTuning. docs: Extend CallbackAsyncTuning CallbackBatchTuning documentation. 
compatibility: mlr3 0.22.0","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-110","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.1.0","title":"mlr3tuning 1.1.0","text":"CRAN release: 2024-10-27 fix: as_data_table() functions unnest x_domain colum anymore default. fix: to_tune(internal = TRUE) now also works non-internal tuning parameters require .extra_trafo. feat: now possible pass internal_search_space manually. allows use parameter transformations primary search space combination internal hyperparameter tuning. refactor: Tuner pass extra information result extra parameter now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-102","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.0.2","title":"mlr3tuning 1.0.2","text":"CRAN release: 2024-10-14 refactor: Extract internal tuned values instance.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-101","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.0.1","title":"mlr3tuning 1.0.1","text":"CRAN release: 2024-09-10 refactor: Replace internal tuning callback. perf: Delete intermediate BenchmarkResult ObjectiveTuningBatch optimization.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-100","dir":"Changelog","previous_headings":"","what":"mlr3tuning 1.0.0","title":"mlr3tuning 1.0.0","text":"CRAN release: 2024-06-29 feat: Introduce asynchronous optimization TunerAsync TuningInstanceAsync* classes. BREAKING CHANGE: Tuner class TunerBatch now. BREAKING CHANGE: TuningInstanceSingleCrit TuningInstanceMultiCrit classes TuningInstanceBatchSingleCrit TuningInstanceBatchMultiCrit now. BREAKING CHANGE: CallbackTuning class CallbackBatchTuning now. BREAKING CHANGE: ContextEval class ContextBatchTuning now. refactor: Remove hotstarting batch optimization due low performance. 
refactor: option evaluate_default callback now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0200","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.20.0","title":"mlr3tuning 0.20.0","text":"CRAN release: 2024-03-05 compatibility: Work new paradox version 1.0.0 fix: TunerIrace failed logical parameters dependencies. Added marshaling support AutoTuner","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0192","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.19.2","title":"mlr3tuning 0.19.2","text":"CRAN release: 2023-11-28 refactor: Change thread limits.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0191","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.19.1","title":"mlr3tuning 0.19.1","text":"CRAN release: 2023-11-20 refactor: Speed tuning process minimizing number deep clones parameter checks. fix: Set store_benchmark_result = TRUE store_models = TRUE creating tuning instance. fix: Passing terminator tune_nested() work.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0190","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.19.0","title":"mlr3tuning 0.19.0","text":"CRAN release: 2023-06-26 fix: Add $phash() method AutoTuner. fix: Include Tuner hash AutoTuner. feat: Add new callback scores configurations additional measures tuning. feat: Add vignette adding new tuners previously part mlr3book.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0180","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.18.0","title":"mlr3tuning 0.18.0","text":"CRAN release: 2023-03-08 BREAKING CHANGE: method parameter tune(), tune_nested() auto_tuner() renamed tuner. Tuner objects accepted now. Arguments tuner passed ... anymore. BREAKING CHANGE: tuner parameter AutoTuner moved first position achieve consistency functions. 
docs: Update resources sections. docs: Add list default measures. fix: Add allow_hotstarting, keep_hotstart_stack keep_models flags AutoTuner auto_tuner().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0172","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.17.2","title":"mlr3tuning 0.17.2","text":"CRAN release: 2022-12-22 feat: AutoTuner accepts instantiated resamplings now. AutoTuner checks row ids inner resampling present outer resampling train set nested resampling performed. fix: Standalone Tuner create ContextOptimization.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0171","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.17.1","title":"mlr3tuning 0.17.1","text":"CRAN release: 2022-12-07 fix: ti() function accept callbacks.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0170","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.17.0","title":"mlr3tuning 0.17.0","text":"CRAN release: 2022-11-18 feat: methods $importance(), $selected_features(), $oob_error() $loglik() forwarded final model AutoTuner now. refactor: AutoTuner stores instance benchmark result store_models = TRUE. refactor: AutoTuner stores instance store_benchmark_result = TRUE.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0160","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.16.0","title":"mlr3tuning 0.16.0","text":"CRAN release: 2022-11-08 feat: Add new callback enables early stopping tuning mlr_callbacks. feat: Add new callback backups benchmark result disk batch. 
feat: Create custom callbacks callback_batch_tuning() function.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0150","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.15.0","title":"mlr3tuning 0.15.0","text":"CRAN release: 2022-10-21 fix: AutoTuner accept TuningSpace objects search spaces. feat: Add ti() function create TuningInstanceSingleCrit TuningInstanceMultiCrit. docs: Documentation technical details section now. feat: New option extract_inner_tuning_results() return tuning instances.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0140","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.14.0","title":"mlr3tuning 0.14.0","text":"CRAN release: 2022-08-25 feat: Add option evaluate_default evaluate learners hyperparameters set default values. refactor: now , default smooth FALSE TunerGenSA.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0131","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.13.1","title":"mlr3tuning 0.13.1","text":"CRAN release: 2022-05-03 feat: Tuner objects field $id now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0130","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.13.0","title":"mlr3tuning 0.13.0","text":"CRAN release: 2022-04-06 feat: Allow pass Tuner objects method tune() auto_tuner(). docs: Link Tuner help page bbotk::Optimizer. feat: Tuner objects optional field $label now. feat: .data.table() functions objects class Dictionary extended additional columns.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0121","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.12.1","title":"mlr3tuning 0.12.1","text":"CRAN release: 2022-02-25 feat: Add .data.table.DictionaryTuner function. 
feat: New $help() method opens manual page Tuner.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0120","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.12.0","title":"mlr3tuning 0.12.0","text":"CRAN release: 2022-02-17 feat: as_search_space() function create search spaces Learner ParamSet objects. Allow pass TuningSpace objects search_space TuningInstanceSingleCrit TuningInstanceMultiCrit. feat: mlr3::HotstartStack can now removed tuning keep_hotstart_stack flag. feat: Archive stores errors warnings learners. feat: measure provided, default measure used auto_tuner() tune_nested().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0110","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.11.0","title":"mlr3tuning 0.11.0","text":"CRAN release: 2022-02-02 fix: $assign_result() method TuningInstanceSingleCrit search space empty. feat: Default measure used measure supplied TuningInstanceSingleCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-0100","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.10.0","title":"mlr3tuning 0.10.0","text":"CRAN release: 2022-01-20 Fixes bug TuningInstanceMultiCrit$assign_result(). Hotstarting learners previously fitted models. Remove deep clones speed tuning. Add store_models flag auto_tuner(). Add \"noisy\" property ObjectiveTuning.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-090","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.9.0","title":"mlr3tuning 0.9.0","text":"CRAN release: 2021-09-14 Adds AutoTuner$base_learner() method extract base learner nested learner objects. tune() supports multi-criteria tuning. Allows empty search space. Adds TunerIrace irace package. extract_inner_tuning_archives() helper function extract inner tuning archives. Removes ArchiveTuning$extended_archive() method. 
mlr3::ResampleResults joined automatically .data.table.TuningArchive() extract_inner_tuning_archives().","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-080","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.8.0","title":"mlr3tuning 0.8.0","text":"CRAN release: 2021-03-12 Adds tune(), auto_tuner() tune_nested() sugar functions. TuningInstanceSingleCrit, TuningInstanceMultiCrit AutoTuner can initialized store_benchmark_result = FALSE store_models = TRUE allow measures access models. Prettier printing methods.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-070","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.7.0","title":"mlr3tuning 0.7.0","text":"CRAN release: 2021-02-11 Fix TuningInstance*$assign_result() errors required parameter bug. Shortcuts access $learner(), $learners(), $learner_param_vals(), $predictions() $resample_result() benchmark result archive. extract_inner_tuning_results() helper function extract inner tuning results.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-060","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.6.0","title":"mlr3tuning 0.6.0","text":"CRAN release: 2021-01-24 ArchiveTuning$data public field now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-050","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.5.0","title":"mlr3tuning 0.5.0","text":"CRAN release: 2020-12-07 Adds TunerCmaes adagio package. Fix predict_type AutoTuner. Support set TuneToken Learner$param_set create search space . 
order parameters TuningInstanceSingleCrit TuningInstanceSingleCrit changed.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-040","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.4.0","title":"mlr3tuning 0.4.0","text":"CRAN release: 2020-10-07 Option control store_benchmark_result, store_models check_values AutoTuner. store_tuning_instance must set parameter initialization. Fixes check_values flag TuningInstanceSingleCrit TuningInstanceMultiCrit. Removed dependency orphaned package bibtex.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-030","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.3.0","title":"mlr3tuning 0.3.0","text":"CRAN release: 2020-09-08 Compact -memory representation R6 objects save space saving mlr3 objects via saveRDS(), serialize() etc. Archive ArchiveTuning now stores benchmark result $benchmark_result. change removed resample results archive can still accessed via benchmark result. Warning message external package tuning installed. retrieve inner tuning results nested resampling, .data.table(rr)$learner[[1]]$tuning_result must used now.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-020","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.2.0","title":"mlr3tuning 0.2.0","text":"CRAN release: 2020-07-28 TuningInstance now TuningInstanceSingleCrit. TuningInstanceMultiCrit still available multi-criteria tuning. Terminators now accessible trm() trms() instead term() terms(). Storing resample results optional now using store_resample_result flag TuningInstanceSingleCrit TuningInstanceMultiCrit TunerNLoptr adds non-linear optimization nloptr package. Logging controlled bbotk logger now. 
Proposed points performance values can checked validity activating check_values flag TuningInstanceSingleCrit TuningInstanceMultiCrit.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-013","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.1.3","title":"mlr3tuning 0.1.3","text":"mlr3tuning now depends bbotk package basic tuning objects. Terminator classes now live bbotk. consequence ObjectiveTuning inherits bbotk::Objective, TuningInstance bbotk::OptimInstance Tuner bbotk::Optimizer TuningInstance$param_set becomes TuningInstance$search_space avoid confusion param_set usually contains parameters change behavior object. Tuning triggered $optimize() instead $tune()","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-012","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.1.2","title":"mlr3tuning 0.1.2","text":"CRAN release: 2020-01-31 Fixed bug AutoTuner $clone() missing. Tuning results unaffected, stored models contained wrong hyperparameter values (#223). Improved output log (#218).","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-011","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.1.1","title":"mlr3tuning 0.1.1","text":"CRAN release: 2019-12-06 Maintenance release.","code":""},{"path":"https://mlr3tuning.mlr-org.com/dev/news/index.html","id":"mlr3tuning-010","dir":"Changelog","previous_headings":"","what":"mlr3tuning 0.1.0","title":"mlr3tuning 0.1.0","text":"CRAN release: 2019-09-30 Initial prototype.","code":""}]