Commit 87c7391 (parent: 105e482)

adapt naming of test functions

4 files changed: +36 -36 lines
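Context for the rename: in all four files, the `summarize` accessor already takes `metric` and `metric_kwargs`; only the test function names and parametrized variable names still said `scoring`. A minimal sketch of the call pattern these tests exercise (the dataset and estimator are illustrative stand-ins, not the repo's fixtures):

# Illustrative only: a fitted classifier wrapped in a skore EstimatorReport,
# summarized via the `metric` parameter (the name this commit aligns on).
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from skore import EstimatorReport

X, y = make_classification(random_state=0)
report = EstimatorReport(LogisticRegression().fit(X, y), X_test=X, y_test=y)

# `metric` accepts a list of metric names; `.frame()` returns a DataFrame.
result = report.metrics.summarize(metric=["accuracy"]).frame()
print(result)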

skore/tests/unit/displays/metrics_summary/test_comparison_cross_validation.py

Lines changed: 7 additions & 7 deletions

@@ -107,8 +107,8 @@ def test_aggregate_is_used_in_cache(
     assert list(call1.columns) != list(call2.columns)
 
 
-def test_scoring(comparison_cross_validation_reports_binary_classification):
-    """`MetricsSummaryDisplay` works as intended with the `scoring` parameter."""
+def test_metric(comparison_cross_validation_reports_binary_classification):
+    """`MetricsSummaryDisplay` works as intended with the `metric` parameter."""
     report = comparison_cross_validation_reports_binary_classification
     result = report.metrics.summarize(metric=["accuracy"], aggregate=None).frame()
 
@@ -237,24 +237,24 @@ def test_cache_poisoning(binary_classification_data):
 
 
 @pytest.mark.parametrize(
-    "scoring, scoring_kwargs",
+    "metric, metric_kwargs",
     [
         ("accuracy", None),
         ("neg_log_loss", None),
         (accuracy_score, {"response_method": "predict"}),
         (get_scorer("accuracy"), None),
     ],
 )
-def test_scoring_single_list_equivalence(
-    comparison_cross_validation_reports_binary_classification, scoring, scoring_kwargs
+def test_metric_single_list_equivalence(
+    comparison_cross_validation_reports_binary_classification, metric, metric_kwargs
 ):
     """Check that passing a single string, callable, scorer is equivalent to passing a
     list with a single element."""
     report = comparison_cross_validation_reports_binary_classification
     result_single = report.metrics.summarize(
-        metric=scoring, metric_kwargs=scoring_kwargs
+        metric=metric, metric_kwargs=metric_kwargs
     ).frame()
     result_list = report.metrics.summarize(
-        metric=[scoring], metric_kwargs=scoring_kwargs
+        metric=[metric], metric_kwargs=metric_kwargs
     ).frame()
     assert result_single.equals(result_list)
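The equivalence asserted above (and repeated in the three files that follow) reads, in isolation, roughly as below; a minimal sketch using the same stand-in report as before:

# Sketch of what test_metric_single_list_equivalence checks: a bare metric
# and a one-element list of it must yield identical summary frames.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from skore import EstimatorReport

X, y = make_classification(random_state=0)
report = EstimatorReport(LogisticRegression().fit(X, y), X_test=X, y_test=y)

single = report.metrics.summarize(metric="accuracy").frame()
listed = report.metrics.summarize(metric=["accuracy"]).frame()
assert single.equals(listed)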

skore/tests/unit/displays/metrics_summary/test_comparison_estimator.py

Lines changed: 5 additions & 5 deletions

@@ -140,24 +140,24 @@ def test_aggregate(
 
 
 @pytest.mark.parametrize(
-    "scoring, scoring_kwargs",
+    "metric, metric_kwargs",
     [
         ("accuracy", None),
         ("neg_log_loss", None),
         (accuracy_score, {"response_method": "predict"}),
         (get_scorer("accuracy"), None),
     ],
 )
-def test_scoring_single_list_equivalence(
-    comparison_estimator_reports_binary_classification, scoring, scoring_kwargs
+def test_metric_single_list_equivalence(
+    comparison_estimator_reports_binary_classification, metric, metric_kwargs
 ):
     """Check that passing a single string, callable, scorer is equivalent to passing a
     list with a single element."""
     report = comparison_estimator_reports_binary_classification
     result_single = report.metrics.summarize(
-        metric=scoring, metric_kwargs=scoring_kwargs
+        metric=metric, metric_kwargs=metric_kwargs
     ).frame()
     result_list = report.metrics.summarize(
-        metric=[scoring], metric_kwargs=scoring_kwargs
+        metric=[metric], metric_kwargs=metric_kwargs
     ).frame()
     assert result_single.equals(result_list)

skore/tests/unit/displays/metrics_summary/test_cross_validation.py

Lines changed: 15 additions & 15 deletions

@@ -126,26 +126,26 @@ def _check_results_summarize(
 
 
 @pytest.mark.parametrize(
-    "scoring, scoring_kwargs",
+    "metric, metric_kwargs",
     [
         ("accuracy", None),
         ("neg_log_loss", None),
         (accuracy_score, {"response_method": "predict"}),
         (get_scorer("accuracy"), None),
     ],
 )
-def test_scoring_single_list_equivalence(
-    forest_binary_classification_data, scoring, scoring_kwargs
+def test_metric_single_list_equivalence(
+    forest_binary_classification_data, metric, metric_kwargs
 ):
     """Check that passing a single string, callable, scorer is equivalent to passing a
     list with a single element."""
     (estimator, X, y), cv = forest_binary_classification_data, 2
     report = CrossValidationReport(estimator, X, y, splitter=cv)
     result_single = report.metrics.summarize(
-        metric=scoring, metric_kwargs=scoring_kwargs
+        metric=metric, metric_kwargs=metric_kwargs
     ).frame()
     result_list = report.metrics.summarize(
-        metric=[scoring], metric_kwargs=scoring_kwargs
+        metric=[metric], metric_kwargs=metric_kwargs
     ).frame()
     assert result_single.equals(result_list)
 
@@ -284,7 +284,7 @@ def test_regression(linear_regression_data):
     )
 
 
-def test_scoring_kwargs_regression(
+def test_metric_kwargs_regression(
     linear_regression_multioutput_data,
 ):
     """Check the behaviour of the `MetricsSummaryDisplay` method with scoring kwargs."""
@@ -299,7 +299,7 @@ def test_scoring_kwargs_regression(
     assert result.index.names == ["Metric", "Output"]
 
 
-def test_scoring_kwargs_multi_class(
+def test_metric_kwargs_multi_class(
     forest_multiclass_classification_data,
 ):
     """Check the behaviour of the `MetricsSummaryDisplay` method with scoring kwargs."""
@@ -313,7 +313,7 @@ def test_scoring_kwargs_multi_class(
 
 
 @pytest.mark.parametrize(
-    "fixture_name, scoring, expected_index",
+    "fixture_name, metric, expected_index",
     [
         (
             "linear_regression_data",
@@ -352,11 +352,11 @@ def test_scoring_kwargs_multi_class(
         ),
     ],
 )
-def test_overwrite_scoring_names(request, fixture_name, scoring, expected_index):
+def test_overwrite_metric_names(request, fixture_name, metric, expected_index):
     """Test that we can overwrite the scoring names in `MetricsSummaryDisplay`."""
     estimator, X, y = request.getfixturevalue(fixture_name)
     report = CrossValidationReport(estimator, X, y, splitter=2)
-    result = report.metrics.summarize(metric=scoring).frame()
+    result = report.metrics.summarize(metric=metric).frame()
     assert result.shape == (len(expected_index), 2)
 
     # Get level 0 names if MultiIndex, otherwise get column names
@@ -368,14 +368,14 @@ def test_overwrite_scoring_names(request, fixture_name, scoring, expected_index)
     assert result_index == expected_index
 
 
-@pytest.mark.parametrize("scoring", ["public_metric", "_private_metric"])
-def test_error_scoring_strings(linear_regression_data, scoring):
-    """Check that we raise an error if a scoring string is not a valid metric."""
+@pytest.mark.parametrize("metric", ["public_metric", "_private_metric"])
+def test_error_metric_strings(linear_regression_data, metric):
+    """Check that we raise an error if a metric string is not a valid metric."""
     estimator, X, y = linear_regression_data
     report = CrossValidationReport(estimator, X, y, splitter=2)
-    err_msg = re.escape(f"Invalid metric: {scoring!r}.")
+    err_msg = re.escape(f"Invalid metric: {metric!r}.")
     with pytest.raises(ValueError, match=err_msg):
-        report.metrics.summarize(metric=[scoring])
+        report.metrics.summarize(metric=[metric])
 
 
 def test_scorer(linear_regression_data):
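The error path renamed in the last hunk above can be sketched as follows; the regression data is a stand-in, and the message format comes straight from the diff (`Invalid metric: {metric!r}.`):

# Sketch of the rejection tested by test_error_metric_strings: an unknown
# metric string raises ValueError with an "Invalid metric: ..." message.
import re

import pytest
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from skore import CrossValidationReport

X, y = make_regression(random_state=0)
report = CrossValidationReport(LinearRegression(), X, y, splitter=2)

with pytest.raises(ValueError, match=re.escape("Invalid metric: 'public_metric'.")):
    report.metrics.summarize(metric=["public_metric"])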

skore/tests/unit/displays/metrics_summary/test_estimator.py

Lines changed: 9 additions & 9 deletions

@@ -168,10 +168,10 @@ def test_regression(linear_regression_with_test, data_source):
     _check_results_summarize(result, expected_metrics, len(expected_metrics))
 
 
-def test_scoring_kwargs(
+def test_metric_kwargs(
     linear_regression_multioutput_with_test, forest_multiclass_classification_with_test
 ):
-    """Check the behaviour of the `MetricsSummaryDisplay` method with scoring kwargs."""
+    """Check the behaviour of the `MetricsSummaryDisplay` method with metric kwargs."""
     estimator, X_test, y_test = linear_regression_multioutput_with_test
     report = EstimatorReport(estimator, X_test=X_test, y_test=y_test)
     assert hasattr(report.metrics, "summarize")
@@ -265,31 +265,31 @@ def test_indicator_favorability(
 
 
 @pytest.mark.parametrize(
-    "scoring, scoring_kwargs",
+    "metric, metric_kwargs",
    [
         ("accuracy", None),
         ("neg_log_loss", None),
         (accuracy_score, {"response_method": "predict"}),
         (get_scorer("accuracy"), None),
     ],
 )
-def test_scoring_single_list_equivalence(
-    forest_binary_classification_with_test, scoring, scoring_kwargs
+def test_metric_single_list_equivalence(
+    forest_binary_classification_with_test, metric, metric_kwargs
 ):
     """Check that passing a single string, callable, scorer is equivalent to passing a
     list with a single element."""
     estimator, X_test, y_test = forest_binary_classification_with_test
     report = EstimatorReport(estimator, X_test=X_test, y_test=y_test)
     result_single = report.metrics.summarize(
-        metric=scoring, metric_kwargs=scoring_kwargs
+        metric=metric, metric_kwargs=metric_kwargs
     ).frame()
     result_list = report.metrics.summarize(
-        metric=[scoring], metric_kwargs=scoring_kwargs
+        metric=[metric], metric_kwargs=metric_kwargs
     ).frame()
     assert result_single.equals(result_list)
 
 
-def test_scoring_custom_metric(linear_regression_with_test):
+def test_metric_custom_metric(linear_regression_with_test):
     """Check that we can pass a custom metric with specific kwargs into
     `MetricsSummaryDisplay`."""
     estimator, X_test, y_test = linear_regression_with_test
@@ -481,7 +481,7 @@ def test_sklearn_scoring_strings_regression(
     assert reg_result.loc["R²"]["Favorability"] == "(↗︎)"
 
 
-def test_scoring_strings_regression(linear_regression_with_test):
+def test_metric_strings_regression(linear_regression_with_test):
     """Test skore regression metric strings in `MetricsSummaryDisplay`."""
     regressor, X_test, y_test = linear_regression_with_test
     reg_report = EstimatorReport(regressor, X_test=X_test, y_test=y_test)
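Finally, the `(accuracy_score, {"response_method": "predict"})` entry in the parametrizations shows that `metric` also accepts a raw callable; a minimal sketch with stand-in data:

# Sketch of the callable-metric case: a plain sklearn metric function passed
# as `metric`, with `metric_kwargs` choosing how predictions are obtained.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from skore import EstimatorReport

X, y = make_classification(random_state=0)
report = EstimatorReport(RandomForestClassifier().fit(X, y), X_test=X, y_test=y)

result = report.metrics.summarize(
    metric=accuracy_score, metric_kwargs={"response_method": "predict"}
).frame()
print(result)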
