diff --git a/skpro/metrics/_classes.py b/skpro/metrics/_classes.py
index 85f449552..816e2a238 100644
--- a/skpro/metrics/_classes.py
+++ b/skpro/metrics/_classes.py
@@ -127,9 +127,12 @@ def _evaluate_by_index(self, y_true, y_pred, **kwargs):
             # if alpha was provided, check whether they are predicted
             # if not all alpha are observed, raise a ValueError
             if not np.isin(alpha, y_pred_alphas).all():
-                # todo: make error msg more informative
-                # which alphas are missing
-                msg = "not all quantile values in alpha are available in y_pred"
+                # sorted for a deterministic error message (set order varies)
+                missing_alphas = sorted(set(alpha) - set(y_pred_alphas))
+                msg = (
+                    "not all quantile values in alpha are available in y_pred. "
+                    f"Missing alphas: {missing_alphas}."
+                )
                 raise ValueError(msg)
             else:
                 alphas = alpha
diff --git a/skpro/metrics/tests/test_probabilistic_metrics.py b/skpro/metrics/tests/test_probabilistic_metrics.py
index eb852ffa4..1882bd398 100644
--- a/skpro/metrics/tests/test_probabilistic_metrics.py
+++ b/skpro/metrics/tests/test_probabilistic_metrics.py
@@ -210,7 +210,7 @@ def test_evaluate_alpha_positive(Metric, y_pred, y_true):
 )
 def test_evaluate_alpha_negative(Metric, y_pred, y_true):
     """Tests whether correct error raised when required quantile not present."""
-    with pytest.raises(ValueError):
+    with pytest.raises(ValueError, match="Missing alphas"):
         # 0.3 not in test quantile data so raise error.
         Loss = Metric.create_test_instance().set_params(alpha=0.3)
         res = Loss(y_true=y_true, y_pred=y_pred)  # noqa