| 
 | 1 | +from unitxt import add_to_catalog  | 
 | 2 | +from unitxt.metrics import MeanSquaredError, RootMeanSquaredError  | 
 | 3 | +from unitxt.test_utils.metrics import test_metric  | 
 | 4 | + | 
 | 5 | +metric = MeanSquaredError(  | 
 | 6 | +    __description__="""Metric to calculate the mean squared error (MSE) between the prediction and the reference values.  | 
 | 7 | +
  | 
 | 8 | +    Assume both the prediction and reference are floats.  | 
 | 9 | +
  | 
 | 10 | +    Support only a single reference per prediction  .  | 
 | 11 | +    """  | 
 | 12 | +)  | 
 | 13 | +predictions = [1.0, 2.0, 1.0]  | 
 | 14 | +references = [[-1.0], [1.0], [0.0]]  | 
 | 15 | + | 
 | 16 | +instance_targets = [  | 
 | 17 | +    {"mean_squared_error": 4.0, "score": 4.0, "score_name": "mean_squared_error"},  | 
 | 18 | +    {"mean_squared_error": 1.0, "score": 1.0, "score_name": "mean_squared_error"},  | 
 | 19 | +    {"mean_squared_error": 1.0, "score": 1.0, "score_name": "mean_squared_error"},  | 
 | 20 | +]  | 
 | 21 | + | 
 | 22 | +global_target = {  | 
 | 23 | +    "mean_squared_error": 2.0,  | 
 | 24 | +    "score": 2.0,  | 
 | 25 | +    "score_name": "mean_squared_error",  | 
 | 26 | +    "mean_squared_error_ci_low": 1.0,  | 
 | 27 | +    "mean_squared_error_ci_high": 4.0,  | 
 | 28 | +    "score_ci_low": 1.0,  | 
 | 29 | +    "score_ci_high": 4.0,  | 
 | 30 | +    "num_of_instances": 3,  | 
 | 31 | +}  | 
 | 32 | + | 
 | 33 | +outputs = test_metric(  | 
 | 34 | +    metric=metric,  | 
 | 35 | +    predictions=predictions,  | 
 | 36 | +    references=references,  | 
 | 37 | +    instance_targets=instance_targets,  | 
 | 38 | +    global_target=global_target,  | 
 | 39 | +)  | 
 | 40 | + | 
 | 41 | +add_to_catalog(metric, "metrics.mean_squared_error", overwrite=True)  | 
 | 42 | + | 
 | 43 | + | 
 | 44 | +metric = RootMeanSquaredError(  | 
 | 45 | +    __description__="""Metric to calculate the root mean squared error (RMSE) between the prediction and the reference values.  | 
 | 46 | +
  | 
 | 47 | +    Assume both the prediction and reference are floats.  | 
 | 48 | +
  | 
 | 49 | +    Support only a single reference per prediction  .  | 
 | 50 | +    """  | 
 | 51 | +)  | 
 | 52 | + | 
 | 53 | + | 
 | 54 | +instance_targets = [  | 
 | 55 | +    {  | 
 | 56 | +        "root_mean_squared_error": 2.0,  | 
 | 57 | +        "score": 2.0,  | 
 | 58 | +        "score_name": "root_mean_squared_error",  | 
 | 59 | +    },  | 
 | 60 | +    {  | 
 | 61 | +        "root_mean_squared_error": 1.0,  | 
 | 62 | +        "score": 1.0,  | 
 | 63 | +        "score_name": "root_mean_squared_error",  | 
 | 64 | +    },  | 
 | 65 | +    {  | 
 | 66 | +        "root_mean_squared_error": 1.0,  | 
 | 67 | +        "score": 1.0,  | 
 | 68 | +        "score_name": "root_mean_squared_error",  | 
 | 69 | +    },  | 
 | 70 | +]  | 
 | 71 | + | 
 | 72 | +global_target = {  | 
 | 73 | +    "root_mean_squared_error": 1.41,  | 
 | 74 | +    "score": 1.41,  | 
 | 75 | +    "score_name": "root_mean_squared_error",  | 
 | 76 | +    "root_mean_squared_error_ci_low": 1.0,  | 
 | 77 | +    "root_mean_squared_error_ci_high": 2.0,  | 
 | 78 | +    "score_ci_low": 1.0,  | 
 | 79 | +    "score_ci_high": 2.0,  | 
 | 80 | +    "num_of_instances": 3,  | 
 | 81 | +}  | 
 | 82 | + | 
 | 83 | +outputs = test_metric(  | 
 | 84 | +    metric=metric,  | 
 | 85 | +    predictions=predictions,  | 
 | 86 | +    references=references,  | 
 | 87 | +    instance_targets=instance_targets,  | 
 | 88 | +    global_target=global_target,  | 
 | 89 | +)  | 
 | 90 | + | 
 | 91 | +add_to_catalog(metric, "metrics.root_mean_squared_error", overwrite=True)  | 