diff --git a/skore/src/skore/__init__.py b/skore/src/skore/__init__.py
index 7dd639b673..64615821d9 100644
--- a/skore/src/skore/__init__.py
+++ b/skore/src/skore/__init__.py
@@ -21,6 +21,9 @@
     train_test_split,
 )
 from skore._sklearn._plot.base import Display
+from skore._sklearn._plot.metrics.feature_importance_coefficients_display import (
+    FeatureImportanceCoefficientsDisplay,
+)
 from skore._utils._patch import setup_jupyter_display
 from skore._utils._show_versions import show_versions
 from skore.project import Project
@@ -41,6 +44,7 @@
     "get_config",
     "set_config",
     "TableReportDisplay",
+    "FeatureImportanceCoefficientsDisplay",
 ]
 
 logger = logging.getLogger(__name__)
diff --git a/skore/src/skore/_sklearn/_comparison/feature_importance_accessor.py b/skore/src/skore/_sklearn/_comparison/feature_importance_accessor.py
index 3cca4cdd32..b306d1ec64 100644
--- a/skore/src/skore/_sklearn/_comparison/feature_importance_accessor.py
+++ b/skore/src/skore/_sklearn/_comparison/feature_importance_accessor.py
@@ -10,8 +10,8 @@
 from skore._sklearn._base import _BaseAccessor
 from skore._sklearn._cross_validation import CrossValidationReport
 from skore._sklearn._estimator import EstimatorReport
-from skore._sklearn._plot.metrics.feature_importance_display import (
-    FeatureImportanceDisplay,
+from skore._sklearn._plot.metrics.feature_importance_coefficients_display import (
+    FeatureImportanceCoefficientsDisplay,
 )
 from skore._utils._accessor import _check_comparison_report_sub_estimators_have_coef
 
@@ -29,20 +29,25 @@ def __init__(self, parent: ComparisonReport) -> None:
         super().__init__(parent)
 
     @available_if(_check_comparison_report_sub_estimators_have_coef())
-    def coefficients(self) -> FeatureImportanceDisplay:
+    def coefficients(self) -> FeatureImportanceCoefficientsDisplay:
         """Retrieve the coefficients for each report, including the intercepts.
 
-        If the compared reports are `EstimatorReport`s, the coefficients from each
-        report's estimator are returned as a single-column DataFrame.
+        If the compared reports are :class:`EstimatorReport` instances, the coefficients
+        from each report's estimator are returned as a single-column DataFrame.
 
-        If the compared reports are `CrossValidationReport`s, the coefficients
-        across all cross-validation splits are retained and the columns are prefixed
-        with the corresponding estimator name to distinguish them.
+        If the compared reports are :class:`CrossValidationReport` instances, the
+        coefficients across all cross-validation splits are retained and the columns are
+        prefixed with the corresponding estimator name to distinguish them.
 
         Comparison reports with the same features are put under one key and are plotted
-        together.
-        When some reports share the same features and others do not, those with the same
-        features are plotted together.
+        together. When only some of the compared reports share the same features, those
+        reports are still grouped and plotted together.
+
+        Returns
+        -------
+        :class:`FeatureImportanceCoefficientsDisplay`
+            The feature importance display containing model coefficients and
+            intercept.
""" similar_reports = defaultdict(list) @@ -89,7 +94,12 @@ def coefficients(self) -> FeatureImportanceDisplay: else: raise TypeError(f"Unexpected report type: {self._parent._reports_type}") - return FeatureImportanceDisplay(self._parent, coef_frames) + return FeatureImportanceCoefficientsDisplay( + "comparison-estimator" + if self._parent._reports_type == "EstimatorReport" + else "comparison-cross-validation", + coef_frames, + ) #################################################################################### # Methods related to the help tree diff --git a/skore/src/skore/_sklearn/_comparison/report.py b/skore/src/skore/_sklearn/_comparison/report.py index 20ef23d11d..ed4618bda1 100644 --- a/skore/src/skore/_sklearn/_comparison/report.py +++ b/skore/src/skore/_sklearn/_comparison/report.py @@ -103,6 +103,7 @@ class ComparisonReport(_BaseReport, DirNamesMixin): _ACCESSOR_CONFIG: dict[str, dict[str, str]] = { "metrics": {"name": "metrics"}, + "feature_importance": {"name": "feature_importance"}, } metrics: _MetricsAccessor feature_importance: _FeatureImportanceAccessor diff --git a/skore/src/skore/_sklearn/_cross_validation/feature_importance_accessor.py b/skore/src/skore/_sklearn/_cross_validation/feature_importance_accessor.py index 92ef345411..bcf347d7a7 100644 --- a/skore/src/skore/_sklearn/_cross_validation/feature_importance_accessor.py +++ b/skore/src/skore/_sklearn/_cross_validation/feature_importance_accessor.py @@ -6,8 +6,8 @@ from skore._externals._pandas_accessors import DirNamesMixin from skore._sklearn._base import _BaseAccessor from skore._sklearn._cross_validation.report import CrossValidationReport -from skore._sklearn._plot.metrics.feature_importance_display import ( - FeatureImportanceDisplay, +from skore._sklearn._plot.metrics.feature_importance_coefficients_display import ( + FeatureImportanceCoefficientsDisplay, ) from skore._utils._accessor import _check_cross_validation_sub_estimator_has_coef @@ -22,9 +22,15 @@ def __init__(self, parent: CrossValidationReport) -> None: super().__init__(parent) @available_if(_check_cross_validation_sub_estimator_has_coef()) - def coefficients(self) -> FeatureImportanceDisplay: + def coefficients(self) -> FeatureImportanceCoefficientsDisplay: """Retrieve the coefficients across splits, including the intercept. + Returns + ------- + :class:`FeatureImportanceCoefficientsDisplay` + The feature importance display containing model coefficients and + intercept. 
+ Examples -------- >>> from sklearn.datasets import make_regression @@ -34,7 +40,8 @@ def coefficients(self) -> FeatureImportanceDisplay: >>> report = CrossValidationReport( >>> estimator=Ridge(), X=X, y=y, splitter=5, n_jobs=4 >>> ) - >>> report.feature_importance.coefficients().frame() + >>> display = report.feature_importance.coefficients() + >>> display.frame() Intercept Feature #0 Feature #1 Feature #2 Split index 0 0.064837 74.100966 27.309656 17.367865 @@ -42,7 +49,7 @@ def coefficients(self) -> FeatureImportanceDisplay: 2 0.000084 74.107126 27.614821 17.277730 3 0.145613 74.207645 27.523667 17.391055 4 0.033695 74.259575 27.599610 17.390481 - >>> report.feature_importance.coefficients().plot() # shows plot + >>> display.plot() # shows plot """ combined = pd.concat( { @@ -58,7 +65,7 @@ def coefficients(self) -> FeatureImportanceDisplay: ).T combined.index.name = "Split index" - return FeatureImportanceDisplay(self._parent, combined) + return FeatureImportanceCoefficientsDisplay("cross-validation", combined) #################################################################################### # Methods related to the help tree diff --git a/skore/src/skore/_sklearn/_cross_validation/report.py b/skore/src/skore/_sklearn/_cross_validation/report.py index ecd730d9ba..da0a4541dc 100644 --- a/skore/src/skore/_sklearn/_cross_validation/report.py +++ b/skore/src/skore/_sklearn/_cross_validation/report.py @@ -143,6 +143,7 @@ class CrossValidationReport(_BaseReport, DirNamesMixin): _ACCESSOR_CONFIG: dict[str, dict[str, str]] = { "metrics": {"name": "metrics"}, + "feature_importance": {"name": "feature_importance"}, } metrics: _MetricsAccessor feature_importance: _FeatureImportanceAccessor diff --git a/skore/src/skore/_sklearn/_estimator/feature_importance_accessor.py b/skore/src/skore/_sklearn/_estimator/feature_importance_accessor.py index 785fd95360..daceac81aa 100644 --- a/skore/src/skore/_sklearn/_estimator/feature_importance_accessor.py +++ b/skore/src/skore/_sklearn/_estimator/feature_importance_accessor.py @@ -17,8 +17,8 @@ from skore._externals._pandas_accessors import DirNamesMixin from skore._sklearn._base import _BaseAccessor from skore._sklearn._estimator.report import EstimatorReport -from skore._sklearn._plot.metrics.feature_importance_display import ( - FeatureImportanceDisplay, +from skore._sklearn._plot.metrics.feature_importance_coefficients_display import ( + FeatureImportanceCoefficientsDisplay, ) from skore._sklearn.types import Aggregate from skore._utils._accessor import ( @@ -158,9 +158,15 @@ def __init__(self, parent: EstimatorReport) -> None: super().__init__(parent) @available_if(_check_estimator_has_coef()) - def coefficients(self) -> FeatureImportanceDisplay: + def coefficients(self) -> FeatureImportanceCoefficientsDisplay: """Retrieve the coefficients of a linear model, including the intercept. + Returns + ------- + :class:`FeatureImportanceCoefficientsDisplay` + The feature importance display containing model coefficients and + intercept. + Examples -------- >>> from sklearn.datasets import load_diabetes @@ -171,7 +177,8 @@ def coefficients(self) -> FeatureImportanceDisplay: >>> split_data = train_test_split(X=X, y=y, random_state=0, as_dict=True) >>> regressor = Ridge() >>> report = EstimatorReport(regressor, **split_data) - >>> report.feature_importance.coefficients().frame() + >>> display = report.feature_importance.coefficients() + >>> display.frame() Coefficient Intercept 152.4... Feature #0 21.2... 
@@ -184,7 +191,7 @@ def coefficients(self) -> FeatureImportanceDisplay:
         Feature #7     112.6...
         Feature #8     250.5...
         Feature #9      99.5...
-        >>> report.feature_importance.coefficients().plot() # shows plot
+        >>> display.plot() # shows plot
         """
         parent_estimator = self._parent.estimator_
 
@@ -239,7 +246,7 @@ def coefficients(self) -> FeatureImportanceDisplay:
             columns=columns,
         )
 
-        return FeatureImportanceDisplay(self._parent, df)
+        return FeatureImportanceCoefficientsDisplay("estimator", df)
 
     @available_if(_check_has_feature_importances())
     def mean_decrease_impurity(self):
diff --git a/skore/src/skore/_sklearn/_plot/metrics/feature_importance_coefficients_display.py b/skore/src/skore/_sklearn/_plot/metrics/feature_importance_coefficients_display.py
new file mode 100644
index 0000000000..d340edba54
--- /dev/null
+++ b/skore/src/skore/_sklearn/_plot/metrics/feature_importance_coefficients_display.py
@@ -0,0 +1,171 @@
+import matplotlib.pyplot as plt
+
+from skore._sklearn._plot.base import DisplayMixin
+
+
+class FeatureImportanceCoefficientsDisplay(DisplayMixin):
+    """Feature importance display.
+
+    Each report type produces its own output frame and plot.
+
+    Parameters
+    ----------
+    report_type : {"estimator", "cross-validation", "comparison-estimator", \
+        "comparison-cross-validation"}
+        Report type from which the display is created.
+
+    coefficients : DataFrame | list[DataFrame]
+        The coefficient data to display.
+
+    Attributes
+    ----------
+    ax_ : matplotlib Axes
+        Axes (or array of Axes for comparison reports) containing the plot.
+
+    figure_ : matplotlib Figure
+        Figure containing the plot.
+
+    Examples
+    --------
+    >>> from sklearn.datasets import load_diabetes
+    >>> from sklearn.linear_model import LinearRegression
+    >>> from skore import train_test_split
+    >>> from skore import EstimatorReport
+    >>> X, y = load_diabetes(return_X_y=True)
+    >>> split_data = train_test_split(
+    >>>    X=X, y=y, random_state=0, as_dict=True, shuffle=False
+    >>> )
+    >>> report = EstimatorReport(LinearRegression(), **split_data)
+    >>> display = report.feature_importance.coefficients()
+    >>> display.plot()
+    >>> display.frame()
+                Coefficient
+    Intercept    151.487952
+    Feature #0   -11.861904
+    Feature #1  -238.445509
+    Feature #2   505.395493
+    Feature #3   298.977119
+    ...                 ...
+    """
+
+    def __init__(self, report_type, coefficients):
+        self.report_type = report_type
+        self.coefficients = coefficients
+
+    def frame(self):
+        """Return coefficients as a DataFrame.
+
+        Returns
+        -------
+        pd.DataFrame
+            The structure of the returned frame depends on the underlying report type:
+
+            - If an :class:`EstimatorReport`, a single column "Coefficient", with the
+              index being the feature names.
+
+            - If a :class:`CrossValidationReport`, the columns are the feature names,
+              and the index is the respective split number.
+
+            - If a :class:`ComparisonReport`, the columns are the models passed in the
+              report, with the index being the feature names.
+        """
+        if self.report_type == "estimator":
+            return self._frame_estimator_report()
+        elif self.report_type == "cross-validation":
+            return self._frame_cross_validation_report()
+        else:
+            return self._frame_comparison_report()
+
+    def _frame_estimator_report(self):
+        return self.coefficients
+
+    def _frame_cross_validation_report(self):
+        return self.coefficients
+
+    def _frame_comparison_report(self):
+        import pandas as pd
+
+        return pd.concat(self.coefficients, axis=1)
+
+    @DisplayMixin.style_plot
+    def plot(self, **kwargs) -> None:
+        """Plot the coefficients of linear models.
+ + Parameters + ---------- + **kwargs : dict + Additional keyword arguments to be passed to the plot method. + """ + return self._plot(**kwargs) + + def _style_plot_matplotlib(self, ax, title=None, legend=True): + if title: + ax.set_title(title) + if legend: + ax.legend(loc="best", bbox_to_anchor=(1, 1), borderaxespad=1) + ax.grid(False) + for spine in ["top", "right", "left"]: + ax.spines[spine].set_visible(False) + ax.tick_params(axis="y", length=0) + + def _plot_matplotlib(self, **kwargs): + if self.report_type == "estimator": + return self._plot_estimator_report() + elif self.report_type == "cross-validation": + return self._plot_cross_validation_report() + elif self.report_type == "comparison-estimator": + return self._plot_comparison_report_estimator() + elif self.report_type == "comparison-cross-validation": + return self._plot_comparison_report_cross_validation() + else: + raise TypeError(f"Unexpected report type: {self.report_type!r}") + + def _plot_estimator_report(self): + self.figure_, self.ax_ = plt.subplots() + self.coefficients.plot.barh(ax=self.ax_) + self._style_plot_matplotlib(self.ax_, title="Coefficients") + self.figure_.tight_layout() + plt.show() + + def _plot_cross_validation_report(self): + self.figure_, self.ax_ = plt.subplots() + self.coefficients.boxplot(ax=self.ax_, vert=False) + self._style_plot_matplotlib( + self.ax_, title="Coefficient variance across CV splits", legend=None + ) + self.figure_.tight_layout() + plt.show() + + def _plot_comparison_report_estimator(self): + self.figure_, self.ax_ = plt.subplots( + nrows=1, + ncols=len(self.coefficients), + figsize=(5 * len(self.coefficients), 6), + squeeze=False, + ) + self.ax_ = self.ax_.flatten() + self.figure_.suptitle("Coefficients") + for ax, coef_frame in zip(self.ax_, self.coefficients, strict=False): + coef_frame.plot.barh(ax=ax) + self._style_plot_matplotlib(ax, title=None) + self.figure_.tight_layout() + plt.show() + + def _plot_comparison_report_cross_validation(self): + self.figure_, self.ax_ = plt.subplots( + nrows=1, + ncols=len(self.coefficients), + figsize=(5 * len(self.coefficients), 6), + squeeze=False, + ) + self.ax_ = self.ax_.flatten() + for ax, coef_frame in zip(self.ax_, self.coefficients, strict=False): + coef_frame.boxplot(ax=ax, vert=False) + model_name = coef_frame.columns[0].split("__")[0] + self._style_plot_matplotlib( + ax, + title=f"{model_name} Coefficients across splits", + legend=None, + ) + self.figure_.tight_layout() + plt.show() diff --git a/skore/src/skore/_sklearn/_plot/metrics/feature_importance_display.py b/skore/src/skore/_sklearn/_plot/metrics/feature_importance_display.py deleted file mode 100644 index 9b9d5cb0fe..0000000000 --- a/skore/src/skore/_sklearn/_plot/metrics/feature_importance_display.py +++ /dev/null @@ -1,162 +0,0 @@ -import matplotlib.pyplot as plt - -from skore._sklearn._plot.base import DisplayMixin - - -class FeatureImportanceDisplay(DisplayMixin): - """Feature importance display. - - Each report type produces its own output frame and plot. - - Parameters - ---------- - parent : EstimatorReport | CrossValidationReport | ComparisonReport - Report type from which the display is created. - - coefficient_data : DataFrame | list[DataFrame] - The ROC AUC data to display. The columns are - - Attributes - ---------- - ax_ : matplotlib Axes - Axes with the different matplotlib axis. - - figure_ : matplotlib Figure - Figure containing the plot. - - Methods - ------- - frame() -> DataFrame - The coefficients as a dataframe. 
- - plot() -> NoneType - A plot of the coefficients. - - Examples - -------- - >>> from sklearn.datasets import load_diabetes - >>> from sklearn.linear_model import LinearRegression - >>> from skore import train_test_split - >>> from skore import EstimatorReport - >>> X, y = load_diabetes(return_X_y=True) - >>> split_data = train_test_split( - >>> X=X, y=y, random_state=0, as_dict=True, shuffle=False - >>> ) - >>> report = EstimatorReport(LinearRegression(), **split_data) - >>> display = report.feature_importance.coefficients() - >>> display.plot() - >>> display.frame() - Coefficient - Intercept 151.487952 - Feature #0 -11.861904 - Feature #1 -238.445509 - Feature #2 505.395493 - Feature #3 298.977119 - ... ... - """ - - def __init__(self, parent, coefficient_data): - self._parent = parent - self._coefficient_data = coefficient_data - - def frame(self): - """Return coefficients as a DataFrame. - - Returns - ------- - pd.DataFrame - The structure of the returned frame depends on the underlying report type: - - - If an ``EstimatorReport``, a single column - "Coefficient", with the index being the feature names. - - - If a ``CrossValidationReport``, the columns are - the feature names, and the index is the respective split number. - - - If a ``ComparisonReport``, the columns are the - models passed in the report, with the index being the feature names. - """ - from skore import ComparisonReport, CrossValidationReport, EstimatorReport - - if isinstance(self._parent, EstimatorReport): - return self._frame_estimator_report() - elif isinstance(self._parent, CrossValidationReport): - return self._frame_cross_validation_report() - elif isinstance(self._parent, ComparisonReport): - return self._frame_comparison_report() - else: - raise TypeError(f"Unrecognised report type: {self._parent}") - - def _frame_estimator_report(self): - return self._coefficient_data - - def _frame_cross_validation_report(self): - return self._coefficient_data - - def _frame_comparison_report(self): - import pandas as pd - - return pd.concat(self._coefficient_data, axis=1) - - @DisplayMixin.style_plot - def plot(self, **kwargs) -> None: - """Plot the coefficients of linear models. - - Parameters - ---------- - **kwargs : dict - Additional keyword arguments to be passed to the plot method. 
-        """
-        return self._plot(**kwargs)
-
-    def _plot_matplotlib(self, **kwargs):
-        from skore._sklearn._comparison import ComparisonReport
-        from skore._sklearn._cross_validation import CrossValidationReport
-        from skore._sklearn._estimator import EstimatorReport
-
-        if isinstance(self._parent, EstimatorReport):
-            return self._plot_estimator_report()
-        elif isinstance(self._parent, CrossValidationReport):
-            return self._plot_cross_validation_report()
-        elif isinstance(self._parent, ComparisonReport):
-            return self._plot_comparison_report()
-        else:
-            raise TypeError(f"Unrecognised report type: {self._parent}")
-
-    def _plot_estimator_report(self):
-        self.figure_, self.ax_ = plt.subplots()
-        self._coefficient_data.plot.bar(ax=self.ax_)
-        self.ax_.set_title(f"{self._parent.estimator_name_} Coefficients")
-        self.ax_.legend(loc="best", bbox_to_anchor=(1, 1), borderaxespad=1)
-        self.figure_.tight_layout()
-        plt.show()
-
-    def _plot_cross_validation_report(self):
-        self.figure_, self.ax_ = plt.subplots()
-        self._coefficient_data.boxplot(ax=self.ax_)
-        self.ax_.set_title("Coefficient variance across CV splits")
-        self.figure_.tight_layout()
-        plt.show()
-
-    def _plot_comparison_report(self):
-        if self._parent._reports_type == "EstimatorReport":
-            for coef_frame in self._coefficient_data:
-                self.figure_, self.ax_ = plt.subplots()
-                coef_frame.plot.bar(ax=self.ax_)
-                self.ax_.legend(loc="best", bbox_to_anchor=(1, 1), borderaxespad=1)
-
-                self.ax_.set_title("Coefficients")
-                self.figure_.tight_layout()
-                plt.show()
-        elif self._parent._reports_type == "CrossValidationReport":
-            for coef_frame in self._coefficient_data:
-                self.figure_, self.ax_ = plt.subplots()
-                coef_frame.boxplot(ax=self.ax_)
-                self.ax_.set_title(
-                    f"{coef_frame.columns[0].split('__')[0]} Coefficients across splits"
-                )
-                plt.xticks(rotation=90)
-                plt.tight_layout()
-                plt.show()
-        else:
-            raise TypeError(f"Unexpected report type: {type(self._parent.reports_[0])}")
diff --git a/sphinx/reference/report/comparison_report.rst b/sphinx/reference/report/comparison_report.rst
index 8dfac28877..212b00b9ec 100644
--- a/sphinx/reference/report/comparison_report.rst
+++ b/sphinx/reference/report/comparison_report.rst
@@ -56,3 +56,16 @@ get the common performance metric representations.
    ComparisonReport.metrics.rmse
    ComparisonReport.metrics.roc_auc
    ComparisonReport.metrics.timings
+
+Feature importance
+------------------
+
+The `feature_importance` accessor helps you evaluate the importance of the
+features used to train your estimator.
+
+.. autosummary::
+   :toctree: ../api/
+   :template: autosummary/accessor_method.rst
+
+   ComparisonReport.feature_importance.help
+   ComparisonReport.feature_importance.coefficients
diff --git a/sphinx/reference/report/cross_validation_report.rst b/sphinx/reference/report/cross_validation_report.rst
index c7783ba43f..96114bdcd8 100644
--- a/sphinx/reference/report/cross_validation_report.rst
+++ b/sphinx/reference/report/cross_validation_report.rst
@@ -77,3 +77,16 @@ estimator across cross-validation splits.
    CrossValidationReport.metrics.roc
    CrossValidationReport.metrics.roc_auc
    CrossValidationReport.metrics.timings
+
+Feature importance
+------------------
+
+The `feature_importance` accessor helps you evaluate the importance of the
+features used to train your estimator.
+
+.. autosummary::
+   :toctree: ../api/
+   :template: autosummary/accessor_method.rst
+
+   CrossValidationReport.feature_importance.help
+   CrossValidationReport.feature_importance.coefficients
diff --git a/sphinx/reference/report/displays.rst b/sphinx/reference/report/displays.rst
index 7c4952de44..b54af83aca 100644
--- a/sphinx/reference/report/displays.rst
+++ b/sphinx/reference/report/displays.rst
@@ -16,3 +16,4 @@ the API of each display.
    RocCurveDisplay
    PrecisionRecallCurveDisplay
    PredictionErrorDisplay
+   FeatureImportanceCoefficientsDisplay
diff --git a/sphinx/user_guide/reporters.rst b/sphinx/user_guide/reporters.rst
index 064660f9d5..af1536ff56 100644
--- a/sphinx/user_guide/reporters.rst
+++ b/sphinx/user_guide/reporters.rst
@@ -51,8 +51,10 @@ Model evaluation
 
 :obj:`EstimatorReport.metrics` is the entry point that provides methods to evaluate the
 statistical and performance metrics of the predictive model. This accessor provides two
-types of methods: (i) methods that return some metrics and (ii) methods that return a
-`skore` :class:`Display` object.
+types of methods:
+
+1. Methods that return some metrics,
+2. Methods that return a :class:`skore.Display` object.
 
 Before diving into the details of these methods, we first discuss the parameters they
 share. `data_source` is a parameter that specifies the data to use to compute the
@@ -66,29 +68,45 @@ They return usual python objects such as floats, integers, or dictionaries.
 
 The second type of methods provided by :obj:`EstimatorReport.metrics` are methods that
 return a :class:`~skore.Display` object. They have a common API as well. They expose
 three methods:
-(i) `plot` that plots graphically the information contained in the display,
-(ii) `set_style` that sets some graphical settings instead of passing them to the `plot`
-method at each call.
-(iii) `frame` that returns a `pandas.DataFrame` with the information contained in the
-display.
+
+1. `plot` that plots graphically the information contained in the display,
+
+2. `set_style` that sets some graphical settings instead of passing them to the `plot`
+   method at each call,
+
+3. `frame` that returns a `pandas.DataFrame` with the information contained in the
+   display.
 
 We provide the :class:`EstimatorReport.metrics.summarize` method that aggregates metrics
 in a single dataframe, available through a :class:`~skore.Display`. By default, a set of
 metrics is computed based on the type of target variable (e.g. classification or
 regression). Nevertheless, you can specify the metrics you want to compute thanks to the
-`scoring` parameter. We accept different types: (i) some strings that correspond to
-scikit-learn scorer names or a built-in `skore` metric name, (ii) a callable or a (iii)
-scikit-learn scorer constructed with :func:`sklearn.metrics.make_scorer`.
+`scoring` parameter. We accept different types:
+
+1. A string that corresponds to a scikit-learn scorer name or a built-in `skore`
+   metric name,
+
+2. A callable,
+
+3. A scikit-learn scorer constructed with :func:`sklearn.metrics.make_scorer`.
 
 Refer to the :ref:`displays` section for more details regarding the `skore` display API.
 Refer to the :ref:`estimator_metrics` section for more details on all the available
 metrics in `skore`.
 
+Model interpretability
+^^^^^^^^^^^^^^^^^^^^^^
+
+:obj:`EstimatorReport.feature_importance` is the entry point to interpret and explain a
+predictive model. This accessor provides methods that return a :class:`skore.Display`
+object. As with other display objects, they expose three methods: `plot`, `set_style`
+and `frame`.
+
 Caching mechanism
 ^^^^^^^^^^^^^^^^^
 
 :class:`EstimatorReport` comes together with a caching mechanism that stores
-intermediate information that is expensive to compute such as predictions. It
+intermediate information that is expensive to compute, such as predictions. It
 efficiently re-uses this information when recomputing the same metric or a metric
 requiring the same intermediate information.
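
For reviewers, a minimal end-to-end sketch of the API this diff exposes, assembled from the docstrings above. The dataset, the estimator choices, and the `ComparisonReport` construction are illustrative assumptions, not taken from this diff.

```python
from sklearn.datasets import load_diabetes
from sklearn.linear_model import LinearRegression, Ridge

from skore import (
    ComparisonReport,
    CrossValidationReport,
    EstimatorReport,
    train_test_split,
)

X, y = load_diabetes(return_X_y=True)
split_data = train_test_split(X=X, y=y, random_state=0, as_dict=True)

# EstimatorReport: frame() returns a single "Coefficient" column indexed by
# the feature names plus the intercept; plot() draws a horizontal bar chart.
report = EstimatorReport(Ridge(), **split_data)
display = report.feature_importance.coefficients()
print(display.frame())
display.plot()

# CrossValidationReport: frame() has one row per split; plot() draws a box
# plot of the coefficient variance across CV splits.
cv_report = CrossValidationReport(estimator=Ridge(), X=X, y=y, splitter=5)
cv_report.feature_importance.coefficients().plot()

# ComparisonReport: reports that share the same feature set are grouped and
# plotted together, one subplot per group.
comparison = ComparisonReport(
    [
        EstimatorReport(Ridge(), **split_data),
        EstimatorReport(LinearRegression(), **split_data),
    ]
)
comparison.feature_importance.coefficients().plot()
```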