diff --git a/ax/analysis/plotly/interaction.py b/ax/analysis/plotly/interaction.py
index cd29d38ae71..b38bac2cfd9 100644
--- a/ax/analysis/plotly/interaction.py
+++ b/ax/analysis/plotly/interaction.py
@@ -333,6 +333,7 @@ def _prepare_surface_plot(
             log_y=is_axis_log_scale(
                 parameter=experiment.search_space.parameters[y_parameter_name]
             ),
+            display_sampled=True,
         )

     # If the feature is a first-order component, plot a slice plot.
@@ -350,4 +351,5 @@ def _prepare_surface_plot(
         log_x=is_axis_log_scale(
             parameter=experiment.search_space.parameters[feature_name]
         ),
+        display_sampled=True,
     )
diff --git a/ax/analysis/plotly/surface/contour.py b/ax/analysis/plotly/surface/contour.py
index 60bb32a7ede..7bed9cc9b53 100644
--- a/ax/analysis/plotly/surface/contour.py
+++ b/ax/analysis/plotly/surface/contour.py
@@ -36,6 +36,7 @@ class ContourPlot(PlotlyAnalysis):
         - PARAMETER_NAME: The value of the x parameter specified
         - PARAMETER_NAME: The value of the y parameter specified
         - METRIC_NAME: The predected mean of the metric specified
+        - sampled: Whether the parameter values were sampled in at least one trial
     """

     def __init__(
@@ -43,12 +44,15 @@ def __init__(
         x_parameter_name: str,
         y_parameter_name: str,
         metric_name: str | None = None,
+        display_sampled: bool = True,
     ) -> None:
         """
         Args:
             y_parameter_name: The name of the parameter to plot on the x-axis.
             y_parameter_name: The name of the parameter to plot on the y-axis.
             metric_name: The name of the metric to plot
+            display_sampled: If True, plot "x"s at x coordinates which have been
+                sampled in at least one trial.
         """
         # TODO: Add a flag to specify whether or not to plot markers at the (x, y)
         # coordinates of arms (with hover text). This is fine to exlude for now because
@@ -57,6 +61,7 @@ def __init__(
         self.x_parameter_name = x_parameter_name
         self.y_parameter_name = y_parameter_name
         self.metric_name = metric_name
+        self._display_sampled = display_sampled

     def compute(
         self,
@@ -93,6 +98,7 @@ def compute(
             log_y=is_axis_log_scale(
                 parameter=experiment.search_space.parameters[self.y_parameter_name]
             ),
+            display_sampled=self._display_sampled,
         )

         return self._create_plotly_analysis_card(
@@ -116,14 +122,23 @@ def _prepare_data(
     y_parameter_name: str,
     metric_name: str,
 ) -> pd.DataFrame:
+    sampled = [
+        (arm.parameters[x_parameter_name], arm.parameters[y_parameter_name])
+        for trial in experiment.trials.values()
+        for arm in trial.arms
+    ]
+
     # Choose which parameter values to predict points for.
-    xs = get_parameter_values(
+    unsampled_xs = get_parameter_values(
         parameter=experiment.search_space.parameters[x_parameter_name], density=10
     )
-    ys = get_parameter_values(
+    unsampled_ys = get_parameter_values(
         parameter=experiment.search_space.parameters[y_parameter_name], density=10
     )

+    xs = [*[sample[0] for sample in sampled], *unsampled_xs]
+    ys = [*[sample[1] for sample in sampled], *unsampled_ys]
+
     # Construct observation features for each parameter value previously chosen by
     # fixing all other parameters to their status-quo value or mean.
     features = [
@@ -147,15 +162,22 @@ def _prepare_data(

     predictions = model.predict(observation_features=features)

-    return pd.DataFrame.from_records(
-        [
-            {
-                x_parameter_name: features[i].parameters[x_parameter_name],
-                y_parameter_name: features[i].parameters[y_parameter_name],
-                f"{metric_name}_mean": predictions[0][metric_name][i],
-            }
-            for i in range(len(features))
-        ]
+    return none_throws(
+        pd.DataFrame.from_records(
+            [
+                {
+                    x_parameter_name: features[i].parameters[x_parameter_name],
+                    y_parameter_name: features[i].parameters[y_parameter_name],
+                    f"{metric_name}_mean": predictions[0][metric_name][i],
+                    "sampled": (
+                        features[i].parameters[x_parameter_name],
+                        features[i].parameters[y_parameter_name],
+                    )
+                    in sampled,
+                }
+                for i in range(len(features))
+            ]
+        ).drop_duplicates()
     )


@@ -166,6 +188,7 @@ def _prepare_plot(
     metric_name: str,
     log_x: bool,
     log_y: bool,
+    display_sampled: bool,
 ) -> go.Figure:
     z_grid = df.pivot(
         index=y_parameter_name, columns=x_parameter_name, values=f"{metric_name}_mean"
@@ -185,6 +208,24 @@ def _prepare_plot(
         ),
     )

+    if display_sampled:
+        x_sampled = df[df["sampled"]][x_parameter_name].tolist()
+        y_sampled = df[df["sampled"]][y_parameter_name].tolist()
+
+        samples = go.Scatter(
+            x=x_sampled,
+            y=y_sampled,
+            mode="markers",
+            marker={
+                "symbol": "x",
+                "color": "black",
+            },
+            name="Sampled",
+            showlegend=False,
+        )
+
+        fig.add_trace(samples)
+
     # Set the x-axis scale to log if relevant
     if log_x:
         fig.update_xaxes(
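Not part of the patch: a minimal, self-contained sketch of the overlay technique that contour.py's _prepare_plot now uses, with synthetic data and illustrative column names ("x", "y", "bar_mean") standing in for the real DataFrame.

import pandas as pd
import plotly.graph_objects as go

# Synthetic stand-in for the DataFrame produced by _prepare_data: a grid of
# predicted means plus a boolean "sampled" column.
df = pd.DataFrame(
    {
        "x": [0.0, 0.0, 1.0, 1.0],
        "y": [0.0, 1.0, 0.0, 1.0],
        "bar_mean": [0.1, 0.4, 0.3, 0.9],
        "sampled": [True, False, False, True],
    }
)

# Pivot into a z-grid and draw the predicted means as a contour.
z_grid = df.pivot(index="y", columns="x", values="bar_mean")
fig = go.Figure(
    data=go.Contour(z=z_grid.values, x=z_grid.columns.values, y=z_grid.index.values)
)

# Overlay black "x" markers at parameterizations observed in at least one trial.
sampled = df[df["sampled"]]
fig.add_trace(
    go.Scatter(
        x=sampled["x"],
        y=sampled["y"],
        mode="markers",
        marker={"symbol": "x", "color": "black"},
        name="Sampled",
        showlegend=False,
    )
)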
diff --git a/ax/analysis/plotly/surface/slice.py b/ax/analysis/plotly/surface/slice.py
index d0cbc6a7a7c..58abec24d02 100644
--- a/ax/analysis/plotly/surface/slice.py
+++ b/ax/analysis/plotly/surface/slice.py
@@ -36,21 +36,26 @@ class SlicePlot(PlotlyAnalysis):
         - PARAMETER_NAME: The value of the parameter specified
         - METRIC_NAME_mean: The predected mean of the metric specified
         - METRIC_NAME_sem: The predected sem of the metric specified
+        - sampled: Whether the parameter value was sampled in at least one trial
     """

     def __init__(
         self,
         parameter_name: str,
         metric_name: str | None = None,
+        display_sampled: bool = True,
     ) -> None:
         """
         Args:
             parameter_name: The name of the parameter to plot on the x axis.
             metric_name: The name of the metric to plot on the y axis. If not
                 specified the objective will be used.
+            display_sampled: If True, plot "x"s at x coordinates which have been
+                sampled in at least one trial.
         """
         self.parameter_name = parameter_name
         self.metric_name = metric_name
+        self._display_sampled = display_sampled

     def compute(
         self,
@@ -82,6 +87,7 @@ def compute(
             log_x=is_axis_log_scale(
                 parameter=experiment.search_space.parameters[self.parameter_name]
             ),
+            display_sampled=self._display_sampled,
         )

         return self._create_plotly_analysis_card(
@@ -102,10 +108,16 @@ def _prepare_data(
     parameter_name: str,
     metric_name: str,
 ) -> pd.DataFrame:
+    sampled_xs = [
+        arm.parameters[parameter_name]
+        for trial in experiment.trials.values()
+        for arm in trial.arms
+    ]
     # Choose which parameter values to predict points for.
-    xs = get_parameter_values(
+    unsampled_xs = get_parameter_values(
         parameter=experiment.search_space.parameters[parameter_name]
     )
+    xs = [*sampled_xs, *unsampled_xs]

     # Construct observation features for each parameter value previously chosen by
     # fixing all other parameters to their status-quo value or mean.
@@ -125,15 +137,19 @@ def _prepare_data(

     predictions = model.predict(observation_features=features)

-    return pd.DataFrame.from_records(
-        [
-            {
-                parameter_name: xs[i],
-                f"{metric_name}_mean": predictions[0][metric_name][i],
-                f"{metric_name}_sem": predictions[1][metric_name][metric_name][i],
-            }
-            for i in range(len(xs))
-        ]
+    return none_throws(
+        pd.DataFrame.from_records(
+            [
+                {
+                    parameter_name: xs[i],
+                    f"{metric_name}_mean": predictions[0][metric_name][i],
+                    f"{metric_name}_sem": predictions[1][metric_name][metric_name][i]
+                    ** 0.5,  # Convert the variance to the SEM
+                    "sampled": xs[i] in sampled_xs,
+                }
+                for i in range(len(xs))
+            ]
+        ).drop_duplicates()
     ).sort_values(by=parameter_name)


@@ -141,10 +157,13 @@ def _prepare_plot(
     df: pd.DataFrame,
     parameter_name: str,
     metric_name: str,
-    log_x: bool = False,
+    log_x: bool,
+    display_sampled: bool,
 ) -> go.Figure:
     x = df[parameter_name].tolist()
     y = df[f"{metric_name}_mean"].tolist()
+
+    # Convert the SEMs to 95% confidence intervals
     y_upper = (df[f"{metric_name}_mean"] + 1.96 * df[f"{metric_name}_sem"]).tolist()
     y_lower = (df[f"{metric_name}_mean"] - 1.96 * df[f"{metric_name}_sem"]).tolist()

@@ -180,6 +199,24 @@ def _prepare_plot(
         ),
     )

+    if display_sampled:
+        x_sampled = df[df["sampled"]][parameter_name].tolist()
+        y_sampled = df[df["sampled"]][f"{metric_name}_mean"].tolist()
+
+        samples = go.Scatter(
+            x=x_sampled,
+            y=y_sampled,
+            mode="markers",
+            marker={
+                "symbol": "x",
+                "color": "black",
+            },
+            name=f"Sampled {parameter_name}",
+            showlegend=False,
+        )
+
+        fig.add_trace(samples)
+
     # Set the x-axis scale to log if relevant
     if log_x:
         fig.update_xaxes(
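Not part of the patch: a sketch of the data preparation that slice.py's _prepare_data now performs, with invented numbers standing in for the model's predictions. It shows the variance-to-SEM square root, the "sampled" membership flag, the drop_duplicates collapse, and the 1.96 * SEM confidence bounds used downstream.

import pandas as pd

# Invented stand-ins: x values pulled from the experiment's arms (duplicates
# possible) and the grid returned by get_parameter_values.
sampled_xs = [0.0, 0.5, 0.5]
unsampled_xs = [0.0, 0.25, 0.5, 0.75, 1.0]
xs = [*sampled_xs, *unsampled_xs]

# Invented model output: predicted means and variances, one per entry in xs.
means = [1.0, 1.5, 1.5, 1.0, 1.3, 1.5, 1.4, 1.2]
variances = [0.04, 0.01, 0.01, 0.04, 0.02, 0.01, 0.02, 0.04]

df = pd.DataFrame.from_records(
    [
        {
            "x": xs[i],
            "bar_mean": means[i],
            # The model predicts variances; the square root gives the SEM.
            "bar_sem": variances[i] ** 0.5,
            # Flag x values that were sampled in at least one trial.
            "sampled": xs[i] in sampled_xs,
        }
        for i in range(len(xs))
    ]
).drop_duplicates()  # rows duplicated between sampled points and the grid collapse

# 95% confidence bounds used by the plot: mean +/- 1.96 * SEM.
df["bar_upper"] = df["bar_mean"] + 1.96 * df["bar_sem"]
df["bar_lower"] = df["bar_mean"] - 1.96 * df["bar_sem"]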
diff --git a/ax/analysis/plotly/surface/tests/test_contour.py b/ax/analysis/plotly/surface/tests/test_contour.py
index 6deec31ae4a..f8a63c1fea4 100644
--- a/ax/analysis/plotly/surface/tests/test_contour.py
+++ b/ax/analysis/plotly/surface/tests/test_contour.py
@@ -77,7 +77,10 @@ def test_compute(self) -> None:
                 "x",
                 "y",
                 "bar_mean",
+                "sampled",
             },
         )
         self.assertIsNotNone(card.blob)
         self.assertEqual(card.blob_annotation, "plotly")
+
+        self.assertEqual(card.df["sampled"].sum(), len(self.client.experiment.trials))
diff --git a/ax/analysis/plotly/surface/tests/test_slice.py b/ax/analysis/plotly/surface/tests/test_slice.py
index 557a7665c37..5b1b3ab9520 100644
--- a/ax/analysis/plotly/surface/tests/test_slice.py
+++ b/ax/analysis/plotly/surface/tests/test_slice.py
@@ -63,11 +63,9 @@ def test_compute(self) -> None:
         self.assertEqual(card.level, AnalysisCardLevel.LOW)
         self.assertEqual(
             {*card.df.columns},
-            {
-                "x",
-                "bar_mean",
-                "bar_sem",
-            },
+            {"x", "bar_mean", "bar_sem", "sampled"},
         )
         self.assertIsNotNone(card.blob)
         self.assertEqual(card.blob_annotation, "plotly")
+
+        self.assertEqual(card.df["sampled"].sum(), len(self.client.experiment.trials))
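Not part of the patch: for reference, both analyses accept the new flag at construction time, with display_sampled defaulting to True. Module paths are taken from the diff headers above; the parameter and metric names are placeholders.

from ax.analysis.plotly.surface.contour import ContourPlot
from ax.analysis.plotly.surface.slice import SlicePlot

# "x", "y", and "bar" are placeholder parameter/metric names.
contour = ContourPlot(
    x_parameter_name="x",
    y_parameter_name="y",
    metric_name="bar",
    display_sampled=True,  # default; draw "x" markers at sampled points
)
slice_plot = SlicePlot(
    parameter_name="x",
    metric_name="bar",
    display_sampled=False,  # suppress the sampled-point markers
)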