Skip to content

Commit 6f24700

Browse files
authored
Add test for pareto filter (opendatahub-io#962)
* feat: Add test for pareto filter Signed-off-by: lugi0 <lgiorgi@redhat.com> * fix: change comment Signed-off-by: lugi0 <lgiorgi@redhat.com> * fix: address comments Signed-off-by: lugi0 <lgiorgi@redhat.com> * fix: use model from constant list Signed-off-by: lugi0 <lgiorgi@redhat.com> --------- Signed-off-by: lugi0 <lgiorgi@redhat.com>
1 parent 60906a7 commit 6f24700

File tree

3 files changed

+997
-962
lines changed

3 files changed

+997
-962
lines changed

tests/model_registry/model_catalog/test_model_artifact_search.py

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
fetch_all_artifacts_with_dynamic_paging,
77
validate_model_artifacts_match_criteria_and,
88
validate_model_artifacts_match_criteria_or,
9+
validate_recommendations_subset,
910
)
1011
from tests.model_registry.model_catalog.constants import (
1112
VALIDATED_CATALOG_ID,
@@ -204,3 +205,66 @@ def test_filter_query_advanced_artifact_search(
204205
)
205206
else:
206207
pytest.fail(f"{logic_type} filter validation failed for model {model_name}")
208+
209+
@pytest.mark.parametrize(
    "randomly_picked_model_from_catalog_api_by_source",
    [
        pytest.param(
            {
                "catalog_id": VALIDATED_CATALOG_ID,
                "header_type": "registry",
                "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
            },
            id="test_performance_artifacts_recommendations_parameter",
        ),
    ],
    indirect=["randomly_picked_model_from_catalog_api_by_source"],
)
def test_performance_artifacts_recommendations_parameter(
    self: Self,
    enabled_model_catalog_config_map: ConfigMap,
    model_catalog_rest_url: list[str],
    model_registry_rest_headers: dict[str, str],
    randomly_picked_model_from_catalog_api_by_source: tuple[dict, str, str],
):
    """
    Test the recommendations query parameter for the performance artifacts endpoint.

    Fetches the full set of performance artifacts for a randomly picked model,
    then fetches the same endpoint with ``recommendations=true``, and validates
    that the filtered result is a non-larger subset of the full result.
    """
    _, model_name, catalog_id = randomly_picked_model_from_catalog_api_by_source

    LOGGER.info(f"Testing performance artifacts recommendations parameter for model: {model_name}")

    # Get all performance artifacts (baseline, no recommendations filter).
    full_results = fetch_all_artifacts_with_dynamic_paging(
        url_with_pagesize=(
            f"{model_catalog_rest_url[0]}sources/{catalog_id}/models/{model_name}/artifacts/performance?pageSize"
        ),
        headers=model_registry_rest_headers,
        page_size=100,
    )

    # Get recommendations-filtered performance artifacts.
    recommendations_results = fetch_all_artifacts_with_dynamic_paging(
        url_with_pagesize=(
            f"{model_catalog_rest_url[0]}sources/{catalog_id}/models/{model_name}/artifacts/performance?"
            f"recommendations=true&pageSize"
        ),
        headers=model_registry_rest_headers,
        page_size=100,
    )

    # Compare the artifact lists, not the paged-response dicts: the original
    # guard called len() on the response dicts themselves, which counts
    # top-level keys (always equal for both responses) rather than artifacts,
    # so the sanity check could never fire.
    full_items = full_results["items"]
    recommendation_items = recommendations_results["items"]

    if (full_items and not recommendation_items) or (len(recommendation_items) > len(full_items)):
        pytest.fail(f"Recommendations parameter functionality failed for model {model_name}")

    # Validate subset relationship: every recommended artifact must also be
    # present in the unfiltered result set.
    validation_passed = validate_recommendations_subset(
        full_artifacts=full_items,
        recommendations_artifacts=recommendation_items,
        model_name=model_name,
    )

    assert validation_passed, f"Recommendations subset validation failed for model {model_name}"
    LOGGER.info(f"Successfully validated recommendations parameter functionality for model {model_name}")

tests/model_registry/model_catalog/utils.py

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1272,3 +1272,47 @@ def get_metadata_from_catalog_pod(model_catalog_pod: Pod, model_name: str) -> di
12721272
except Exception as e:
12731273
LOGGER.error(f"Failed to read metadata.json for model '{model_name}': {e}")
12741274
raise
1275+
1276+
1277+
def validate_recommendations_subset(
    full_artifacts: list[dict[str, Any]], recommendations_artifacts: list[dict[str, Any]], model_name: str
) -> bool:
    """
    Validate that recommendations artifacts are a proper subset of all artifacts.

    Args:
        full_artifacts: All performance artifacts (recommendations=false)
        recommendations_artifacts: Filtered artifacts (recommendations=true)
        model_name: Model name for logging

    Returns:
        bool: True if validation passes

    Raises:
        AssertionError: If validation fails with descriptive message
    """
    LOGGER.info(f"Validating recommendations subset for model '{model_name}'")

    # Convert artifacts to comparable format (using artifact ID for comparison).
    # Artifacts without an "id" key are skipped — they cannot be compared.
    full_artifact_ids = {artifact.get("id") for artifact in full_artifacts if artifact.get("id")}
    recommendations_artifact_ids = {artifact.get("id") for artifact in recommendations_artifacts if artifact.get("id")}

    # Check subset relationship: all recommendation IDs should exist in full results
    missing_in_full = recommendations_artifact_ids - full_artifact_ids
    if missing_in_full:
        error_msg = (
            f"Model '{model_name}': Found {len(missing_in_full)} recommendation artifacts "
            f"that don't exist in full results: {missing_in_full}"
        )
        LOGGER.error(error_msg)
        raise AssertionError(error_msg)

    # BUG FIX: guard against ZeroDivisionError — an empty full_artifacts list
    # passes the subset check above (empty set is a subset of empty set), but
    # the percentage computation would divide by zero.
    if full_artifacts:
        subset_percentage = (len(recommendations_artifacts) / len(full_artifacts)) * 100
        LOGGER.info(
            f"Model '{model_name}': Recommendations validation passed - "
            f"{len(recommendations_artifacts)}/{len(full_artifacts)} artifacts "
            f"({subset_percentage:.1f}% of total)"
        )
    else:
        LOGGER.info(f"Model '{model_name}': Recommendations validation passed - no performance artifacts returned")

    return True

0 commit comments

Comments
 (0)