Skip to content

Commit ec2f94d

Browse files
authored
Add new tests for filterQuery artifact endpoint (#913)
* Add new tests for filterQuery artifact endpoint
* Addressed review comments
* Add a list of models for pytest.param
1 parent be76ef3 commit ec2f94d

File tree

2 files changed

+237
-21
lines changed

2 files changed

+237
-21
lines changed

tests/model_registry/model_catalog/conftest.py

Lines changed: 31 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -180,42 +180,52 @@ def randomly_picked_model_from_catalog_api_by_source(
180180
model_registry_rest_headers: dict[str, str],
181181
request: pytest.FixtureRequest,
182182
) -> tuple[dict[Any, Any], str, str]:
183-
"""Pick a random model from a specific catalog (function-scoped for test isolation)
183+
"""
184+
Pick a random model from a specific catalog if a model name is not provided. If model name is provided, verify
185+
that it exists and is associated with a given catalog and return the same.
184186
185187
Supports parameterized headers via 'header_type':
186188
- 'user_token': Uses user_token_for_api_calls (default for user-specific tests)
187189
- 'registry': Uses model_registry_rest_headers (for catalog/registry tests)
190+
- 'model_name': Name of the model
188191
189-
Accepts 'catalog_id' or 'source' (alias) to specify the catalog.
192+
Accepts 'catalog_id' or 'source' (alias) to specify the catalog. Accepts 'model_name' to specify the model to
193+
look for.
190194
"""
191195
param = getattr(request, "param", {})
192196
# Support both 'catalog_id' and 'source' for backward compatibility
193197
catalog_id = param.get("catalog_id") or param.get("source", REDHAT_AI_CATALOG_ID)
194198
header_type = param.get("header_type", "user_token")
195-
199+
model_name = param.get("model_name")
200+
random_model = None
196201
# Select headers based on header_type
197202
if header_type == "registry":
198203
headers = model_registry_rest_headers
199204
else:
200205
headers = get_rest_headers(token=user_token_for_api_calls)
201-
202-
LOGGER.info(f"Picking random model from catalog: {catalog_id} with header_type: {header_type}")
203-
204-
models_response = execute_get_command(
205-
url=f"{model_catalog_rest_url[0]}models?source={catalog_id}&pageSize=100",
206-
headers=headers,
207-
)
208-
models = models_response.get("items", [])
209-
assert models, f"No models found for catalog: {catalog_id}"
210-
LOGGER.info(f"{len(models)} models found in catalog {catalog_id}")
211-
212-
random_model = random.choice(seq=models)
213-
214-
model_name = random_model.get("name")
215-
assert model_name, "Model name not found in random model"
216-
assert random_model.get("source_id") == catalog_id, f"Catalog ID (source_id) mismatch for model {model_name}"
217-
LOGGER.info(f"Testing model '{model_name}' from catalog '{catalog_id}'")
218-
206+
if not model_name:
207+
LOGGER.info(f"Picking random model from catalog: {catalog_id} with header_type: {header_type}")
208+
models_response = execute_get_command(
209+
url=f"{model_catalog_rest_url[0]}models?source={catalog_id}&pageSize=100",
210+
headers=headers,
211+
)
212+
models = models_response.get("items", [])
213+
assert models, f"No models found for catalog: {catalog_id}"
214+
LOGGER.info(f"{len(models)} models found in catalog {catalog_id}")
215+
random_model = random.choice(seq=models)
216+
model_name = random_model.get("name")
217+
assert model_name, "Model name not found in random model"
218+
assert random_model.get("source_id") == catalog_id, f"Catalog ID (source_id) mismatch for model {model_name}"
219+
LOGGER.info(f"Testing model '{model_name}' from catalog '{catalog_id}'")
220+
else:
221+
LOGGER.info(f"Looking for pre-selected model: {model_name} from catalog: {catalog_id}")
222+
# check if the model exists:
223+
random_model = execute_get_command(
224+
url=f"{model_catalog_rest_url[0]}sources/{catalog_id}/models/{model_name}",
225+
headers=headers,
226+
)
227+
assert random_model["source_id"] == catalog_id, f"Catalog ID (source_id) mismatch for model {model_name}"
228+
LOGGER.info(f"Using model '{model_name}' from catalog '{catalog_id}'")
219229
return random_model, model_name, catalog_id
220230

221231

Lines changed: 206 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,206 @@
1+
import pytest
2+
from typing import Self, Any
3+
import random
4+
from ocp_resources.config_map import ConfigMap
5+
from tests.model_registry.model_catalog.utils import (
6+
fetch_all_artifacts_with_dynamic_paging,
7+
validate_model_artifacts_match_criteria_and,
8+
validate_model_artifacts_match_criteria_or,
9+
)
10+
from tests.model_registry.model_catalog.constants import (
11+
VALIDATED_CATALOG_ID,
12+
)
13+
from kubernetes.dynamic.exceptions import ResourceNotFoundError
14+
from simple_logger.logger import get_logger
# Module-level logger for this test module.
LOGGER = get_logger(name=__name__)

# Apply these fixtures to every test in the module: enable the model-registry
# DSC component for the session and resolve the registry namespace.
pytestmark = [pytest.mark.usefixtures("updated_dsc_component_state_scope_session", "model_registry_namespace")]

# Candidate catalog models for the artifact filterQuery tests; each parametrized
# case picks one at random (at collection time, via random.choice).
# NOTE(review): mixed-case "NAMEs" is unconventional for a module constant —
# consider renaming to MODEL_NAMES_ARTIFACT_SEARCH (would need caller updates).
MODEL_NAMEs_ARTIFACT_SEARCH: list[str] = [
    "RedHatAI/Llama-3.1-8B-Instruct",
    "RedHatAI/Mistral-Small-3.1-24B-Instruct-2503-FP8-dynamic",
    "RedHatAI/Mistral-Small-3.1-24B-Instruct-2503-quantized.w4a16",
    "RedHatAI/Mistral-Small-3.1-24B-Instruct-2503-quantized.w8a8",
    "RedHatAI/Mixtral-8x7B-Instruct-v0.1",
]

class TestSearchArtifactsByFilterQuery:
    """
    Tests for the model-catalog artifact endpoint's `filterQuery` parameter.

    Covers both negative cases (malformed / type-mismatched queries must raise)
    and positive cases (AND/OR filter logic validated against returned artifacts).
    Each case resolves a model through the indirect
    `randomly_picked_model_from_catalog_api_by_source` fixture.
    """

    @pytest.mark.parametrize(
        "randomly_picked_model_from_catalog_api_by_source, invalid_filter_query",
        [
            # Unbalanced parenthesis -> syntactically malformed query.
            pytest.param(
                {"catalog_id": VALIDATED_CATALOG_ID, "header_type": "registry"},
                "fake IN ('test', 'fake'))",
                id="test_invalid_artifact_filter_query_malformed",
            ),
            # Comparing a double-typed property against a bare identifier.
            pytest.param(
                {"catalog_id": VALIDATED_CATALOG_ID, "header_type": "registry"},
                "ttft_p90.double_value < abc",
                id="test_invalid_artifact_filter_query_data_type_mismatch",
            ),
            # Comparing a string-typed property against a numeric literal.
            pytest.param(
                {"catalog_id": VALIDATED_CATALOG_ID, "header_type": "registry"},
                "hardware_type.string_value = 5.0",
                id="test_invalid_artifact_filter_query_data_type_mismatch_equality",
            ),
        ],
        indirect=["randomly_picked_model_from_catalog_api_by_source"],
    )
    def test_search_artifacts_by_invalid_filter_query(
        self: Self,
        enabled_model_catalog_config_map: ConfigMap,
        model_catalog_rest_url: list[str],
        model_registry_rest_headers: dict[str, str],
        randomly_picked_model_from_catalog_api_by_source: tuple[dict, str, str],
        invalid_filter_query: str,
    ) -> None:
        """
        Tests the API's response to invalid filter queries syntax when searching artifacts.
        It verifies that an invalid filter query syntax raises the correct error.
        """
        _, model_name, catalog_id = randomly_picked_model_from_catalog_api_by_source

        LOGGER.info(f"Testing invalid artifact filter query: '{invalid_filter_query}' for model: {model_name}")
        # NOTE(review): the query string is interpolated without URL-encoding
        # (spaces/quotes/parens) and the URL ends with a dangling "&pageSize" —
        # presumably the paging helper appends "=<page_size>"; confirm both.
        with pytest.raises(ResourceNotFoundError, match="invalid filter query"):
            fetch_all_artifacts_with_dynamic_paging(
                url_with_pagesize=(
                    f"{model_catalog_rest_url[0]}sources/{catalog_id}/models/{model_name}/artifacts?"
                    f"filterQuery={invalid_filter_query}&pageSize"
                ),
                headers=model_registry_rest_headers,
                page_size=1,
            )

        LOGGER.info(
            f"Successfully validated that invalid artifact filter query '{invalid_filter_query}' raises an error"
        )

    @pytest.mark.parametrize(
        "randomly_picked_model_from_catalog_api_by_source, filter_query, expected_value, logic_type",
        [
            # No artifact should match this hardware type -> empty result set.
            # NOTE: random.choice runs once at collection time per param, so
            # each case may target a different model from the candidate list.
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "hardware_type.string_value = 'ABC-1234'",
                None,
                None,
                id="test_valid_artifact_filter_query_no_results",
            ),
            # Single numeric lower-bound filter ("min" => value must exceed 15.0).
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "requests_per_second.double_value > 15.0",
                [{"key_name": "requests_per_second", "key_type": "double_value", "comparison": "min", "value": 15.0}],
                "and",
                id="test_performance_min_filter",
            ),
            # Single integer equality filter.
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "hardware_count.int_value = 8",
                [{"key_name": "hardware_count", "key_type": "int_value", "comparison": "exact", "value": 8}],
                "and",
                id="test_hardware_exact_filter",
            ),
            # Conjunction of a string equality and a strict upper bound.
            # NOTE(review): query uses "< 200" but the expected "max" value is
            # 199 — presumably "max" means inclusive (<=); confirm the helper's
            # comparison semantics.
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "(hardware_type.string_value = 'H100') AND (ttft_p99.double_value < 200)",
                [
                    {"key_name": "hardware_type", "key_type": "string_value", "comparison": "exact", "value": "H100"},
                    {"key_name": "ttft_p99", "key_type": "double_value", "comparison": "max", "value": 199},
                ],
                "and",
                id="test_combined_hardware_performance_filter_and_operation",
            ),
            # Disjunction: artifact matches if either criterion holds.
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "(tps_mean.double_value <260) OR (hardware_type.string_value = 'A100-80')",
                [
                    {"key_name": "tps_mean", "key_type": "double_value", "comparison": "max", "value": 260},
                    {
                        "key_name": "hardware_type",
                        "key_type": "string_value",
                        "comparison": "exact",
                        "value": "A100-80",
                    },
                ],
                "or",
                id="performance_or_hardware_filter_or_operation",
            ),
        ],
        indirect=["randomly_picked_model_from_catalog_api_by_source"],
    )
    def test_filter_query_advanced_artifact_search(
        self: Self,
        enabled_model_catalog_config_map: ConfigMap,
        model_catalog_rest_url: list[str],
        model_registry_rest_headers: dict[str, str],
        randomly_picked_model_from_catalog_api_by_source: tuple[dict, str, str],
        filter_query: str,
        expected_value: list[dict[str, Any]] | None,
        logic_type: str | None,
    ) -> None:
        """
        Advanced filter query test for artifact-based filtering with AND/OR logic.

        When `expected_value` is None the query is expected to match nothing;
        otherwise every returned artifact is checked against the criteria list
        using AND or OR semantics per `logic_type`.
        """
        _, model_name, catalog_id = randomly_picked_model_from_catalog_api_by_source

        LOGGER.info(f"Testing artifact filter query: '{filter_query}' for model: {model_name}")

        # Page through all matching artifacts for the resolved model.
        # NOTE(review): filter_query is not URL-encoded and the URL ends with a
        # bare "&pageSize" fragment — presumably completed by the helper; verify.
        result = fetch_all_artifacts_with_dynamic_paging(
            url_with_pagesize=(
                f"{model_catalog_rest_url[0]}sources/{catalog_id}/models/{model_name}/artifacts?"
                f"filterQuery={filter_query}&pageSize"
            ),
            headers=model_registry_rest_headers,
            page_size=100,
        )

        if expected_value is None:
            # Simple validation of length and size for basic filter queries
            # NOTE(review): assertion message says "should return valid results"
            # but the check expects an EMPTY result — message is misleading.
            assert result["items"] == [], f"Filter query '{filter_query}' should return valid results"
            assert result["size"] == 0, f"Size should be 0 for filter query '{filter_query}'"
            LOGGER.info(
                f"Successfully validated that filter query '{filter_query}' returns {len(result['items'])} artifacts"
            )
        else:
            # Advanced validation using criteria matching
            all_artifacts = result["items"]

            validation_result = None
            # Select validation function based on logic type
            if logic_type == "and":
                validation_result = validate_model_artifacts_match_criteria_and(
                    all_model_artifacts=all_artifacts, expected_validations=expected_value, model_name=model_name
                )
            elif logic_type == "or":
                validation_result = validate_model_artifacts_match_criteria_or(
                    all_model_artifacts=all_artifacts, expected_validations=expected_value, model_name=model_name
                )
            else:
                # Guard against a bad parametrization rather than silently passing.
                raise ValueError(f"Invalid logic_type: {logic_type}. Must be 'and' or 'or'")

            if validation_result:
                LOGGER.info(
                    f"For Model: {model_name}, {logic_type} validation completed successfully"
                    f" for {len(all_artifacts)} artifacts"
                )
            else:
                pytest.fail(f"{logic_type} filter validation failed for model {model_name}")

0 commit comments

Comments
 (0)