Skip to content

Commit efee474

Browse files
authored
Add tests for filter_options endpoint (#731)
* feat: Add tests for filter_options endpoint — Signed-off-by: lugi0 <lgiorgi@redhat.com>
* fix: adjust coderabbit review — Signed-off-by: lugi0 <lgiorgi@redhat.com>
* fix: implement review feedback, resolve conflict — Signed-off-by: lugi0 <lgiorgi@redhat.com>
* fix: add missing constant for tox — Signed-off-by: lugi0 <lgiorgi@redhat.com>
* fix: import for tox — Signed-off-by: lugi0 <lgiorgi@redhat.com>
1 parent 76407d9 commit efee474

File tree

4 files changed

+1061
-820
lines changed

4 files changed

+1061
-820
lines changed
Lines changed: 121 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,121 @@
1+
import pytest
2+
from typing import Self
3+
from simple_logger.logger import get_logger
4+
5+
from tests.model_registry.model_catalog.utils import execute_get_command, validate_filter_options_structure
6+
from tests.model_registry.utils import get_rest_headers
7+
from utilities.user_utils import UserTestSession
8+
9+
LOGGER = get_logger(name=__name__)
10+
11+
pytestmark = [
12+
pytest.mark.usefixtures("updated_dsc_component_state_scope_session", "model_registry_namespace", "original_user")
13+
]
14+
15+
16+
@pytest.mark.parametrize(
    # Fix: the argnames string previously had a stray trailing comma
    # ("user_token_for_api_calls,"), which only works because pytest drops
    # empty names when splitting; the clean form is unambiguous.
    "user_token_for_api_calls",
    [
        pytest.param(
            {},
            id="test_filter_options_admin_user",
        ),
        pytest.param(
            {"user_type": "test"},
            id="test_filter_options_non_admin_user",
        ),
        pytest.param(
            {"user_type": "sa_user"},
            id="test_filter_options_service_account",
        ),
    ],
    indirect=["user_token_for_api_calls"],
)
class TestFilterOptionsEndpoint:
    """
    Test class for validating the models/filter_options endpoint.

    Runs once per user type (admin, non-admin, service account) via the
    indirect ``user_token_for_api_calls`` fixture.

    RHOAIENG-36696
    """

    def test_filter_options_endpoint_validation(
        self: Self,
        model_catalog_rest_url: list[str],
        user_token_for_api_calls: str,
        test_idp_user: UserTestSession,
    ):
        """
        Comprehensive test for filter_options endpoint.

        Validates all acceptance criteria:
        - A GET request returns a 200 OK response
        - Response includes filter options for string-based properties with values array containing distinct values
        - Response includes filter options for numeric properties with range object containing min/max values
        - Core properties are present (license, provider, tasks, validated_on)
        """
        url = f"{model_catalog_rest_url[0]}models/filter_options"
        LOGGER.info(f"Testing filter_options endpoint: {url}")

        # This will raise an exception if the status code is not 200/201 (validates acceptance criteria #1)
        response = execute_get_command(
            url=url,
            headers=get_rest_headers(token=user_token_for_api_calls),
        )

        assert response is not None, "Filter options response should not be None"
        LOGGER.info("Filter options endpoint successfully returned 200 OK")

        # Expected core properties based on current API response
        expected_properties = {"license", "provider", "tasks", "validated_on"}

        # Comprehensive validation using single function (validates acceptance criteria #2, #3, #4)
        is_valid, errors = validate_filter_options_structure(response=response, expected_properties=expected_properties)
        assert is_valid, f"Filter options validation failed: {'; '.join(errors)}"

        filters = response["filters"]
        LOGGER.info(f"Found {len(filters)} filter properties: {list(filters.keys())}")
        LOGGER.info("All filter options validation passed successfully")

    @pytest.mark.skip(reason="TODO: Implement after investigating backend DB queries")
    def test_comprehensive_coverage_against_database(
        self: Self,
        model_catalog_rest_url: list[str],
        user_token_for_api_calls: str,
        test_idp_user: UserTestSession,
    ):
        """
        STUBBED: Validate filter options are comprehensive across all sources/models in DB.

        Acceptance Criteria: The returned options are comprehensive and not limited to a
        subset of models or a single source.

        TODO IMPLEMENTATION PLAN:
        1. Investigate backend endpoint logic:
           - Find the source code for /models/filter_options endpoint in kubeflow/model-registry
           - Understand what DB tables it queries (likely model/artifact tables)
           - Identify the exact SQL queries used to build filter values
           - Determine database schema and column names

        2. Replicate queries via pod shell:
           - Use get_model_catalog_pod() to access catalog pod
           - Execute psql commands via pod.execute()
           - Query same tables/columns the endpoint uses
           - Extract all distinct values for string properties: SELECT DISTINCT license FROM models;
           - Extract min/max ranges for numeric properties: SELECT MIN(metric), MAX(metric) FROM models;

        3. Compare results:
           - API response filter values should match DB query results exactly
           - Ensure no values are missing (comprehensive coverage)
           - Validate across all sources, not just one

        4. DB Access Pattern Example:
           catalog_pod = get_model_catalog_pod(client, namespace)[0]
           result = catalog_pod.execute(
               command=["psql", "-U", "catalog_user", "-d", "catalog_db", "-c", "SELECT DISTINCT license FROM models;"],
               container="catalog"
           )

        5. Implementation considerations:
           - Handle different data types (strings vs arrays like tasks)
           - Parse psql output correctly
           - Handle null/empty values
           - Ensure database connection credentials are available
        """
        # NOTE(review): redundant with the @pytest.mark.skip marker above (the body
        # never executes), kept as a defensive guard for when the marker is removed.
        pytest.skip("TODO: Implement comprehensive coverage validation after backend investigation")

tests/model_registry/model_catalog/test_model_catalog_negative.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
from ocp_resources.config_map import ConfigMap
88
from ocp_resources.pod import Pod
99
from ocp_resources.resource import ResourceEditor
10-
from tests.model_registry.constants import DEFAULT_MODEL_CATALOG_CFG
10+
from tests.model_registry.constants import DEFAULT_MODEL_CATALOG_CM
1111
from tests.model_registry.model_catalog.constants import DEFAULT_CATALOGS, CATALOG_CONTAINER
1212
from tests.model_registry.model_catalog.utils import validate_model_catalog_configmap_data, is_model_catalog_ready
1313
from tests.model_registry.utils import get_model_catalog_pod
@@ -102,7 +102,7 @@ class TestDefaultCatalogNegative:
102102
"model_catalog_config_map, modified_sources_yaml",
103103
[
104104
pytest.param(
105-
{"configmap_name": DEFAULT_MODEL_CATALOG_CFG},
105+
{"configmap_name": DEFAULT_MODEL_CATALOG_CM},
106106
"""
107107
catalogs:
108108
- name: Modified Catalog

tests/model_registry/model_catalog/utils.py

Lines changed: 125 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import json
2-
from typing import Any
2+
from typing import Any, Tuple, List
33
import time
44
import yaml
55

@@ -213,6 +213,130 @@ def _extract_properties_and_required(schema: dict[Any, Any]) -> tuple[set[str],
213213
return all_properties - excluded_fields, required_fields - excluded_fields
214214

215215

216+
def validate_filter_options_structure(
217+
response: dict[Any, Any], expected_properties: set[str] | None = None
218+
) -> Tuple[bool, List[str]]:
219+
"""
220+
Comprehensive validation of filter_options response structure.
221+
222+
Validates:
223+
- Top-level structure (filters object)
224+
- All property types and their required fields
225+
- Core properties presence (if specified)
226+
- String properties: type, values array, distinct values
227+
- Numeric properties: type, range object, min/max validity
228+
229+
Args:
230+
response: The API response to validate
231+
expected_properties: Optional set of core properties that must be present
232+
233+
Returns:
234+
Tuple of (is_valid, list_of_errors)
235+
"""
236+
errors = []
237+
238+
# Validate top-level structure
239+
if not isinstance(response, dict):
240+
errors.append("Response should be a dictionary")
241+
return False, errors
242+
243+
if "filters" not in response:
244+
errors.append("Response should contain 'filters' object")
245+
return False, errors
246+
247+
filters = response["filters"]
248+
if not isinstance(filters, dict):
249+
errors.append("Filters should be a dictionary")
250+
return False, errors
251+
252+
if not filters:
253+
errors.append("Filters object should not be empty")
254+
return False, errors
255+
256+
# Validate expected core properties if specified
257+
if expected_properties:
258+
for prop in expected_properties:
259+
if prop not in filters:
260+
errors.append(f"Core property '{prop}' should be present in filter options")
261+
262+
# Validate each property structure
263+
for prop_name, prop_data in filters.items():
264+
if not isinstance(prop_data, dict):
265+
errors.append(f"Property '{prop_name}' should be a dictionary")
266+
continue
267+
268+
if "type" not in prop_data:
269+
errors.append(f"Property '{prop_name}' should have 'type' field")
270+
continue
271+
272+
prop_type = prop_data["type"]
273+
if not isinstance(prop_type, str) or not prop_type.strip():
274+
errors.append(f"Type for '{prop_name}' should be a non-empty string")
275+
continue
276+
277+
# Validate string properties
278+
if prop_type == "string":
279+
if "values" not in prop_data:
280+
errors.append(f"String property '{prop_name}' should have 'values' array")
281+
continue
282+
283+
values = prop_data["values"]
284+
if not isinstance(values, list):
285+
errors.append(f"Values for '{prop_name}' should be a list")
286+
continue
287+
288+
if not values:
289+
errors.append(f"Values array for '{prop_name}' should not be empty")
290+
continue
291+
292+
# Validate individual values
293+
for i, value in enumerate(values):
294+
if not isinstance(value, str):
295+
errors.append(f"Value at index {i} for '{prop_name}' should be string, got: {type(value)}")
296+
elif not value.strip():
297+
errors.append(f"Value at index {i} for '{prop_name}' should not be empty or whitespace")
298+
299+
# Check for distinct values (no duplicates)
300+
try:
301+
if len(values) != len(set(values)):
302+
errors.append(f"Values for '{prop_name}' should be distinct (found duplicates)")
303+
except TypeError:
304+
errors.append(f"Values for '{prop_name}' should be a list of strings, found unhashable type")
305+
306+
# Validate numeric properties - checking multiple type names since we don't know what the API will return
307+
elif prop_type in ["number", "numeric", "float", "integer", "int"]:
308+
if "range" not in prop_data:
309+
errors.append(f"Numeric property '{prop_name}' should have 'range' object")
310+
continue
311+
312+
range_obj = prop_data["range"]
313+
if not isinstance(range_obj, dict):
314+
errors.append(f"Range for '{prop_name}' should be a dictionary")
315+
continue
316+
317+
# Check min/max presence
318+
if "min" not in range_obj:
319+
errors.append(f"Range for '{prop_name}' should have 'min' value")
320+
if "max" not in range_obj:
321+
errors.append(f"Range for '{prop_name}' should have 'max' value")
322+
323+
if "min" in range_obj and "max" in range_obj:
324+
min_val = range_obj["min"]
325+
max_val = range_obj["max"]
326+
327+
# Validate min/max are numeric
328+
if not isinstance(min_val, (int, float)):
329+
errors.append(f"Min value for '{prop_name}' should be numeric, got: {type(min_val)}")
330+
if not isinstance(max_val, (int, float)):
331+
errors.append(f"Max value for '{prop_name}' should be numeric, got: {type(max_val)}")
332+
333+
# Validate logical relationship (min <= max)
334+
if isinstance(min_val, (int, float)) and isinstance(max_val, (int, float)) and min_val > max_val:
335+
errors.append(f"Min value ({min_val}) should be <= max value ({max_val}) for '{prop_name}'")
336+
337+
return len(errors) == 0, errors
338+
339+
216340
def validate_model_catalog_configmap_data(configmap: ConfigMap, num_catalogs: int) -> None:
217341
"""
218342
Validate the model catalog configmap data.

0 commit comments

Comments
 (0)