Skip to content

Commit 733a31d

Browse files
authored
test: remove schema validation tests moved to upstream (#1070)
Removes schema structure validation tests that were moved upstream in kubeflow/model-registry#2149. Retains functional tests that verify data correctness against database state and metadata files.
1 parent b8055d0 commit 733a31d

File tree

4 files changed

+1
-324
lines changed

4 files changed

+1
-324
lines changed

tests/model_registry/model_catalog/metadata/test_custom_properties.py

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
from tests.model_registry.model_catalog.constants import VALIDATED_CATALOG_ID, REDHAT_AI_CATALOG_ID
88
from tests.model_registry.model_catalog.metadata.utils import (
99
extract_custom_property_values,
10-
validate_custom_properties_structure,
1110
validate_custom_properties_match_metadata,
1211
get_metadata_from_catalog_pod,
1312
)
@@ -27,21 +26,6 @@
2726
class TestCustomProperties:
2827
"""Test suite for validating custom properties in model catalog API"""
2928

30-
@pytest.mark.parametrize(
31-
"randomly_picked_model_from_catalog_api_by_source", [{"source": VALIDATED_CATALOG_ID}], indirect=True
32-
)
33-
def test_custom_properties_structure_is_valid(
34-
self,
35-
randomly_picked_model_from_catalog_api_by_source: tuple[dict[Any, Any], str, str],
36-
):
37-
"""Test that custom properties follow the expected MetadataStringValue structure."""
38-
model_data, model_name, catalog_id = randomly_picked_model_from_catalog_api_by_source
39-
40-
LOGGER.info(f"Testing custom properties structure for model '{model_name}' from catalog '{catalog_id}'")
41-
42-
custom_props = model_data.get("customProperties", {})
43-
assert validate_custom_properties_structure(custom_props)
44-
4529
@pytest.mark.parametrize(
4630
"randomly_picked_model_from_catalog_api_by_source", [{"source": VALIDATED_CATALOG_ID}], indirect=True
4731
)

tests/model_registry/model_catalog/metadata/test_filter_options_endpoint.py

Lines changed: 0 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
from typing import Self
33
from simple_logger.logger import get_logger
44
from tests.model_registry.model_catalog.metadata.utils import (
5-
validate_filter_options_structure,
65
compare_filter_options_with_database,
76
)
87
from tests.model_registry.model_catalog.utils import (
@@ -30,62 +29,6 @@ class TestFilterOptionsEndpoint:
3029
RHOAIENG-36696
3130
"""
3231

33-
@pytest.mark.parametrize(
34-
"user_token_for_api_calls,",
35-
[
36-
pytest.param(
37-
{},
38-
id="test_filter_options_admin_user",
39-
),
40-
pytest.param(
41-
{"user_type": "test"},
42-
id="test_filter_options_non_admin_user",
43-
),
44-
pytest.param(
45-
{"user_type": "sa_user"},
46-
id="test_filter_options_service_account",
47-
),
48-
],
49-
indirect=["user_token_for_api_calls"],
50-
)
51-
@pytest.mark.sanity
52-
def test_filter_options_endpoint_validation(
53-
self: Self,
54-
model_catalog_rest_url: list[str],
55-
user_token_for_api_calls: str,
56-
test_idp_user: UserTestSession,
57-
):
58-
"""
59-
Comprehensive test for filter_options endpoint.
60-
Validates all acceptance criteria:
61-
- A GET request returns a 200 OK response
62-
- Response includes filter options for string-based properties with values array containing distinct values
63-
- Response includes filter options for numeric properties with range object containing min/max values
64-
- Core properties are present (license, provider, tasks, validated_on)
65-
"""
66-
url = f"{model_catalog_rest_url[0]}models/filter_options"
67-
LOGGER.info(f"Testing filter_options endpoint: {url}")
68-
69-
# This will raise an exception if the status code is not 200/201 (validates acceptance criteria #1)
70-
response = execute_get_command(
71-
url=url,
72-
headers=get_rest_headers(token=user_token_for_api_calls),
73-
)
74-
75-
assert response is not None, "Filter options response should not be None"
76-
LOGGER.info("Filter options endpoint successfully returned 200 OK")
77-
78-
# Expected core properties based on current API response
79-
expected_properties = {"license", "provider", "tasks", "validated_on.array_value"}
80-
81-
# Comprehensive validation using single function (validates acceptance criteria #2, #3, #4)
82-
is_valid, errors = validate_filter_options_structure(response=response, expected_properties=expected_properties)
83-
assert is_valid, f"Filter options validation failed: {'; '.join(errors)}"
84-
85-
filters = response["filters"]
86-
LOGGER.info(f"Found {len(filters)} filter properties: {list(filters.keys())}")
87-
LOGGER.info("All filter options validation passed successfully")
88-
8932
# Cannot use non-admin user for this test as it cannot list the pods in the namespace
9033
@pytest.mark.parametrize(
9134
"user_token_for_api_calls,",

tests/model_registry/model_catalog/metadata/test_sources_endpoint.py

Lines changed: 1 addition & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44

55
from tests.model_registry.model_catalog.constants import REDHAT_AI_CATALOG_ID
66
from tests.model_registry.utils import execute_get_command
7-
from tests.model_registry.model_catalog.metadata.utils import validate_source_status
87

98
pytestmark = [pytest.mark.usefixtures("updated_dsc_component_state_scope_session", "model_registry_namespace")]
109

@@ -14,75 +13,12 @@
1413
class TestSourcesEndpoint:
1514
"""Test class for the model catalog sources endpoint."""
1615

17-
@pytest.mark.smoke
18-
def test_available_source_status(
19-
self,
20-
model_catalog_rest_url: list[str],
21-
model_registry_rest_headers: dict[str, str],
22-
):
23-
"""
24-
RHOAIENG-41849: Test that the sources endpoint returns no error for available sources.
25-
"""
26-
response = execute_get_command(url=f"{model_catalog_rest_url[0]}sources", headers=model_registry_rest_headers)
27-
items = response.get("items", [])
28-
assert items, "Sources not found"
29-
for item in items:
30-
validate_source_status(catalog=item, expected_status="available")
31-
error_value = item["error"]
32-
assert error_value is None or error_value == "", (
33-
f"Source '{item.get('id')}' should not have error, got: {error_value}"
34-
)
35-
36-
LOGGER.info(
37-
f"Available catalog verified - ID: {item.get('id')}, Status: {item.get('status')}, Error: {error_value}"
38-
)
39-
40-
@pytest.mark.parametrize(
41-
"sparse_override_catalog_source",
42-
[{"id": REDHAT_AI_CATALOG_ID, "field_name": "enabled", "field_value": False}],
43-
indirect=True,
44-
)
45-
def test_disabled_source_status(
46-
self,
47-
sparse_override_catalog_source: dict,
48-
model_catalog_rest_url: list[str],
49-
model_registry_rest_headers: dict[str, str],
50-
):
51-
"""
52-
RHOAIENG-41849:
53-
This test disables an existing catalog and verifies:
54-
- status field is "disabled"
55-
- error field is null or empty
56-
"""
57-
catalog_id = sparse_override_catalog_source["catalog_id"]
58-
59-
response = execute_get_command(url=f"{model_catalog_rest_url[0]}sources", headers=model_registry_rest_headers)
60-
items = response.get("items", [])
61-
62-
# Find the disabled catalog
63-
disabled_catalog = next((item for item in items if item.get("id") == catalog_id), None)
64-
assert disabled_catalog is not None, f"Disabled catalog '{catalog_id}' not found in sources"
65-
66-
# Validate status and error fields
67-
validate_source_status(catalog=disabled_catalog, expected_status="disabled")
68-
error_value = disabled_catalog["error"]
69-
assert error_value is None or error_value == "", (
70-
f"Source '{disabled_catalog.get('id')}' should not have error, got: {error_value}"
71-
)
72-
73-
LOGGER.info(
74-
"Disabled catalog verified - "
75-
f"ID: {disabled_catalog.get('id')}, "
76-
f"Status: {disabled_catalog.get('status')}, "
77-
f"Error: {error_value}"
78-
)
79-
8016
@pytest.mark.parametrize(
8117
"sparse_override_catalog_source",
8218
[{"id": REDHAT_AI_CATALOG_ID, "field_name": "enabled", "field_value": False}],
8319
indirect=True,
8420
)
85-
@pytest.mark.sanity
21+
@pytest.mark.smoke
8622
def test_sources_endpoint_returns_all_sources_regardless_of_enabled_field(
8723
self,
8824
sparse_override_catalog_source: dict,

tests/model_registry/model_catalog/metadata/utils.py

Lines changed: 0 additions & 186 deletions
Original file line numberDiff line numberDiff line change
@@ -244,52 +244,6 @@ def extract_custom_property_values(custom_properties: dict[str, Any]) -> dict[st
244244
return extracted
245245

246246

247-
def validate_custom_properties_structure(custom_properties: dict[str, Any]) -> bool:
248-
"""
249-
Validate that custom properties follow the expected MetadataStringValue structure.
250-
251-
Args:
252-
custom_properties: Dictionary of custom properties from API response
253-
254-
Returns:
255-
True if all custom properties have valid structure, False otherwise
256-
"""
257-
if not custom_properties:
258-
LOGGER.info("No custom properties found - structure validation skipped")
259-
return True
260-
261-
expected_keys = ["size", "tensor_type", "variant_group_id"]
262-
263-
for key in expected_keys:
264-
if key in custom_properties:
265-
prop_data = custom_properties[key]
266-
267-
if not isinstance(prop_data, dict):
268-
LOGGER.error(f"Custom property '{key}' is not a dictionary: {prop_data}")
269-
return False
270-
271-
if "metadataType" not in prop_data:
272-
LOGGER.error(f"Custom property '{key}' missing 'metadataType' field")
273-
return False
274-
275-
if prop_data.get("metadataType") != "MetadataStringValue":
276-
LOGGER.error(f"Custom property '{key}' has unexpected metadataType: {prop_data.get('metadataType')}")
277-
return False
278-
279-
if "string_value" not in prop_data:
280-
LOGGER.error(f"Custom property '{key}' missing 'string_value' field")
281-
return False
282-
283-
if not isinstance(prop_data.get("string_value"), str):
284-
LOGGER.error(f"Custom property '{key}' string_value is not a string: {prop_data.get('string_value')}")
285-
return False
286-
287-
LOGGER.info(f"Custom property '{key}' has valid structure: '{prop_data.get('string_value')}'")
288-
289-
LOGGER.info("All custom properties have valid structure")
290-
return True
291-
292-
293247
def validate_custom_properties_match_metadata(api_custom_properties: dict[str, str], metadata: dict[str, Any]) -> bool:
294248
"""
295249
Compare API custom properties with metadata.json values.
@@ -345,130 +299,6 @@ def get_metadata_from_catalog_pod(model_catalog_pod: Pod, model_name: str) -> di
345299
raise
346300

347301

348-
def validate_filter_options_structure(
349-
response: dict[Any, Any], expected_properties: set[str] | None = None
350-
) -> Tuple[bool, List[str]]:
351-
"""
352-
Comprehensive validation of filter_options response structure.
353-
354-
Validates:
355-
- Top-level structure (filters object)
356-
- All property types and their required fields
357-
- Core properties presence (if specified)
358-
- String properties: type, values array, distinct values
359-
- Numeric properties: type, range object, min/max validity
360-
361-
Args:
362-
response: The API response to validate
363-
expected_properties: Optional set of core properties that must be present
364-
365-
Returns:
366-
Tuple of (is_valid, list_of_errors)
367-
"""
368-
errors = []
369-
370-
# Validate top-level structure
371-
if not isinstance(response, dict):
372-
errors.append("Response should be a dictionary")
373-
return False, errors
374-
375-
if "filters" not in response:
376-
errors.append("Response should contain 'filters' object")
377-
return False, errors
378-
379-
filters = response["filters"]
380-
if not isinstance(filters, dict):
381-
errors.append("Filters should be a dictionary")
382-
return False, errors
383-
384-
if not filters:
385-
errors.append("Filters object should not be empty")
386-
return False, errors
387-
388-
# Validate expected core properties if specified
389-
if expected_properties:
390-
for prop in expected_properties:
391-
if prop not in filters:
392-
errors.append(f"Core property '{prop}' should be present in filter options")
393-
394-
# Validate each property structure
395-
for prop_name, prop_data in filters.items():
396-
if not isinstance(prop_data, dict):
397-
errors.append(f"Property '{prop_name}' should be a dictionary")
398-
continue
399-
400-
if "type" not in prop_data:
401-
errors.append(f"Property '{prop_name}' should have 'type' field")
402-
continue
403-
404-
prop_type = prop_data["type"]
405-
if not isinstance(prop_type, str) or not prop_type.strip():
406-
errors.append(f"Type for '{prop_name}' should be a non-empty string")
407-
continue
408-
409-
# Validate string properties
410-
if prop_type == "string":
411-
if "values" not in prop_data:
412-
errors.append(f"String property '{prop_name}' should have 'values' array")
413-
continue
414-
415-
values = prop_data["values"]
416-
if not isinstance(values, list):
417-
errors.append(f"Values for '{prop_name}' should be a list")
418-
continue
419-
420-
if not values:
421-
errors.append(f"Values array for '{prop_name}' should not be empty")
422-
continue
423-
424-
# Validate individual values
425-
for i, value in enumerate(values):
426-
if not isinstance(value, str):
427-
errors.append(f"Value at index {i} for '{prop_name}' should be string, got: {type(value)}")
428-
elif not value.strip():
429-
errors.append(f"Value at index {i} for '{prop_name}' should not be empty or whitespace")
430-
431-
# Check for distinct values (no duplicates)
432-
try:
433-
if len(values) != len(set(values)):
434-
errors.append(f"Values for '{prop_name}' should be distinct (found duplicates)")
435-
except TypeError:
436-
errors.append(f"Values for '{prop_name}' should be a list of strings, found unhashable type")
437-
438-
# Validate numeric properties - checking multiple type names since we don't know what the API will return
439-
elif prop_type in ["number", "numeric", "float", "integer", "int"]:
440-
if "range" not in prop_data:
441-
errors.append(f"Numeric property '{prop_name}' should have 'range' object")
442-
continue
443-
444-
range_obj = prop_data["range"]
445-
if not isinstance(range_obj, dict):
446-
errors.append(f"Range for '{prop_name}' should be a dictionary")
447-
continue
448-
449-
# Check min/max presence
450-
if "min" not in range_obj:
451-
errors.append(f"Range for '{prop_name}' should have 'min' value")
452-
if "max" not in range_obj:
453-
errors.append(f"Range for '{prop_name}' should have 'max' value")
454-
455-
if "min" in range_obj and "max" in range_obj:
456-
min_val = range_obj["min"]
457-
max_val = range_obj["max"]
458-
459-
# Validate min/max are numeric
460-
if not isinstance(min_val, (int, float)):
461-
errors.append(f"Min value for '{prop_name}' should be numeric, got: {type(min_val)}")
462-
if not isinstance(max_val, (int, float)):
463-
errors.append(f"Max value for '{prop_name}' should be numeric, got: {type(max_val)}")
464-
465-
# Validate logical relationship (min <= max)
466-
if isinstance(min_val, (int, float)) and isinstance(max_val, (int, float)) and min_val > max_val:
467-
errors.append(f"Min value ({min_val}) should be <= max value ({max_val}) for '{prop_name}'")
468-
469-
return len(errors) == 0, errors
470-
471-
472302
def compare_filter_options_with_database(
473303
api_filters: dict[str, Any], db_properties: dict[str, list[str]], excluded_fields: set[str]
474304
) -> Tuple[bool, List[str]]:
@@ -653,19 +483,3 @@ def verify_labels_match(expected_labels: List[Dict[str, Any]], api_labels: List[
653483
break
654484

655485
assert found, f"Expected label not found in API response: {expected_label}"
656-
657-
658-
def validate_source_status(catalog: dict[str, Any], expected_status: str) -> None:
659-
"""
660-
Validate the status field of a catalog source.
661-
662-
Args:
663-
catalog: The catalog source dictionary from API response
664-
expected_status: The expected status value (e.g., "available", "disabled", "error")
665-
666-
Raises:
667-
AssertionError: If status field does not match expected value
668-
"""
669-
assert catalog.get("status") == expected_status, (
670-
f"Source '{catalog.get('id')}' status should be '{expected_status}', got: {catalog.get('status')}"
671-
)

0 commit comments

Comments
 (0)