|
| 1 | +import pytest |
| 2 | +from typing import Self, Any |
| 3 | +import random |
| 4 | +from ocp_resources.config_map import ConfigMap |
| 5 | +from tests.model_registry.model_catalog.utils import ( |
| 6 | + fetch_all_artifacts_with_dynamic_paging, |
| 7 | + validate_model_artifacts_match_criteria_and, |
| 8 | + validate_model_artifacts_match_criteria_or, |
| 9 | +) |
| 10 | +from tests.model_registry.model_catalog.constants import ( |
| 11 | + VALIDATED_CATALOG_ID, |
| 12 | +) |
| 13 | +from kubernetes.dynamic.exceptions import ResourceNotFoundError |
| 14 | +from simple_logger.logger import get_logger |
| 15 | + |
# Module-level logger for this test module.
LOGGER = get_logger(name=__name__)
# Apply the DSC component-state and model-registry-namespace fixtures to every test here.
pytestmark = [pytest.mark.usefixtures("updated_dsc_component_state_scope_session", "model_registry_namespace")]
# Candidate validated-catalog models for artifact-search tests; parametrizations below
# pick one at random per test via random.choice.
# NOTE(review): mixed-case "s" in the constant name — renaming to UPPER_SNAKE_CASE
# would require updating every usage in the test class below; left as-is here.
MODEL_NAMEs_ARTIFACT_SEARCH: list[str] = [
    "RedHatAI/Llama-3.1-8B-Instruct",
    "RedHatAI/Mistral-Small-3.1-24B-Instruct-2503-FP8-dynamic",
    "RedHatAI/Mistral-Small-3.1-24B-Instruct-2503-quantized.w4a16",
    "RedHatAI/Mistral-Small-3.1-24B-Instruct-2503-quantized.w8a8",
    "RedHatAI/Mixtral-8x7B-Instruct-v0.1",
]
| 25 | + |
| 26 | + |
class TestSearchArtifactsByFilterQuery:
    """Tests for the model-catalog artifact search endpoint's ``filterQuery`` parameter.

    Covers two areas:
      * rejection of malformed / type-mismatched filter queries (expected to raise), and
      * validation of artifacts returned for valid filter expressions, including
        combined predicates with AND/OR semantics.
    """

    @pytest.mark.parametrize(
        "randomly_picked_model_from_catalog_api_by_source, invalid_filter_query",
        [
            pytest.param(
                {"catalog_id": VALIDATED_CATALOG_ID, "header_type": "registry"},
                "fake IN ('test', 'fake'))",  # unbalanced closing parenthesis -> malformed
                id="test_invalid_artifact_filter_query_malformed",
            ),
            pytest.param(
                {"catalog_id": VALIDATED_CATALOG_ID, "header_type": "registry"},
                "ttft_p90.double_value < abc",  # non-numeric literal compared to a double
                id="test_invalid_artifact_filter_query_data_type_mismatch",
            ),
            pytest.param(
                {"catalog_id": VALIDATED_CATALOG_ID, "header_type": "registry"},
                "hardware_type.string_value = 5.0",  # numeric literal compared to a string
                id="test_invalid_artifact_filter_query_data_type_mismatch_equality",
            ),
        ],
        indirect=["randomly_picked_model_from_catalog_api_by_source"],
    )
    def test_search_artifacts_by_invalid_filter_query(
        self: Self,
        enabled_model_catalog_config_map: ConfigMap,
        model_catalog_rest_url: list[str],
        model_registry_rest_headers: dict[str, str],
        randomly_picked_model_from_catalog_api_by_source: tuple[dict[str, Any], str, str],
        invalid_filter_query: str,
    ) -> None:
        """
        Tests the API's response to invalid filter query syntax when searching artifacts.

        Verifies that an invalid filter query raises ResourceNotFoundError whose message
        contains "invalid filter query".
        """
        _, model_name, catalog_id = randomly_picked_model_from_catalog_api_by_source

        LOGGER.info(f"Testing invalid artifact filter query: '{invalid_filter_query}' for model: {model_name}")
        with pytest.raises(ResourceNotFoundError, match="invalid filter query"):
            # NOTE(review): the query is embedded in the URL unencoded; presumably the
            # paging helper (or the HTTP client) handles escaping — TODO confirm.
            fetch_all_artifacts_with_dynamic_paging(
                url_with_pagesize=(
                    f"{model_catalog_rest_url[0]}sources/{catalog_id}/models/{model_name}/artifacts?"
                    f"filterQuery={invalid_filter_query}&pageSize"
                ),
                headers=model_registry_rest_headers,
                page_size=1,
            )

        LOGGER.info(
            f"Successfully validated that invalid artifact filter query '{invalid_filter_query}' raises an error"
        )

    @pytest.mark.parametrize(
        "randomly_picked_model_from_catalog_api_by_source, filter_query, expected_value, logic_type",
        [
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "hardware_type.string_value = 'ABC-1234'",  # no artifact should match
                None,
                None,
                id="test_valid_artifact_filter_query_no_results",
            ),
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "requests_per_second.double_value > 15.0",
                [{"key_name": "requests_per_second", "key_type": "double_value", "comparison": "min", "value": 15.0}],
                "and",
                id="test_performance_min_filter",
            ),
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "hardware_count.int_value = 8",
                [{"key_name": "hardware_count", "key_type": "int_value", "comparison": "exact", "value": 8}],
                "and",
                id="test_hardware_exact_filter",
            ),
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "(hardware_type.string_value = 'H100') AND (ttft_p99.double_value < 200)",
                [
                    {"key_name": "hardware_type", "key_type": "string_value", "comparison": "exact", "value": "H100"},
                    # strict "< 200" expressed as an inclusive max of 199
                    {"key_name": "ttft_p99", "key_type": "double_value", "comparison": "max", "value": 199},
                ],
                "and",
                id="test_combined_hardware_performance_filter_and_operation",
            ),
            pytest.param(
                {
                    "catalog_id": VALIDATED_CATALOG_ID,
                    "header_type": "registry",
                    "model_name": random.choice(MODEL_NAMEs_ARTIFACT_SEARCH),
                },
                "(tps_mean.double_value <260) OR (hardware_type.string_value = 'A100-80')",
                [
                    {"key_name": "tps_mean", "key_type": "double_value", "comparison": "max", "value": 260},
                    {
                        "key_name": "hardware_type",
                        "key_type": "string_value",
                        "comparison": "exact",
                        "value": "A100-80",
                    },
                ],
                "or",
                id="performance_or_hardware_filter_or_operation",
            ),
        ],
        indirect=["randomly_picked_model_from_catalog_api_by_source"],
    )
    def test_filter_query_advanced_artifact_search(
        self: Self,
        enabled_model_catalog_config_map: ConfigMap,
        model_catalog_rest_url: list[str],
        model_registry_rest_headers: dict[str, str],
        randomly_picked_model_from_catalog_api_by_source: tuple[dict[str, Any], str, str],
        filter_query: str,
        expected_value: list[dict[str, Any]] | None,
        logic_type: str | None,
    ) -> None:
        """
        Advanced filter query test for artifact-based filtering with AND/OR logic.

        When ``expected_value`` is None the query is expected to match no artifacts.
        Otherwise every returned artifact is checked against ``expected_value`` using
        AND or OR semantics, selected by ``logic_type``.
        """
        _, model_name, catalog_id = randomly_picked_model_from_catalog_api_by_source

        # Fail fast on a bad parametrization before issuing any API call.
        if expected_value is not None and logic_type not in ("and", "or"):
            raise ValueError(f"Invalid logic_type: {logic_type}. Must be 'and' or 'or'")

        LOGGER.info(f"Testing artifact filter query: '{filter_query}' for model: {model_name}")

        result = fetch_all_artifacts_with_dynamic_paging(
            url_with_pagesize=(
                f"{model_catalog_rest_url[0]}sources/{catalog_id}/models/{model_name}/artifacts?"
                f"filterQuery={filter_query}&pageSize"
            ),
            headers=model_registry_rest_headers,
            page_size=100,
        )

        if expected_value is None:
            # Query should match nothing: both the item list and the reported size are empty.
            assert result["items"] == [], f"Filter query '{filter_query}' should return no results"
            assert result["size"] == 0, f"Size should be 0 for filter query '{filter_query}'"
            LOGGER.info(
                f"Successfully validated that filter query '{filter_query}' returns {len(result['items'])} artifacts"
            )
        else:
            # Advanced validation using criteria matching with AND/OR semantics.
            all_artifacts = result["items"]

            if logic_type == "and":
                validation_result = validate_model_artifacts_match_criteria_and(
                    all_model_artifacts=all_artifacts, expected_validations=expected_value, model_name=model_name
                )
            else:  # logic_type == "or" — guaranteed by the fail-fast guard above
                validation_result = validate_model_artifacts_match_criteria_or(
                    all_model_artifacts=all_artifacts, expected_validations=expected_value, model_name=model_name
                )

            if validation_result:
                LOGGER.info(
                    f"For Model: {model_name}, {logic_type} validation completed successfully"
                    f" for {len(all_artifacts)} artifacts"
                )
            else:
                pytest.fail(f"{logic_type} filter validation failed for model {model_name}")