|
18 | 18 | validate_search_results_against_database, |
19 | 19 | validate_filter_query_results_against_database, |
20 | 20 | validate_performance_data_files_on_pod, |
| 21 | + validate_model_artifacts_match_criteria_and, |
| 22 | + validate_model_artifacts_match_criteria_or, |
21 | 23 | ) |
22 | 24 | from tests.model_registry.utils import get_model_catalog_pod |
23 | 25 | from kubernetes.dynamic import DynamicClient |
24 | 26 | from kubernetes.dynamic.exceptions import ResourceNotFoundError |
25 | 27 |
|
LOGGER = get_logger(name=__name__)
# Module-wide fixtures: every test in this file requires the DSC component state
# and the model-registry namespace to be set up for the session.
pytestmark = [pytest.mark.usefixtures("updated_dsc_component_state_scope_session", "model_registry_namespace")]
30 | 30 |
|
31 | 31 |
|
32 | 32 | class TestSearchModelCatalog: |
@@ -597,3 +597,101 @@ def test_presence_performance_data_on_pod( |
597 | 597 |
|
598 | 598 | # Assert that all models have all required performance data files |
599 | 599 | assert not validation_results, f"Models with missing performance data files: {validation_results}" |
| 600 | + |
| 601 | + @pytest.mark.parametrize( |
| 602 | + "models_from_filter_query, expected_value, logic_type", |
| 603 | + [ |
| 604 | + pytest.param( |
| 605 | + "artifacts.requests_per_second > 15.0", |
| 606 | + [{"key_name": "requests_per_second", "key_type": "double_value", "comparison": "min", "value": 15.0}], |
| 607 | + "and", |
| 608 | + id="performance_min_filter", |
| 609 | + ), |
| 610 | + pytest.param( |
| 611 | + "artifacts.hardware_count = 8", |
| 612 | + [{"key_name": "hardware_count", "key_type": "int_value", "comparison": "exact", "value": 8}], |
| 613 | + "and", |
| 614 | + id="hardware_exact_filter", |
| 615 | + ), |
| 616 | + pytest.param( |
| 617 | + "(artifacts.hardware_type LIKE 'H200') AND (artifacts.ttft_p95 < 50)", |
| 618 | + [ |
| 619 | + {"key_name": "hardware_type", "key_type": "string_value", "comparison": "exact", "value": "H200"}, |
| 620 | + {"key_name": "ttft_p95", "key_type": "double_value", "comparison": "max", "value": 50}, |
| 621 | + ], |
| 622 | + "and", |
| 623 | + id="test_combined_hardware_performance_filter_mixed_types", |
| 624 | + ), |
| 625 | + pytest.param( |
| 626 | + "(artifacts.ttft_mean < 100) AND (artifacts.requests_per_second > 10)", |
| 627 | + [ |
| 628 | + {"key_name": "ttft_mean", "key_type": "double_value", "comparison": "max", "value": 100}, |
| 629 | + {"key_name": "requests_per_second", "key_type": "double_value", "comparison": "min", "value": 10}, |
| 630 | + ], |
| 631 | + "and", |
| 632 | + id="test_combined_hardware_performance_filter_numeric_types", |
| 633 | + ), |
| 634 | + pytest.param( |
| 635 | + "(artifacts.tps_mean < 247) OR (artifacts.hardware_type LIKE 'A100-80')", |
| 636 | + [ |
| 637 | + {"key_name": "tps_mean", "key_type": "double_value", "comparison": "max", "value": 247}, |
| 638 | + { |
| 639 | + "key_name": "hardware_type", |
| 640 | + "key_type": "string_value", |
| 641 | + "comparison": "exact", |
| 642 | + "value": "A100-80", |
| 643 | + }, |
| 644 | + ], |
| 645 | + "or", |
| 646 | + id="performance_or_hardware_filter", |
| 647 | + ), |
| 648 | + ], |
| 649 | + indirect=["models_from_filter_query"], |
| 650 | + ) |
| 651 | + def test_filter_query_advanced_model_search( |
| 652 | + self: Self, |
| 653 | + models_from_filter_query: list[str], |
| 654 | + expected_value: list[dict[str, Any]], |
| 655 | + logic_type: str, |
| 656 | + model_catalog_rest_url: list[str], |
| 657 | + model_registry_rest_headers: dict[str, str], |
| 658 | + ): |
| 659 | + """ |
| 660 | + RHOAIENG-39615: Advanced filter query test for performance-based filtering with AND/OR logic |
| 661 | + """ |
| 662 | + errors = [] |
| 663 | + |
| 664 | + # Additional validation: ensure returned models match the filter criteria |
| 665 | + for model_name in models_from_filter_query: |
| 666 | + url = f"{model_catalog_rest_url[0]}sources/{VALIDATED_CATALOG_ID}/models/{model_name}/artifacts?pageSize" |
| 667 | + LOGGER.info(f"Validating model: {model_name} with {len(expected_value)} {logic_type.upper()} validation(s)") |
| 668 | + |
| 669 | + # Fetch all artifacts with dynamic page size adjustment |
| 670 | + all_model_artifacts = fetch_all_artifacts_with_dynamic_paging( |
| 671 | + url_with_pagesize=url, |
| 672 | + headers=model_registry_rest_headers, |
| 673 | + page_size=200, |
| 674 | + )["items"] |
| 675 | + |
| 676 | + validation_result = None |
| 677 | + # Select validation function based on logic type |
| 678 | + if logic_type == "and": |
| 679 | + validation_result = validate_model_artifacts_match_criteria_and( |
| 680 | + all_model_artifacts=all_model_artifacts, expected_validations=expected_value, model_name=model_name |
| 681 | + ) |
| 682 | + elif logic_type == "or": |
| 683 | + validation_result = validate_model_artifacts_match_criteria_or( |
| 684 | + all_model_artifacts=all_model_artifacts, expected_validations=expected_value, model_name=model_name |
| 685 | + ) |
| 686 | + else: |
| 687 | + raise ValueError(f"Invalid logic_type: {logic_type}. Must be 'and' or 'or'") |
| 688 | + |
| 689 | + if validation_result: |
| 690 | + LOGGER.info(f"For Model: {model_name}, {logic_type.upper()} validation completed successfully") |
| 691 | + else: |
| 692 | + errors.append(model_name) |
| 693 | + |
| 694 | + assert not errors, f"{logic_type.upper()} filter validations failed for {', '.join(errors)}" |
| 695 | + LOGGER.info( |
| 696 | + f"Advanced {logic_type.upper()} filter validation completed for {len(models_from_filter_query)} models" |
| 697 | + ) |
0 commit comments