Skip to content

Commit d7c2546

Browse files
authored
Merge branch 'main' into refactor-llmd
2 parents 5b254ee + a811bea commit d7c2546

File tree

54 files changed

+1790
-1217
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

54 files changed

+1790
-1217
lines changed

.pre-commit-config.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ minimum_pre_commit_version: 3.3.0
22
default_install_hook_types: [pre-commit, commit-msg]
33

44
default_language_version:
5-
python: python3.13
5+
python: python3.14
66

77
repos:
88
- repo: https://github.com/pre-commit/pre-commit-hooks
@@ -36,7 +36,7 @@ repos:
3636
exclude: .*/__snapshots__/.*|.*-input\.json$|^semgrep\.yaml$
3737

3838
- repo: https://github.com/astral-sh/ruff-pre-commit
39-
rev: v0.15.4
39+
rev: v0.15.5
4040
hooks:
4141
- id: ruff
4242
- id: ruff-format

Dockerfile

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,19 @@
1-
FROM fedora:42
1+
FROM fedora:43
22

33
ARG USER=odh
44
ARG HOME=/home/$USER
55
ARG TESTS_DIR=$HOME/opendatahub-tests/
6-
ENV UV_PYTHON=python3.13
6+
ENV UV_PYTHON=python3.14
77
ENV UV_COMPILE_BYTECODE=1
88
ENV UV_NO_SYNC=1
99
ENV UV_NO_CACHE=1
1010

1111
ENV BIN_DIR="$HOME_DIR/.local/bin"
1212
ENV PATH="$PATH:$BIN_DIR"
1313

14-
# Install Python 3.13 and other dependencies using dnf
14+
# Install system dependencies using dnf
1515
RUN dnf update -y \
16-
&& dnf install -y python3.13 python3.13-pip ssh gnupg curl gpg wget vim httpd-tools rsync openssl openssl-devel\
16+
&& dnf install -y python3 python3-pip ssh gnupg curl gpg wget vim httpd-tools rsync openssl openssl-devel \
1717
&& dnf clean all \
1818
&& rm -rf /var/cache/dnf
1919

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ dev = [
3131
]
3232

3333
[project]
34-
requires-python = "==3.13.*"
34+
requires-python = "==3.14.*"
3535
name = "opendatahub-tests"
3636
version = "0.1.0"
3737
description = "Tests repository for Open Data Hub (ODH)"

pytest.ini

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,10 +10,11 @@ markers =
1010
parallel: marks tests that can run in parallel along with pytest-xdist
1111

1212
# CI
13-
smoke: Mark tests as smoke tests; covers core functionality of the product. Aims to ensure that the build is stable enough for further testing.
14-
sanity: Mark tests as sanity tests. Aims to verify that specific functionality is working as expected.
15-
tier1: Mark tests as tier1. Aims to cover frequently used functionality of the product and basic user flows.
16-
tier2: Mark tests as tier2. Aims to cover more advanced functionality of the product.
13+
smoke: Mark tests as smoke tests; very high critical priority tests. Covers core functionality of the product. Aims to ensure that the build is stable enough for further testing.
14+
sanity: <<DEPRECATION WARNING: to be superseded by tier1>> Mark tests as sanity tests. Aims to verify that specific functionality is working as expected.
15+
tier1: Mark tests as tier1. High-priority tests.
16+
tier2: Mark tests as tier2. Medium/low-priority positive tests.
17+
tier3: Mark tests as tier3. Negative and destructive tests.
1718
slow: Mark tests which take more than 10 minutes as slow tests.
1819
pre_upgrade: Mark tests which should be run before upgrading the product.
1920
post_upgrade: Mark tests which should be run after upgrading the product.

tests/conftest.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -676,6 +676,12 @@ def cluster_sanity_scope_session(
676676
dsc_resource: DataScienceCluster,
677677
junitxml_plugin: Callable[[str, object], None],
678678
) -> None:
679+
# Skip cluster sanity check when running tests that have cluster_health or operator_health markers
680+
selected_markers = {mark.name for item in request.session.items for mark in item.iter_markers()}
681+
if {"cluster_health", "operator_health"} & selected_markers:
682+
LOGGER.info("Skipping cluster sanity check because selected tests include cluster/operator health")
683+
return
684+
679685
verify_cluster_sanity(
680686
request=request,
681687
nodes=nodes,
@@ -829,7 +835,7 @@ def gpu_count_on_cluster(nodes: list[Any]) -> int:
829835
if key in allowed_exact or any(key.startswith(p) for p in allowed_prefixes):
830836
try:
831837
total_gpus += int(val)
832-
except (ValueError, TypeError):
838+
except (ValueError, TypeError):
833839
LOGGER.debug(f"Skipping non-integer allocatable for {key} on {node.name}: {val!r}")
834840
continue
835841
return total_gpus

tests/llama_stack/safety/test_trustyai_fms_provider.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
SECURE_SHIELD_ID: str = "secure_shield"
1010

1111

12+
@pytest.mark.tier1
1213
@pytest.mark.parametrize(
1314
"model_namespace, minio_pod, minio_data_connection, "
1415
"orchestrator_config, guardrails_orchestrator, llama_stack_server_config",

tests/model_explainability/guardrails/test_guardrails.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@
3838
LOGGER = get_logger(name=__name__)
3939

4040

41+
@pytest.mark.smoke
4142
@pytest.mark.parametrize(
4243
"model_namespace, orchestrator_config, guardrails_orchestrator",
4344
[
@@ -56,7 +57,6 @@
5657
],
5758
indirect=True,
5859
)
59-
@pytest.mark.smoke
6060
def test_validate_guardrails_orchestrator_images(
6161
model_namespace,
6262
orchestrator_config,
@@ -70,6 +70,7 @@ def test_validate_guardrails_orchestrator_images(
7070
validate_tai_component_images(pod=guardrails_orchestrator_pod, tai_operator_configmap=trustyai_operator_configmap)
7171

7272

73+
@pytest.mark.smoke
7374
@pytest.mark.parametrize(
7475
"model_namespace, orchestrator_config, guardrails_gateway_config, guardrails_orchestrator",
7576
[
@@ -115,7 +116,6 @@ def test_validate_guardrails_orchestrator_images(
115116
],
116117
indirect=True,
117118
)
118-
@pytest.mark.smoke
119119
@pytest.mark.rawdeployment
120120
@pytest.mark.usefixtures("patched_dsc_kserve_headed", "guardrails_gateway_config")
121121
class TestGuardrailsOrchestratorWithBuiltInDetectors:
@@ -212,6 +212,7 @@ def test_guardrails_builtin_detectors_negative_detection(
212212
)
213213

214214

215+
@pytest.mark.smoke
215216
@pytest.mark.parametrize(
216217
"model_namespace, orchestrator_config, guardrails_gateway_config,guardrails_orchestrator",
217218
[
@@ -421,6 +422,7 @@ def check_traces():
421422
indirect=True,
422423
)
423424
@pytest.mark.usefixtures("patched_dsc_kserve_headed")
425+
@pytest.mark.tier1
424426
@pytest.mark.rawdeployment
425427
class TestGuardrailsOrchestratorAutoConfig:
426428
"""
@@ -495,6 +497,7 @@ def test_guardrails_autoconfig_negative_detection(
495497
indirect=True,
496498
)
497499
@pytest.mark.usefixtures("patched_dsc_kserve_headed")
500+
@pytest.mark.tier2
498501
@pytest.mark.rawdeployment
499502
class TestGuardrailsOrchestratorAutoConfigWithGateway:
500503
"""

tests/model_explainability/lm_eval/test_lm_eval.py

Lines changed: 22 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,17 +28,14 @@
2828

2929

3030
@pytest.mark.skip_on_disconnected
31+
@pytest.mark.tier1
3132
@pytest.mark.parametrize(
3233
"model_namespace, lmevaljob_hf",
3334
[
3435
pytest.param(
3536
{"name": "test-lmeval-hf-tier1"},
3637
{"task_list": {"taskNames": TIER1_LMEVAL_TASKS}},
3738
),
38-
pytest.param(
39-
{"name": "test-lmeval-hf-tier2"},
40-
{"task_list": {"taskNames": TIER2_LMEVAL_TASKS}},
41-
),
4239
pytest.param(
4340
{"name": "test-lmeval-hf-custom-task"},
4441
CUSTOM_UNITXT_TASK_DATA,
@@ -58,6 +55,24 @@ def test_lmeval_huggingface_model(admin_client, model_namespace, lmevaljob_hf_po
5855
validate_lmeval_job_pod_and_logs(lmevaljob_pod=lmevaljob_hf_pod)
5956

6057

58+
@pytest.mark.skip_on_disconnected
59+
@pytest.mark.tier2
60+
@pytest.mark.parametrize(
61+
"model_namespace, lmevaljob_hf",
62+
[
63+
pytest.param(
64+
{"name": "test-lmeval-hf-tier2"},
65+
{"task_list": {"taskNames": TIER2_LMEVAL_TASKS}},
66+
),
67+
],
68+
indirect=True,
69+
)
70+
def test_lmeval_huggingface_model_tier2(admin_client, model_namespace, lmevaljob_hf_pod):
71+
"""Tests that verify running common evaluations (and a custom one) on a model pulled directly from HuggingFace.
72+
On each test we run a different evaluation task, limiting it to 0.5% of the questions on each eval."""
73+
validate_lmeval_job_pod_and_logs(lmevaljob_pod=lmevaljob_hf_pod)
74+
75+
6176
@pytest.mark.parametrize(
6277
"model_namespace, lmeval_data_downloader_pod, lmevaljob_local_offline",
6378
[
@@ -80,6 +95,7 @@ def test_lmeval_local_offline_builtin_tasks_flan_arceasy(
8095
validate_lmeval_job_pod_and_logs(lmevaljob_pod=lmevaljob_local_offline_pod)
8196

8297

98+
@pytest.mark.tier1
8399
@pytest.mark.parametrize(
84100
"model_namespace",
85101
[
@@ -95,6 +111,7 @@ def test_lmeval_vllm_emulator(admin_client, model_namespace, lmevaljob_vllm_emul
95111
validate_lmeval_job_pod_and_logs(lmevaljob_pod=lmevaljob_vllm_emulator_pod)
96112

97113

114+
@pytest.mark.tier1
98115
@pytest.mark.parametrize(
99116
"model_namespace, minio_data_connection",
100117
[
@@ -138,6 +155,7 @@ def test_verify_lmeval_pod_images(lmevaljob_s3_offline_pod, trustyai_operator_co
138155
)
139156

140157

158+
@pytest.mark.tier1
141159
@pytest.mark.parametrize(
142160
"model_namespace, oci_registry_pod_with_minio, lmeval_data_downloader_pod, lmevaljob_local_offline_oci",
143161
[

tests/model_explainability/trustyai_service/service/test_trustyai_service.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
from utilities.constants import MinIo
2323

2424

25+
@pytest.mark.tier1
2526
@pytest.mark.parametrize(
2627
"model_namespace",
2728
[
@@ -45,6 +46,7 @@ def test_trustyai_service_with_invalid_db_cert(
4546
)
4647

4748

49+
@pytest.mark.smoke
4850
@pytest.mark.parametrize(
4951
"model_namespace, trustyai_service",
5052
[
@@ -55,7 +57,6 @@ def test_trustyai_service_with_invalid_db_cert(
5557
],
5658
indirect=True,
5759
)
58-
@pytest.mark.smoke
5960
def test_validate_trustyai_service_image(
6061
admin_client,
6162
model_namespace: Namespace,
@@ -72,6 +73,7 @@ def test_validate_trustyai_service_image(
7273
)
7374

7475

76+
@pytest.mark.tier1
7577
@pytest.mark.parametrize(
7678
"model_namespace, minio_pod, minio_data_connection, trustyai_service",
7779
[

tests/model_registry/conftest.py

Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from typing import Any
55

66
import pytest
7+
import yaml
78
from kubernetes.dynamic import DynamicClient
89
from ocp_resources.config_map import ConfigMap
910
from ocp_resources.data_science_cluster import DataScienceCluster
@@ -25,18 +26,26 @@
2526
from tests.model_registry.constants import (
2627
DB_BASE_RESOURCES_NAME,
2728
DB_RESOURCE_NAME,
29+
DEFAULT_CUSTOM_MODEL_CATALOG,
2830
KUBERBACPROXY_STR,
2931
MR_INSTANCE_BASE_NAME,
3032
MR_INSTANCE_NAME,
3133
MR_OPERATOR_NAME,
3234
)
35+
from tests.model_registry.mcp_servers.constants import (
36+
MCP_CATALOG_API_PATH,
37+
MCP_CATALOG_SOURCE,
38+
MCP_SERVERS_YAML,
39+
)
3340
from tests.model_registry.utils import (
3441
generate_namespace_name,
3542
get_byoidc_user_credentials,
3643
get_model_registry_metadata_resources,
3744
get_model_registry_objects,
3845
get_rest_headers,
3946
wait_for_default_resource_cleanedup,
47+
wait_for_mcp_catalog_api,
48+
wait_for_model_catalog_pod_ready_after_deletion,
4049
)
4150
from utilities.constants import DscComponents, Labels
4251
from utilities.general import (
@@ -466,3 +475,54 @@ def model_catalog_routes(admin_client: DynamicClient, model_registry_namespace:
466475
return list(
467476
Route.get(namespace=model_registry_namespace, label_selector="component=model-catalog", client=admin_client)
468477
)
478+
479+
480+
@pytest.fixture(scope="class")
481+
def mcp_catalog_rest_urls(model_registry_namespace: str, model_catalog_routes: list[Route]) -> list[str]:
482+
"""Build MCP catalog REST URL from existing model catalog routes."""
483+
assert model_catalog_routes, f"Model catalog routes do not exist in {model_registry_namespace}"
484+
return [f"https://{route.instance.spec.host}:443{MCP_CATALOG_API_PATH}" for route in model_catalog_routes]
485+
486+
487+
@pytest.fixture(scope="class")
488+
def mcp_servers_configmap_patch(
489+
admin_client: DynamicClient,
490+
model_registry_namespace: str,
491+
mcp_catalog_rest_urls: list[str],
492+
model_registry_rest_headers: dict[str, str],
493+
) -> Generator[None]:
494+
"""
495+
Class-scoped fixture that patches the model-catalog-sources ConfigMap
496+
497+
Sets two keys in the ConfigMap data:
498+
- sources.yaml: catalog source definition pointing to the MCP servers YAML
499+
- mcp-servers.yaml: the actual MCP server definitions
500+
"""
501+
catalog_config_map = ConfigMap(
502+
name=DEFAULT_CUSTOM_MODEL_CATALOG,
503+
client=admin_client,
504+
namespace=model_registry_namespace,
505+
)
506+
507+
current_data = yaml.safe_load(catalog_config_map.instance.data.get("sources.yaml", "{}") or "{}")
508+
if "mcp_catalogs" not in current_data:
509+
current_data["mcp_catalogs"] = []
510+
current_data["mcp_catalogs"].append(MCP_CATALOG_SOURCE)
511+
512+
patches = {
513+
"data": {
514+
"sources.yaml": yaml.dump(current_data, default_flow_style=False),
515+
"mcp-servers.yaml": MCP_SERVERS_YAML,
516+
}
517+
}
518+
519+
with ResourceEditor(patches={catalog_config_map: patches}):
520+
wait_for_model_catalog_pod_ready_after_deletion(
521+
client=admin_client, model_registry_namespace=model_registry_namespace
522+
)
523+
wait_for_mcp_catalog_api(url=mcp_catalog_rest_urls[0], headers=model_registry_rest_headers)
524+
yield
525+
526+
wait_for_model_catalog_pod_ready_after_deletion(
527+
client=admin_client, model_registry_namespace=model_registry_namespace
528+
)

0 commit comments

Comments
 (0)