
Commit b84ba6b

Merge branch 'main' into push_container_image_on_merge
2 parents 25524fb + 9f88c00 commit b84ba6b

File tree: 28 files changed (+609, -153 lines)

.github/workflows/scripts/pr_workflow.py

Lines changed: 4 additions & 1 deletion
@@ -296,7 +296,10 @@ def issue_comment_label_actions(

     def add_welcome_comment_set_assignee(self) -> None:
         self.pr.create_issue_comment(body=WELCOME_COMMENT)
-        self.pr.add_to_assignees(self.pr.user.login)
+        try:
+            self.pr.add_to_assignees(self.pr.user.login)
+        except UnknownObjectException:
+            LOGGER.warning(f"User {self.pr.user.login} can not be assigned to the PR.")


 def main() -> None:
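For context: PyGithub raises UnknownObjectException (an HTTP 404) from add_to_assignees when the login cannot be assigned, typically because an external contributor is not a collaborator on the repository. A minimal standalone sketch of the same guard; the helper name, repo name, PR number, and token parameters are illustrative, not part of this commit:

import logging

from github import Github
from github.GithubException import UnknownObjectException

LOGGER = logging.getLogger(__name__)


def assign_pr_author(repo_name: str, pr_number: int, token: str) -> None:
    # Illustrative helper: fetch the pull request and try to assign its author.
    pr = Github(token).get_repo(repo_name).get_pull(pr_number)
    try:
        pr.add_to_assignees(pr.user.login)
    except UnknownObjectException:
        # GitHub answers 404 when the login is not assignable on this repo.
        LOGGER.warning(f"User {pr.user.login} can not be assigned to the PR.")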

tests/model_explainability/conftest.py

Lines changed: 4 additions & 5 deletions
@@ -8,8 +8,7 @@
 from ocp_resources.service import Service

 from tests.model_explainability.constants import MINIO, MINIO_PORT
-
-OPENDATAHUB_IO: str = "opendatahub.io"
+from utilities.constants import ApiGroups, Labels


 @pytest.fixture(scope="class")
@@ -43,11 +42,11 @@ def minio_data_connection(
         namespace=model_namespace.name,
         data_dict=request.param["data-dict"],
         label={
-            f"{OPENDATAHUB_IO}/dashboard": "true",
-            f"{OPENDATAHUB_IO}/managed": "true",
+            Labels.OpenDataHub.DASHBOARD: "true",
+            Labels.OpenDataHubIo.MANAGED: "true",
         },
         annotations={
-            f"{OPENDATAHUB_IO}/connection-type": "s3",
+            f"{ApiGroups.OPENDATAHUB_IO}/connection-type": "s3",
             "openshift.io/display-name": "Minio Data Connection",
         },
     ) as minio_secret:
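The module-level OPENDATAHUB_IO constant gives way to the shared helpers in utilities/constants.py. Their exact definitions are not part of this diff; a plausible sketch, consistent with how they are used above:

class ApiGroups:
    OPENDATAHUB_IO: str = "opendatahub.io"


class Labels:
    class OpenDataHub:
        DASHBOARD: str = f"{ApiGroups.OPENDATAHUB_IO}/dashboard"

    class OpenDataHubIo:
        MANAGED: str = f"{ApiGroups.OPENDATAHUB_IO}/managed"

Centralizing the label keys removes the per-conftest string duplication that the next two files also clean up.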

tests/model_explainability/trustyai_service/conftest.py

Lines changed: 0 additions & 1 deletion
@@ -27,7 +27,6 @@
 from utilities.infra import update_configmap_data

 MINIO: str = "minio"
-OPENDATAHUB_IO: str = "opendatahub.io"
 OPENSHIFT_OPERATORS: str = "openshift-operators"

 MARIADB: str = "mariadb"

tests/model_explainability/trustyai_service/drift/conftest.py

Lines changed: 4 additions & 4 deletions
@@ -13,7 +13,7 @@
 from tests.model_explainability.trustyai_service.trustyai_service_utils import (
     wait_for_isvc_deployment_registered_by_trustyai_service,
 )
-from utilities.constants import KServeDeploymentType, Ports, Timeout, Labels
+from utilities.constants import ApiGroups, KServeDeploymentType, Ports, Timeout, Labels
 from utilities.inference_utils import create_isvc

 MLSERVER: str = "mlserver"
@@ -62,9 +62,9 @@ def mlserver_runtime(
         supported_model_formats=supported_model_formats,
         protocol_versions=["v2"],
         annotations={
-            "opendatahub.io/accelerator-name": "",
-            "opendatahub.io/recommended-accelerators": '["nvidia.com/gpu"]',
-            "opendatahub.io/template-display-name": "KServe MLServer",
+            f"{ApiGroups.OPENDATAHUB_IO}/accelerator-name": "",
+            f"{ApiGroups.OPENDATAHUB_IO}/recommended-accelerators": '["nvidia.com/gpu"]',
+            f"{ApiGroups.OPENDATAHUB_IO}/template-display-name": "KServe MLServer",
             "prometheus.kserve.io/path": "/metrics",
             "prometheus.io/port": str(Ports.REST_PORT),
             "openshift.io/display-name": "mlserver-1.x",

tests/model_registry/conftest.py

Lines changed: 1 addition & 1 deletion
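The one-line change below repoints the MySQL image at public.ecr.aws/docker/library/, Amazon ECR Public's mirror of the Docker Hub library namespace. The image content is the same mysql:8.3.0; the switch presumably keeps CI pulls from counting against Docker Hub's anonymous pull-rate limits.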
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,7 @@ def model_registry_db_deployment(
186186
"/var/lib/mysql/datadir",
187187
"--default-authentication-plugin=mysql_native_password",
188188
],
189-
"image": "mysql:8.3.0",
189+
"image": "public.ecr.aws/docker/library/mysql:8.3.0",
190190
"imagePullPolicy": "IfNotPresent",
191191
"livenessProbe": {
192192
"exec": {

tests/model_serving/model_server/components/constants.py

Lines changed: 19 additions & 0 deletions

@@ -0,0 +1,19 @@
+from utilities.constants import (
+    KServeDeploymentType,
+    ModelAndFormat,
+    ModelFormat,
+    ModelInferenceRuntime,
+    ModelStoragePath,
+    ModelVersion,
+)
+
+KSERVE_RUNTIME_PARAMS = {
+    "runtime-name": ModelInferenceRuntime.OPENVINO_KSERVE_RUNTIME,
+    "model-format": {ModelAndFormat.OPENVINO_IR: ModelVersion.OPSET1},
+}
+SERVERLESS_ISVC_PARAMS = {
+    "name": ModelFormat.OPENVINO,
+    "model-version": ModelVersion.OPSET1,
+    "model-dir": ModelStoragePath.KSERVE_OPENVINO_EXAMPLE_MODEL,
+    "deployment-mode": KServeDeploymentType.SERVERLESS,
+}
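These dicts are shared fixture parameters that the co-existence test module below imports instead of redefining locally. Note that SERVERLESS_ISVC_PARAMS also adds an explicit "deployment-mode" key, where the old per-module KSERVE_ISVC_PARAMS copy left the mode to the default.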

tests/model_serving/model_server/components/kserve_dsc_deployment_mode/test_kserve_dsc_default_deployment_mode.py

Lines changed: 2 additions & 8 deletions
@@ -50,10 +50,7 @@ def test_isvc_contains_serverless_deployment_mode(
         ovms_inference_service,
     ):
         """Verify that default deployment mode is set to serverless in inference service."""
-        assert (
-            ovms_inference_service.instance.metadata.annotations[Annotations.KserveIo.DEPLOYMENT_MODE]
-            == KServeDeploymentType.SERVERLESS
-        )
+        assert ovms_inference_service.instance.status.deploymentMode == KServeDeploymentType.SERVERLESS

     def test_kserve_dsc_serverless_default_deployment_mode(
         self, default_deployment_mode_in_dsc, ovms_inference_service
@@ -83,10 +80,7 @@ def test_isvc_on_dsc_default_deployment_mode_change_to_raw(
         ovms_inference_service,
     ):
         """Verify that Serverless isvc not changed after dsc default deployment mode is changed to raw"""
-        assert (
-            ovms_inference_service.instance.metadata.annotations[Annotations.KserveIo.DEPLOYMENT_MODE]
-            == KServeDeploymentType.SERVERLESS
-        )
+        assert ovms_inference_service.instance.status.deploymentMode == KServeDeploymentType.SERVERLESS

     @pytest.mark.parametrize(
         "patched_default_deployment_mode_in_dsc",

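Both rewritten assertions read the effective mode from the InferenceService status rather than from the serving.kserve.io/deploymentMode annotation; presumably the status field reflects the mode the controller actually applied, so the check also holds for services that never carried an explicit annotation. A minimal sketch of the pattern, assuming an ocp_resources-style wrapper whose .instance exposes the live object:

def assert_deployment_mode(isvc, expected: str) -> None:
    # KServe publishes the applied mode in .status.deploymentMode.
    assert isvc.instance.status.deploymentMode == expected

# e.g. assert_deployment_mode(ovms_inference_service, KServeDeploymentType.SERVERLESS)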
tests/model_serving/model_server/components/model_mesh_kserve_co_exist/test_model_mesh_kserve_inference_co_exist.py

Lines changed: 11 additions & 23 deletions
@@ -1,62 +1,50 @@
 import pytest

+from tests.model_serving.model_server.components.constants import KSERVE_RUNTIME_PARAMS, SERVERLESS_ISVC_PARAMS
 from tests.model_serving.model_server.utils import verify_inference_response
 from utilities.constants import (
-    ModelAndFormat,
-    ModelFormat,
-    ModelInferenceRuntime,
     ModelStoragePath,
-    ModelVersion,
     Protocols,
 )
 from utilities.inference_utils import Inference
 from utilities.manifests.openvino import OPENVINO_INFERENCE_CONFIG, OPENVINO_KSERVE_INFERENCE_CONFIG

 pytestmark = [pytest.mark.serverless, pytest.mark.modelmesh, pytest.mark.sanity]

-KSERVE_RUNTIME_PARAMS = {
-    "runtime-name": ModelInferenceRuntime.OPENVINO_KSERVE_RUNTIME,
-    "model-format": {ModelAndFormat.OPENVINO_IR: ModelVersion.OPSET1},
-}
-KSERVE_ISVC_PARAMS = {
-    "name": ModelFormat.OPENVINO,
-    "model-version": ModelVersion.OPSET1,
-    "model-dir": ModelStoragePath.KSERVE_OPENVINO_EXAMPLE_MODEL,
-}
 MODELMESH_ISVC_PARAMS = {
     "model-path": ModelStoragePath.OPENVINO_EXAMPLE_MODEL,
     "modelmesh-enabled": True,
 }


 @pytest.mark.parametrize(
-    "model_namespace, openvino_kserve_serving_runtime, ovms_serverless_inference_service, "
+    "model_namespace, openvino_kserve_serving_runtime, ovms_kserve_inference_service, "
     "http_s3_openvino_model_mesh_inference_service",
     [
         pytest.param(
             {"name": "serverless-model-mesh-openvino", "modelmesh-enabled": True},
             KSERVE_RUNTIME_PARAMS,
-            KSERVE_ISVC_PARAMS,
+            SERVERLESS_ISVC_PARAMS,
             MODELMESH_ISVC_PARAMS,
         )
     ],
     indirect=True,
 )
 class TestOpenVINOServerlessModelMesh:
     def test_serverless_openvino_created_before_model_mesh_ns_rest_inference(
-        self, ovms_serverless_inference_service, http_s3_openvino_model_mesh_inference_service
+        self, ovms_kserve_inference_service, http_s3_openvino_model_mesh_inference_service
     ):
         """Verify that Serverless model can be queried when running with modelmesh inference service"""
         verify_inference_response(
-            inference_service=ovms_serverless_inference_service,
+            inference_service=ovms_kserve_inference_service,
             inference_config=OPENVINO_KSERVE_INFERENCE_CONFIG,
             inference_type=Inference.INFER,
             protocol=Protocols.HTTPS,
             use_default_query=True,
         )

     def test_model_mesh_openvino_created_after_serverless_in_namespace_rest_inference(
-        self, ovms_serverless_inference_service, http_s3_openvino_model_mesh_inference_service
+        self, ovms_kserve_inference_service, http_s3_openvino_model_mesh_inference_service
     ):
         """Verify that modelmesh model can be queried when running with kserve inference service"""
         verify_inference_response(
@@ -70,20 +58,20 @@ def test_model_mesh_openvino_created_after_serverless_in_namespace_rest_inferenc

 @pytest.mark.parametrize(
     "model_namespace, http_s3_openvino_model_mesh_inference_service, openvino_kserve_serving_runtime, "
-    "ovms_serverless_inference_service, ",
+    "ovms_kserve_inference_service, ",
     [
         pytest.param(
             {"name": "model-mesh-serverless-openvino", "modelmesh-enabled": True},
             MODELMESH_ISVC_PARAMS,
             KSERVE_RUNTIME_PARAMS,
-            KSERVE_ISVC_PARAMS,
+            SERVERLESS_ISVC_PARAMS,
         )
     ],
     indirect=True,
 )
 class TestOpenVINOModelMeshServerless:
     def test_model_mesh_openvino_created_before_serverless_in_namespace_rest_inference(
-        self, http_s3_openvino_model_mesh_inference_service, ovms_serverless_inference_service
+        self, http_s3_openvino_model_mesh_inference_service, ovms_kserve_inference_service
     ):
         """Verify that modelmesh model can be queried when running with kserve inference service"""
         verify_inference_response(
@@ -95,11 +83,11 @@ def test_serverless_openvino_created_after_model_mesh_ns_rest_inferen
     )

     def test_serverless_openvino_created_after_model_mesh_ns_rest_inference(
-        self, http_s3_openvino_model_mesh_inference_service, ovms_serverless_inference_service
+        self, http_s3_openvino_model_mesh_inference_service, ovms_kserve_inference_service
     ):
         """Verify that Serverless model can be queried when running with modelmesh inference service"""
         verify_inference_response(
-            inference_service=ovms_serverless_inference_service,
+            inference_service=ovms_kserve_inference_service,
             inference_config=OPENVINO_KSERVE_INFERENCE_CONFIG,
             inference_type=Inference.INFER,
             protocol=Protocols.HTTPS,

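Both test classes rely on pytest's indirect parametrization: each pytest.param entry is routed to the fixture named in the parametrize string, which receives it on request.param and builds the real resource. A minimal self-contained sketch of the mechanism (fixture name and values here are illustrative):

import pytest


@pytest.fixture
def inference_service(request):
    # With indirect=True the param value lands on request.param,
    # and the fixture turns it into the object the test receives.
    return f"isvc-{request.param['name']}"


@pytest.mark.parametrize(
    "inference_service",
    [pytest.param({"name": "openvino"})],
    indirect=True,
)
def test_inference(inference_service):
    assert inference_service == "isvc-openvino"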