Skip to content
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
342a5e4
Rough draft for adding tests for stopping and resuming an ISVC
hdefazio Jun 20, 2025
5f652d9
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 20, 2025
1ac50f0
RHOAIENG-19886-created raw stop_resume tests. Fixed stop_resume serve…
andresllh Jun 20, 2025
2d4dd0d
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 20, 2025
1faaafa
Update utilities/inference_utils.py
hdefazio Jun 20, 2025
b0d5fd6
RHOAIENG-19886-created raw stop_resume tests. Created fixture for pat…
andresllh Jun 25, 2025
662895f
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 25, 2025
bb18da3
Merge branch 'main' into feat/stop_resume_model
andresllh Jun 25, 2025
0fa989f
Merge branch 'main' into feat/stop_resume_model
hdefazio Jun 25, 2025
32a885e
RHOAIENG-19886-created raw stop_resume tests. Addressed Edgar's PR co…
andresllh Jun 27, 2025
c5fa6c0
Merge branch 'feat/stop_resume_model' of github.com:hdefazio/opendata…
andresllh Jun 27, 2025
b236e00
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 27, 2025
772c4de
Merge branch 'main' into feat/stop_resume_model
andresllh Jul 1, 2025
0184d7c
RHOAIENG-19886-created raw stop_resume tests. Addressed Brett and Edg…
andresllh Jul 1, 2025
76e5215
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 1, 2025
1e71542
RHOAIENG-19886-created raw stop_resume tests. Addressed Brett and Edg…
andresllh Jul 1, 2025
f7071fa
Merge branch 'feat/stop_resume_model' of github.com:hdefazio/opendata…
andresllh Jul 1, 2025
ba27c5c
RHOAIENG-19886-created raw stop_resume tests. Addressed Brett and Edg…
andresllh Jul 1, 2025
011d0b8
RHOAIENG-19886-created raw stop_resume tests. Addressed Milind and Br…
andresllh Jul 2, 2025
eb202d8
Merge branch 'main' into feat/stop_resume_model
andresllh Jul 2, 2025
800b496
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 2, 2025
edce9a7
RHOAIENG-19886-created raw stop_resume tests. Fix precommit issues.
andresllh Jul 2, 2025
1571e56
Merge branch 'feat/stop_resume_model' of github.com:hdefazio/opendata…
andresllh Jul 2, 2025
7f4867f
RHOAIENG-19886-created raw stop_resume tests. Cleaning up.
andresllh Jul 2, 2025
53cb343
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 2, 2025
85fb2f0
RHOAIENG-19886-created raw stop_resume tests. Reworked nested time sa…
andresllh Jul 2, 2025
69f8e4a
Merge branch 'feat/stop_resume_model' of github.com:hdefazio/opendata…
andresllh Jul 2, 2025
f9a3091
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 2, 2025
9ce8022
RHOAIENG-19886-created raw stop_resume tests. Fix precommit issues.
andresllh Jul 2, 2025
6fb0c3b
Merge branch 'feat/stop_resume_model' of github.com:hdefazio/opendata…
andresllh Jul 2, 2025
3af8532
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 2, 2025
861e86e
RHOAIENG-19886-created raw stop_resume tests. Fix precommit issues.
andresllh Jul 2, 2025
3344265
Merge branch 'feat/stop_resume_model' of github.com:hdefazio/opendata…
andresllh Jul 2, 2025
47889cd
Merge branch 'main' into feat/stop_resume_model
andresllh Jul 7, 2025
5ae2f5c
Merge branch 'main' into feat/stop_resume_model
andresllh Jul 7, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 20 additions & 12 deletions tests/model_serving/model_server/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -359,6 +359,9 @@ def ovms_kserve_inference_service(
if (scale_target := request.param.get("scale-target")) is not None:
isvc_kwargs["scale_target"] = scale_target

if (stop_resume := request.param.get("stop")) is not None:
isvc_kwargs["stop_resume"] = stop_resume

with create_isvc(**isvc_kwargs) as isvc:
yield isvc

Expand All @@ -371,18 +374,23 @@ def ovms_raw_inference_service(
ovms_kserve_serving_runtime: ServingRuntime,
ci_endpoint_s3_secret: Secret,
) -> Generator[InferenceService, Any, Any]:
with create_isvc(
client=unprivileged_client,
name=f"{request.param['name']}-raw",
namespace=unprivileged_model_namespace.name,
external_route=True,
runtime=ovms_kserve_serving_runtime.name,
storage_path=request.param["model-dir"],
storage_key=ci_endpoint_s3_secret.name,
model_format=ModelAndFormat.OPENVINO_IR,
deployment_mode=KServeDeploymentType.RAW_DEPLOYMENT,
model_version=request.param["model-version"],
) as isvc:
isvc_kwargs = {
"client": unprivileged_client,
"name": f"{request.param['name']}-raw",
"namespace": unprivileged_model_namespace.name,
"external_route": True,
"runtime": ovms_kserve_serving_runtime.name,
"storage_path": request.param["model-dir"],
"storage_key": ci_endpoint_s3_secret.name,
"model_format": ModelAndFormat.OPENVINO_IR,
"deployment_mode": KServeDeploymentType.RAW_DEPLOYMENT,
"model_version": request.param["model-version"],
}

if (stop_resume := request.param.get("stop")) is not None:
isvc_kwargs["stop_resume"] = stop_resume

with create_isvc(**isvc_kwargs) as isvc:
yield isvc


Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
import pytest

from tests.model_serving.model_server.utils import verify_inference_response
from utilities.constants import (
KServeDeploymentType,
ModelFormat,
ModelVersion,
Protocols,
RunTimeConfigs,
Annotations,
)
from utilities.inference_utils import Inference
from utilities.manifests.onnx import ONNX_INFERENCE_CONFIG
from ocp_resources.resource import ResourceEditor
from tests.model_serving.model_server.serverless.utils import verify_no_inference_pods

# NOTE(review): this module holds RAW-deployment stop/resume tests (both classes
# below are decorated @pytest.mark.rawdeployment), so the module-level marker
# should be rawdeployment as well. The previous `pytest.mark.serverless` mark
# appeared to be a copy-paste leftover from the serverless stop/resume module
# and would pull these raw tests into serverless-filtered runs — confirm with CI
# job selection before relying on marker-based filtering.
pytestmark = [pytest.mark.rawdeployment, pytest.mark.usefixtures("valid_aws_config")]


@pytest.mark.rawdeployment
@pytest.mark.parametrize(
    "unprivileged_model_namespace, ovms_kserve_serving_runtime, ovms_raw_inference_service",
    [
        pytest.param(
            {"name": "kserve-raw-stop-resume"},
            RunTimeConfigs.ONNX_OPSET13_RUNTIME_CONFIG,
            {
                "name": ModelFormat.ONNX,
                "model-version": ModelVersion.OPSET13,
                "model-dir": "test-dir",
                "deployment-mode": KServeDeploymentType.RAW_DEPLOYMENT,
                # NOTE(review): the value is propagated verbatim into the
                # serving.kserve.io/stop annotation; kserve annotation values are
                # conventionally lowercase "true"/"false" — confirm "False" is
                # treated as not-stopped by the controller.
                "stop": "False",
            },
        )
    ],
    indirect=True,
)
class TestStopRaw:
    """Stop a RUNNING raw-deployment InferenceService.

    The ISVC is created with the stop annotation set to "False" (pods roll out),
    then the annotation is flipped to "true" and the predictor pods must be removed.
    """

    @pytest.mark.smoke
    def test_raw_onnx_rest_inference(self, ovms_raw_inference_service):
        """Verify that kserve Raw ONNX model can be queried using REST."""
        verify_inference_response(
            inference_service=ovms_raw_inference_service,
            inference_config=ONNX_INFERENCE_CONFIG,
            inference_type=Inference.INFER,
            protocol=Protocols.HTTPS,
            use_default_query=True,
        )

    def test_stop_ann_update_to_true_delete_pod_rollout(self, unprivileged_client, ovms_raw_inference_service):
        """Verify pod rollout is deleted when the stop annotation is updated to true."""
        ResourceEditor(
            patches={
                ovms_raw_inference_service: {
                    "metadata": {
                        "annotations": {Annotations.KserveIo.FORCE_STOP_RUNTIME: "true"},
                    }
                }
            }
        ).update()

        # Was a bare string literal (a no-op expression statement), now a comment:
        # after the patch, no predictor pods may remain for this ISVC.
        verify_no_inference_pods(client=unprivileged_client, isvc=ovms_raw_inference_service)


@pytest.mark.rawdeployment
@pytest.mark.parametrize(
    "unprivileged_model_namespace, ovms_kserve_serving_runtime, ovms_raw_inference_service",
    [
        pytest.param(
            {"name": "kserve-raw-stop-resume"},
            RunTimeConfigs.ONNX_OPSET13_RUNTIME_CONFIG,
            {
                "name": ModelFormat.ONNX,
                "model-version": ModelVersion.OPSET13,
                "model-dir": "test-dir",
                "deployment-mode": KServeDeploymentType.RAW_DEPLOYMENT,
                # Created already stopped: the fixture sets the
                # serving.kserve.io/stop annotation to "True" so no pods roll out.
                "stop": "True",
            },
        )
    ],
    indirect=True,
)
class TestStoppedResumeRaw:
    """Resume a STOPPED raw-deployment InferenceService.

    The ISVC is created with the stop annotation set to "True" (no pods), then the
    annotation is flipped to "false" and the model must become queryable.
    """

    @pytest.mark.smoke
    def test_stop_ann_true_no_pod_rollout(self, unprivileged_client, ovms_raw_inference_service):
        """Verify no pod rollout when the stop annotation is true.

        The original body had a second, consecutive string literal here — a dead
        expression statement — now folded into this docstring.
        """
        verify_no_inference_pods(client=unprivileged_client, isvc=ovms_raw_inference_service)

    def test_stop_ann_update_to_false_pod_rollout(self, ovms_raw_inference_service):
        """Verify pod rollout when the stop annotation is updated to false."""
        ResourceEditor(
            patches={
                ovms_raw_inference_service: {
                    "metadata": {
                        "annotations": {Annotations.KserveIo.FORCE_STOP_RUNTIME: "false"},
                    }
                }
            }
        ).update()

        # Was a bare string literal (a no-op expression statement), now a comment:
        # once resumed, the Raw ONNX model must be queryable over REST.
        verify_inference_response(
            inference_service=ovms_raw_inference_service,
            inference_config=ONNX_INFERENCE_CONFIG,
            inference_type=Inference.INFER,
            protocol=Protocols.HTTPS,
            use_default_query=True,
        )
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
import pytest

from tests.model_serving.model_server.utils import verify_inference_response
from utilities.constants import (
KServeDeploymentType,
ModelFormat,
ModelVersion,
Protocols,
RunTimeConfigs,
Annotations,
)
from utilities.inference_utils import Inference
from utilities.manifests.onnx import ONNX_INFERENCE_CONFIG
from ocp_resources.resource import ResourceEditor
from tests.model_serving.model_server.serverless.utils import verify_no_inference_pods

pytestmark = [pytest.mark.serverless, pytest.mark.usefixtures("valid_aws_config")]


@pytest.mark.serverless
@pytest.mark.parametrize(
    "unprivileged_model_namespace, ovms_kserve_serving_runtime, ovms_kserve_inference_service",
    [
        pytest.param(
            {"name": "kserve-serverless-stop-resume"},
            RunTimeConfigs.ONNX_OPSET13_RUNTIME_CONFIG,
            {
                "name": ModelFormat.ONNX,
                "model-version": ModelVersion.OPSET13,
                "model-dir": "test-dir",
                "deployment-mode": KServeDeploymentType.SERVERLESS,
                # NOTE(review): the value is propagated verbatim into the
                # serving.kserve.io/stop annotation; kserve annotation values are
                # conventionally lowercase "true"/"false" — confirm "False" is
                # treated as not-stopped by the controller.
                "stop": "False",
            },
        )
    ],
    indirect=True,
)
class TestStopServerless:
    """Stop a RUNNING serverless InferenceService.

    The ISVC is created with the stop annotation set to "False" (pods roll out),
    then the annotation is flipped to "true" and the predictor pods must be removed.
    """

    @pytest.mark.smoke
    def test_serverless_onnx_rest_inference(self, ovms_kserve_inference_service):
        """Verify that kserve Serverless ONNX model can be queried using REST."""
        verify_inference_response(
            inference_service=ovms_kserve_inference_service,
            inference_config=ONNX_INFERENCE_CONFIG,
            inference_type=Inference.INFER,
            protocol=Protocols.HTTPS,
            use_default_query=True,
        )

    def test_stop_ann_update_to_true_delete_pod_rollout(self, unprivileged_client, ovms_kserve_inference_service):
        """Verify pod rollout is deleted when the stop annotation is updated to true."""
        ResourceEditor(
            patches={
                ovms_kserve_inference_service: {
                    "metadata": {
                        "annotations": {Annotations.KserveIo.FORCE_STOP_RUNTIME: "true"},
                    }
                }
            }
        ).update()

        # Was a bare string literal (a no-op expression statement), now a comment:
        # after the patch, no predictor pods may remain for this ISVC.
        verify_no_inference_pods(client=unprivileged_client, isvc=ovms_kserve_inference_service)


@pytest.mark.serverless
@pytest.mark.parametrize(
    "unprivileged_model_namespace, ovms_kserve_serving_runtime, ovms_kserve_inference_service",
    [
        pytest.param(
            {"name": "kserve-serverless-stop-resume"},
            RunTimeConfigs.ONNX_OPSET13_RUNTIME_CONFIG,
            {
                "name": ModelFormat.ONNX,
                "model-version": ModelVersion.OPSET13,
                "model-dir": "test-dir",
                "deployment-mode": KServeDeploymentType.SERVERLESS,
                # Created already stopped: the fixture sets the
                # serving.kserve.io/stop annotation to "True" so no pods roll out.
                "stop": "True",
            },
        )
    ],
    indirect=True,
)
class TestStoppedResumeServerless:
    """Resume a STOPPED serverless InferenceService.

    The ISVC is created with the stop annotation set to "True" (no pods), then the
    annotation is flipped to "false" and the model must become queryable.
    """

    @pytest.mark.smoke
    def test_stop_ann_true_no_pod_rollout(self, unprivileged_client, ovms_kserve_inference_service):
        """Verify no pod rollout when the stop annotation is true.

        The original body had a second, consecutive string literal here — a dead
        expression statement — now folded into this docstring.
        """
        verify_no_inference_pods(client=unprivileged_client, isvc=ovms_kserve_inference_service)

    def test_stop_ann_update_to_false_pod_rollout(self, ovms_kserve_inference_service):
        """Verify pod rollout when the stop annotation is updated to false."""
        ResourceEditor(
            patches={
                ovms_kserve_inference_service: {
                    "metadata": {
                        "annotations": {Annotations.KserveIo.FORCE_STOP_RUNTIME: "false"},
                    }
                }
            }
        ).update()

        # Was a bare string literal (a no-op expression statement), now a comment:
        # once resumed, the Serverless ONNX model must be queryable over REST.
        verify_inference_response(
            inference_service=ovms_kserve_inference_service,
            inference_config=ONNX_INFERENCE_CONFIG,
            inference_type=Inference.INFER,
            protocol=Protocols.HTTPS,
            use_default_query=True,
        )
4 changes: 3 additions & 1 deletion utilities/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@ class AcceleratorType:
class ApiGroups:
HAPROXY_ROUTER_OPENSHIFT_IO: str = "haproxy.router.openshift.io"
OPENDATAHUB_IO: str = "opendatahub.io"
KSERVE: str = "serving.kserve.io"


class Annotations:
Expand All @@ -130,7 +131,8 @@ class KubernetesIo:
CREATED_BY: str = f"{Resource.ApiGroup.APP_KUBERNETES_IO}/created-by"

class KserveIo:
DEPLOYMENT_MODE: str = "serving.kserve.io/deploymentMode"
DEPLOYMENT_MODE: str = f"{ApiGroups.KSERVE}/deploymentMode"
FORCE_STOP_RUNTIME: str = f"{ApiGroups.KSERVE}/stop"

class KserveAuth:
SECURITY: str = f"security.{ApiGroups.OPENDATAHUB_IO}/enable-auth"
Expand Down
9 changes: 7 additions & 2 deletions utilities/inference_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -568,6 +568,7 @@ def create_isvc(
model_version: str | None = None,
wait_for_predictor_pods: bool = True,
autoscaler_mode: str | None = None,
stop_resume: str | None = None,
multi_node_worker_spec: dict[str, int] | None = None,
timeout: int = Timeout.TIMEOUT_15MIN,
scale_metric: str | None = None,
Expand Down Expand Up @@ -686,6 +687,9 @@ def create_isvc(
if autoscaler_mode:
_annotations["serving.kserve.io/autoscalerClass"] = autoscaler_mode

if stop_resume:
_annotations["serving.kserve.io/stop"] = stop_resume

if multi_node_worker_spec is not None:
predictor_dict["workerSpec"] = multi_node_worker_spec

Expand All @@ -709,7 +713,8 @@ def create_isvc(
) as inference_service:
timeout_watch = TimeoutWatch(timeout=timeout)

if wait_for_predictor_pods:
# Skip waiting for pods if stop_resume is "True" since no pods should be created
if wait_for_predictor_pods and stop_resume != "True":
verify_no_failed_pods(
client=client,
isvc=inference_service,
Expand All @@ -723,7 +728,7 @@ def create_isvc(
timeout=timeout_watch.remaining_time(),
)

if wait:
if wait and stop_resume != "True":
# Modelmesh 2nd server in the ns will fail to be Ready; isvc needs to be re-applied
if deployment_mode == KServeDeploymentType.MODEL_MESH:
for isvc in InferenceService.get(dyn_client=client, namespace=namespace):
Expand Down