|
3 | 3 |
|
4 | 4 | from tests.model_serving.model_server.utils import verify_inference_response |
5 | 5 | from utilities.constants import Annotations, Protocols |
6 | | -from utilities.inference_utils import Inference, UserInference |
| 6 | +from utilities.inference_utils import Inference |
7 | 7 | from utilities.infra import check_pod_status_in_time, get_pods_by_isvc_label |
8 | | -from utilities.jira import is_jira_open |
9 | 8 | from utilities.manifests.onnx import ONNX_INFERENCE_CONFIG |
10 | 9 |
|
11 | 10 | pytestmark = pytest.mark.usefixtures("valid_aws_config") |
@@ -50,14 +49,15 @@ def test_disabled_raw_model_authentication(self, patched_remove_raw_authenticati |
50 | 49 | ) |
51 | 50 |
|
52 | 51 | @pytest.mark.sanity |
53 | | - @pytest.mark.jira("RHOAIENG-19275", run=False) |
54 | 52 | def test_raw_disable_enable_authentication_no_pod_rollout(self, http_s3_ovms_raw_inference_service): |
55 | 53 | """Verify no pod rollout when disabling and enabling authentication""" |
56 | 54 | pod = get_pods_by_isvc_label( |
57 | 55 | client=http_s3_ovms_raw_inference_service.client, |
58 | 56 | isvc=http_s3_ovms_raw_inference_service, |
59 | 57 | )[0] |
60 | 58 |
|
61 | 61 | ResourceEditor( |
62 | 62 | patches={ |
63 | 63 | http_s3_ovms_raw_inference_service: { |
@@ -101,37 +101,15 @@ def test_re_enabled_raw_model_authentication(self, http_s3_ovms_raw_inference_se |
101 | 101 | ) |
102 | 102 | @pytest.mark.dependency(name="test_cross_model_authentication_raw") |
103 | 103 | def test_cross_model_authentication_raw( |
104 | | - self, http_s3_ovms_raw_inference_service_2, http_raw_inference_token, admin_client |
| 104 | + self, http_s3_ovms_raw_inference_service_2, http_raw_inference_token |
105 | 105 | ): |
106 | 106 | """Verify model with another model token""" |
107 | | - if is_jira_open(jira_id="RHOAIENG-19645", admin_client=admin_client): |
108 | | - inference = UserInference( |
109 | | - inference_service=http_s3_ovms_raw_inference_service_2, |
110 | | - inference_config=ONNX_INFERENCE_CONFIG, |
111 | | - inference_type=Inference.INFER, |
112 | | - protocol=Protocols.HTTPS, |
113 | | - ) |
114 | | - |
115 | | - res = inference.run_inference_flow( |
116 | | - model_name=http_s3_ovms_raw_inference_service_2.name, |
117 | | - use_default_query=True, |
118 | | - token=http_raw_inference_token, |
119 | | - insecure=False, |
120 | | - ) |
121 | | - output = res.get("output", res) |
122 | | - if isinstance(output, dict): |
123 | | - output = str(output) |
124 | | - status_line = output.splitlines()[0] |
125 | | - # Updated: Now expecting 403 Forbidden for cross-model authentication |
126 | | - # (token from service 1 cannot access service 2) |
127 | | - assert "403 Forbidden" in status_line, f"Expected '403 Forbidden' in status line, got: {status_line}" |
128 | | - else: |
129 | | - verify_inference_response( |
130 | | - inference_service=http_s3_ovms_raw_inference_service_2, |
131 | | - inference_config=ONNX_INFERENCE_CONFIG, |
132 | | - inference_type=Inference.INFER, |
133 | | - protocol=Protocols.HTTPS, |
134 | | - use_default_query=True, |
135 | | - token=http_raw_inference_token, |
136 | | - authorized_user=False, |
137 | | - ) |
| 107 | + verify_inference_response( |
| 108 | + inference_service=http_s3_ovms_raw_inference_service_2, |
| 109 | + inference_config=ONNX_INFERENCE_CONFIG, |
| 110 | + inference_type=Inference.INFER, |
| 111 | + protocol=Protocols.HTTPS, |
| 112 | + use_default_query=True, |
| 113 | + token=http_raw_inference_token, |
| 114 | + authorized_user=False, |
| 115 | + ) |
0 commit comments