Skip to content

Commit d98a04a

Browse files
pre-commit-ci[bot] and Sandeep20013
authored and committed
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci Signed-off-by: Sandeep20013 <sandeepm20013@gmail.com>
1 parent 3916cc7 commit d98a04a

2 files changed

Lines changed: 5 additions & 6 deletions

File tree

tests/fixtures/guardrails.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -259,9 +259,8 @@ def orchestrator_config_gpu(
259259
"type": "text_contents",
260260
"service": {
261261
"hostname": (
262-
f"{PROMPT_INJECTION_DETECTOR}-predictor."
263-
f"{model_namespace.name}.svc.cluster.local"
264-
),
262+
f"{PROMPT_INJECTION_DETECTOR}-predictor.{model_namespace.name}.svc.cluster.local"
263+
),
265264
"port": 80,
266265
},
267266
"chunker_id": "whole_doc_chunker",

tests/fixtures/inference.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -263,8 +263,8 @@ def vllm_gpu_runtime(
263263
runtime_image=(
264264
"registry.redhat.io/rhaiis/vllm-cuda-rhel9@"
265265
"sha256:ec799bb5eeb7e25b4b25a8917ab5161da6b6f1ab830cbba61bba371cffb0c34d"
266-
),
267-
containers={
266+
),
267+
containers={
268268
"kserve-container": {
269269
"command": ["python", "-m", "vllm.entrypoints.openai.api_server"],
270270
"args": [
@@ -300,7 +300,7 @@ def qwen_gpu_isvc(
300300
storage_uri=(
301301
"oci://quay.io/trustyai_testing/models/qwen2.5-3b-instruct@"
302302
"sha256:6f9d9843599a9959de23c76d6b5adb556505482a7e732b2fcbca695a9c4ce545"
303-
),
303+
),
304304
enable_auth=False,
305305
wait_for_predictor_pods=True,
306306
resources={

0 commit comments

Comments (0)