Skip to content

Commit d0a3117

Browse files
committed
tests/fixtures/guardrails.py: replace early return with if/else in orchestrator_config_gpu fixture
Signed-off-by: Sandeep20013 <sandeepm20013@gmail.com>
1 parent 884c283 commit d0a3117

File tree

1 file changed: +40 additions, −41 deletions

tests/fixtures/guardrails.py

Lines changed: 40 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -226,7 +226,6 @@ def guardrails_orchestrator_gateway_route(
226226
ensure_exists=True,
227227
)
228228

229-
230229
@pytest.fixture(scope="class")
231230
def orchestrator_config_gpu(
232231
request: FixtureRequest,
@@ -266,51 +265,51 @@ def orchestrator_config_gpu(
266265
)
267266
yield cm
268267
cm.clean_up()
269-
return
270268

271-
param = getattr(request, "param", {}) or {}
269+
else:
270+
param = getattr(request, "param", {}) or {}
272271

273-
if param and param.get("orchestrator_config_data"):
274-
orchestrator_data = param["orchestrator_config_data"]
272+
if param and param.get("orchestrator_config_data"):
273+
orchestrator_data = param["orchestrator_config_data"]
275274

276-
else:
277-
# Decide detectors dynamically
278-
if param and param.get("use_builtin_detectors"):
279-
detectors = BUILTIN_DETECTOR_CONFIG
280275
else:
281-
detectors = {
282-
PROMPT_INJECTION_DETECTOR: {
283-
"type": "text_contents",
284-
"service": {
285-
"hostname": f"{PROMPT_INJECTION_DETECTOR}-predictor.{model_namespace.name}.svc.cluster.local",
286-
"port": 80,
276+
# Decide detectors dynamically
277+
if param and param.get("use_builtin_detectors"):
278+
detectors = BUILTIN_DETECTOR_CONFIG
279+
else:
280+
detectors = {
281+
PROMPT_INJECTION_DETECTOR: {
282+
"type": "text_contents",
283+
"service": {
284+
"hostname": f"{PROMPT_INJECTION_DETECTOR}-predictor.{model_namespace.name}.svc.cluster.local",
285+
"port": 80,
286+
},
287+
"chunker_id": "whole_doc_chunker",
288+
"default_threshold": 0.5,
287289
},
288-
"chunker_id": "whole_doc_chunker",
289-
"default_threshold": 0.5,
290-
},
291-
HAP_DETECTOR: {
292-
"type": "text_contents",
293-
"service": {
294-
"hostname": f"{HAP_DETECTOR}-predictor.{model_namespace.name}.svc.cluster.local",
295-
"port": 80,
290+
HAP_DETECTOR: {
291+
"type": "text_contents",
292+
"service": {
293+
"hostname": f"{HAP_DETECTOR}-predictor.{model_namespace.name}.svc.cluster.local",
294+
"port": 80,
295+
},
296+
"chunker_id": "whole_doc_chunker",
297+
"default_threshold": 0.5,
296298
},
297-
"chunker_id": "whole_doc_chunker",
298-
"default_threshold": 0.5,
299-
},
300-
}
299+
}
301300

302-
orchestrator_data = {
303-
"config.yaml": yaml.dump({
304-
"openai": get_vllm_chat_config(model_namespace.name),
305-
"detectors": detectors,
306-
})
307-
}
301+
orchestrator_data = {
302+
"config.yaml": yaml.dump({
303+
"openai": get_vllm_chat_config(model_namespace.name),
304+
"detectors": detectors,
305+
})
306+
}
308307

309-
with ConfigMap(
310-
client=admin_client,
311-
name="fms-orchestr8-config-nlp",
312-
namespace=model_namespace.name,
313-
data=orchestrator_data,
314-
teardown=teardown_resources,
315-
) as cm:
316-
yield cm
308+
with ConfigMap(
309+
client=admin_client,
310+
name="fms-orchestr8-config-nlp",
311+
namespace=model_namespace.name,
312+
data=orchestrator_data,
313+
teardown=teardown_resources,
314+
) as cm:
315+
yield cm

0 commit comments

Comments (0)