Skip to content

Commit c6e328c

Browse files
Edward Arthur Quarm Jnr and Raghul-M
authored and committed
feat(model-validation-v2-audio-feature): address coderabbit comments. other pr comments
1 parent 6249dd1 commit c6e328c

File tree

2 files changed

+20
-15
lines changed

2 files changed

+20
-15
lines changed

tests/model_serving/model_runtime/model_validation/conftest.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -152,7 +152,7 @@ def deployment_config(request: FixtureRequest) -> dict[str, Any]:
152152
config["runtime_argument"] = serving_argument
153153
config["deployment_type"] = deployment_type
154154
config["gpu_count"] = request.param.get("gpu_count", 1)
155-
config["model_output_type"] = request.param.get("model_output_type") or "text"
155+
config["model_output_type"] = request.param.get("model_output_type", "text")
156156
config["timeout"] = TIMEOUT_20MIN
157157
return config
158158

tests/model_serving/model_runtime/utils.py

Lines changed: 19 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ def run_raw_inference(
6060
endpoint: str,
6161
completion_query: list[dict[str, str]] = COMPLETION_QUERY,
6262
) -> tuple[Any, list[Any], list[Any]]:
63-
LOGGER.info(pod_name)
63+
LOGGER.info("audio_inference:start endpoint=%s pod=%s", endpoint, pod_name)
6464
with portforward.forward(
6565
pod_or_service=pod_name,
6666
namespace=isvc.namespace,
@@ -177,12 +177,12 @@ def validate_raw_openai_inference_request(
177177
raise NotSupportedError(f"Model output type {model_output_type} is not supported for raw inference request.")
178178

179179

180-
def download_audio_file(
181-
audio_file_url: str = AUDIO_FILE_URL,
182-
destination_path: str = "/tmp/harvard.wav",
183-
) -> None:
180+
def download_audio_file(audio_file_url: str = AUDIO_FILE_URL, destination_path: str = AUDIO_FILE_LOCAL_PATH) -> None:
184181
"""
185-
Download an audio file and save to file_path if it's missing or empty.
182+
Download an audio file and save to destination_path if it's missing or empty.
183+
184+
:param audio_file_url: The URL of the audio file to download.
185+
:param destination_path: The local path where the audio file should be saved.
186186
"""
187187
dir_ = os.path.dirname(destination_path)
188188
os.makedirs(dir_, exist_ok=True)
@@ -225,14 +225,19 @@ def validate_serverless_openai_inference_request(
225225
) -> None:
226226
if model_output_type == "audio":
227227
LOGGER.info("Running audio inference test")
228-
model_info, completion_responses = run_audio_inference(
229-
url=url,
230-
endpoint=OPENAI_ENDPOINT_NAME,
231-
model_name=model_name,
232-
)
233-
validate_audio_inference_output(model_info=model_info, completion_responses=completion_responses)
234-
if os.path.exists(AUDIO_FILE_LOCAL_PATH):
235-
os.remove(AUDIO_FILE_LOCAL_PATH)
228+
try:
229+
model_info, completion_responses = run_audio_inference(
230+
url=url,
231+
endpoint=OPENAI_ENDPOINT_NAME,
232+
model_name=model_name,
233+
)
234+
validate_audio_inference_output(model_info=model_info, completion_responses=completion_responses)
235+
finally:
236+
try:
237+
if os.path.exists(AUDIO_FILE_LOCAL_PATH):
238+
os.remove(AUDIO_FILE_LOCAL_PATH)
239+
except OSError as e:
240+
LOGGER.error("Error removing audio file: %s", e)
236241
return
237242
elif model_output_type == "text":
238243
model_info, completion_responses = fetch_openai_response(

0 commit comments

Comments (0)