diff --git a/camel/agents/search_agent.py b/camel/agents/search_agent.py
index 08109caca7..08d2e6c9e4 100644
--- a/camel/agents/search_agent.py
+++ b/camel/agents/search_agent.py
@@ -75,17 +75,17 @@ def summarize_text(self, text: str, query: str) -> str:
summary_prompt = summary_prompt.format(query=query)
# Max length of each chunk
max_len = 3000
- results = ""
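+ # Collect per-chunk summaries in a list; they are joined once after the loop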
+ results = []
chunks = create_chunks(text, max_len)
# Summarize
for i, chunk in enumerate(chunks, start=1):
- prompt = summary_prompt + str(i) + ": " + chunk
+ prompt = f"{summary_prompt}{i}: {chunk}"
user_msg = BaseMessage.make_user_message(
role_name="User",
content=prompt,
)
result = self.step(user_msg).msg.content
- results += result + "\n"
+ results.append(result)
# Final summarization
final_prompt = TextPrompt(
@@ -95,7 +95,7 @@ def summarize_text(self, text: str, query: str) -> str:
explain why.\n Query:\n{query}.\n\nText:\n'''
)
final_prompt = final_prompt.format(query=query)
- prompt = final_prompt + results
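+ # Re-assemble the chunk summaries into one block for the final summarization pass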
+ prompt = final_prompt + "\n".join(results)
user_msg = BaseMessage.make_user_message(
role_name="User",
diff --git a/camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py b/camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py
index fe48151f7d..c62b7897ae 100644
--- a/camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py
+++ b/camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py
@@ -12,6 +12,7 @@
# limitations under the License.
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
import json
+import logging
import re
from typing import Any, Dict, List, Optional
@@ -23,6 +24,8 @@
FunctionCallFormatter,
)
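+# Module-level logger so parse failures go through standard logging instead of print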
+logger = logging.getLogger(__name__)
+
class HermesToolResponse(ToolResponse):
r"""Represents a single tool/function call with validation"""
@@ -60,7 +63,7 @@ def extract_tool_calls(self, message: str) -> List[HermesToolCall]:
call_dict = json.loads(match.group(1).replace("'", '"'))
tool_calls.append(HermesToolCall.model_validate(call_dict))
except Exception as e:
- print(f"Warning: Failed to parse tool call: {e}")
+ logger.warning(f"Failed to parse tool call: {e}")
continue
return tool_calls
@@ -87,7 +90,7 @@ def extract_tool_response(
response_dict = json.loads(response_json.replace("'", '"'))
return HermesToolResponse.model_validate(response_dict)
except Exception as e:
- print(f"Warning: Failed to parse tool response: {e}")
+ logger.warning(f"Failed to parse tool response: {e}")
return None
return None
diff --git a/camel/models/openai_audio_models.py b/camel/models/openai_audio_models.py
index 033905c3ab..bc976fca99 100644
--- a/camel/models/openai_audio_models.py
+++ b/camel/models/openai_audio_models.py
@@ -242,35 +242,48 @@ def speech_to_text(
audio_chunks = self._split_audio(audio_file_path)
texts = []
for chunk_path in audio_chunks:
- audio_data = open(chunk_path, "rb")
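+ # The context manager closes the chunk handle even if the transcription call raises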
+ with open(chunk_path, "rb") as audio_data:
+ if translate_into_english:
+ translation = (
+ self._client.audio.translations.create(
+ model="whisper-1",
+ file=audio_data,
+ **kwargs,
+ )
+ )
+ texts.append(translation.text)
+ else:
+ transcription = (
+ self._client.audio.transcriptions.create(
+ model="whisper-1",
+ file=audio_data,
+ **kwargs,
+ )
+ )
+ texts.append(transcription.text)
+ os.remove(chunk_path) # Delete temporary chunk file
+ return " ".join(texts)
+ else:
+ # Process the entire audio file
+ with open(audio_file_path, "rb") as audio_data:
if translate_into_english:
- translation = self._client.audio.translations.create(
- model="whisper-1", file=audio_data, **kwargs
+ translation = (
+ self._client.audio.translations.create(
+ model="whisper-1",
+ file=audio_data,
+ **kwargs,
+ )
)
- texts.append(translation.text)
+ return translation.text
else:
transcription = (
self._client.audio.transcriptions.create(
- model="whisper-1", file=audio_data, **kwargs
+ model="whisper-1",
+ file=audio_data,
+ **kwargs,
)
)
- texts.append(transcription.text)
- os.remove(chunk_path) # Delete temporary chunk file
- return " ".join(texts)
- else:
- # Process the entire audio file
- audio_data = open(audio_file_path, "rb")
-
- if translate_into_english:
- translation = self._client.audio.translations.create(
- model="whisper-1", file=audio_data, **kwargs
- )
- return translation.text
- else:
- transcription = self._client.audio.transcriptions.create(
- model="whisper-1", file=audio_data, **kwargs
- )
- return transcription.text
+ return transcription.text
except Exception as e:
raise Exception("Error during STT API call") from e
diff --git a/test/messages/test_hermes_function_formatter.py b/test/messages/test_hermes_function_formatter.py
new file mode 100644
index 0000000000..2d4b5c60e8
--- /dev/null
+++ b/test/messages/test_hermes_function_formatter.py
@@ -0,0 +1,59 @@
+# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
+import logging
+
+from camel.messages.conversion.sharegpt.hermes.hermes_function_formatter import (
+ HermesFunctionFormatter,
+)
+
+
+def test_extract_tool_calls_invalid_json_logs_warning(caplog):
+ r"""Test that invalid tool call JSON triggers a warning log
+ instead of a print statement."""
+ formatter = HermesFunctionFormatter()
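+ # Malformed JSON wrapped in the <tool_call> tags the Hermes parser looks for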
+ message = "<tool_call>\n{invalid json}\n</tool_call>"
+
+ with caplog.at_level(logging.WARNING):
+ result = formatter.extract_tool_calls(message)
+
+ assert result == []
+ assert "Failed to parse tool call" in caplog.text
+
+
+def test_extract_tool_response_invalid_json_logs_warning(caplog):
+ r"""Test that invalid tool response JSON triggers a warning log
+ instead of a print statement."""
+ formatter = HermesFunctionFormatter()
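+ # Malformed JSON wrapped in the <tool_response> tags the Hermes parser looks for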
+ message = "<tool_response>\n{invalid json}\n</tool_response>"
+
+ with caplog.at_level(logging.WARNING):
+ result = formatter.extract_tool_response(message)
+
+ assert result is None
+ assert "Failed to parse tool response" in caplog.text
+
+
+def test_extract_tool_calls_valid_json():
+ r"""Test that valid tool calls are extracted correctly."""
+ formatter = HermesFunctionFormatter()
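+ # A well-formed Hermes-style call: a JSON payload inside <tool_call> tags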
+ message = (
+ '<tool_call>\n{"name": "add", "arguments": {"a": 1, "b": 2}}'
+ "\n</tool_call>"
+ )
+
+ result = formatter.extract_tool_calls(message)
+
+ assert len(result) == 1
+ assert result[0].name == "add"
+ assert result[0].arguments == {"a": 1, "b": 2}
diff --git a/test/models/test_openai_audio_resource_leak.py b/test/models/test_openai_audio_resource_leak.py
new file mode 100644
index 0000000000..f6047c1e7b
--- /dev/null
+++ b/test/models/test_openai_audio_resource_leak.py
@@ -0,0 +1,78 @@
+# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+import tempfile
+from unittest.mock import MagicMock, Mock, patch
+
+from camel.models import OpenAIAudioModels
+
+
+@patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"})
+@patch("camel.models.openai_audio_models.OpenAI")
+def test_speech_to_text_closes_file_handle(mock_openai_cls):
+ r"""Test that speech_to_text properly closes file handles
+ using context managers."""
+ mock_client = MagicMock()
+ mock_openai_cls.return_value = mock_client
+ mock_response = Mock()
+ mock_response.text = "transcribed text"
+ mock_client.audio.transcriptions.create.return_value = mock_response
+
+ openai_audio = OpenAIAudioModels()
+
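+ # A tiny .wav payload keeps speech_to_text on the single-file (non-chunked) path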
+ with tempfile.NamedTemporaryFile(
+ suffix=".wav", delete=False
+ ) as temp_file:
+ temp_file.write(b"Test audio data")
+ temp_file_path = temp_file.name
+
+ try:
+ result = openai_audio.speech_to_text(temp_file_path)
+ assert result == "transcribed text"
+ mock_client.audio.transcriptions.create.assert_called_once()
+
+ # The handle passed to the API was opened via a context manager,
+ # so it should already be closed once speech_to_text returns
+ call_args = mock_client.audio.transcriptions.create.call_args
+ assert call_args.kwargs["file"].closed
+ finally:
+ os.remove(temp_file_path)
+
+
+@patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"})
+@patch("camel.models.openai_audio_models.OpenAI")
+def test_speech_to_text_translate_closes_file_handle(mock_openai_cls):
+ r"""Test that speech_to_text with translation properly closes
+ file handles."""
+ mock_client = MagicMock()
+ mock_openai_cls.return_value = mock_client
+ mock_response = Mock()
+ mock_response.text = "translated text"
+ mock_client.audio.translations.create.return_value = mock_response
+
+ openai_audio = OpenAIAudioModels()
+
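+ # Small file again, so the whole-file translation branch is exercised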
+ with tempfile.NamedTemporaryFile(
+ suffix=".wav", delete=False
+ ) as temp_file:
+ temp_file.write(b"Test audio data")
+ temp_file_path = temp_file.name
+
+ try:
+ result = openai_audio.speech_to_text(
+ temp_file_path, translate_into_english=True
+ )
+ assert result == "translated text"
+ mock_client.audio.translations.create.assert_called_once()
+ finally:
+ os.remove(temp_file_path)