Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions camel/agents/search_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,17 +75,17 @@ def summarize_text(self, text: str, query: str) -> str:
summary_prompt = summary_prompt.format(query=query)
# Max length of each chunk
max_len = 3000
results = ""
results = []
chunks = create_chunks(text, max_len)
# Summarize
for i, chunk in enumerate(chunks, start=1):
prompt = summary_prompt + str(i) + ": " + chunk
prompt = f"{summary_prompt}{i}: {chunk}"
user_msg = BaseMessage.make_user_message(
role_name="User",
content=prompt,
)
result = self.step(user_msg).msg.content
results += result + "\n"
results.append(result)

# Final summarization
final_prompt = TextPrompt(
Expand All @@ -95,7 +95,7 @@ def summarize_text(self, text: str, query: str) -> str:
explain why.\n Query:\n{query}.\n\nText:\n'''
)
final_prompt = final_prompt.format(query=query)
prompt = final_prompt + results
prompt = final_prompt + "\n".join(results)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
prompt = final_prompt + "\n".join(results)
prompt = final_prompt + "\n".join(results) + "\n"

This preserves the original behaviour


user_msg = BaseMessage.make_user_message(
role_name="User",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
# limitations under the License.
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
import json
import logging
import re
from typing import Any, Dict, List, Optional

Expand All @@ -23,6 +24,8 @@
FunctionCallFormatter,
)

logger = logging.getLogger(__name__)


class HermesToolResponse(ToolResponse):
r"""Represents a single tool/function call with validation"""
Expand Down Expand Up @@ -60,7 +63,7 @@ def extract_tool_calls(self, message: str) -> List[HermesToolCall]:
call_dict = json.loads(match.group(1).replace("'", '"'))
tool_calls.append(HermesToolCall.model_validate(call_dict))
except Exception as e:
print(f"Warning: Failed to parse tool call: {e}")
logger.warning(f"Failed to parse tool call: {e}")
continue

return tool_calls
Expand All @@ -87,7 +90,7 @@ def extract_tool_response(
response_dict = json.loads(response_json.replace("'", '"'))
return HermesToolResponse.model_validate(response_dict)
except Exception as e:
print(f"Warning: Failed to parse tool response: {e}")
logger.warning(f"Failed to parse tool response: {e}")
return None
return None

Expand Down
57 changes: 35 additions & 22 deletions camel/models/openai_audio_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -242,35 +242,48 @@ def speech_to_text(
audio_chunks = self._split_audio(audio_file_path)
texts = []
for chunk_path in audio_chunks:
audio_data = open(chunk_path, "rb")
with open(chunk_path, "rb") as audio_data:
if translate_into_english:
translation = (
self._client.audio.translations.create(
model="whisper-1",
file=audio_data,
**kwargs,
)
)
texts.append(translation.text)
else:
transcription = (
self._client.audio.transcriptions.create(
model="whisper-1",
file=audio_data,
**kwargs,
)
)
texts.append(transcription.text)
os.remove(chunk_path) # Delete temporary chunk file
return " ".join(texts)
else:
# Process the entire audio file
with open(audio_file_path, "rb") as audio_data:
if translate_into_english:
translation = self._client.audio.translations.create(
model="whisper-1", file=audio_data, **kwargs
translation = (
self._client.audio.translations.create(
model="whisper-1",
file=audio_data,
**kwargs,
)
)
texts.append(translation.text)
return translation.text
else:
transcription = (
self._client.audio.transcriptions.create(
model="whisper-1", file=audio_data, **kwargs
model="whisper-1",
file=audio_data,
**kwargs,
)
)
texts.append(transcription.text)
os.remove(chunk_path) # Delete temporary chunk file
return " ".join(texts)
else:
# Process the entire audio file
audio_data = open(audio_file_path, "rb")

if translate_into_english:
translation = self._client.audio.translations.create(
model="whisper-1", file=audio_data, **kwargs
)
return translation.text
else:
transcription = self._client.audio.transcriptions.create(
model="whisper-1", file=audio_data, **kwargs
)
return transcription.text
return transcription.text
except Exception as e:
raise Exception("Error during STT API call") from e

Expand Down
59 changes: 59 additions & 0 deletions test/messages/test_hermes_function_formatter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
import logging

from camel.messages.conversion.sharegpt.hermes.hermes_function_formatter import (
HermesFunctionFormatter,
)


def test_extract_tool_calls_invalid_json_logs_warning(caplog):
    r"""Malformed tool-call JSON must be skipped and reported through the
    logging framework (a warning record) rather than printed to stdout."""
    formatter = HermesFunctionFormatter()
    bad_message = "<tool_call>\n{invalid json}\n</tool_call>"

    with caplog.at_level(logging.WARNING):
        extracted = formatter.extract_tool_calls(bad_message)

    assert extracted == []
    assert "Failed to parse tool call" in caplog.text


def test_extract_tool_response_invalid_json_logs_warning(caplog):
    r"""Malformed tool-response JSON must yield ``None`` and be reported
    through the logging framework (a warning record), not via print."""
    formatter = HermesFunctionFormatter()
    bad_message = "<tool_response>\n{invalid json}\n</tool_response>"

    with caplog.at_level(logging.WARNING):
        parsed = formatter.extract_tool_response(bad_message)

    assert parsed is None
    assert "Failed to parse tool response" in caplog.text


def test_extract_tool_calls_valid_json():
    r"""A well-formed ``<tool_call>`` block is parsed into exactly one
    call with the expected function name and argument mapping."""
    formatter = HermesFunctionFormatter()
    payload = (
        '<tool_call>\n{"name": "add", "arguments": {"a": 1, "b": 2}}'
        "\n</tool_call>"
    )

    calls = formatter.extract_tool_calls(payload)

    assert len(calls) == 1
    assert calls[0].name == "add"
    assert calls[0].arguments == {"a": 1, "b": 2}
78 changes: 78 additions & 0 deletions test/models/test_openai_audio_resource_leak.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
import os
import tempfile
from unittest.mock import MagicMock, Mock, patch

from camel.models import OpenAIAudioModels


@patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"})
@patch("camel.models.openai_audio_models.OpenAI")
def test_speech_to_text_closes_file_handle(mock_openai_cls):
    r"""Test that speech_to_text properly closes file handles
    using context managers.

    The mocked client retains a reference to the file object passed to the
    transcription API, so after ``speech_to_text`` returns we can assert
    that the handle was actually closed — which is the regression this
    test exists to guard against.
    """
    mock_client = MagicMock()
    mock_openai_cls.return_value = mock_client
    mock_response = Mock()
    mock_response.text = "transcribed text"
    mock_client.audio.transcriptions.create.return_value = mock_response

    openai_audio = OpenAIAudioModels()

    # delete=False so the file survives the `with` and can be reopened
    # by speech_to_text; removed explicitly in the finally block.
    with tempfile.NamedTemporaryFile(
        suffix=".wav", delete=False
    ) as temp_file:
        temp_file.write(b"Test audio data")
        temp_file_path = temp_file.name

    try:
        result = openai_audio.speech_to_text(temp_file_path)
        assert result == "transcribed text"
        mock_client.audio.transcriptions.create.assert_called_once()

        # Asserting `is not None` alone does not prove the handle was
        # closed; verify closure on the exact object handed to the API.
        call_args = mock_client.audio.transcriptions.create.call_args
        file_arg = call_args.kwargs["file"]
        assert file_arg is not None
        assert file_arg.closed
    finally:
        os.remove(temp_file_path)


@patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"})
@patch("camel.models.openai_audio_models.OpenAI")
def test_speech_to_text_translate_closes_file_handle(mock_openai_cls):
    r"""Test that speech_to_text with translation properly closes
    file handles.

    The mocked client retains the file object passed to the translation
    API, so we assert after the call that the handle was closed — the
    original test never checked closure despite its name.
    """
    mock_client = MagicMock()
    mock_openai_cls.return_value = mock_client
    mock_response = Mock()
    mock_response.text = "translated text"
    mock_client.audio.translations.create.return_value = mock_response

    openai_audio = OpenAIAudioModels()

    # delete=False so the path remains valid after the `with` block;
    # cleaned up explicitly in the finally clause.
    with tempfile.NamedTemporaryFile(
        suffix=".wav", delete=False
    ) as temp_file:
        temp_file.write(b"Test audio data")
        temp_file_path = temp_file.name

    try:
        result = openai_audio.speech_to_text(
            temp_file_path, translate_into_english=True
        )
        assert result == "translated text"
        mock_client.audio.translations.create.assert_called_once()

        # Verify the handle handed to the translations API was closed,
        # matching what this test's name promises to check.
        call_args = mock_client.audio.translations.create.call_args
        assert call_args.kwargs["file"].closed
    finally:
        os.remove(temp_file_path)
Loading