
Commit d1d664e

enhance: volcano reasoning handling (#3681)
1 parent d51cfa5 commit d1d664e

File tree

6 files changed: +256 -147 lines changed


.github/ISSUE_TEMPLATE/bug_report.yml

Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@ body:
     attributes:
       label: What version of camel are you using?
       description: Run command `python3 -c 'print(__import__("camel").__version__)'` in your shell and paste the output here.
-      placeholder: E.g., 0.2.83a5
+      placeholder: E.g., 0.2.83a6
     validations:
       required: true
camel/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.83a5'
+__version__ = '0.2.83a6'
 
 __all__ = [
     '__version__',
camel/models/volcano_model.py

Lines changed: 145 additions & 2 deletions

@@ -13,10 +13,14 @@
 # ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
+from openai import AsyncStream, Stream
+from pydantic import BaseModel
+
+from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
-from camel.types import ModelType
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
 from camel.utils import (
     BaseTokenCounter,
     api_keys_required,
@@ -85,3 +89,142 @@ def __init__(
             max_retries,
             **kwargs,
         )
+        # Store the last reasoning_content from model response
+        # This will be injected into the next request's assistant message
+        self._last_reasoning_content: Optional[str] = None
+
+    def _inject_reasoning_content(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> List[OpenAIMessage]:
+        r"""Inject the last reasoning_content into assistant messages.
+
+        For Volcano Engine's doubao-seed models with deep thinking enabled,
+        the reasoning_content from the model response needs to be passed back
+        in subsequent requests for proper context management.
+
+        Args:
+            messages: The original messages list.
+
+        Returns:
+            Messages with reasoning_content added to the last assistant
+            message that has tool_calls.
+        """
+        if not self._last_reasoning_content:
+            return messages
+
+        # Find the last assistant message with tool_calls and inject
+        # reasoning_content
+        processed: List[OpenAIMessage] = []
+        reasoning_injected = False
+
+        for msg in reversed(messages):
+            if (
+                not reasoning_injected
+                and isinstance(msg, dict)
+                and msg.get("role") == "assistant"
+                and msg.get("tool_calls")
+                and "reasoning_content" not in msg
+            ):
+                # Inject reasoning_content into this message
+                new_msg = dict(msg)
+                new_msg["reasoning_content"] = self._last_reasoning_content
+                processed.append(new_msg)  # type: ignore[arg-type]
+                reasoning_injected = True
+            else:
+                processed.append(msg)
+
+        # Only clear after successful injection
+        if reasoning_injected:
+            self._last_reasoning_content = None
+
+        return list(reversed(processed))
+
+    def _extract_reasoning_content(
+        self, response: ChatCompletion
+    ) -> Optional[str]:
+        r"""Extract reasoning_content from the model response.
+
+        Args:
+            response: The model response.
+
+        Returns:
+            The reasoning_content if available, None otherwise.
+        """
+        if response.choices:
+            return getattr(
+                response.choices[0].message, "reasoning_content", None
+            )
+        return None
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of Volcano Engine chat completion.
+
+        Overrides the base run method to inject reasoning_content from
+        previous responses into subsequent requests, as required by
+        Volcano Engine's doubao-seed models with deep thinking enabled.
+
+        Args:
+            messages: Message list with the chat history in OpenAI API format.
+            response_format: The format of the response.
+            tools: The schema of the tools to use for the request.
+
+        Returns:
+            ChatCompletion in the non-stream mode, or
+            Stream[ChatCompletionChunk] in the stream mode.
+        """
+        # Inject reasoning_content from previous response
+        processed_messages = self._inject_reasoning_content(messages)
+
+        # Call parent's run
+        response = super().run(processed_messages, response_format, tools)
+
+        # Extract and store reasoning_content for next request
+        if isinstance(response, ChatCompletion):
+            self._last_reasoning_content = self._extract_reasoning_content(
+                response
+            )
+
+        return response
+
+    async def arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs async inference of Volcano Engine chat completion.
+
+        Overrides the base arun method to inject reasoning_content from
+        previous responses into subsequent requests, as required by
+        Volcano Engine's doubao-seed models with deep thinking enabled.
+
+        Args:
+            messages: Message list with the chat history in OpenAI API format.
+            response_format: The format of the response.
+            tools: The schema of the tools to use for the request.
+
+        Returns:
+            ChatCompletion in the non-stream mode, or
+            AsyncStream[ChatCompletionChunk] in the stream mode.
+        """
+        # Inject reasoning_content from previous response
+        processed_messages = self._inject_reasoning_content(messages)
+
+        # Call parent's arun
+        response = await super().arun(
+            processed_messages, response_format, tools
+        )
+
+        # Extract and store reasoning_content for next request
+        if isinstance(response, ChatCompletion):
+            self._last_reasoning_content = self._extract_reasoning_content(
+                response
+            )
+
+        return response
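
The core of this change is a two-step round trip: run()/arun() capture reasoning_content from each non-streaming response, and the next call re-attaches it to the most recent tool-calling assistant message before the request goes out. The sketch below replays the injection step on plain dicts with no API calls; inject_reasoning_content is a standalone mirror of the commit's private method (not part of the library API), and the message contents and stored reasoning string are hypothetical, invented for the demo.

from typing import Any, Dict, List, Optional


def inject_reasoning_content(
    messages: List[Dict[str, Any]],
    last_reasoning_content: Optional[str],
) -> List[Dict[str, Any]]:
    # Walk the history backwards and attach reasoning_content to the
    # most recent assistant message that carries tool_calls, mirroring
    # the commit's _inject_reasoning_content.
    if not last_reasoning_content:
        return messages

    processed: List[Dict[str, Any]] = []
    injected = False
    for msg in reversed(messages):
        if (
            not injected
            and msg.get("role") == "assistant"
            and msg.get("tool_calls")
            and "reasoning_content" not in msg
        ):
            new_msg = dict(msg)  # copy, so the caller's list is untouched
            new_msg["reasoning_content"] = last_reasoning_content
            processed.append(new_msg)
            injected = True
        else:
            processed.append(msg)
    return list(reversed(processed))


# Hypothetical tool-calling history; contents invented for the demo.
history = [
    {"role": "user", "content": "What's the weather in Shanghai?"},
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {
                "id": "call_1",
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "arguments": '{"city": "Shanghai"}',
                },
            }
        ],
    },
    {"role": "tool", "tool_call_id": "call_1", "content": "22C, sunny"},
]

# Pretend the previous response carried this reasoning trace.
updated = inject_reasoning_content(history, "User asked for Shanghai weather.")
assert updated[1]["reasoning_content"] == "User asked for Shanghai weather."
assert "reasoning_content" not in updated[0]  # other messages untouched

Injecting into only the most recent tool-calling assistant message, and clearing the stored trace once it has been placed, keeps a stale reasoning trace from leaking into unrelated turns.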

docs/conf.py

Lines changed: 1 addition & 1 deletion

@@ -27,7 +27,7 @@
 project = 'CAMEL'
 copyright = '2024, CAMEL-AI.org'
 author = 'CAMEL-AI.org'
-release = '0.2.83a5'
+release = '0.2.83a6'
 
 html_favicon = (
     'https://raw.githubusercontent.com/camel-ai/camel/master/misc/favicon.png'

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "camel-ai"
-version = "0.2.83a5"
+version = "0.2.83a6"
 description = "Communicative Agents for AI Society Study"
 authors = [{ name = "CAMEL-AI.org" }]
 requires-python = ">=3.10,<3.15"
