
Commit a2ad855

Merge branch 'master' into terminal-deps
2 parents 0e06d9c + 6c2116b commit a2ad855

14 files changed: 266 additions, 542 deletions

camel/agents/chat_agent.py

Lines changed: 190 additions & 70 deletions
Large diffs are not rendered by default.

camel/configs/deepseek_config.py

Lines changed: 3 additions & 4 deletions
@@ -96,13 +96,12 @@ class DeepSeekConfig(BaseConfig):
     tool_choice: Optional[Union[dict[str, str], str]] = None
     logprobs: Optional[bool] = None
     top_logprobs: Optional[int] = None
+    stream_options: Optional[dict[str, bool]] = None

     def __init__(self, include_usage: bool = True, **kwargs):
+        if kwargs.get("stream") and "stream_options" not in kwargs:
+            kwargs["stream_options"] = {"include_usage": include_usage}
         super().__init__(**kwargs)
-        # Only set stream_options when stream is True
-        # Otherwise, it will raise error when calling the API
-        if self.stream:
-            self.stream_options = {"include_usage": include_usage}


 DEEPSEEK_API_PARAMS = {param for param in DeepSeekConfig.model_fields.keys()}
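
Injecting stream_options into kwargs before pydantic validation means the field is set exactly once and an explicit caller value wins. A minimal sketch of the resulting behavior, assuming DeepSeekConfig is exported from camel.configs the same way the diff below imports ModelScopeConfig:

    from camel.configs import DeepSeekConfig

    # stream=True with no explicit stream_options: the constructor injects
    # {"include_usage": True} into kwargs before validation runs.
    config = DeepSeekConfig(stream=True)
    assert config.stream_options == {"include_usage": True}

    # stream unset: stream_options stays None, so non-streaming API calls
    # never receive an argument they would reject.
    assert DeepSeekConfig().stream_options is None

    # An explicitly supplied stream_options is left untouched.
    config = DeepSeekConfig(stream=True, stream_options={"include_usage": False})
    assert config.stream_options == {"include_usage": False}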

camel/messages/base.py

Lines changed: 10 additions & 1 deletion
@@ -71,8 +71,10 @@ class BaseMessage:
             images associated with the message. (default: :obj:`auto`)
         video_detail (Literal["auto", "low", "high"]): Detail level of the
             videos associated with the message. (default: :obj:`auto`)
-        parsed: Optional[Union[Type[BaseModel], dict]]: Optional object which
+        parsed (Optional[Union[Type[BaseModel], dict]]): Optional object which
             is parsed from the content. (default: :obj:`None`)
+        reasoning_content (Optional[str]): Optional reasoning trace associated
+            with the message. (default: :obj:`None`)
     """

     role_name: str
@@ -85,6 +87,7 @@
     image_detail: Literal["auto", "low", "high"] = "auto"
     video_detail: Literal["auto", "low", "high"] = "auto"
     parsed: Optional[Union[BaseModel, dict]] = None
+    reasoning_content: Optional[str] = None

     @classmethod
     def make_user_message(
@@ -219,6 +222,12 @@ def create_new_instance(self, content: str) -> "BaseMessage":
             role_type=self.role_type,
             meta_dict=self.meta_dict,
             content=content,
+            video_bytes=self.video_bytes,
+            image_list=self.image_list,
+            image_detail=self.image_detail,
+            video_detail=self.video_detail,
+            parsed=self.parsed,
+            reasoning_content=self.reasoning_content,
         )

     def __add__(self, other: Any) -> Union["BaseMessage", Any]:
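
Previously, create_new_instance dropped the media, parsed, and reasoning fields when deriving a message with new content. A minimal sketch of the fixed behavior, assuming the constructor keywords match the dataclass fields shown in the hunks above:

    from camel.messages import BaseMessage
    from camel.types import RoleType

    msg = BaseMessage(
        role_name="assistant",
        role_type=RoleType.ASSISTANT,
        meta_dict=None,
        content="The answer is 42.",
        reasoning_content="Considered the question of life, the universe...",
    )

    # The derived message now keeps every field except the replaced content.
    copy = msg.create_new_instance("42")
    assert copy.content == "42"
    assert copy.reasoning_content == msg.reasoning_content
    assert copy.parsed is msg.parsed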

camel/models/deepseek_model.py

Lines changed: 2 additions & 40 deletions
@@ -165,44 +165,6 @@ def _prepare_request(

         return request_config

-    def _post_handle_response(
-        self, response: ChatCompletion
-    ) -> ChatCompletion:
-        r"""Handle reasoning content with <think> tags at the beginning."""
-        if (
-            self.model_type in [ModelType.DEEPSEEK_REASONER]
-            and os.environ.get("GET_REASONING_CONTENT", "false").lower()
-            == "true"
-        ):
-            reasoning_content = response.choices[0].message.reasoning_content  # type: ignore[attr-defined]
-            combined_content = (  # type: ignore[operator]
-                f"<think>\n{reasoning_content}\n</think>\n"
-                if reasoning_content
-                else ""
-            ) + response.choices[0].message.content
-
-            response = ChatCompletion.construct(
-                id=response.id,
-                choices=[
-                    dict(
-                        index=response.choices[0].index,
-                        message={
-                            "role": response.choices[0].message.role,
-                            "content": combined_content,
-                            "tool_calls": None,
-                        },
-                        finish_reason=response.choices[0].finish_reason
-                        if response.choices[0].finish_reason
-                        else None,
-                    )
-                ],
-                created=response.created,
-                model=response.model,
-                object="chat.completion",
-                usage=response.usage,
-            )
-        return response
-
     @observe()
     def _run(
         self,
@@ -244,7 +206,7 @@ def _run(
             **request_config,
         )

-        return self._post_handle_response(response)
+        return response

     @observe()
     async def _arun(
@@ -286,4 +248,4 @@ async def _arun(
             **request_config,
         )

-        return self._post_handle_response(response)
+        return response
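
With _post_handle_response removed, DeepSeekModel returns the provider response unmodified; the <think>-tag folding formerly gated by GET_REASONING_CONTENT moves out of the model layer, presumably into the chat_agent.py changes not rendered above. A hedged sketch of recovering the old combined string at the call site, mirroring the deleted code:

    # reasoning_content is a DeepSeek-specific message attribute and may be
    # absent on other providers, hence the getattr guard.
    message = response.choices[0].message
    reasoning = getattr(message, "reasoning_content", None)
    combined = (
        f"<think>\n{reasoning}\n</think>\n" if reasoning else ""
    ) + (message.content or "")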

camel/models/modelscope_model.py

Lines changed: 2 additions & 175 deletions
@@ -13,19 +13,11 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

 import os
-import time
-from typing import Any, Dict, List, Optional, Union
-
-from openai import AsyncStream, Stream
+from typing import Any, Dict, Optional, Union

 from camel.configs import ModelScopeConfig
-from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
     api_keys_required,
@@ -96,168 +88,3 @@ def __init__(
             max_retries=max_retries,
             **kwargs,
         )
-
-    def _post_handle_response(
-        self, response: Union[ChatCompletion, Stream[ChatCompletionChunk]]
-    ) -> ChatCompletion:
-        r"""Handle reasoning content with <think> tags at the beginning."""
-        if not isinstance(response, Stream):
-            # Handle non-streaming response (existing logic)
-            if self.model_config_dict.get("extra_body", {}).get(
-                "enable_thinking", False
-            ):
-                reasoning_content = response.choices[
-                    0
-                ].message.reasoning_content  # type: ignore[attr-defined]
-                combined_content = (
-                    f"<think>\n{reasoning_content}\n</think>\n"
-                    if reasoning_content
-                    else ""
-                )
-                response_content = response.choices[0].message.content or ""
-                combined_content += response_content
-
-                # Construct a new ChatCompletion with combined content
-                return ChatCompletion.construct(
-                    id=response.id,
-                    choices=[
-                        dict(
-                            finish_reason=response.choices[0].finish_reason,
-                            index=response.choices[0].index,
-                            logprobs=response.choices[0].logprobs,
-                            message=dict(
-                                role=response.choices[0].message.role,
-                                content=combined_content,
-                            ),
-                        )
-                    ],
-                    created=response.created,
-                    model=response.model,
-                    object="chat.completion",
-                    system_fingerprint=response.system_fingerprint,
-                    usage=response.usage,
-                )
-            else:
-                return response  # Return original if no thinking enabled
-
-        # Handle streaming response
-        accumulated_reasoning = ""
-        accumulated_content = ""
-        final_chunk = None
-        usage_data = None  # Initialize usage data
-        role = "assistant"  # Default role
-
-        for chunk in response:
-            final_chunk = chunk  # Keep track of the last chunk for metadata
-            if chunk.choices:
-                delta = chunk.choices[0].delta
-                if delta.role:
-                    role = delta.role  # Update role if provided
-                if (
-                    hasattr(delta, 'reasoning_content')
-                    and delta.reasoning_content
-                ):
-                    accumulated_reasoning += delta.reasoning_content
-                if delta.content:
-                    accumulated_content += delta.content
-
-            if hasattr(chunk, 'usage') and chunk.usage:
-                usage_data = chunk.usage
-
-        combined_content = (
-            f"<think>\n{accumulated_reasoning}\n</think>\n"
-            if accumulated_reasoning
-            else ""
-        ) + accumulated_content
-
-        # Construct the final ChatCompletion object from accumulated
-        # stream data
-        if final_chunk:
-            finish_reason = "stop"  # Default finish reason
-            logprobs = None
-            if final_chunk.choices:
-                finish_reason = (
-                    final_chunk.choices[0].finish_reason or finish_reason
-                )
-                if hasattr(final_chunk.choices[0], 'logprobs'):
-                    logprobs = final_chunk.choices[0].logprobs
-
-            return ChatCompletion.construct(
-                # Use data from the final chunk or defaults
-                id=final_chunk.id
-                if hasattr(final_chunk, 'id')
-                else "streamed-completion",
-                choices=[
-                    dict(
-                        finish_reason=finish_reason,
-                        index=0,
-                        logprobs=logprobs,
-                        message=dict(
-                            role=role,
-                            content=combined_content,
-                        ),
-                    )
-                ],
-                created=final_chunk.created
-                if hasattr(final_chunk, 'created')
-                else int(time.time()),
-                model=final_chunk.model
-                if hasattr(final_chunk, 'model')
-                else self.model_type,
-                object="chat.completion",
-                system_fingerprint=final_chunk.system_fingerprint
-                if hasattr(final_chunk, 'system_fingerprint')
-                else None,
-                usage=usage_data,
-            )
-        else:
-            # Handle cases where the stream was empty or invalid
-            return ChatCompletion.construct(
-                id="empty-stream",
-                choices=[
-                    dict(
-                        finish_reason="error",
-                        index=0,
-                        message=dict(role="assistant", content=""),
-                    )
-                ],
-                created=int(time.time()),
-                model=self.model_type,
-                object="chat.completion",
-                usage=usage_data,
-            )
-
-    def _request_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        request_config = self.model_config_dict.copy()
-
-        if tools:
-            request_config["tools"] = tools
-
-        return self._post_handle_response(
-            self._client.chat.completions.create(
-                messages=messages,
-                model=self.model_type,
-                **request_config,
-            )
-        )
-
-    async def _arequest_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        request_config = self.model_config_dict.copy()
-
-        if tools:
-            request_config["tools"] = tools
-
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-        return self._post_handle_response(response)
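
ModelScopeModel now defers entirely to OpenAICompatibleModel, so streaming responses reach the caller as raw chunks rather than a pre-assembled ChatCompletion. For callers that still want one combined string, a minimal sketch mirroring the deleted accumulation loop (reasoning_content on deltas is provider-specific, hence the getattr guard):

    reasoning_parts: list[str] = []
    content_parts: list[str] = []
    for chunk in stream:  # stream: Stream[ChatCompletionChunk] from the model
        if not chunk.choices:
            continue  # usage-only chunks carry no delta
        delta = chunk.choices[0].delta
        if getattr(delta, "reasoning_content", None):
            reasoning_parts.append(delta.reasoning_content)
        if delta.content:
            content_parts.append(delta.content)

    reasoning = "".join(reasoning_parts)
    combined = (
        f"<think>\n{reasoning}\n</think>\n" if reasoning else ""
    ) + "".join(content_parts)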
