@@ -13,19 +13,11 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-import time
-from typing import Any, Dict, List, Optional, Union
-
-from openai import AsyncStream, Stream
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import ModelScopeConfig
-from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
     api_keys_required,
@@ -96,168 +88,3 @@ def __init__( |
             max_retries=max_retries,
             **kwargs,
         )
-
-    def _post_handle_response(
-        self, response: Union[ChatCompletion, Stream[ChatCompletionChunk]]
-    ) -> ChatCompletion:
-        r"""Handle reasoning content with <think> tags at the beginning."""
-        if not isinstance(response, Stream):
-            # Handle non-streaming response (existing logic)
-            if self.model_config_dict.get("extra_body", {}).get(
-                "enable_thinking", False
-            ):
-                reasoning_content = response.choices[
-                    0
-                ].message.reasoning_content  # type: ignore[attr-defined]
-                combined_content = (
-                    f"<think>\n{reasoning_content}\n</think>\n"
-                    if reasoning_content
-                    else ""
-                )
-                response_content = response.choices[0].message.content or ""
-                combined_content += response_content
-
-                # Construct a new ChatCompletion with combined content
-                return ChatCompletion.construct(
-                    id=response.id,
-                    choices=[
-                        dict(
-                            finish_reason=response.choices[0].finish_reason,
-                            index=response.choices[0].index,
-                            logprobs=response.choices[0].logprobs,
-                            message=dict(
-                                role=response.choices[0].message.role,
-                                content=combined_content,
-                            ),
-                        )
-                    ],
-                    created=response.created,
-                    model=response.model,
-                    object="chat.completion",
-                    system_fingerprint=response.system_fingerprint,
-                    usage=response.usage,
-                )
-            else:
-                return response  # Return original if no thinking enabled
-
-        # Handle streaming response
-        accumulated_reasoning = ""
-        accumulated_content = ""
-        final_chunk = None
-        usage_data = None  # Initialize usage data
-        role = "assistant"  # Default role
-
-        for chunk in response:
-            final_chunk = chunk  # Keep track of the last chunk for metadata
-            if chunk.choices:
-                delta = chunk.choices[0].delta
-                if delta.role:
-                    role = delta.role  # Update role if provided
-                if (
-                    hasattr(delta, 'reasoning_content')
-                    and delta.reasoning_content
-                ):
-                    accumulated_reasoning += delta.reasoning_content
-                if delta.content:
-                    accumulated_content += delta.content
-
-            if hasattr(chunk, 'usage') and chunk.usage:
-                usage_data = chunk.usage
-
-        combined_content = (
-            f"<think>\n{accumulated_reasoning}\n</think>\n"
-            if accumulated_reasoning
-            else ""
-        ) + accumulated_content
-
-        # Construct the final ChatCompletion object from accumulated
-        # stream data
-        if final_chunk:
-            finish_reason = "stop"  # Default finish reason
-            logprobs = None
-            if final_chunk.choices:
-                finish_reason = (
-                    final_chunk.choices[0].finish_reason or finish_reason
-                )
-                if hasattr(final_chunk.choices[0], 'logprobs'):
-                    logprobs = final_chunk.choices[0].logprobs
-
-            return ChatCompletion.construct(
-                # Use data from the final chunk or defaults
-                id=final_chunk.id
-                if hasattr(final_chunk, 'id')
-                else "streamed-completion",
-                choices=[
-                    dict(
-                        finish_reason=finish_reason,
-                        index=0,
-                        logprobs=logprobs,
-                        message=dict(
-                            role=role,
-                            content=combined_content,
-                        ),
-                    )
-                ],
-                created=final_chunk.created
-                if hasattr(final_chunk, 'created')
-                else int(time.time()),
-                model=final_chunk.model
-                if hasattr(final_chunk, 'model')
-                else self.model_type,
-                object="chat.completion",
-                system_fingerprint=final_chunk.system_fingerprint
-                if hasattr(final_chunk, 'system_fingerprint')
-                else None,
-                usage=usage_data,
-            )
-        else:
-            # Handle cases where the stream was empty or invalid
-            return ChatCompletion.construct(
-                id="empty-stream",
-                choices=[
-                    dict(
-                        finish_reason="error",
-                        index=0,
-                        message=dict(role="assistant", content=""),
-                    )
-                ],
-                created=int(time.time()),
-                model=self.model_type,
-                object="chat.completion",
-                usage=usage_data,
-            )
-
-    def _request_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        request_config = self.model_config_dict.copy()
-
-        if tools:
-            request_config["tools"] = tools
-
-        return self._post_handle_response(
-            self._client.chat.completions.create(
-                messages=messages,
-                model=self.model_type,
-                **request_config,
-            )
-        )
-
-    async def _arequest_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        request_config = self.model_config_dict.copy()
-
-        if tools:
-            request_config["tools"] = tools
-
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-        return self._post_handle_response(response)
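
For context on what this diff drops: the removed `_post_handle_response` prepended the model's `reasoning_content`, wrapped in `<think>` tags, to the ordinary message content before returning a `ChatCompletion`. Below is a minimal standalone sketch of just that merge step; the helper name `combine_reasoning` is illustrative and not part of the codebase.

```python
from typing import Optional


def combine_reasoning(reasoning: Optional[str], content: Optional[str]) -> str:
    # Wrap the reasoning text in <think> tags; skip the block entirely
    # when there is no reasoning, mirroring the removed merge logic.
    prefix = f"<think>\n{reasoning}\n</think>\n" if reasoning else ""
    return prefix + (content or "")


# Example output: "<think>\n2 + 2 = 4\n</think>\nThe answer is 4."
print(combine_reasoning("2 + 2 = 4", "The answer is 4."))
```

In the removed code, the same concatenation was applied to streamed responses after accumulating `delta.reasoning_content` and `delta.content` across chunks.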