# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. =========
14- import copy
1514import os
1615import warnings
1716from typing import Any , Callable , Dict , List , Optional , Type , Union
@@ -345,14 +344,7 @@ def _request_chat_completion(
345344 messages : List [OpenAIMessage ],
346345 tools : Optional [List [Dict [str , Any ]]] = None ,
347346 ) -> Union [ChatCompletion , Stream [ChatCompletionChunk ]]:
348- request_config = self .model_config_dict .copy ()
349-
350- if tools :
351- request_config ["tools" ] = tools
352- else :
353- # Remove parallel_tool_calls if no tools are specified
354- # as OpenAI API only allows it when tools are present
355- request_config .pop ("parallel_tool_calls" , None )
347+ request_config = self ._prepare_request_config (tools )
356348
357349 return self ._client .chat .completions .create (
358350 messages = messages ,
@@ -365,14 +357,7 @@ async def _arequest_chat_completion(
365357 messages : List [OpenAIMessage ],
366358 tools : Optional [List [Dict [str , Any ]]] = None ,
367359 ) -> Union [ChatCompletion , AsyncStream [ChatCompletionChunk ]]:
368- request_config = self .model_config_dict .copy ()
369-
370- if tools :
371- request_config ["tools" ] = tools
372- else :
373- # Remove parallel_tool_calls if no tools are specified
374- # as OpenAI API only allows it when tools are present
375- request_config .pop ("parallel_tool_calls" , None )
360+ request_config = self ._prepare_request_config (tools )
376361
377362 return await self ._async_client .chat .completions .create (
378363 messages = messages ,
@@ -386,18 +371,11 @@ def _request_parse(
386371 response_format : Type [BaseModel ],
387372 tools : Optional [List [Dict [str , Any ]]] = None ,
388373 ) -> ChatCompletion :
389- request_config = copy .deepcopy (self .model_config_dict )
390-
374+ request_config = self ._prepare_request_config (tools )
391375 request_config ["response_format" ] = response_format
392376 # Remove stream from request config since OpenAI does not support it
393377 # with structured response
394378 request_config .pop ("stream" , None )
395- if tools :
396- request_config ["tools" ] = tools
397- else :
398- # Remove parallel_tool_calls if no tools are specified
399- # as OpenAI API only allows it when tools are present
400- request_config .pop ("parallel_tool_calls" , None )
401379
402380 return self ._client .beta .chat .completions .parse (
403381 messages = messages ,
@@ -411,18 +389,11 @@ async def _arequest_parse(
411389 response_format : Type [BaseModel ],
412390 tools : Optional [List [Dict [str , Any ]]] = None ,
413391 ) -> ChatCompletion :
414- request_config = copy .deepcopy (self .model_config_dict )
415-
392+ request_config = self ._prepare_request_config (tools )
416393 request_config ["response_format" ] = response_format
417394 # Remove stream from request config since OpenAI does not support it
418395 # with structured response
419396 request_config .pop ("stream" , None )
420- if tools :
421- request_config ["tools" ] = tools
422- else :
423- # Remove parallel_tool_calls if no tools are specified
424- # as OpenAI API only allows it when tools are present
425- request_config .pop ("parallel_tool_calls" , None )
426397
427398 return await self ._async_client .beta .chat .completions .parse (
428399 messages = messages ,
@@ -440,19 +411,10 @@ def _request_stream_parse(
440411
441412 Note: This uses OpenAI's beta streaming API for structured outputs.
442413 """
443-
444- request_config = copy .deepcopy (self .model_config_dict )
445-
414+ request_config = self ._prepare_request_config (tools )
446415 # Remove stream from config as it's handled by the stream method
447416 request_config .pop ("stream" , None )
448417
449- if tools :
450- request_config ["tools" ] = tools
451- else :
452- # Remove parallel_tool_calls if no tools are specified
453- # as OpenAI API only allows it when tools are present
454- request_config .pop ("parallel_tool_calls" , None )
455-
456418 # Use the beta streaming API for structured outputs
457419 return self ._client .beta .chat .completions .stream (
458420 messages = messages ,
@@ -471,19 +433,10 @@ async def _arequest_stream_parse(
471433
472434 Note: This uses OpenAI's beta streaming API for structured outputs.
473435 """
474-
475- request_config = copy .deepcopy (self .model_config_dict )
476-
436+ request_config = self ._prepare_request_config (tools )
477437 # Remove stream from config as it's handled by the stream method
478438 request_config .pop ("stream" , None )
479439
480- if tools :
481- request_config ["tools" ] = tools
482- else :
483- # Remove parallel_tool_calls if no tools are specified
484- # as OpenAI API only allows it when tools are present
485- request_config .pop ("parallel_tool_calls" , None )
486-
487440 # Use the beta streaming API for structured outputs
488441 return self ._async_client .beta .chat .completions .stream (
489442 messages = messages ,
0 commit comments