@@ -106,6 +106,11 @@ def __str__(self) -> str:
106106 )
107107
def as_dict(self) -> dict[str, Any]:
    r"""Convert the function calling record into a plain dictionary.

    Returns:
        dict[str, Any]: A dictionary representation of this record,
        produced via :meth:`model_dump`.
    """
    record = self.model_dump()
    return record
110115
111116
@@ -221,6 +226,15 @@ def __init__(
221226
222227 # ruff: noqa: E501
223228 def _generate_tool_prompt (self , tool_schema_list : List [Dict ]) -> str :
229+ r"""Generates a tool prompt based on the provided tool schema list.
230+
231+ Args:
232+ tool_schema_list (List[Dict]): A list of dictionaries, each
233+ containing a tool schema.
234+
235+ Returns:
236+ str: A string representing the tool prompt.
237+ """
224238 tool_prompts = []
225239
226240 for tool in tool_schema_list :
@@ -241,21 +255,36 @@ def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str:
241255
242256 { tool_prompt_str }
243257
244- If you choose to call a function ONLY reply in the following format with no prefix or suffix:
258+ If you choose to call a function ONLY reply in the following format with no
259+ prefix or suffix:
245260
246- <function=example_function_name>{{"example_name": "example_value"}}</function>
261+ <function=example_function_name>{{"example_name": "example_value"}}
262+ </function>
247263
248264 Reminder:
249- - Function calls MUST follow the specified format, start with <function= and end with </function>
265+ - Function calls MUST follow the specified format, start with <function=
266+ and end with </function>
250267 - Required parameters MUST be specified
251268 - Only call one function at a time
252269 - Put the entire function call reply on one line
253- - If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls
270+ - If there is no function call available, answer the question like normal
271+ with your current knowledge and do not tell the user about function calls
254272 """
255273 '''
256274 return final_prompt
257275
258276 def _parse_tool_response (self , response : str ):
277+ r"""Parses the tool response to extract the function name and
278+ arguments.
279+
280+ Args:
281+ response (str): The response from the model containing the
282+ function call.
283+
284+ Returns:
285+ Optional[Dict[str, Any]]: The parsed function name and arguments
286+ if found, otherwise :obj:`None`.
287+ """
259288 function_regex = r"<function=(\w+)>(.*?)</function>"
260289 match = re .search (function_regex , response )
261290
@@ -270,12 +299,7 @@ def _parse_tool_response(self, response: str):
270299 return None
271300
272301 def reset (self ):
273- r"""Resets the :obj:`ChatAgent` to its initial state and returns the
274- stored messages.
275-
276- Returns:
277- List[BaseMessage]: The stored messages.
278- """
302+ r"""Resets the :obj:`ChatAgent` to its initial state."""
279303 self .terminated = False
280304 self .init_messages ()
281305 for terminator in self .response_terminators :
@@ -292,7 +316,7 @@ def system_message(self) -> Optional[BaseMessage]:
292316 return self ._system_message
293317
294318 @system_message .setter
295- def system_message (self , message : BaseMessage ):
319+ def system_message (self , message : BaseMessage ) -> None :
296320 r"""The setter method for the property :obj:`system_message`.
297321
298322 Args:
@@ -827,6 +851,17 @@ def _structure_output_with_function(
827851 ]:
828852 r"""Internal function of structuring the output of the agent based on
829853 the given output schema.
854+
855+ Args:
856+ response_format (Type[BaseModel]): The output schema to use for
857+ structuring the output.
858+
859+ Returns:
860+ Tuple[List[BaseMessage], List[str], Dict[str, int], str,
861+ FunctionCallingRecord, int]:
862+ A tuple containing the output messages, finish reasons, usage
863+ dictionary, response ID, function calling record, and number of
864+ tokens.
830865 """
831866 from camel .toolkits import FunctionTool
832867
@@ -919,9 +954,39 @@ def _step_get_info(
919954 num_tokens : int ,
920955 external_tool_request : Optional [ChatCompletionMessageToolCall ] = None ,
921956 ) -> Dict [str , Any ]:
922- # Loop over responses terminators, get list of termination
923- # tuples with whether the terminator terminates the agent
924- # and termination reason
957+ r"""Process the output of a chat step and gather information about the
958+ step.
959+
960+ This method checks for termination conditions, updates the agent's
961+ state, and collects information about the chat step, including tool
962+ calls and termination reasons.
963+
964+ Args:
965+ output_messages (List[BaseMessage]): The messages generated in
966+ this step.
967+ finish_reasons (List[str]): The reasons for finishing the
968+ generation for each message.
969+ usage_dict (Dict[str, int]): Dictionary containing token usage
970+ information.
971+ response_id (str): The ID of the response from the model.
972+ tool_calls (List[FunctionCallingRecord]): Records of function calls
973+ made during this step.
974+ num_tokens (int): The number of tokens used in this step.
975+ external_tool_request (Optional[ChatCompletionMessageToolCall]):
976+ Any external tool request made during this step.
977+                 (default: :obj:`None`)
978+
979+ Returns:
980+ Dict[str, Any]: A dictionary containing information about the chat
981+ step, including termination status, reasons, and tool call
982+ information.
983+
984+ Note:
985+ This method iterates over all response terminators and checks if
986+ any of them signal termination. If a terminator signals
987+ termination, the agent's state is updated accordingly, and the
988+ termination reason is recorded.
989+ """
925990 termination = [
926991 terminator .is_terminated (output_messages )
927992 for terminator in self .response_terminators
@@ -952,7 +1017,8 @@ def _step_get_info(
9521017 def handle_batch_response (
9531018 self , response : ChatCompletion
9541019 ) -> Tuple [List [BaseMessage ], List [str ], Dict [str , int ], str ]:
955- r"""
1020+ r"""Process a batch response from the model and extract the necessary
1021+ information.
9561022
9571023 Args:
9581024 response (dict): Model response.
@@ -985,7 +1051,18 @@ def handle_batch_response(
9851051 response .id ,
9861052 )
9871053
988- def _safe_model_dump (self , obj ):
1054+ def _safe_model_dump (self , obj ) -> dict :
1055+ r"""Safely dump a Pydantic model to a dictionary.
1056+
1057+ This method attempts to use the `model_dump` method if available,
1058+ otherwise it falls back to the `dict` method.
1059+
1060+ Args:
1061+ obj: The Pydantic model instance to be dumped.
1062+
1063+ Returns:
1064+ dict: A dictionary representation of the Pydantic model.
1065+ """
9891066 # Check if the `model_dump` method exists (Pydantic v2)
9901067 if hasattr (obj , 'model_dump' ):
9911068 return obj .model_dump ()
@@ -1000,7 +1077,8 @@ def handle_stream_response(
10001077 response : Stream [ChatCompletionChunk ],
10011078 prompt_tokens : int ,
10021079 ) -> Tuple [List [BaseMessage ], List [str ], Dict [str , int ], str ]:
1003- r"""
1080+ r"""Process a stream response from the model and extract the necessary
1081+ information.
10041082
10051083 Args:
10061084 response (dict): Model response.
0 commit comments