From c400490faa1bd320dcbd4fa9bac3e56c4b057d59 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 8 Feb 2026 01:10:10 +0000 Subject: [PATCH 1/6] Initial plan From c7ebe881c7f616c73add61438704bebd45f50489 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 8 Feb 2026 01:15:37 +0000 Subject: [PATCH 2/6] Remove unused HuggingFaceToolAgent and BabyAGI modules Co-authored-by: lightaime <23632352+lightaime@users.noreply.github.com> --- README.ja.md | 2 +- README.md | 2 +- README.zh.md | 2 +- camel/agents/__init__.py | 2 - camel/agents/tool_agents/__init__.py | 2 - .../tool_agents/hugging_face_tool_agent.py | 206 ------------- camel/societies/__init__.py | 2 - camel/societies/babyagi_playing.py | 284 ------------------ docs/camel.agents.tool_agents.rst | 8 - docs/camel.societies.rst | 8 - docs/key_modules/societies.md | 1 - docs/key_modules/society.md | 23 -- examples/ai_society/babyagi_playing.py | 74 ----- examples/embodiment/hugging_face_tool.py | 53 ---- test/agents/test_babyagi_playing.py | 228 -------------- test/agents/test_embodied_agent.py | 19 +- .../test_hugging_face_tool_agent.py | 72 ----- 17 files changed, 4 insertions(+), 984 deletions(-) delete mode 100644 camel/agents/tool_agents/hugging_face_tool_agent.py delete mode 100644 camel/societies/babyagi_playing.py delete mode 100644 examples/ai_society/babyagi_playing.py delete mode 100644 examples/embodiment/hugging_face_tool.py delete mode 100644 test/agents/test_babyagi_playing.py delete mode 100644 test/agents/tool_agents/test_hugging_face_tool_agent.py diff --git a/README.ja.md b/README.ja.md index 1c38fe58f2..808645c06d 100644 --- a/README.ja.md +++ b/README.ja.md @@ -565,7 +565,7 @@ CAMELのマルチエージェントフレームワークがインフラ自動化 また、プロジェクトの初期ロゴをデザインしてくださったHaya Hammoud氏にも感謝いたします。 私たちは、エージェントを構築、比較、カスタマイズするための他の作品から素晴らしい研究アイデアを実装しました。これらのモジュールを使用する場合は、元の作品を引用してください: -- `TaskCreationAgent`、`TaskPrioritizationAgent`、`BabyAGI`:*Nakajima et al.*から:[Task-Driven Autonomous Agent](https://yoheinakajima.com/task-driven-autonomous-agent-utilizing-gpt-4-pinecone-and-langchain-for-diverse-applications/)。[[例](https://github.com/camel-ai/camel/blob/master/examples/ai_society/babyagi_playing.py)] +- `TaskCreationAgent`と`TaskPrioritizationAgent`:*Nakajima et al.*から:[Task-Driven Autonomous Agent](https://yoheinakajima.com/task-driven-autonomous-agent-utilizing-gpt-4-pinecone-and-langchain-for-diverse-applications/)。 - `PersonaHub`:*Tao Ge et al.*から:[Scaling Synthetic Data Creation with 1,000,000,000 Personas](https://arxiv.org/pdf/2406.20094)。[[例](https://github.com/camel-ai/camel/blob/master/examples/personas/personas_generation.py)] diff --git a/README.md b/README.md index 7066c84761..062fedd71d 100644 --- a/README.md +++ b/README.md @@ -562,7 +562,7 @@ Special thanks to [Nomic AI](https://home.nomic.ai/) for giving us extended acce We would also like to thank Haya Hammoud for designing the initial logo of our project. We implemented amazing research ideas from other works for you to build, compare and customize your agents. If you use any of these modules, please kindly cite the original works: -- `TaskCreationAgent`, `TaskPrioritizationAgent` and `BabyAGI` from *Nakajima et al.*: [Task-Driven Autonomous Agent](https://yoheinakajima.com/task-driven-autonomous-agent-utilizing-gpt-4-pinecone-and-langchain-for-diverse-applications/). 
[[Example](https://github.com/camel-ai/camel/blob/master/examples/ai_society/babyagi_playing.py)] +- `TaskCreationAgent` and `TaskPrioritizationAgent` from *Nakajima et al.*: [Task-Driven Autonomous Agent](https://yoheinakajima.com/task-driven-autonomous-agent-utilizing-gpt-4-pinecone-and-langchain-for-diverse-applications/). - `PersonaHub` from *Tao Ge et al.*: [Scaling Synthetic Data Creation with 1,000,000,000 Personas](https://arxiv.org/pdf/2406.20094). [[Example](https://github.com/camel-ai/camel/blob/master/examples/personas/personas_generation.py)] diff --git a/README.zh.md b/README.zh.md index 61bf724b72..38f0dffd11 100644 --- a/README.zh.md +++ b/README.zh.md @@ -489,7 +489,7 @@ pip install camel-ai 我们还要感谢 Haya Hammoud 设计了我们项目的初始徽标。 我们实现了来自其他研究工作的优秀创意,供您构建、比较和定制智能体。如果您使用了其中的任何模块,请务必引用原始作品: -- `TaskCreationAgent`, `TaskPrioritizationAgent` and `BabyAGI` from *Nakajima et al.*: [Task-Driven Autonomous Agent](https://yoheinakajima.com/task-driven-autonomous-agent-utilizing-gpt-4-pinecone-and-langchain-for-diverse-applications/). [[Example](https://github.com/camel-ai/camel/blob/master/examples/ai_society/babyagi_playing.py)] +- `TaskCreationAgent` and `TaskPrioritizationAgent` from *Nakajima et al.*: [Task-Driven Autonomous Agent](https://yoheinakajima.com/task-driven-autonomous-agent-utilizing-gpt-4-pinecone-and-langchain-for-diverse-applications/). - `PersonaHub` from *Tao Ge et al.*: [Scaling Synthetic Data Creation with 1,000,000,000 Personas](https://arxiv.org/pdf/2406.20094). [[Example](https://github.com/camel-ai/camel/blob/master/examples/personas/personas_generation.py)] diff --git a/camel/agents/__init__.py b/camel/agents/__init__.py index 2619fb69a7..e2b46f779d 100644 --- a/camel/agents/__init__.py +++ b/camel/agents/__init__.py @@ -27,7 +27,6 @@ TaskSpecifyAgent, ) from .tool_agents.base import BaseToolAgent -from .tool_agents.hugging_face_tool_agent import HuggingFaceToolAgent __all__ = [ 'BaseAgent', @@ -38,7 +37,6 @@ 'TaskPrioritizationAgent', 'CriticAgent', 'BaseToolAgent', - 'HuggingFaceToolAgent', 'EmbodiedAgent', 'RoleAssignmentAgent', 'SearchAgent', diff --git a/camel/agents/tool_agents/__init__.py b/camel/agents/tool_agents/__init__.py index 59612f52dd..adb7855929 100644 --- a/camel/agents/tool_agents/__init__.py +++ b/camel/agents/tool_agents/__init__.py @@ -12,9 +12,7 @@ # limitations under the License. # ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= from .base import BaseToolAgent -from .hugging_face_tool_agent import HuggingFaceToolAgent __all__ = [ 'BaseToolAgent', - 'HuggingFaceToolAgent', ] diff --git a/camel/agents/tool_agents/hugging_face_tool_agent.py b/camel/agents/tool_agents/hugging_face_tool_agent.py deleted file mode 100644 index bd74a8373b..0000000000 --- a/camel/agents/tool_agents/hugging_face_tool_agent.py +++ /dev/null @@ -1,206 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. 
All Rights Reserved. ========= -from typing import Any, Optional - -from camel.agents.tool_agents.base import BaseToolAgent - - -# flake8: noqa :E501 -class HuggingFaceToolAgent(BaseToolAgent): - r"""Tool agent for calling HuggingFace models. This agent is a wrapper - around agents from the `transformers` library. For more information - about the available models, please see the `transformers` documentation - at https://huggingface.co/docs/transformers/transformers_agents. - - Args: - name (str): The name of the agent. - *args (Any): Additional positional arguments to pass to the underlying - Agent class. - remote (bool, optional): Flag indicating whether to run the agent - remotely. (default: :obj:`True`) - **kwargs (Any): Additional keyword arguments to pass to the underlying - Agent class. - """ - - def __init__( - self, - name: str, - *args: Any, - remote: bool = True, - **kwargs: Any, - ) -> None: - try: - # TODO: Support other tool agents - import transformers - from packaging import version - - if version.parse(transformers.__version__) < version.parse( - "4.31.0" - ): - raise ValueError( - "The version of \"transformers\" package should >= 4.31.0" - ) - - from transformers.tools import OpenAiAgent - from transformers.tools.agent_types import AgentImage - except (ImportError, ValueError): - raise ValueError( - "Could not import transformers tool agents. " - "Please setup the environment with " - "pip install huggingface_hub==0.14.1 transformers==4.31.0 diffusers accelerate==0.20.3 datasets torch soundfile sentencepiece opencv-python" - ) - self.agent_image_type = AgentImage - self.agent = OpenAiAgent(*args, **kwargs) - description = f"""The `{name}` is a tool agent that can perform a variety of tasks including: -- Document question answering: given a document (such as a PDF) in image format, answer a question on this document -- Text question answering: given a long text and a question, answer the question in the text -- Unconditional image captioning: Caption the image! 
-- Image question answering: given an image, answer a question on this image -- Image segmentation: given an image and a prompt, output the segmentation mask of that prompt -- Speech to text: given an audio recording of a person talking, transcribe the speech into text -- Text to speech: convert text to speech -- Zero-shot text classification: given a text and a list of labels, identify to which label the text corresponds the most -- Text summarization: summarize a long text in one or a few sentences -- Translation: translate the text into a given language -- Text downloading: to download a text from a web URL -- Text to image: generate an image according to a prompt, leveraging stable diffusion -- Image transformation: modify an image given an initial image and a prompt, leveraging instruct pix2pix stable diffusion -- Text to video: generate a small video according to a prompt - -Here are some python code examples of what you can do with this agent: - -Single execution (step) mode, the single execution method is when using the step() method of the agent: -``` -# Text to image -rivers_and_lakes_image = {name}.step("Draw me a picture of rivers and lakes.") -rivers_and_lakes_image.save("./rivers_and_lakes_image.png") - -# Text to image -> Image transformation -sea_add_island_image = {name}.step("Draw me a picture of the sea then transform the picture to add an island") -sea_add_island_image.save("./sea_add_island_image.png") - -# If you'd like to keep a state across executions or to pass non-text objects to the agent, -# you can do so by specifying variables that you would like the agent to use. For example, -# you could generate the first image of rivers and lakes, and ask the model to update that picture to add an island by doing the following: -picture = {name}.step("Generate a picture of rivers and lakes.") -picture.save("./picture.png") -updated_picture = {name}.step("Transform the image in `picture` to add an island to it.", picture=picture) -updated_picture.save("./updated_picture.png") - -capybara_sea_image = {name}.step("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea") -capybara_sea_image.save("./capybara_sea_image.png") - -# Document question answering -answer = {name}.step( - "In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?", - document=document, -) -print(answer) - - -# Text to image -boat_image = {name}.step("Generate an image of a boat in the water") -boat_image.save("./boat_image.png") - -# Unconditional image captioning -boat_image_caption = {name}.step("Can you caption the `boat_image`?", boat_image=boat_image) -print(boat_image_caption) - -# Text to image -> Unconditional image captioning -> Text to speech -boat_audio = {name}.step("Can you generate an image of a boat? 
Please read out loud the contents of the image afterwards") - -# Text downloading -document = {name}.step("Download the text from http://hf.co") -print(document) - -# Text summarization -summary = {name}.step("Summarize the following text: `document`", document=document) -print(summary) - -# Text downloading -> Text summarization -> Text to speech -audio = {name}.step("Read out loud the summary of http://hf.co") -``` - -Chat-based execution (chat), the agent also has a chat-based approach, using the chat() method: -``` -# Clean the chat history -{name}.reset() - -# Text to image -capybara_image = {name}.chat("Show me an an image of a capybara") -capybara_image.save("./capybara_image.png") - -# Image transformation -transformed_capybara_image = {name}.chat("Transform the image so that it snows") -transformed_capybara_image.save("./transformed_capybara_image.png") - -# Image segmentation -segmented_transformed_capybara_image = {name}.chat("Show me a mask of the snowy capybaras") -segmented_transformed_capybara_image.save("./segmented_transformed_capybara_image.png") -``` -""" - super(HuggingFaceToolAgent, self).__init__(name, description) - self.remote = remote - - def reset(self) -> None: - r"""Resets the chat history of the agent.""" - self.agent.prepare_for_new_chat() - - def step( - self, - *args: Any, - remote: Optional[bool] = None, - **kwargs: Any, - ) -> Any: - r"""Runs the agent in single execution mode. - - Args: - *args (Any): Positional arguments to pass to the agent. - remote (bool, optional): Flag indicating whether to run the agent - remotely. Overrides the default setting. (default: :obj:`None`) - **kwargs (Any): Keyword arguments to pass to the agent. - - Returns: - str: The response from the agent. - """ - if remote is None: - remote = self.remote - agent_output = self.agent.run(*args, remote=remote, **kwargs) - if isinstance(agent_output, self.agent_image_type): - agent_output = agent_output.to_raw() - return agent_output - - def chat( - self, - *args: Any, - remote: Optional[bool] = None, - **kwargs: Any, - ) -> Any: - r"""Runs the agent in a chat conversation mode. - - Args: - *args (Any): Positional arguments to pass to the agent. - remote (bool, optional): Flag indicating whether to run the agent - remotely. Overrides the default setting. (default: :obj:`None`) - **kwargs (Any): Keyword arguments to pass to the agent. - - Returns: - str: The response from the agent. - """ - if remote is None: - remote = self.remote - agent_output = self.agent.chat(*args, remote=remote, **kwargs) - if isinstance(agent_output, self.agent_image_type): - agent_output = agent_output.to_raw() - return agent_output diff --git a/camel/societies/__init__.py b/camel/societies/__init__.py index 94c22619e7..dc8c54c39a 100644 --- a/camel/societies/__init__.py +++ b/camel/societies/__init__.py @@ -11,12 +11,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -from .babyagi_playing import BabyAGI from .role_playing import RolePlaying from .workforce import Workforce __all__ = [ 'RolePlaying', - 'BabyAGI', 'Workforce', ] diff --git a/camel/societies/babyagi_playing.py b/camel/societies/babyagi_playing.py deleted file mode 100644 index 612f201ba5..0000000000 --- a/camel/societies/babyagi_playing.py +++ /dev/null @@ -1,284 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. 
========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -from collections import deque -from typing import Dict, List, Optional - -from camel.agents import ( - ChatAgent, - TaskCreationAgent, - TaskPrioritizationAgent, - TaskSpecifyAgent, -) -from camel.agents.chat_agent import ChatAgentResponse -from camel.generators import SystemMessageGenerator -from camel.logger import get_logger -from camel.messages import BaseMessage -from camel.prompts import TextPrompt -from camel.types import RoleType, TaskType - -logger = get_logger(__name__) - - -class BabyAGI: - r"""The BabyAGI Agent adapted from `"Task-driven Autonomous Agent" - `_. - - Args: - assistant_role_name (str): The name of the role played by the - assistant. - user_role_name (str): The name of the role played by the user. - task_prompt (str, optional): A prompt for the task to be performed. - (default: :obj:`""`) - task_type (TaskType, optional): The type of task to perform. - (default: :obj:`TaskType.AI_SOCIETY`) - max_task_history (int): The maximum number of previous tasks - information to include in the task agent. - (default: :obj:10) - assistant_agent_kwargs (Dict, optional): Additional arguments to pass - to the assistant agent. (default: :obj:`None`) - task_specify_agent_kwargs (Dict, optional): Additional arguments to - pass to the task specify agent. (default: :obj:`None`) - task_creation_agent_kwargs (Dict, optional): Additional arguments to - pass to the task creation agent. (default: :obj:`None`) - task_prioritization_agent_kwargs (Dict, optional): Additional arguments - to pass to the task prioritization agent. (default: :obj:`None`) - sys_msg_generator_kwargs (Dict, optional): Additional arguments to - pass to the system message generator. (default: :obj:`None`) - extend_task_specify_meta_dict (Dict, optional): A dict to extend the - task specify meta dict with. (default: :obj:`None`) - output_language (str, optional): The language to be output by the - agents. (default: :obj:`None`) - message_window_size (int, optional): The maximum number of previous - messages to include in the context window. If `None`, no windowing - is performed. 
(default: :obj:`None`) - """ - - def __init__( - self, - assistant_role_name: str, - user_role_name: str, - task_prompt: str = "", - task_type: TaskType = TaskType.AI_SOCIETY, - max_task_history: int = 10, - assistant_agent_kwargs: Optional[Dict] = None, - task_specify_agent_kwargs: Optional[Dict] = None, - task_creation_agent_kwargs: Optional[Dict] = None, - task_prioritization_agent_kwargs: Optional[Dict] = None, - sys_msg_generator_kwargs: Optional[Dict] = None, - extend_task_specify_meta_dict: Optional[Dict] = None, - output_language: Optional[str] = None, - message_window_size: Optional[int] = None, - ) -> None: - self.task_type = task_type - self.task_prompt = task_prompt - self.specified_task_prompt: TextPrompt - self.init_specified_task_prompt( - assistant_role_name, - user_role_name, - task_specify_agent_kwargs, - extend_task_specify_meta_dict, - output_language, - ) - - sys_msg_generator = SystemMessageGenerator( - task_type=self.task_type, **(sys_msg_generator_kwargs or {}) - ) - - init_assistant_sys_msg = sys_msg_generator.from_dicts( - meta_dicts=[ - dict( - assistant_role=assistant_role_name, - user_role=user_role_name, - task=self.specified_task_prompt, - ) - ], - role_tuples=[ - (assistant_role_name, RoleType.ASSISTANT), - ], - ) - - self.assistant_agent: ChatAgent - self.assistant_sys_msg: Optional[BaseMessage] - self.task_creation_agent: TaskCreationAgent - self.task_prioritization_agent: TaskPrioritizationAgent - self.init_agents( - init_assistant_sys_msg[0], - assistant_agent_kwargs, - task_creation_agent_kwargs, - task_prioritization_agent_kwargs, - output_language, - message_window_size, - ) - - self.subtasks: deque = deque([]) - self.solved_subtasks: List[str] = [] - self.MAX_TASK_HISTORY = max_task_history - - def init_specified_task_prompt( - self, - assistant_role_name: str, - user_role_name: str, - task_specify_agent_kwargs: Optional[Dict], - extend_task_specify_meta_dict: Optional[Dict], - output_language: Optional[str], - ): - r"""Use a task specify agent to generate a specified task prompt. - Generated specified task prompt will be used to replace original - task prompt. If there is no task specify agent, specified task - prompt will not be generated. - - Args: - assistant_role_name (str): The name of the role played by the - assistant. - user_role_name (str): The name of the role played by the user. - task_specify_agent_kwargs (Dict, optional): Additional arguments - to pass to the task specify agent. - extend_task_specify_meta_dict (Dict, optional): A dict to extend - the task specify meta dict with. - output_language (str, optional): The language to be output by the - agents. 
- """ - task_specify_meta_dict = dict() - if self.task_type in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT]: - task_specify_meta_dict.update( - dict( - assistant_role=assistant_role_name, - user_role=user_role_name, - ) - ) - task_specify_meta_dict.update(extend_task_specify_meta_dict or {}) - task_specify_agent = TaskSpecifyAgent( - task_type=self.task_type, - output_language=output_language, - **(task_specify_agent_kwargs or {}), - ) - self.specified_task_prompt = task_specify_agent.run( - self.task_prompt, - meta_dict=task_specify_meta_dict, - ) - - def init_agents( - self, - init_assistant_sys_msg: BaseMessage, - assistant_agent_kwargs: Optional[Dict], - task_creation_agent_kwargs: Optional[Dict], - task_prioritization_agent_kwargs: Optional[Dict], - output_language: Optional[str], - message_window_size: Optional[int] = None, - ): - r"""Initialize assistant and user agents with their system messages. - - Args: - init_assistant_sys_msg (BaseMessage): Assistant agent's initial - system message. - assistant_agent_kwargs (Dict, optional): Additional arguments to - pass to the assistant agent. - task_creation_agent_kwargs (Dict, optional): Additional arguments - to pass to the task creation agent. - task_prioritization_agent_kwargs (Dict, optional): Additional - arguments to pass to the task prioritization agent. - output_language (str, optional): The language to be output by the - agents. - message_window_size (int, optional): The maximum number of previous - messages to include in the context window. If `None`, no - windowing is performed. (default: :obj:`None`) - """ - self.assistant_agent = ChatAgent( - init_assistant_sys_msg, - output_language=output_language, - message_window_size=message_window_size, - **(assistant_agent_kwargs or {}), - ) - self.assistant_sys_msg = self.assistant_agent.system_message - self.assistant_agent.reset() - - self.task_creation_agent = TaskCreationAgent( - objective=self.specified_task_prompt, - role_name=getattr(self.assistant_sys_msg, 'role_name', None) - or "assistant", - output_language=output_language, - message_window_size=message_window_size, - **(task_creation_agent_kwargs or {}), - ) - self.task_creation_agent.reset() - - self.task_prioritization_agent = TaskPrioritizationAgent( - objective=self.specified_task_prompt, - output_language=output_language, - message_window_size=message_window_size, - **(task_prioritization_agent_kwargs or {}), - ) - self.task_prioritization_agent.reset() - - def step(self) -> ChatAgentResponse: - r"""BabyAGI agent would pull the first task from the task list, - complete the task based on the context, then creates new tasks and - re-prioritizes the task list based on the objective and the result of - the previous task. It returns assistant message. - - Returns: - ChatAgentResponse: it contains the resulting assistant message, - whether the assistant agent terminated the conversation, - and any additional assistant information. 
- - """ - if not self.subtasks: - new_subtask_list = self.task_creation_agent.run(task_list=[]) - prioritized_subtask_list = self.task_prioritization_agent.run( - new_subtask_list - ) - self.subtasks = deque(prioritized_subtask_list) - - task_name = self.subtasks.popleft() - assistant_msg_msg = BaseMessage.make_user_message( - role_name=getattr(self.assistant_sys_msg, 'role_name', None) - or "assistant", - content=f"{task_name}", - ) - - assistant_response = self.assistant_agent.step(assistant_msg_msg) - assistant_msg = assistant_response.msgs[0] - - self.solved_subtasks.append(task_name) - past_tasks = self.solved_subtasks + list(self.subtasks) - - new_subtask_list = self.task_creation_agent.run( - task_list=past_tasks[-self.MAX_TASK_HISTORY :] - ) - - if new_subtask_list: - self.subtasks.extend(new_subtask_list) - prioritized_subtask_list = self.task_prioritization_agent.run( - task_list=list(self.subtasks)[-self.MAX_TASK_HISTORY :] - ) - self.subtasks = deque(prioritized_subtask_list) - else: - logger.info("no new tasks") - assistant_response.info['task_name'] = task_name - assistant_response.info['subtasks'] = list(self.subtasks) - if not self.subtasks: - terminated = True - assistant_response.info['termination_reasons'] = ( - "All tasks are solved" - ) - return ChatAgentResponse( - msgs=[assistant_msg], - terminated=terminated, - info=assistant_response.info, - ) - return ChatAgentResponse( - msgs=[assistant_msg], - terminated=assistant_response.terminated, - info=assistant_response.info, - ) diff --git a/docs/camel.agents.tool_agents.rst b/docs/camel.agents.tool_agents.rst index ec85836f04..d59c8d2683 100644 --- a/docs/camel.agents.tool_agents.rst +++ b/docs/camel.agents.tool_agents.rst @@ -12,14 +12,6 @@ camel.agents.tool\_agents.base module :undoc-members: :show-inheritance: -camel.agents.tool\_agents.hugging\_face\_tool\_agent module ------------------------------------------------------------ - -.. automodule:: camel.agents.tool_agents.hugging_face_tool_agent - :members: - :undoc-members: - :show-inheritance: - Module contents --------------- diff --git a/docs/camel.societies.rst b/docs/camel.societies.rst index 5e27e77dc2..9a22d3a144 100644 --- a/docs/camel.societies.rst +++ b/docs/camel.societies.rst @@ -4,14 +4,6 @@ camel.societies package Submodules ---------- -camel.societies.babyagi\_playing module ---------------------------------------- - -.. automodule:: camel.societies.babyagi_playing - :members: - :undoc-members: - :show-inheritance: - camel.societies.role\_playing module ------------------------------------ diff --git a/docs/key_modules/societies.md b/docs/key_modules/societies.md index 9312e8f021..f68cd4d940 100644 --- a/docs/key_modules/societies.md +++ b/docs/key_modules/societies.md @@ -165,7 +165,6 @@ if __name__ == "__main__": diff --git a/docs/key_modules/society.md b/docs/key_modules/society.md index ea01bab8f3..c48f0ca14c 100644 --- a/docs/key_modules/society.md +++ b/docs/key_modules/society.md @@ -19,7 +19,6 @@ icon: people-group Frameworks: @@ -47,15 +46,6 @@ icon: people-group - - Task-driven, fully autonomous project agent
- Based on BabyAGI, with seamless CAMEL integration.
---
@@ -184,24 +174,11 @@ if __name__ == "__main__":
---
- Autonomous project agent—no human in the loop.
- Original BabyAGI repo
- ---
  • Use RolePlaying for most multi-agent conversations, with or without a critic.
  • Define specific roles and prompt-guardrails for your agents—structure is everything!
-  • Try BabyAGI when you want open-ended, research-oriented, or autonomous projects.
  • Leverage the with_task_specify and with_task_planner options for highly complex tasks (see the sketch after this list).
  • Monitor for infinite loops—every agent response should have a clear next step or end.
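For reference, the retained guidance above maps onto a minimal `RolePlaying` loop along these lines. This is an illustrative sketch only, assuming the current `camel.societies.RolePlaying` constructor still accepts `with_task_specify` and `with_task_planner` and exposes `init_chat()` and `step()` as in the repository's role-playing examples; the role names and task prompt are reused from the example removed in this patch.

```python
from camel.societies import RolePlaying


def main(chat_turn_limit: int = 10) -> None:
    # Structure is everything: give each agent a specific, well-scoped role.
    session = RolePlaying(
        assistant_role_name="Python Programmer",
        user_role_name="Stock Trader",
        task_prompt="Develop a trading bot for the stock market",
        with_task_specify=True,   # refine the raw task prompt before starting
        with_task_planner=False,  # enable for highly complex tasks
    )

    input_msg = session.init_chat()
    for _ in range(chat_turn_limit):  # cap turns to avoid infinite loops
        assistant_response, user_response = session.step(input_msg)
        if assistant_response.terminated or user_response.terminated:
            break  # every response should lead to a next step or a clear end
        print(assistant_response.msg.content)
        input_msg = assistant_response.msg


if __name__ == "__main__":
    main()
```

The turn cap and the `terminated` checks cover the last tip: each step either advances the task or ends the session.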
diff --git a/examples/ai_society/babyagi_playing.py b/examples/ai_society/babyagi_playing.py deleted file mode 100644 index 1288c37658..0000000000 --- a/examples/ai_society/babyagi_playing.py +++ /dev/null @@ -1,74 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= - -from colorama import Fore - -from camel.societies import BabyAGI -from camel.utils import print_text_animated - - -def main(model=None, chat_turn_limit=15) -> None: - task_prompt = "Develop a trading bot for the stock market" - babyagi_session = BabyAGI( - assistant_role_name="Python Programmer", - assistant_agent_kwargs=dict(model=model), - user_role_name="Stock Trader", - task_prompt=task_prompt, - task_specify_agent_kwargs=dict(model=model), - ) - - print( - Fore.GREEN - + f"AI Assistant sys message:\n{babyagi_session.assistant_sys_msg}\n" - ) - - print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n") - print( - Fore.CYAN - + f"Specified task prompt:\n{babyagi_session.specified_task_prompt}\n" - ) - print( - Fore.RED - + f"Final task prompt:\n{babyagi_session.specified_task_prompt}\n" - ) - - n = 0 - while n < chat_turn_limit: - n += 1 - assistant_response = babyagi_session.step() - if assistant_response.terminated: - print( - Fore.GREEN - + ( - "AI Assistant terminated. Reason: " - f"{assistant_response.info['termination_reasons']}." - ) - ) - break - print_text_animated( - Fore.RED + "Task Name:\n\n" - f"{assistant_response.info['task_name']}\n" - ) - print_text_animated( - Fore.GREEN + "AI Assistant:\n\n" - f"{assistant_response.msg.content}\n" - ) - print_text_animated( - Fore.BLUE + "Remaining Subtasks:\n\n" - f"{assistant_response.info['subtasks'][:5]}\n" - ) - - -if __name__ == "__main__": - main() diff --git a/examples/embodiment/hugging_face_tool.py b/examples/embodiment/hugging_face_tool.py deleted file mode 100644 index 7a510e1b3b..0000000000 --- a/examples/embodiment/hugging_face_tool.py +++ /dev/null @@ -1,53 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. 
========= -from typing import List - -from camel.agents import EmbodiedAgent, HuggingFaceToolAgent -from camel.agents.tool_agents.base import BaseToolAgent -from camel.generators import SystemMessageGenerator -from camel.types import RoleType - - -def main(): - # Create an embodied agent - role_name = "Artist" - meta_dict = dict(role=role_name, task="Drawing") - sys_msg = SystemMessageGenerator().from_dict( - meta_dict=meta_dict, - role_tuple=(f"{role_name}'s Embodiment", RoleType.EMBODIMENT), - ) - tool_agents = [ - HuggingFaceToolAgent( - 'hugging_face_tool_agent', - remote=True, - ) - ] - tool_agents: List[BaseToolAgent] - embodied_agent = EmbodiedAgent( - sys_msg, - verbose=True, - tool_agents=tool_agents, - ) - - user_msg = ( - "Draw all the Camelidae species, " - "caption the image content, " - "save the images by species name." - ) - response = embodied_agent.step(user_msg) - print(response.msg.content) - - -if __name__ == "__main__": - main() diff --git a/test/agents/test_babyagi_playing.py b/test/agents/test_babyagi_playing.py deleted file mode 100644 index 8382ddc653..0000000000 --- a/test/agents/test_babyagi_playing.py +++ /dev/null @@ -1,228 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. 
========= -from copy import deepcopy -from unittest.mock import MagicMock - -import pytest -from openai.types.chat.chat_completion import Choice -from openai.types.chat.chat_completion_message import ChatCompletionMessage -from openai.types.completion_usage import CompletionUsage - -from camel.agents import ChatAgent, TaskCreationAgent, TaskPrioritizationAgent -from camel.messages import BaseMessage -from camel.models import ModelFactory -from camel.societies import BabyAGI -from camel.types import ( - ChatCompletion, - ModelPlatformType, - ModelType, - RoleType, - TaskType, -) - -model_backend_rsp_base = ChatCompletion( - id="mock_response_id", - choices=[ - Choice( - finish_reason="stop", - index=0, - logprobs=None, - message=ChatCompletionMessage( - content="Mock task specifier response", - role="assistant", - function_call=None, - tool_calls=None, - ), - ) - ], - created=123456789, - model="gpt-4o-2024-05-13", - object="chat.completion", - usage=CompletionUsage( - completion_tokens=32, - prompt_tokens=15, - total_tokens=47, - ), -) - - -parametrize = pytest.mark.parametrize( - 'model', - [ - ModelFactory.create( - model_platform=ModelPlatformType.OPENAI, - model_type=ModelType.STUB, - ), - pytest.param(None, marks=pytest.mark.model_backend), - ], -) - - -@parametrize -def test_babyagi_playing_init(model): - task_prompt = "Develop a trading bot for the stock market" - - babyagi_playing = BabyAGI( - assistant_role_name="Python Programmer", - assistant_agent_kwargs=dict(model=model), - user_role_name="Stock Trader", - task_prompt=task_prompt, - task_specify_agent_kwargs=dict(model=model), - message_window_size=5, - ) - - assert babyagi_playing.task_type == TaskType.AI_SOCIETY - assert babyagi_playing.specified_task_prompt is not None - - assert isinstance(babyagi_playing.assistant_sys_msg, BaseMessage) - assert babyagi_playing.assistant_sys_msg.role_type == RoleType.ASSISTANT - - assert isinstance(babyagi_playing.assistant_agent, ChatAgent) - assert isinstance(babyagi_playing.task_creation_agent, TaskCreationAgent) - assert isinstance( - babyagi_playing.task_prioritization_agent, TaskPrioritizationAgent - ) - - assert len(babyagi_playing.subtasks) == 0 - assert len(babyagi_playing.solved_subtasks) == 0 - - -@parametrize -def test_babyagi_playing_step(model, step_call_count=3): - task_prompt = "Develop a trading bot for the stock market" - - babyagi_playing = BabyAGI( - assistant_role_name="Python Programmer", - assistant_agent_kwargs=dict(model=model), - user_role_name="Stock Trader", - task_prompt=task_prompt, - task_specify_agent_kwargs=dict(model=model), - message_window_size=5, - ) - - # Mock model_backend responses - # Initial task list when first calling babyagi_playing.step() - task_creation_agent_model_rsp0 = deepcopy(model_backend_rsp_base) - task_creation_agent_model_rsp0.choices[0].message.content = "1. Task 0" - task_prioritization_agent_model_rsp0 = deepcopy(model_backend_rsp_base) - task_prioritization_agent_model_rsp0.choices[ - 0 - ].message.content = "1. Task 0" - - # Solve the highest priority (left most) task: Task 0 - assistant_agent_model_rsp1 = deepcopy(model_backend_rsp_base) - assistant_agent_model_rsp1.choices[ - 0 - ].message.content = "Solution for Task 0" - - # Task 0 is solved, remove from TaskPrioritizationAgent model response - task_creation_agent_model_rsp1 = deepcopy(model_backend_rsp_base) - task_creation_agent_model_rsp1.choices[ - 0 - ].message.content = "1. \nTask 1\n\n2. Task 2\n\n3. 
Task 3" - task_prioritization_agent_model_rsp1 = deepcopy(model_backend_rsp_base) - task_prioritization_agent_model_rsp1.choices[ - 0 - ].message.content = "1. Task 2 \nTask 1 \n3. Task 3 " - - # Solve the highest priority (left most) task: Task 2 - assistant_agent_model_rsp2 = deepcopy(model_backend_rsp_base) - assistant_agent_model_rsp2.choices[ - 0 - ].message.content = "Solution for Task 2" - - # Task 2 is solved, remove from TaskPrioritizationAgent model response - task_creation_agent_model_rsp2 = deepcopy(model_backend_rsp_base) - task_creation_agent_model_rsp2.choices[ - 0 - ].message.content = "1. Task 4\n\n2. Task 5" - task_prioritization_agent_model_rsp2 = deepcopy(model_backend_rsp_base) - task_prioritization_agent_model_rsp2.choices[ - 0 - ].message.content = "1. Task 1 \nTask 3 \n3. Task 4 \n4. Task 5" - - # Solve the highest priority (left most) task: Task 1 - assistant_agent_model_rsp3 = deepcopy(model_backend_rsp_base) - assistant_agent_model_rsp3.choices[ - 0 - ].message.content = "Solution for Task 1" - - # Task 1 is solved, remove from TaskPrioritizationAgent model response - task_creation_agent_model_rsp3 = deepcopy(model_backend_rsp_base) - task_creation_agent_model_rsp3.choices[ - 0 - ].message.content = "1. Task 6\n\n2. Task 7\n\n3. Task 8" - task_prioritization_agent_model_rsp3 = deepcopy(model_backend_rsp_base) - task_prioritization_agent_model_rsp3.choices[0].message.content = ( - "1. Task 6 \nTask 3 \n3. Task 4 \n" - "4. Task 7 \n5. Task 5 \n6. Task 8" - ) - - babyagi_playing.task_creation_agent.model_backend.run = MagicMock( - side_effect=[ - task_creation_agent_model_rsp0, - task_creation_agent_model_rsp1, - task_creation_agent_model_rsp2, - task_creation_agent_model_rsp3, - ] - ) - babyagi_playing.task_prioritization_agent.model_backend.run = MagicMock( - side_effect=[ - task_prioritization_agent_model_rsp0, - task_prioritization_agent_model_rsp1, - task_prioritization_agent_model_rsp2, - task_prioritization_agent_model_rsp3, - ] - ) - babyagi_playing.assistant_agent.model_backend.run = MagicMock( - side_effect=[ - assistant_agent_model_rsp1, - assistant_agent_model_rsp2, - assistant_agent_model_rsp3, - ] - ) - - print(f"AI Assistant sys message:\n{babyagi_playing.assistant_sys_msg}\n") - print(f"Original task prompt:\n{task_prompt}\n") - print(f"Specified task prompt:\n{babyagi_playing.specified_task_prompt}\n") - - for i in range(step_call_count): - # Call assistant for multiple times to make test units more robust - assistant_response = babyagi_playing.step() - - assert isinstance( - assistant_response.msgs, list - ), f"Error in calling round {i+1}" - assert ( - len(assistant_response.msgs) == 1 - ), f"Error in calling round {i+1}" - assert isinstance( - assistant_response.msgs[0], BaseMessage - ), f"Error in calling round {i+1}" - assert isinstance( - assistant_response.terminated, bool - ), f"Error in calling round {i+1}" - assert ( - assistant_response.terminated is False - ), f"Error in calling round {i+1}" - assert isinstance( - assistant_response.info, dict - ), f"Error in calling round {i+1}" - - assert ( - len(babyagi_playing.subtasks) > 0 - ), f"Error in calling round {i+1}" - assert ( - len(babyagi_playing.solved_subtasks) == i + 1 - ), f"Error in calling round {i+1}" diff --git a/test/agents/test_embodied_agent.py b/test/agents/test_embodied_agent.py index 83d106cc7f..26f83a812a 100644 --- a/test/agents/test_embodied_agent.py +++ b/test/agents/test_embodied_agent.py @@ -20,7 +20,7 @@ from openai.types.chat.chat_completion_message import 
ChatCompletionMessage from openai.types.completion_usage import CompletionUsage -from camel.agents import EmbodiedAgent, HuggingFaceToolAgent +from camel.agents import EmbodiedAgent from camel.generators import SystemMessageGenerator from camel.messages import BaseMessage from camel.types import ChatCompletion, RoleType @@ -51,24 +51,7 @@ ) -@pytest.mark.skip(reason="Wait huggingface to update openaiv1") @pytest.mark.model_backend -def test_get_action_space_prompt(): - role_name = "Artist" - meta_dict = dict(role=role_name, task="Drawing") - sys_msg = SystemMessageGenerator().from_dict( - meta_dict=meta_dict, - role_tuple=(f"{role_name}'s Embodiment", RoleType.EMBODIMENT), - ) - agent = EmbodiedAgent( - sys_msg, tool_agents=[HuggingFaceToolAgent("hugging_face_tool_agent")] - ) - assert "hugging_face_tool_agent" in agent.get_tool_agent_names() - - -@pytest.mark.skip(reason="Wait huggingface to update openaiv1") -@pytest.mark.model_backend -@pytest.mark.very_slow def test_step(step_call_count=3): # Create an embodied agent role_name = "Artist" diff --git a/test/agents/tool_agents/test_hugging_face_tool_agent.py b/test/agents/tool_agents/test_hugging_face_tool_agent.py deleted file mode 100644 index 3464b620c1..0000000000 --- a/test/agents/tool_agents/test_hugging_face_tool_agent.py +++ /dev/null @@ -1,72 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. 
========= -import binascii - -import pytest -import requests - -from camel.agents import HuggingFaceToolAgent - - -@pytest.mark.skip(reason="Wait huggingface to update openaiv1") -def test_hugging_face_tool_agent_initialization(): - agent = HuggingFaceToolAgent("hugging_face_tool_agent") - assert agent.name == "hugging_face_tool_agent" - assert agent.remote is True - assert agent.description.startswith(f"The `{agent.name}` is a tool agent") - - -@pytest.mark.skip(reason="Wait huggingface to update openaiv1") -@pytest.mark.model_backend -@pytest.mark.very_slow -def test_hugging_face_tool_agent_step(step_call_count=3): - from PIL.PngImagePlugin import PngImageFile - - agent = HuggingFaceToolAgent("hugging_face_tool_agent") - for i in range(step_call_count): - try: - result = agent.step("Generate an image of a boat in the water") - except (binascii.Error, requests.exceptions.ConnectionError) as ex: - print( - "Warning: caught an exception, ignoring it since " - f"it is a known issue of Huggingface ({ex!s})" - ) - return - assert isinstance( - result, PngImageFile - ), f"Error in calling round {i+1}" - - -@pytest.mark.skip(reason="Wait huggingface to update openaiv1") -@pytest.mark.model_backend -@pytest.mark.very_slow -def test_hugging_face_tool_agent_chat(): - from PIL.PngImagePlugin import PngImageFile - - agent = HuggingFaceToolAgent("hugging_face_tool_agent") - try: - result = agent.chat("Show me an image of a capybara") - except (binascii.Error, requests.exceptions.ConnectionError) as ex: - print( - "Warning: caught an exception, ignoring it since " - f"it is a known issue of Huggingface ({ex!s})" - ) - return - assert isinstance(result, PngImageFile) - - -@pytest.mark.skip(reason="Wait huggingface to update openaiv1") -def test_hugging_face_tool_agent_reset(): - agent = HuggingFaceToolAgent("hugging_face_tool_agent") - agent.reset() From 2f493a27beefd8bdb41f2f7d4c0d77957bf7ef11 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 8 Feb 2026 01:16:20 +0000 Subject: [PATCH 3/6] Remove test file for deleted babyagi example Co-authored-by: lightaime <23632352+lightaime@users.noreply.github.com> --- examples/test/test_babyagi_example.py | 196 -------------------------- 1 file changed, 196 deletions(-) delete mode 100644 examples/test/test_babyagi_example.py diff --git a/examples/test/test_babyagi_example.py b/examples/test/test_babyagi_example.py deleted file mode 100644 index 4ffa21b048..0000000000 --- a/examples/test/test_babyagi_example.py +++ /dev/null @@ -1,196 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. 
========= -import pytest -from mock import MagicMock, patch - -import examples.ai_society.babyagi_playing -from camel.models import ModelFactory -from camel.societies.babyagi_playing import BabyAGI -from camel.types import ModelPlatformType, ModelType - -parametrize = pytest.mark.parametrize( - 'model', - [ - ModelFactory.create( - model_platform=ModelPlatformType.OPENAI, - model_type=ModelType.STUB, - ), - pytest.param(None, marks=pytest.mark.model_backend), - ], -) - - -@parametrize -def test_ai_society_babyagi_playing_example(model): - r"""Test that the BabyAGI example structure works without actually running - the resource-intensive parts. - """ - # Mock BabyAGI to avoid actual instantiation and execution - with patch( - 'examples.ai_society.babyagi_playing.BabyAGI' - ) as mock_babyagi_class: - mock_babyagi_instance = MagicMock() - mock_babyagi_class.return_value = mock_babyagi_instance - mock_response = MagicMock() - mock_response.terminated = True - mock_response.info = { - 'termination_reasons': 'Test completed', - 'task_name': 'Test', - 'subtasks': [], - } - mock_response.msg = MagicMock() - mock_babyagi_instance.step.return_value = mock_response - - # Mock print functions to avoid console output - with patch('examples.ai_society.babyagi_playing.print_text_animated'): - with patch('examples.ai_society.babyagi_playing.print'): - # Run the main function with our mocks - examples.ai_society.babyagi_playing.main( - model=model, chat_turn_limit=2 - ) - - mock_babyagi_class.assert_called_once() - assert mock_babyagi_instance.step.called - - -@parametrize -def test_babyagi_initialization(model): - r"""Test that BabyAGI initializes with correct parameters.""" - task_prompt = "Test task prompt" - assistant_role = "Test Assistant" - user_role = "Test User" - - # Mock the TaskSpecifyAgent to avoid actual API calls - with patch( - 'camel.societies.babyagi_playing.TaskSpecifyAgent' - ) as mock_task_specify_agent_class: - # Create a mock instance and mock the run method - mock_task_specify_agent = MagicMock() - mock_task_specify_agent_class.return_value = mock_task_specify_agent - mock_task_specify_agent.run.return_value = "Specified test task" - - # Mock the SystemMessageGenerator to avoid actual API calls - with patch( - 'camel.societies.babyagi_playing.SystemMessageGenerator' - ) as mock_sys_msg_generator_class: - # Create a mock instance and mock the from_dicts method - mock_sys_msg_generator = MagicMock() - mock_sys_msg_generator_class.return_value = mock_sys_msg_generator - mock_sys_msg = [MagicMock()] - mock_sys_msg_generator.from_dicts.return_value = mock_sys_msg - - # Mock ChatAgent to avoid actual API calls - with patch( - 'camel.societies.babyagi_playing.ChatAgent' - ) as mock_chat_agent_class: - mock_chat_agent = MagicMock() - mock_chat_agent.model = model - mock_chat_agent_class.return_value = mock_chat_agent - - babyagi = BabyAGI( - assistant_role_name=assistant_role, - assistant_agent_kwargs=dict(model=model), - user_role_name=user_role, - task_prompt=task_prompt, - task_specify_agent_kwargs=dict(model=model), - ) - - assert babyagi.task_prompt == task_prompt - assert mock_task_specify_agent.run.called - assert mock_sys_msg_generator.from_dicts.called - - -@parametrize -def test_babyagi_step_functionality(model): - r"""Test that BabyAGI step method works as expected.""" - with patch( - 'examples.ai_society.babyagi_playing.BabyAGI' - ) as mock_babyagi_class: - mock_babyagi = MagicMock() - mock_babyagi_class.return_value = mock_babyagi - - # Create a mock response for the step 
method - mock_response = MagicMock() - mock_response.terminated = False - mock_response.msg = MagicMock() - mock_response.msg.content = "Test response content" - mock_response.info = { - 'task_name': 'Test Task', - 'subtasks': ['Subtask 1', 'Subtask 2'], - } - mock_babyagi.step.return_value = mock_response - - # Mock print functions to avoid console output - with patch('examples.ai_society.babyagi_playing.print_text_animated'): - with patch('examples.ai_society.babyagi_playing.print'): - # Run the main function with our mocks - examples.ai_society.babyagi_playing.main( - model=model, chat_turn_limit=1 - ) - - # Verify BabyAGI was instantiated with expected parameters - mock_babyagi_class.assert_called_once_with( - assistant_role_name="Python Programmer", - assistant_agent_kwargs=dict(model=model), - user_role_name="Stock Trader", - task_prompt="Develop a trading bot for the stock market", - task_specify_agent_kwargs=dict(model=model), - ) - - assert mock_babyagi.step.called - - -@parametrize -def test_babyagi_termination(model): - r"""Test that BabyAGI terminates correctly when response indicates - termination. - """ - with patch( - 'examples.ai_society.babyagi_playing.BabyAGI' - ) as mock_babyagi_class: - mock_babyagi = MagicMock() - mock_babyagi_class.return_value = mock_babyagi - - # Create a mock response that indicates termination - mock_response = MagicMock() - mock_response.terminated = True - mock_response.msg = MagicMock() - mock_response.msg.content = "Final task completed" - mock_response.info = { - 'termination_reasons': 'Task completed', - 'task_name': 'Final Task', - 'subtasks': [], - } - mock_babyagi.step.return_value = mock_response - - # Mock print functions to avoid console output - with patch('examples.ai_society.babyagi_playing.print_text_animated'): - with patch('examples.ai_society.babyagi_playing.print'): - # Run the main function with our mocks - # Set a high chat_turn_limit to verify early termination - examples.ai_society.babyagi_playing.main( - model=model, chat_turn_limit=10 - ) - - # Verify BabyAGI was instantiated with expected parameters - mock_babyagi_class.assert_called_once() - - # Verify step was called exactly once (since it should terminate after - # first step) - mock_babyagi.step.assert_called_once() - - # Test the termination condition - response = mock_babyagi.step.return_value - assert response.terminated - assert 'termination_reasons' in response.info From b798c2d062f2352db0159cfda664cff30a33eaba Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 8 Feb 2026 01:33:58 +0000 Subject: [PATCH 4/6] Remove entire tool_agents directory and BaseToolAgent functionality Co-authored-by: lightaime <23632352+lightaime@users.noreply.github.com> --- camel/agents/__init__.py | 2 - camel/agents/embodied_agent.py | 63 ++----------------- camel/agents/tool_agents/__init__.py | 18 ------ camel/agents/tool_agents/base.py | 39 ------------ docs/camel.agents.rst | 8 --- docs/camel.agents.tool_agents.rst | 21 ------- .../tool_agents/test_tool_agent_base.py | 28 --------- 7 files changed, 4 insertions(+), 175 deletions(-) delete mode 100644 camel/agents/tool_agents/__init__.py delete mode 100644 camel/agents/tool_agents/base.py delete mode 100644 docs/camel.agents.tool_agents.rst delete mode 100644 test/agents/tool_agents/test_tool_agent_base.py diff --git a/camel/agents/__init__.py b/camel/agents/__init__.py index e2b46f779d..2e5f4b7e6e 100644 --- a/camel/agents/__init__.py +++ b/camel/agents/__init__.py @@ 
-26,7 +26,6 @@ TaskPrioritizationAgent, TaskSpecifyAgent, ) -from .tool_agents.base import BaseToolAgent __all__ = [ 'BaseAgent', @@ -36,7 +35,6 @@ 'TaskCreationAgent', 'TaskPrioritizationAgent', 'CriticAgent', - 'BaseToolAgent', 'EmbodiedAgent', 'RoleAssignmentAgent', 'SearchAgent', diff --git a/camel/agents/embodied_agent.py b/camel/agents/embodied_agent.py index 31d2fa40ff..2f2171ade2 100644 --- a/camel/agents/embodied_agent.py +++ b/camel/agents/embodied_agent.py @@ -11,17 +11,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -from typing import Any, List, Optional +from typing import Any, Optional from colorama import Fore from camel.agents.chat_agent import ChatAgent -from camel.agents.tool_agents.base import BaseToolAgent -from camel.interpreters import ( - BaseInterpreter, - InternalPythonInterpreter, - SubprocessInterpreter, -) +from camel.interpreters import BaseInterpreter, SubprocessInterpreter from camel.messages import BaseMessage from camel.models import BaseModelBackend from camel.responses import ChatAgentResponse @@ -51,13 +46,9 @@ class EmbodiedAgent(ChatAgent): message_window_size (int, optional): The maximum number of previous messages to include in the context window. If `None`, no windowing is performed. (default: :obj:`None`) - tool_agents (List[BaseToolAgent], optional): The tools agents to use in - the embodied agent. (default: :obj:`None`) code_interpreter (BaseInterpreter, optional): The code interpreter to - execute codes. If `code_interpreter` and `tool_agent` are both - `None`, default to `SubProcessInterpreter`. If `code_interpreter` - is `None` and `tool_agents` is not `None`, default to - `InternalPythonInterpreter`. (default: :obj:`None`) + execute codes. Default to `SubProcessInterpreter`. + (default: :obj:`None`) verbose (bool, optional): Whether to print the critic's messages. logger_color (Any): The color of the logger displayed to the user. (default: :obj:`Fore.MAGENTA`) @@ -68,22 +59,16 @@ def __init__( system_message: BaseMessage, model: Optional[BaseModelBackend] = None, message_window_size: Optional[int] = None, - tool_agents: Optional[List[BaseToolAgent]] = None, code_interpreter: Optional[BaseInterpreter] = None, verbose: bool = False, logger_color: Any = Fore.MAGENTA, ) -> None: - self.tool_agents = tool_agents self.code_interpreter: BaseInterpreter if code_interpreter is not None: self.code_interpreter = code_interpreter - elif self.tool_agents: - self.code_interpreter = InternalPythonInterpreter() else: self.code_interpreter = SubprocessInterpreter() - if self.tool_agents: - system_message = self._set_tool_agents(system_message) self.verbose = verbose self.logger_color = logger_color super().__init__( @@ -92,46 +77,6 @@ def __init__( message_window_size=message_window_size, ) - def _set_tool_agents(self, system_message: BaseMessage) -> BaseMessage: - action_space_prompt = self._get_tool_agents_prompt() - result_message = system_message.create_new_instance( - content=system_message.content.format( - action_space=action_space_prompt - ) - ) - if self.tool_agents is not None: - self.code_interpreter.update_action_space( - {tool.name: tool for tool in self.tool_agents} - ) - return result_message - - def _get_tool_agents_prompt(self) -> str: - r"""Returns the action space prompt. - - Returns: - str: The action space prompt. 
- """ - if self.tool_agents is not None: - return "\n".join( - [ - f"*** {tool.name} ***:\n {tool.description}" - for tool in self.tool_agents - ] - ) - else: - return "" - - def get_tool_agent_names(self) -> List[str]: - r"""Returns the names of tool agents. - - Returns: - List[str]: The names of tool agents. - """ - if self.tool_agents is not None: - return [tool.name for tool in self.tool_agents] - else: - return [] - # ruff: noqa: E501 def step(self, input_message: BaseMessage) -> ChatAgentResponse: # type: ignore[override] r"""Performs a step in the conversation. diff --git a/camel/agents/tool_agents/__init__.py b/camel/agents/tool_agents/__init__.py deleted file mode 100644 index adb7855929..0000000000 --- a/camel/agents/tool_agents/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -from .base import BaseToolAgent - -__all__ = [ - 'BaseToolAgent', -] diff --git a/camel/agents/tool_agents/base.py b/camel/agents/tool_agents/base.py deleted file mode 100644 index e448ada28d..0000000000 --- a/camel/agents/tool_agents/base.py +++ /dev/null @@ -1,39 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -from camel.agents import BaseAgent - - -class BaseToolAgent(BaseAgent): - r"""Creates a :obj:`BaseToolAgent` object with the specified name and - description. - - Args: - name (str): The name of the tool agent. - description (str): The description of the tool agent. - """ - - def __init__(self, name: str, description: str) -> None: - self.name = name - self.description = description - - def reset(self) -> None: - r"""Resets the agent to its initial state.""" - pass - - def step(self) -> None: - r"""Performs a single step of the agent.""" - pass - - def __str__(self) -> str: - return f"{self.name}: {self.description}" diff --git a/docs/camel.agents.rst b/docs/camel.agents.rst index 2d9c896b90..6416e9369e 100644 --- a/docs/camel.agents.rst +++ b/docs/camel.agents.rst @@ -1,14 +1,6 @@ camel.agents package ==================== -Subpackages ------------ - -.. 
toctree:: - :maxdepth: 4 - - camel.agents.tool_agents - Submodules ---------- diff --git a/docs/camel.agents.tool_agents.rst b/docs/camel.agents.tool_agents.rst deleted file mode 100644 index d59c8d2683..0000000000 --- a/docs/camel.agents.tool_agents.rst +++ /dev/null @@ -1,21 +0,0 @@ -camel.agents.tool\_agents package -================================= - -Submodules ----------- - -camel.agents.tool\_agents.base module -------------------------------------- - -.. automodule:: camel.agents.tool_agents.base - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- - -.. automodule:: camel.agents.tool_agents - :members: - :undoc-members: - :show-inheritance: diff --git a/test/agents/tool_agents/test_tool_agent_base.py b/test/agents/tool_agents/test_tool_agent_base.py deleted file mode 100644 index 1fc74b0e40..0000000000 --- a/test/agents/tool_agents/test_tool_agent_base.py +++ /dev/null @@ -1,28 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -from camel.agents import BaseToolAgent - - -class DummyToolAgent(BaseToolAgent): - def reset(self): - pass - - def step(self): - pass - - -def test_tool_agent_initialization(): - tool_agent = DummyToolAgent("tool_agent", "description") - assert tool_agent.name == "tool_agent" - assert tool_agent.description == "description" From 8bdabefcc15ca45025624d45ae55058a1c6ff1c1 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 8 Feb 2026 02:00:46 +0000 Subject: [PATCH 5/6] Remove embodied_agent and deductive_reasoner_agent modules Co-authored-by: lightaime <23632352+lightaime@users.noreply.github.com> --- camel/agents/__init__.py | 2 - camel/agents/deductive_reasoner_agent.py | 303 ---------- camel/agents/embodied_agent.py | 146 ----- .../internal_python_interpreter.py | 6 +- docs/camel.agents.rst | 16 - docs/conf.py | 1 - .../advanced_features/embodied_agents.ipynb | 517 ------------------ docs/cookbooks/advanced_features/index.rst | 1 - docs/index.rst | 1 - .../advanced_features/embodied_agents.mdx | 126 ----- .../camel.agents.deductive_reasoner_agent.mdx | 68 --- .../reference/camel.agents.embodied_agent.mdx | 90 --- .../deduce_conditions_and_quality.py | 40 -- examples/embodiment/code_execution.py | 44 -- test/agents/test_deductive_reasoner_agent.py | 137 ----- test/agents/test_embodied_agent.py | 83 --- 16 files changed, 3 insertions(+), 1578 deletions(-) delete mode 100644 camel/agents/deductive_reasoner_agent.py delete mode 100644 camel/agents/embodied_agent.py delete mode 100644 docs/cookbooks/advanced_features/embodied_agents.ipynb delete mode 100644 docs/mintlify/cookbooks/advanced_features/embodied_agents.mdx delete mode 100644 docs/mintlify/reference/camel.agents.deductive_reasoner_agent.mdx delete mode 100644 docs/mintlify/reference/camel.agents.embodied_agent.mdx delete mode 100644 
examples/deductive_reasoner_agent/deduce_conditions_and_quality.py delete mode 100644 examples/embodiment/code_execution.py delete mode 100644 test/agents/test_deductive_reasoner_agent.py delete mode 100644 test/agents/test_embodied_agent.py diff --git a/camel/agents/__init__.py b/camel/agents/__init__.py index 2e5f4b7e6e..f76ef7d972 100644 --- a/camel/agents/__init__.py +++ b/camel/agents/__init__.py @@ -14,7 +14,6 @@ from .base import BaseAgent from .chat_agent import ChatAgent from .critic_agent import CriticAgent -from .embodied_agent import EmbodiedAgent from .knowledge_graph_agent import KnowledgeGraphAgent from .mcp_agent import MCPAgent from .repo_agent import RepoAgent @@ -35,7 +34,6 @@ 'TaskCreationAgent', 'TaskPrioritizationAgent', 'CriticAgent', - 'EmbodiedAgent', 'RoleAssignmentAgent', 'SearchAgent', 'KnowledgeGraphAgent', diff --git a/camel/agents/deductive_reasoner_agent.py b/camel/agents/deductive_reasoner_agent.py deleted file mode 100644 index f01079a410..0000000000 --- a/camel/agents/deductive_reasoner_agent.py +++ /dev/null @@ -1,303 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -import re -from typing import Dict, List, Optional, Union - -from camel.agents.chat_agent import ChatAgent -from camel.logger import get_logger -from camel.messages import BaseMessage -from camel.models import BaseModelBackend -from camel.prompts import TextPrompt -from camel.types import RoleType - -logger = get_logger(__name__) - -# AgentOps decorator setting -try: - import os - - if os.getenv("AGENTOPS_API_KEY") is not None: - from agentops import track_agent - else: - raise ImportError -except (ImportError, AttributeError): - from camel.utils import track_agent - - -@track_agent(name="DeductiveReasonerAgent") -class DeductiveReasonerAgent(ChatAgent): - r"""An agent responsible for deductive reasoning. Model of deductive - reasoning: - - L: A ⊕ C -> q * B - - A represents the known starting state. - - B represents the known target state. - - C represents the conditions required to transition from A to B. - - Q represents the quality or effectiveness of the transition from - A to B. - - L represents the path or process from A to B. - - Args: - model (BaseModelBackend, optional): The model backend to use for - generating responses. 
(default: :obj:`OpenAIModel` with - `GPT_4O_MINI`) - """ - - def __init__( - self, - model: Optional[BaseModelBackend] = None, - ) -> None: - system_message = BaseMessage( - role_name="Insight Agent", - role_type=RoleType.ASSISTANT, - meta_dict=None, - content="You assign roles based on tasks.", - ) - super().__init__(system_message, model=model) - - def deduce_conditions_and_quality( - self, - starting_state: str, - target_state: str, - role_descriptions_dict: Optional[Dict[str, str]] = None, - ) -> Dict[str, Union[List[str], Dict[str, str]]]: - r"""Derives the conditions and quality from the starting state and the - target state based on the model of the deductive reasoning and the - knowledge base. It can optionally consider the roles involved in the - scenario, which allows tailoring the output more closely to the AI - agent's environment. - - Args: - starting_state (str): The initial or starting state from which - conditions are deduced. - target_state (str): The target state of the task. - role_descriptions_dict (Optional[Dict[str, str]], optional): The - descriptions of the roles. (default: :obj:`None`) - role_descriptions_dict (Optional[Dict[str, str]], optional): A - dictionary describing the roles involved in the scenario. This - is optional and can be used to provide a context for the - CAMEL's role-playing, enabling the generation of more relevant - and tailored conditions and quality assessments. This could be - generated using a `RoleAssignmentAgent()` or defined manually - by the user. - - Returns: - Dict[str, Union[List[str], Dict[str, str]]]: A dictionary with the - extracted data from the message. The dictionary contains three - keys: - - 'conditions': A list where each key is a condition ID and - each value is the corresponding condition text. - - 'labels': A list of label strings extracted from the message. - - 'quality': A string of quality assessment strings extracted - from the message. - """ - self.reset() - - deduce_prompt = """You are a deductive reasoner. You are tasked to - complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the - STARTING STATE A and the TARGET STATE B. You are given the CONTEXT - CONTENT to help you complete the TASK. -Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY -fill in the BLANKs, and DO NOT alter or modify any other part of the template - -===== MODELING OF DEDUCTIVE REASONING ===== -You are tasked with understanding a mathematical model based on the components -${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``. -- $A$ represents the known starting state. -- $B$ represents the known target state. -- $C$ represents the conditions required to transition from $A$ to $B$. -- $Q$ represents the quality or effectiveness of the transition from $A$ to -$B$. -- $L$ represents the path or process from $A$ to $B$. - -===== THOUGHT OF DEDUCTIVE REASONING ===== -1. Define the Parameters of A and B: - - Characterization: Before delving into transitions, thoroughly understand - the nature and boundaries of both $A$ and $B$. This includes the type, - properties, constraints, and possible interactions between the two. - - Contrast and Compare: Highlight the similarities and differences between - $A$ and $B$. This comparative analysis will give an insight into what - needs changing and what remains constant. -2. 
Historical & Empirical Analysis: - - Previous Transitions according to the Knowledge Base of GPT: (if - applicable) Extract conditions and patterns from the historical instances - where a similar transition from a state comparable to $A$ moved towards - $B$. - - Scientific Principles: (if applicable) Consider the underlying - scientific principles governing or related to the states and their - transition. For example, if $A$ and $B$ are physical states, laws of - physics might apply. -3. Logical Deduction of Conditions ($C$): - - Direct Path Analysis: What are the immediate and direct conditions - required to move from $A$ to $B$? - - Intermediate States: Are there states between $A$ and $B$ that must be - traversed or can be used to make the transition smoother or more - efficient? If yes, what is the content? - - Constraints & Limitations: Identify potential barriers or restrictions - in moving from $A$ to $B$. These can be external (e.g., environmental - factors) or internal (properties of $A$ or $B$). - - Resource and Information Analysis: What resources and information are - required for the transition? This could be time, entity, factor, code - language, software platform, unknowns, etc. - - External Influences: Consider socio-economic, political, or - environmental factors (if applicable) that could influence the transition - conditions. - - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s, - no matter how unconventional they might seem. Utilize analogies, - metaphors, or brainstorming techniques to envision possible conditions or - paths from $A$ to $B$. - - The conditions $C$ should be multiple but in one sentence. And each - condition should be concerned with one aspect/entity. -4. Entity/Label Recognition of Conditions ($C$): - - Identify and categorize entities of Conditions ($C$) such as the names, - locations, dates, specific technical terms or contextual parameters that - might be associated with events, innovations post-2022. - - The output of the entities/labels will be used as tags or labels for - semantic similarity searches. The entities/labels may be the words, or - phrases, each of them should contain valuable, high information entropy - information, and should be independent. - - Ensure that the identified entities are formatted in a manner suitable - for database indexing and retrieval. Organize the entities into - categories, and combine the category with its instance into a continuous - phrase, without using colons or other separators. - - Format these entities for database indexing: output the category rather - than its instance/content into a continuous phrase. For example, instead - of "Jan. 02", identify it as "Event time". -5. Quality Assessment ($Q$): - - Efficiency: How efficient is the transition from $A$ to $B$, which - measures the resources used versus the desired outcome? - - Effectiveness: Did the transition achieve the desired outcome or was the - target state achieved as intended? - - Safety & Risks: Assess any risks associated with the transition and the - measures to mitigate them. - - Feedback Mechanisms: Incorporate feedback loops to continuously monitor - and adjust the quality of transition, making it more adaptive. -6. Iterative Evaluation: - - Test & Refine: Based on the initially deduced conditions and assessed - quality, iterate the process to refine and optimize the transition. This - might involve tweaking conditions, employing different paths, or changing - resources. 
- - Feedback Integration: Use feedback to make improvements and increase the - quality of the transition. -7. Real-world scenarios often present challenges that may not be captured by -models and frameworks. While using the model, maintain an adaptive mindset: - - Scenario Exploration: Continuously imagine various possible scenarios, - both positive and negative, to prepare for unexpected events. - - Flexibility: Be prepared to modify conditions ($C$) or alter the path/ - process ($L$) if unforeseen challenges arise. - - Feedback Integration: Rapidly integrate feedback from actual - implementations to adjust the model's application, ensuring relevancy and - effectiveness. - -===== TASK ===== -Given the starting state $A$ and the target state $B$, assuming that a path -$L$ always exists between $A$ and $B$, how can one deduce or identify the -necessary conditions $C$ and the quality $Q$ of the transition? - -===== STARTING STATE $A$ ===== -{starting_state} - -===== TARGET STATE $B$ ===== -{target_state} - -{role_with_description_prompt} -===== ANSWER TEMPLATE ===== -- Characterization and comparison of $A$ and $B$:\n -- Historical & Empirical Analysis:\n/None -- Logical Deduction of Conditions ($C$) (multiple conditions can be deduced): - condition : - . -- Entity/Label Recognition of Conditions:\n[, , ...] (include -square brackets) -- Quality Assessment ($Q$) (do not use symbols): - . -- Iterative Evaluation:\n/None""" - - if role_descriptions_dict is not None: - role_names = role_descriptions_dict.keys() - role_with_description_prompt = ( - "===== ROLES WITH DESCRIPTIONS =====\n" - + "\n".join( - f"{role_name}:\n{role_descriptions_dict[role_name]}\n" - for role_name in role_names - ) - + "\n\n" - ) - else: - role_with_description_prompt = "" - deduce_prompt = TextPrompt(deduce_prompt) - - deduce = deduce_prompt.format( - starting_state=starting_state, - target_state=target_state, - role_with_description_prompt=role_with_description_prompt, - ) - - conditions_and_quality_generation_msg = BaseMessage.make_user_message( - role_name="Deductive Reasoner", content=deduce - ) - - response = self.step( - input_message=conditions_and_quality_generation_msg - ) - - if response.terminated: - raise RuntimeError( - "Deduction failed. 
Error:\n" + f"{response.info}" - ) - msg: BaseMessage = response.msg - logger.info(f"Message content:\n{msg.content}") - - # Extract the conditions from the message - conditions_dict = { - f"condition {i}": cdt.replace("<", "") - .replace(">", "") - .strip() - .strip('\n') - for i, cdt in re.findall( - r"condition (\d+):\s*(.+?)(?=condition \d+|- Entity)", - msg.content, - re.DOTALL, - ) - } - - # Extract the labels from the message - labels = [ - label.strip().strip('\n').strip("\"'") - for label in re.findall( - r"Entity/Label Recognition of Conditions:\n\[(.+?)\]", - msg.content, - re.DOTALL, - )[0].split(",") - ] - - # Extract the quality from the message - quality = next( - q.strip().strip('\n') - for q in re.findall( - r"Quality Assessment \(\$Q\$\) \(do not use symbols\):" - r"\n(.+?)- Iterative", - msg.content, - re.DOTALL, - ) - ) - - # Convert them into JSON format - conditions_and_quality_json: Dict[ - str, Union[List[str], Dict[str, str]] - ] = {} - conditions_and_quality_json["conditions"] = conditions_dict - conditions_and_quality_json["labels"] = labels - conditions_and_quality_json["evaluate_quality"] = quality - - return conditions_and_quality_json diff --git a/camel/agents/embodied_agent.py b/camel/agents/embodied_agent.py deleted file mode 100644 index 2f2171ade2..0000000000 --- a/camel/agents/embodied_agent.py +++ /dev/null @@ -1,146 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -from typing import Any, Optional - -from colorama import Fore - -from camel.agents.chat_agent import ChatAgent -from camel.interpreters import BaseInterpreter, SubprocessInterpreter -from camel.messages import BaseMessage -from camel.models import BaseModelBackend -from camel.responses import ChatAgentResponse -from camel.utils import print_text_animated - -# AgentOps decorator setting -try: - import os - - if os.getenv("AGENTOPS_API_KEY") is not None: - from agentops import track_agent - else: - raise ImportError -except (ImportError, AttributeError): - from camel.utils import track_agent - - -@track_agent(name="EmbodiedAgent") -class EmbodiedAgent(ChatAgent): - r"""Class for managing conversations of CAMEL Embodied Agents. - - Args: - system_message (BaseMessage): The system message for the chat agent. - model (BaseModelBackend, optional): The model backend to use for - generating responses. (default: :obj:`OpenAIModel` with - `GPT_4O_MINI`) - message_window_size (int, optional): The maximum number of previous - messages to include in the context window. If `None`, no windowing - is performed. (default: :obj:`None`) - code_interpreter (BaseInterpreter, optional): The code interpreter to - execute codes. Default to `SubProcessInterpreter`. - (default: :obj:`None`) - verbose (bool, optional): Whether to print the critic's messages. - logger_color (Any): The color of the logger displayed to the user. 
- (default: :obj:`Fore.MAGENTA`) - """ - - def __init__( - self, - system_message: BaseMessage, - model: Optional[BaseModelBackend] = None, - message_window_size: Optional[int] = None, - code_interpreter: Optional[BaseInterpreter] = None, - verbose: bool = False, - logger_color: Any = Fore.MAGENTA, - ) -> None: - self.code_interpreter: BaseInterpreter - if code_interpreter is not None: - self.code_interpreter = code_interpreter - else: - self.code_interpreter = SubprocessInterpreter() - - self.verbose = verbose - self.logger_color = logger_color - super().__init__( - system_message=system_message, - model=model, - message_window_size=message_window_size, - ) - - # ruff: noqa: E501 - def step(self, input_message: BaseMessage) -> ChatAgentResponse: # type: ignore[override] - r"""Performs a step in the conversation. - - Args: - input_message (BaseMessage): The input message. - - Returns: - ChatAgentResponse: A struct containing the output messages, - a boolean indicating whether the chat session has terminated, - and information about the chat session. - """ - response = super().step(input_message) - - if response.msgs is None or len(response.msgs) == 0: - raise RuntimeError("Got None output messages.") - if response.terminated: - raise RuntimeError(f"{self.__class__.__name__} step failed.") - - # NOTE: Only single output messages are supported - explanations, codes = response.msg.extract_text_and_code_prompts() - - if self.verbose: - for explanation, code in zip(explanations, codes): - print_text_animated( - self.logger_color + f"> Explanation:\n{explanation}" - ) - print_text_animated(self.logger_color + f"> Code:\n{code}") - - if len(explanations) > len(codes): - print_text_animated( - self.logger_color + f"> Explanation:\n{explanations[-1]}" - ) - - content = response.msg.content - - if codes is not None: - try: - content = "\n> Executed Results:\n" - for block_idx, code in enumerate(codes): - executed_output = self.code_interpreter.run( - code, code.code_type - ) - content += ( - f"Executing code block {block_idx}: {{\n" - + executed_output - + "}\n" - ) - except InterruptedError as e: - content = ( - f"\n> Running code fail: {e}\n" - "Please regenerate the code." - ) - - # TODO: Handle errors - content = input_message.content + f"\n> Embodied Actions:\n{content}" - message = BaseMessage( - input_message.role_name, - input_message.role_type, - input_message.meta_dict, - content, - ) - return ChatAgentResponse( - msgs=[message], - terminated=response.terminated, - info=response.info, - ) diff --git a/camel/interpreters/internal_python_interpreter.py b/camel/interpreters/internal_python_interpreter.py index c75556abe3..9bf314ddcd 100644 --- a/camel/interpreters/internal_python_interpreter.py +++ b/camel/interpreters/internal_python_interpreter.py @@ -65,9 +65,9 @@ class InternalPythonInterpreter(BaseInterpreter): names to their corresponding functions or objects. The interpreter can only execute functions that are either directly listed in this dictionary or are member functions of objects listed in this - dictionary. The concept of :obj:`action_space` is derived from - EmbodiedAgent, representing the actions that an agent is capable of - performing. If `None`, set to empty dict. (default: :obj:`None`) + dictionary. The concept of :obj:`action_space` represents the + actions that an agent is capable of performing. If `None`, set to + empty dict. 
(default: :obj:`None`) import_white_list (List[str], optional): A list that stores the Python modules or functions that can be imported in the code. All submodules and functions of the modules listed in this list are diff --git a/docs/camel.agents.rst b/docs/camel.agents.rst index 6416e9369e..4984798d0b 100644 --- a/docs/camel.agents.rst +++ b/docs/camel.agents.rst @@ -28,22 +28,6 @@ camel.agents.critic\_agent module :undoc-members: :show-inheritance: -camel.agents.deductive\_reasoner\_agent module ----------------------------------------------- - -.. automodule:: camel.agents.deductive_reasoner_agent - :members: - :undoc-members: - :show-inheritance: - -camel.agents.embodied\_agent module ------------------------------------ - -.. automodule:: camel.agents.embodied_agent - :members: - :undoc-members: - :show-inheritance: - camel.agents.knowledge\_graph\_agent module ------------------------------------------- diff --git a/docs/conf.py b/docs/conf.py index 3e18fd938d..e28e418056 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -94,7 +94,6 @@ rediraffe_redirects = { "cookbooks/create_your_first_agent": "cookbooks/basic_concepts/create_your_first_agent", "cookbooks/create_your_first_agents_society": "cookbooks/basic_concepts/create_your_first_agents_society", - "cookbooks/embodied_agents": "cookbooks/advanced_features/embodied_agents", "cookbooks/critic_agents_and_tree_search": "cookbooks/advanced_features/critic_agents_and_tree_search", "cookbooks/agents_society": "cookbooks/basic_concepts/create_your_first_agents_society", "cookbooks/agents_message": "cookbooks/basic_concepts/agents_message", diff --git a/docs/cookbooks/advanced_features/embodied_agents.ipynb b/docs/cookbooks/advanced_features/embodied_agents.ipynb deleted file mode 100644 index 5ce2b35c4d..0000000000 --- a/docs/cookbooks/advanced_features/embodied_agents.ipynb +++ /dev/null @@ -1,517 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "APw8wDolb0L9" - }, - "source": [ - "# Embodied Agents" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "vLCfmNtRb-jR" - }, - "source": [ - "You can also check this cookbook in colab [here](https://colab.research.google.com/drive/17qCB6ezYfva87dNWlGA3D3zQ20NI-Sfk?usp=sharing)\n", - "\n", - "⭐ Star us on [*Github*](https://github.com/camel-ai/camel), join our [*Discord*](https://discord.camel-ai.org) or follow our [*X*](https://x.com/camelaiorg)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "jHQhWNnhcOch" - }, - "source": [ - "## Philosophical Bits\n", - "\n", - "We believe the essence of intelligence emerges from its dynamic interactions with the external environment, where the use of various tools becomes a pivotal factor in its development and manifestation.\n", - "\n", - "The `EmbodiedAgent()` in CAMEL is an advanced conversational agent that leverages **code interpreters** and **tool agents** (*e.g.*, `HuggingFaceToolAgent()`) to execute diverse tasks efficiently. This agent represents a blend of advanced programming and AI capabilities, and is able to interact and respond within a dynamic environment." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "VUaGurDIVJBg" - }, - "source": [ - "## Quick Start\n", - "Let's first play with a `ChatAgent` instance by simply initialize it with a system message and interact with user messages." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "u9NVFz-HVLXb" - }, - "source": [ - "### 🕹 Step 0: Preparations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "UtcC3c-KVZmU" - }, - "outputs": [], - "source": [ - "%pip install \"camel-ai==0.2.16\"" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "id": "CmgKGeCxVON-" - }, - "outputs": [], - "source": [ - "from camel.agents import EmbodiedAgent\n", - "from camel.generators import SystemMessageGenerator as sys_msg_gen\n", - "from camel.messages import BaseMessage as bm\n", - "from camel.types import RoleType" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MyTTCe3IR_Lr" - }, - "source": [ - "### Setting Up API Keys" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "REqzgGL9SEaD" - }, - "source": [ - "You'll need to set up your API keys for OpenAI." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "PNBFEXc-R-0s", - "outputId": "6e97c175-684c-47d7-866c-c23aea59910a" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Enter your API key: ··········\n" - ] - } - ], - "source": [ - "import os\n", - "from getpass import getpass\n", - "\n", - "# Prompt for the API key securely\n", - "openai_api_key = getpass('Enter your API key: ')\n", - "os.environ[\"OPENAI_API_KEY\"] = openai_api_key" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Alternatively, if running on Colab, you could save your API keys and tokens as **Colab Secrets**, and use them across notebooks.\n", - "\n", - "To do so, **comment out** the above **manual** API key prompt code block(s), and **uncomment** the following codeblock.\n", - "\n", - "⚠️ Don't forget granting access to the API key you would be using to the current notebook." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# import os\n", - "# from google.colab import userdata\n", - "\n", - "# os.environ[\"OPENAI_API_KEY\"] = userdata.get(\"OPENAI_API_KEY\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wcetZrjEcyo_" - }, - "source": [ - "### 🕹 Step 1: Define the Role\n", - "We first need to set up the necessary information." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "id": "i-pIc9eTc0SH" - }, - "outputs": [], - "source": [ - "# Set the role name and the task\n", - "role = 'Programmer'\n", - "task = 'Writing and executing codes.'\n", - "\n", - "# Create the meta_dict and the role_tuple\n", - "meta_dict = dict(role=role, task=task)\n", - "role_tuple = (role, RoleType.EMBODIMENT)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Lf6oJvsQc3lj" - }, - "source": [ - "The `meta_dict` and `role_type` will be used to generate the system message.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "id": "yA62jAfDc5CK" - }, - "outputs": [], - "source": [ - "# Generate the system message based on this\n", - "sys_msg = sys_msg_gen().from_dict(meta_dict=meta_dict, role_tuple=role_tuple)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "BEUEpNa_c8sS" - }, - "source": [ - "### 🕹 Step 2: Initialize the Agent 🐫\n", - "Based on the system message, we are ready to initialize our embodied agent." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "id": "VetPjL70c9be" - }, - "outputs": [], - "source": [ - "embodied_agent = EmbodiedAgent(system_message=sys_msg,\n", - " tool_agents=None,\n", - " code_interpreter=None,\n", - " verbose=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "o6aDvI1qc_kH" - }, - "source": [ - "Be aware that the default argument values for `tool_agents` and `code_interpreter` are `None`, and the underlying code interpreter is using the `SubProcessInterpreter()`, which handles the execution of code in Python and Bash within a subprocess." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MXzNaWp9dCvo" - }, - "source": [ - "### 🕹 Step 3: Interact with the Agent with `.step()`\n", - "Use the base message wrapper to generate the user message." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "id": "Ts52u_UVdDJA" - }, - "outputs": [], - "source": [ - "usr_msg = bm.make_user_message(\n", - " role_name='user',\n", - " content=('1. write a bash script to install numpy. '\n", - " '2. then write a python script to compute '\n", - " 'the dot product of [8, 9] and [5, 4], '\n", - " 'and print the result. '\n", - " '3. then write a script to search for '\n", - " 'the weather at london with wttr.in/london.'))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "BlHF6LkRdFLo" - }, - "source": [ - "And feed that into your agents:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "6hKpSzssdGvc", - "outputId": "c7a8a799-de9f-4453-926f-c39ecef258a9" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[35m> Explanation:\n", - "To accomplish the tasks you've outlined, I will perform the following actions:\n", - "\n", - "1. **Write a Bash script** to install NumPy using `pip`. This will ensure that the necessary library is available for the Python script that follows.\n", - "2. **Write a Python script** to compute the dot product of the two lists `[8, 9]` and `[5, 4]`. The dot product is calculated as the sum of the products of the corresponding entries of the two sequences.\n", - "3. **Write a script** to fetch the weather information for London using the `wttr.in` service. This will provide a simple way to get weather data from the command line.\n", - "\n", - "### Step 1: Bash Script to Install NumPy\n", - "\n", - "I'll create a Bash script named `install_numpy.sh` that will check if `pip` is installed and then use it to install NumPy.\n", - "\n", - "### Step 2: Python Script for Dot Product Calculation\n", - "\n", - "I'll create a Python script named `dot_product.py` that will compute the dot product of the two lists and print the result.\n", - "\n", - "### Step 3: Script to Fetch Weather Information\n", - "\n", - "I'll create a Bash script named `get_weather.sh` that will use `curl` to fetch the weather information for London from `wttr.in`.\n", - "\n", - "Now, let's implement these actions in code.\n", - "\n", - "\u001b[35m> Code:\n", - "# Step 1: Bash script to install NumPy\n", - "echo '#!/bin/bash' > install_numpy.sh\n", - "echo 'if command -v pip &> /dev/null; then' >> install_numpy.sh\n", - "echo ' echo \"Installing NumPy...\"' >> install_numpy.sh\n", - "echo ' pip install numpy' >> install_numpy.sh\n", - "echo 'else' >> install_numpy.sh\n", - "echo ' echo \"pip is not installed. 
Please install pip first.\"' >> install_numpy.sh\n", - "echo 'fi' >> install_numpy.sh\n", - "chmod +x install_numpy.sh\n", - "\n", - "# Step 2: Python script for dot product calculation\n", - "echo 'import numpy as np' > dot_product.py\n", - "echo 'a = np.array([8, 9])' >> dot_product.py\n", - "echo 'b = np.array([5, 4])' >> dot_product.py\n", - "echo 'dot_product = np.dot(a, b)' >> dot_product.py\n", - "echo 'print(\"Dot product:\", dot_product)' >> dot_product.py\n", - "\n", - "# Step 3: Bash script to fetch weather information\n", - "echo '#!/bin/bash' > get_weather.sh\n", - "echo 'curl wttr.in/london' >> get_weather.sh\n", - "chmod +x get_weather.sh\n", - "\n", - "\u001b[35m> Explanation:\n", - "### Explanation of the Code\n", - "\n", - "1. **install_numpy.sh**: This script checks if `pip` is installed and installs NumPy if it is. It provides feedback if `pip` is not found.\n", - "2. **dot_product.py**: This Python script uses NumPy to calculate the dot product of two arrays and prints the result.\n", - "3. **get_weather.sh**: This script uses `curl` to fetch and display the weather information for London from `wttr.in`.\n", - "\n", - "### Next Steps\n", - "\n", - "I will execute these scripts in the appropriate order to ensure that NumPy is installed before running the Python script. After that, I will fetch the weather information for London. \n", - "\n", - "Let's execute the installation script first.\n", - "\n", - "\u001b[35m> Code:\n", - "./install_numpy.sh\n", - "\n", - "\u001b[35m> Explanation:\n", - "After confirming that NumPy is installed, I will run the Python script:\n", - "\n", - "\u001b[35m> Code:\n", - "python3 dot_product.py\n", - "\n", - "\u001b[35m> Explanation:\n", - "Finally, I will run the weather script:\n", - "\n", - "\u001b[35m> Code:\n", - "./get_weather.sh\n", - "\n", - "\u001b[35m> Explanation:\n", - "Now, I will perform these actions.\n", - "\n", - "The following bash code will run on your computer:\n", - "\u001b[36m# Step 1: Bash script to install NumPy\n", - "echo '#!/bin/bash' > install_numpy.sh\n", - "echo 'if command -v pip &> /dev/null; then' >> install_numpy.sh\n", - "echo ' echo \"Installing NumPy...\"' >> install_numpy.sh\n", - "echo ' pip install numpy' >> install_numpy.sh\n", - "echo 'else' >> install_numpy.sh\n", - "echo ' echo \"pip is not installed. Please install pip first.\"' >> install_numpy.sh\n", - "echo 'fi' >> install_numpy.sh\n", - "chmod +x install_numpy.sh\n", - "\n", - "# Step 2: Python script for dot product calculation\n", - "echo 'import numpy as np' > dot_product.py\n", - "echo 'a = np.array([8, 9])' >> dot_product.py\n", - "echo 'b = np.array([5, 4])' >> dot_product.py\n", - "echo 'dot_product = np.dot(a, b)' >> dot_product.py\n", - "echo 'print(\"Dot product:\", dot_product)' >> dot_product.py\n", - "\n", - "# Step 3: Bash script to fetch weather information\n", - "echo '#!/bin/bash' > get_weather.sh\n", - "echo 'curl wttr.in/london' >> get_weather.sh\n", - "chmod +x get_weather.sh\u001b[39m\n", - "Running code? [Y/n]:y\n", - "The following bash code will run on your computer:\n", - "\u001b[36m./install_numpy.sh\u001b[39m\n", - "Running code? [Y/n]:y\n", - "The following bash code will run on your computer:\n", - "\u001b[36mpython3 dot_product.py\u001b[39m\n", - "Running code? [Y/n]:y\n", - "The following bash code will run on your computer:\n", - "\u001b[36m./get_weather.sh\u001b[39m\n", - "Running code? 
[Y/n]:y\n", - "======stderr======\n", - "\u001b[31m % Total % Received % Xferd Average Speed Time Time Time Current\n", - " Dload Upload Total Spent Left Speed\n", - "\n", - " 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n", - "100 8906 100 8906 0 0 9068 0 --:--:-- --:--:-- --:--:-- 9069\n", - "100 8906 100 8906 0 0 9068 0 --:--:-- --:--:-- --:--:-- 9069\n", - "\u001b[39m\n", - "==================\n" - ] - } - ], - "source": [ - "response = embodied_agent.step(usr_msg)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "rP4Zma5ddJxh" - }, - "source": [ - "Under the hood, the agent will perform multiple actions within its action space in the OS to fulfill the user request. It will compose code to implement the action – no worries, it will ask for your permission before execution.\n", - "\n", - "Ideally you should get the output similar to this, if you allow the agent to perform actions:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "qA0bpOFldKPV", - "outputId": "421418d2-61ca-4c65-f08e-83038c50a85a" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1. write a bash script to install numpy. 2. then write a python script to compute the dot product of [8, 9] and [5, 4], and print the result. 3. then write a script to search for the weather at london with wttr.in/london.\n", - "> Embodied Actions:\n", - "\n", - "> Executed Results:\n", - "Executing code block 0: {\n", - "Installing NumPy...\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (1.26.4)\n", - "}\n", - "Executing code block 1: {\n", - "The dot product of [8, 9] and [5, 4] is: 76\n", - "}\n", - "Executing code block 2: {\n", - "Fetching weather information for London...\n", - "Weather report: London\n", - "\n", - " \u001b[38;5;250m .-. \u001b[0m Light rain shower\n", - " \u001b[38;5;250m ( ). \u001b[0m \u001b[38;5;118m15\u001b[0m °C\u001b[0m \n", - " \u001b[38;5;250m (___(__) \u001b[0m \u001b[1m↗\u001b[0m \u001b[38;5;190m12\u001b[0m km/h\u001b[0m \n", - " \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 3 km\u001b[0m \n", - " \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 3.4 mm\u001b[0m \n", - " ┌─────────────┐ \n", - "┌──────────────────────────────┬───────────────────────┤ Thu 26 Sep ├───────────────────────┬──────────────────────────────┐\n", - "│ Morning │ Noon └──────┬──────┘ Evening │ Night │\n", - "├──────────────────────────────┼──────────────────────────────┼──────────────────────────────┼──────────────────────────────┤\n", - "│ \u001b[38;5;226m _`/\"\"\u001b[38;5;250m.-. \u001b[0m Patchy light d…│ \u001b[38;5;226m _`/\"\"\u001b[38;5;250m.-. \u001b[0m Patchy light d…│ \u001b[38;5;226m _`/\"\"\u001b[38;5;250m.-. \u001b[0m Light rain sho…│ \u001b[38;5;226m _`/\"\"\u001b[38;5;250m.-. \u001b[0m Patchy rain ne…│\n", - "│ \u001b[38;5;226m ,\\_\u001b[38;5;250m( ). \u001b[0m \u001b[38;5;154m16\u001b[0m °C\u001b[0m │ \u001b[38;5;226m ,\\_\u001b[38;5;250m( ). \u001b[0m \u001b[38;5;154m17\u001b[0m °C\u001b[0m │ \u001b[38;5;226m ,\\_\u001b[38;5;250m( ). \u001b[0m \u001b[38;5;118m+14\u001b[0m(\u001b[38;5;082m12\u001b[0m) °C\u001b[0m │ \u001b[38;5;226m ,\\_\u001b[38;5;250m( ). 
\u001b[0m \u001b[38;5;082m+12\u001b[0m(\u001b[38;5;082m10\u001b[0m) °C\u001b[0m │\n", - "│ \u001b[38;5;226m /\u001b[38;5;250m(___(__) \u001b[0m \u001b[1m↗\u001b[0m \u001b[38;5;220m19\u001b[0m-\u001b[38;5;208m24\u001b[0m km/h\u001b[0m │ \u001b[38;5;226m /\u001b[38;5;250m(___(__) \u001b[0m \u001b[1m↗\u001b[0m \u001b[38;5;214m20\u001b[0m-\u001b[38;5;214m23\u001b[0m km/h\u001b[0m │ \u001b[38;5;226m /\u001b[38;5;250m(___(__) \u001b[0m \u001b[1m↗\u001b[0m \u001b[38;5;214m23\u001b[0m-\u001b[38;5;196m35\u001b[0m km/h\u001b[0m │ \u001b[38;5;226m /\u001b[38;5;250m(___(__) \u001b[0m \u001b[1m↗\u001b[0m \u001b[38;5;214m20\u001b[0m-\u001b[38;5;202m29\u001b[0m km/h\u001b[0m │\n", - "│ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 5 km\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 5 km\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 10 km\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 10 km\u001b[0m │\n", - "│ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 0.5 mm | 100%\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 0.2 mm | 100%\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 0.6 mm | 100%\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 0.0 mm | 65%\u001b[0m │\n", - "└──────────────────────────────┴──────────────────────────────┴──────────────────────────────┴──────────────────────────────┘\n", - " ┌─────────────┐ \n", - "┌──────────────────────────────┬───────────────────────┤ Fri 27 Sep ├───────────────────────┬──────────────────────────────┐\n", - "│ Morning │ Noon └──────┬──────┘ Evening │ Night │\n", - "├──────────────────────────────┼──────────────────────────────┼──────────────────────────────┼──────────────────────────────┤\n", - "│ \u001b[38;5;250m .-. \u001b[0m Light rain │ \u001b[38;5;226m _`/\"\"\u001b[38;5;250m.-. \u001b[0m Patchy rain ne…│ \u001b[38;5;226m \\ / \u001b[0m Sunny │ \u001b[38;5;226m \\ / \u001b[0m Clear │\n", - "│ \u001b[38;5;250m ( ). \u001b[0m \u001b[38;5;046m+9\u001b[0m(\u001b[38;5;047m7\u001b[0m) °C\u001b[0m │ \u001b[38;5;226m ,\\_\u001b[38;5;250m( ). \u001b[0m \u001b[38;5;082m+11\u001b[0m(\u001b[38;5;046m9\u001b[0m) °C\u001b[0m │ \u001b[38;5;226m .-. \u001b[0m \u001b[38;5;082m+11\u001b[0m(\u001b[38;5;046m8\u001b[0m) °C\u001b[0m │ \u001b[38;5;226m .-. 
\u001b[0m \u001b[38;5;046m+9\u001b[0m(\u001b[38;5;046m8\u001b[0m) °C\u001b[0m │\n", - "│ \u001b[38;5;250m (___(__) \u001b[0m \u001b[1m↘\u001b[0m \u001b[38;5;220m18\u001b[0m-\u001b[38;5;208m26\u001b[0m km/h\u001b[0m │ \u001b[38;5;226m /\u001b[38;5;250m(___(__) \u001b[0m \u001b[1m↘\u001b[0m \u001b[38;5;214m23\u001b[0m-\u001b[38;5;202m31\u001b[0m km/h\u001b[0m │ \u001b[38;5;226m ― ( ) ― \u001b[0m \u001b[1m↘\u001b[0m \u001b[38;5;220m16\u001b[0m-\u001b[38;5;208m24\u001b[0m km/h\u001b[0m │ \u001b[38;5;226m ― ( ) ― \u001b[0m \u001b[1m↘\u001b[0m \u001b[38;5;190m10\u001b[0m-\u001b[38;5;220m16\u001b[0m km/h\u001b[0m │\n", - "│ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 9 km\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 10 km\u001b[0m │ \u001b[38;5;226m `-’ \u001b[0m 10 km\u001b[0m │ \u001b[38;5;226m `-’ \u001b[0m 10 km\u001b[0m │\n", - "│ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 0.8 mm | 100%\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 0.1 mm | 100%\u001b[0m │ \u001b[38;5;226m / \\ \u001b[0m 0.0 mm | 0%\u001b[0m │ \u001b[38;5;226m / \\ \u001b[0m 0.0 mm | 0%\u001b[0m │\n", - "└──────────────────────────────┴──────────────────────────────┴──────────────────────────────┴──────────────────────────────┘\n", - " ┌─────────────┐ \n", - "┌──────────────────────────────┬───────────────────────┤ Sat 28 Sep ├───────────────────────┬──────────────────────────────┐\n", - "│ Morning │ Noon └──────┬──────┘ Evening │ Night │\n", - "├──────────────────────────────┼──────────────────────────────┼──────────────────────────────┼──────────────────────────────┤\n", - "│ \u001b[38;5;226m \\ / \u001b[0m Sunny │ \u001b[38;5;226m _`/\"\"\u001b[38;5;250m.-. \u001b[0m Patchy rain ne…│ Cloudy │ \u001b[38;5;226m \\ /\u001b[0m Partly Cloudy │\n", - "│ \u001b[38;5;226m .-. \u001b[0m \u001b[38;5;082m+11\u001b[0m(\u001b[38;5;046m9\u001b[0m) °C\u001b[0m │ \u001b[38;5;226m ,\\_\u001b[38;5;250m( ). \u001b[0m \u001b[38;5;118m+14\u001b[0m(\u001b[38;5;118m13\u001b[0m) °C\u001b[0m │ \u001b[38;5;250m .--. \u001b[0m \u001b[38;5;118m13\u001b[0m °C\u001b[0m │ \u001b[38;5;226m _ /\"\"\u001b[38;5;250m.-. \u001b[0m \u001b[38;5;082m12\u001b[0m °C\u001b[0m │\n", - "│ \u001b[38;5;226m ― ( ) ― \u001b[0m \u001b[1m→\u001b[0m \u001b[38;5;190m11\u001b[0m-\u001b[38;5;226m14\u001b[0m km/h\u001b[0m │ \u001b[38;5;226m /\u001b[38;5;250m(___(__) \u001b[0m \u001b[1m→\u001b[0m \u001b[38;5;190m12\u001b[0m-\u001b[38;5;226m14\u001b[0m km/h\u001b[0m │ \u001b[38;5;250m .-( ). \u001b[0m \u001b[1m→\u001b[0m \u001b[38;5;118m6\u001b[0m-\u001b[38;5;190m10\u001b[0m km/h\u001b[0m │ \u001b[38;5;226m \\_\u001b[38;5;250m( ). 
\u001b[0m \u001b[1m↗\u001b[0m \u001b[38;5;118m5\u001b[0m-\u001b[38;5;154m8\u001b[0m km/h\u001b[0m │\n", - "│ \u001b[38;5;226m `-’ \u001b[0m 10 km\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 10 km\u001b[0m │ \u001b[38;5;250m (___.__)__) \u001b[0m 10 km\u001b[0m │ \u001b[38;5;226m /\u001b[38;5;250m(___(__) \u001b[0m 10 km\u001b[0m │\n", - "│ \u001b[38;5;226m / \\ \u001b[0m 0.0 mm | 0%\u001b[0m │ \u001b[38;5;111m ‘ ‘ ‘ ‘ \u001b[0m 0.1 mm | 100%\u001b[0m │ 0.0 mm | 0%\u001b[0m │ 0.0 mm | 0%\u001b[0m │\n", - "└──────────────────────────────┴──────────────────────────────┴──────────────────────────────┴──────────────────────────────┘\n", - "Location: London, Greater London, England, UK [51.5073219,-0.1276474]\n", - "\n", - "Follow \u001b[46m\u001b[30m@igor_chubin\u001b[0m for wttr.in updates\n", - "(stderr: % Total % Received % Xferd Average Speed Time Time Time Current\n", - " Dload Upload Total Spent Left Speed\n", - "\n", - " 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\n", - " 0 0 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:-- 0\n", - " 0 0 0 0 0 0 0 0 --:--:-- 0:00:02 --:--:-- 0\n", - "100 8977 100 8977 0 0 3495 0 0:00:02 0:00:02 --:--:-- 3495\n", - ")}\n", - "Executing code block 3: {\n", - "}\n", - "\n" - ] - } - ], - "source": [ - "print(response.msg.content)" - ] - } - ], - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/docs/cookbooks/advanced_features/index.rst b/docs/cookbooks/advanced_features/index.rst index 9f930719c8..c92e6afb47 100644 --- a/docs/cookbooks/advanced_features/index.rst +++ b/docs/cookbooks/advanced_features/index.rst @@ -21,5 +21,4 @@ Advanced Features agents_with_MCP agents_tracking critic_agents_and_tree_search - embodied_agents agent_generate_structured_output diff --git a/docs/index.rst b/docs/index.rst index 1ba6d24614..83ce5e62f9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -26,7 +26,6 @@ Main Documentation cookbooks/create_your_first_agent.ipynb cookbooks/create_your_first_agents_society.ipynb - cookbooks/embodied_agents.ipynb cookbooks/critic_agents_and_tree_search.ipynb .. toctree:: diff --git a/docs/mintlify/cookbooks/advanced_features/embodied_agents.mdx b/docs/mintlify/cookbooks/advanced_features/embodied_agents.mdx deleted file mode 100644 index d9f4799999..0000000000 --- a/docs/mintlify/cookbooks/advanced_features/embodied_agents.mdx +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: "Embodied Agents" ---- - -You can also check this cookbook in colab [here](https://colab.research.google.com/drive/17qCB6ezYfva87dNWlGA3D3zQ20NI-Sfk?usp=sharing) - -⭐ Star us on [*Github*](https://github.com/camel-ai/camel), join our [*Discord*](https://discord.camel-ai.org) or follow our [*X*](https://x.com/camelaiorg) - -## Philosophical Bits - -We believe the essence of intelligence emerges from its dynamic interactions with the external environment, where the use of various tools becomes a pivotal factor in its development and manifestation. - -The `EmbodiedAgent()` in CAMEL is an advanced conversational agent that leverages **code interpreters** and **tool agents** (*e.g.*, `HuggingFaceToolAgent()`) to execute diverse tasks efficiently. This agent represents a blend of advanced programming and AI capabilities, and is able to interact and respond within a dynamic environment. 
- -## Quick Start -Let's first play with a `ChatAgent` instance by simply initialize it with a system message and interact with user messages. - -### 🕹 Step 0: Preparations - - -```python -%pip install "camel-ai==0.2.16" -``` - - -```python -from camel.agents import EmbodiedAgent -from camel.generators import SystemMessageGenerator as sys_msg_gen -from camel.messages import BaseMessage as bm -from camel.types import RoleType -``` - -### Setting Up API Keys - -You'll need to set up your API keys for OpenAI. - - -```python -import os -from getpass import getpass - -# Prompt for the API key securely -openai_api_key = getpass('Enter your API key: ') -os.environ["OPENAI_API_KEY"] = openai_api_key -``` - -Alternatively, if running on Colab, you could save your API keys and tokens as **Colab Secrets**, and use them across notebooks. - -To do so, **comment out** the above **manual** API key prompt code block(s), and **uncomment** the following codeblock. - -⚠️ Don't forget granting access to the API key you would be using to the current notebook. - - -```python -# import os -# from google.colab import userdata - -# os.environ["OPENAI_API_KEY"] = userdata.get("OPENAI_API_KEY") -``` - -### 🕹 Step 1: Define the Role -We first need to set up the necessary information. - - -```python -# Set the role name and the task -role = 'Programmer' -task = 'Writing and executing codes.' - -# Create the meta_dict and the role_tuple -meta_dict = dict(role=role, task=task) -role_tuple = (role, RoleType.EMBODIMENT) -``` - -The `meta_dict` and `role_type` will be used to generate the system message. - - - -```python -# Generate the system message based on this -sys_msg = sys_msg_gen().from_dict(meta_dict=meta_dict, role_tuple=role_tuple) -``` - -### 🕹 Step 2: Initialize the Agent 🐫 -Based on the system message, we are ready to initialize our embodied agent. - - -```python -embodied_agent = EmbodiedAgent(system_message=sys_msg, - tool_agents=None, - code_interpreter=None, - verbose=True) -``` - -Be aware that the default argument values for `tool_agents` and `code_interpreter` are `None`, and the underlying code interpreter is using the `SubProcessInterpreter()`, which handles the execution of code in Python and Bash within a subprocess. - -### 🕹 Step 3: Interact with the Agent with `.step()` -Use the base message wrapper to generate the user message. - - -```python -usr_msg = bm.make_user_message( - role_name='user', - content=('1. write a bash script to install numpy. ' - '2. then write a python script to compute ' - 'the dot product of [8, 9] and [5, 4], ' - 'and print the result. ' - '3. then write a script to search for ' - 'the weather at london with wttr.in/london.')) -``` - -And feed that into your agents: - - -```python -response = embodied_agent.step(usr_msg) -``` - -Under the hood, the agent will perform multiple actions within its action space in the OS to fulfill the user request. It will compose code to implement the action – no worries, it will ask for your permission before execution. 
- -Ideally you should get the output similar to this, if you allow the agent to perform actions: - - -```python -print(response.msg.content) -``` diff --git a/docs/mintlify/reference/camel.agents.deductive_reasoner_agent.mdx b/docs/mintlify/reference/camel.agents.deductive_reasoner_agent.mdx deleted file mode 100644 index f673c579b9..0000000000 --- a/docs/mintlify/reference/camel.agents.deductive_reasoner_agent.mdx +++ /dev/null @@ -1,68 +0,0 @@ - - - - -## DeductiveReasonerAgent - -```python -class DeductiveReasonerAgent(ChatAgent): -``` - -An agent responsible for deductive reasoning. Model of deductive -reasoning: -- L: A ⊕ C -> q * B -- A represents the known starting state. -- B represents the known target state. -- C represents the conditions required to transition from A to B. -- Q represents the quality or effectiveness of the transition from -A to B. -- L represents the path or process from A to B. - -**Parameters:** - -- **model** (BaseModelBackend, optional): The model backend to use for generating responses. (default: :obj:`OpenAIModel` with `GPT_4O_MINI`) - - - -### __init__ - -```python -def __init__(self, model: Optional[BaseModelBackend] = None): -``` - - - -### deduce_conditions_and_quality - -```python -def deduce_conditions_and_quality( - self, - starting_state: str, - target_state: str, - role_descriptions_dict: Optional[Dict[str, str]] = None -): -``` - -Derives the conditions and quality from the starting state and the -target state based on the model of the deductive reasoning and the -knowledge base. It can optionally consider the roles involved in the -scenario, which allows tailoring the output more closely to the AI -agent's environment. - -**Parameters:** - -- **starting_state** (str): The initial or starting state from which conditions are deduced. -- **target_state** (str): The target state of the task. -- **role_descriptions_dict** (Optional[Dict[str, str]], optional): The descriptions of the roles. (default: :obj:`None`) -- **role_descriptions_dict** (Optional[Dict[str, str]], optional): A dictionary describing the roles involved in the scenario. This is optional and can be used to provide a context for the CAMEL's role-playing, enabling the generation of more relevant and tailored conditions and quality assessments. This could be generated using a `RoleAssignmentAgent()` or defined manually by the user. - -**Returns:** - - Dict[str, Union[List[str], Dict[str, str]]]: A dictionary with the -extracted data from the message. The dictionary contains three -keys: -- 'conditions': A list where each key is a condition ID and -each value is the corresponding condition text. -- 'labels': A list of label strings extracted from the message. -- 'quality': A string of quality assessment strings extracted -from the message. diff --git a/docs/mintlify/reference/camel.agents.embodied_agent.mdx b/docs/mintlify/reference/camel.agents.embodied_agent.mdx deleted file mode 100644 index 25d87f949f..0000000000 --- a/docs/mintlify/reference/camel.agents.embodied_agent.mdx +++ /dev/null @@ -1,90 +0,0 @@ - - - - -## EmbodiedAgent - -```python -class EmbodiedAgent(ChatAgent): -``` - -Class for managing conversations of CAMEL Embodied Agents. - -**Parameters:** - -- **system_message** (BaseMessage): The system message for the chat agent. -- **model** (BaseModelBackend, optional): The model backend to use for generating responses. 
(default: :obj:`OpenAIModel` with `GPT_4O_MINI`) -- **message_window_size** (int, optional): The maximum number of previous messages to include in the context window. If `None`, no windowing is performed. (default: :obj:`None`) -- **tool_agents** (List[BaseToolAgent], optional): The tools agents to use in the embodied agent. (default: :obj:`None`) -- **code_interpreter** (BaseInterpreter, optional): The code interpreter to execute codes. If `code_interpreter` and `tool_agent` are both `None`, default to `SubProcessInterpreter`. If `code_interpreter` is `None` and `tool_agents` is not `None`, default to `InternalPythonInterpreter`. (default: :obj:`None`) -- **verbose** (bool, optional): Whether to print the critic's messages. -- **logger_color** (Any): The color of the logger displayed to the user. (default: :obj:`Fore.MAGENTA`) - - - -### __init__ - -```python -def __init__( - self, - system_message: BaseMessage, - model: Optional[BaseModelBackend] = None, - message_window_size: Optional[int] = None, - tool_agents: Optional[List[BaseToolAgent]] = None, - code_interpreter: Optional[BaseInterpreter] = None, - verbose: bool = False, - logger_color: Any = Fore.MAGENTA -): -``` - - - -### _set_tool_agents - -```python -def _set_tool_agents(self, system_message: BaseMessage): -``` - - - -### _get_tool_agents_prompt - -```python -def _get_tool_agents_prompt(self): -``` - -**Returns:** - - str: The action space prompt. - - - -### get_tool_agent_names - -```python -def get_tool_agent_names(self): -``` - -**Returns:** - - List[str]: The names of tool agents. - - - -### step - -```python -def step(self, input_message: BaseMessage): -``` - -Performs a step in the conversation. - -**Parameters:** - -- **input_message** (BaseMessage): The input message. - -**Returns:** - - ChatAgentResponse: A struct containing the output messages, -a boolean indicating whether the chat session has terminated, -and information about the chat session. diff --git a/examples/deductive_reasoner_agent/deduce_conditions_and_quality.py b/examples/deductive_reasoner_agent/deduce_conditions_and_quality.py deleted file mode 100644 index d483ea9b13..0000000000 --- a/examples/deductive_reasoner_agent/deduce_conditions_and_quality.py +++ /dev/null @@ -1,40 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -import json - -from colorama import Fore - -from camel.agents.deductive_reasoner_agent import DeductiveReasonerAgent - - -def main(model=None) -> None: - # Construct deductive reasoner agent - insight_agent = DeductiveReasonerAgent(model=model) - - starting_state = "The current empty website." - target_state = "A website with search capabilities." 
- conditions_and_quality = insight_agent.deduce_conditions_and_quality( - starting_state=starting_state, target_state=target_state - ) - print( - Fore.GREEN - + "Conditions and quality from the starting state:\n" - + f"{json.dumps(conditions_and_quality, - indent=4, ensure_ascii=False)}", - Fore.RESET, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/embodiment/code_execution.py b/examples/embodiment/code_execution.py deleted file mode 100644 index ad31ddf5d8..0000000000 --- a/examples/embodiment/code_execution.py +++ /dev/null @@ -1,44 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -from camel.agents import EmbodiedAgent -from camel.generators import SystemMessageGenerator -from camel.types import RoleType - - -def main(): - # Create an embodied agent - role_name = "Programmer" - meta_dict = dict(role=role_name, task="Programming") - sys_msg = SystemMessageGenerator().from_dict( - meta_dict=meta_dict, - role_tuple=(role_name, RoleType.EMBODIMENT), - ) - embodied_agent = EmbodiedAgent( - sys_msg, - verbose=True, - ) - print(embodied_agent.system_message.content) - - user_msg = ( - "Write a bash script to install numpy, " - "then write a python script to compute " - "the dot product of [6.75,3] and [4,5] and print the result, " - "then write a script to open a browser and search today's weather." - ) - response = embodied_agent.step(user_msg) - print(response.msg.content) - - -if __name__ == "__main__": - main() diff --git a/test/agents/test_deductive_reasoner_agent.py b/test/agents/test_deductive_reasoner_agent.py deleted file mode 100644 index be612afc64..0000000000 --- a/test/agents/test_deductive_reasoner_agent.py +++ /dev/null @@ -1,137 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. 
========= -from mock import patch - -from camel.agents import ChatAgent -from camel.agents.deductive_reasoner_agent import DeductiveReasonerAgent -from camel.messages import BaseMessage -from camel.responses import ChatAgentResponse -from camel.types import RoleType - - -@patch.object(ChatAgent, 'step') -def test_deductive_reasoner_agent(mock_step): - mock_content = generate_mock_content() - mock_msg = BaseMessage( - role_name="Deductive Reasoner", - role_type=RoleType.ASSISTANT, - meta_dict=None, - content=mock_content, - ) - - # Mock the step function - mock_step.return_value = ChatAgentResponse( - msgs=[mock_msg], terminated=False, info={} - ) - - starting_state = "I was walking down the street in New York with a Anna." - target_state = "I remind Anna to pay attention to personal safety." - - # Construct deductive reasoner agent - deductive_reasoner_agent = DeductiveReasonerAgent() - - # Generate the conditions and quality dictionary based on the mock step - # function - conditions_and_quality = ( - deductive_reasoner_agent.deduce_conditions_and_quality( - starting_state=starting_state, target_state=target_state - ) - ) - - expected_dict = generate_expected_content() - - assert conditions_and_quality == expected_dict - - -# Generate mock content for the deductive reasoner agent -def generate_mock_content(): - return """- Characterization and comparison of $A$ and $B$: -$A$ is an empty website, while $B$ is a website with search capabilities. - -- Historical & Empirical Analysis: -None - -- Logical Deduction of Conditions ($C$) (multiple conditions can be deduced): - condition 1: - The website needs to have a search bar. - condition 2: - The website needs to have a database of indexed content. - condition 3: - The website needs to have a search algorithm or function implemented. - condition 4: - The website needs to have a user interface that allows users to input - search queries. - condition 5: - The website needs to have a backend system that processes search - queries and retrieves relevant results. - -- Entity/Label Recognition of Conditions: -["Website search bar", "Indexed content database", "Search algorithm/ -function", "User interface for search queries", "Backend system for query -processing"] - -- Quality Assessment ($Q$) (do not use symbols): - The transition from $A$ to $B$ would be considered efficient if the search - capabilities are implemented with minimal resource usage. - The transition would be considered effective if the website successfully - provides accurate search results. - Safety and risks should be assessed to ensure user privacy and data - security during the transition. - Feedback mechanisms should be incorporated to continuously improve the - search capabilities based on user feedback. - -- Iterative Evaluation: -None""" - - -# Generate expected dictionary of conditions and quality -def generate_expected_content(): - return { - "conditions": { - "condition 1": "The website needs to have a search bar.", - "condition 2": ( - "The website needs to have a database of indexed content." - ), - "condition 3": ( - "The website needs to have a search algorithm or function " - "implemented." - ), - "condition 4": ( - "The website needs to have a user interface that allows users " - "to input\n search queries." - ), - "condition 5": ( - "The website needs to have a backend system that processes " - "search\n queries and retrieves relevant results." 
- ), - }, - "labels": [ - "Website search bar", - "Indexed content database", - "Search algorithm/\nfunction", - "User interface for search queries", - "Backend system for query\nprocessing", - ], - "evaluate_quality": ( - "The transition from $A$ to $B$ would be considered efficient if " - "the search\n capabilities are implemented with minimal " - "resource usage.\n The transition would be considered " - "effective if the website successfully\n provides accurate " - "search results.\n Safety and risks should be assessed to" - " ensure user privacy and data\n security during the " - "transition.\n Feedback mechanisms should be incorporated " - "to continuously improve the\n search capabilities based on" - " user feedback." - ), - } diff --git a/test/agents/test_embodied_agent.py b/test/agents/test_embodied_agent.py deleted file mode 100644 index 26f83a812a..0000000000 --- a/test/agents/test_embodied_agent.py +++ /dev/null @@ -1,83 +0,0 @@ -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ========= Copyright 2023-2026 @ CAMEL-AI.org. All Rights Reserved. ========= -import binascii -from unittest.mock import MagicMock - -import pytest -import requests -from openai.types.chat.chat_completion import Choice -from openai.types.chat.chat_completion_message import ChatCompletionMessage -from openai.types.completion_usage import CompletionUsage - -from camel.agents import EmbodiedAgent -from camel.generators import SystemMessageGenerator -from camel.messages import BaseMessage -from camel.types import ChatCompletion, RoleType - -model_backend_rsp_base = ChatCompletion( - id="mock_response_id", - choices=[ - Choice( - finish_reason="stop", - index=0, - logprobs=None, - message=ChatCompletionMessage( - content="This is a mock response content.", - role="assistant", - function_call=None, - tool_calls=None, - ), - ) - ], - created=123456789, - model="gpt-4o-2024-05-13", - object="chat.completion", - usage=CompletionUsage( - completion_tokens=32, - prompt_tokens=15, - total_tokens=47, - ), -) - - -@pytest.mark.model_backend -def test_step(step_call_count=3): - # Create an embodied agent - role_name = "Artist" - meta_dict = dict(role=role_name, task="Drawing") - sys_msg = SystemMessageGenerator().from_dict( - meta_dict=meta_dict, - role_tuple=(f"{role_name}'s Embodiment", RoleType.EMBODIMENT), - ) - embodied_agent = EmbodiedAgent(sys_msg, verbose=True) - embodied_agent.model_backend.run = MagicMock( - return_value=model_backend_rsp_base - ) - - user_msg = BaseMessage.make_user_message( - role_name=role_name, - content="Draw all the Camelidae species.", - ) - for i in range(step_call_count): - try: - response = embodied_agent.step(user_msg) - except (binascii.Error, requests.exceptions.ConnectionError) as ex: - print( - "Warning: caught an exception, ignoring it since " - f"it is a known issue of Huggingface ({ex!s})" - ) - return - assert isinstance(response.msg, BaseMessage), f"Error in round {i+1}" - assert not 
response.terminated, f"Error in round {i+1}" - assert isinstance(response.info, dict), f"Error in round {i+1}" From d73cb439bc5f3ffea27ac99e4cf39da8dd63f741 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 8 Feb 2026 02:01:54 +0000 Subject: [PATCH 6/6] Update agent documentation to remove EmbodiedAgent and DeductiveReasonerAgent references Co-authored-by: lightaime <23632352+lightaime@users.noreply.github.com> --- docs/key_modules/agents.md | 6 ------ docs/mintlify/key_modules/agents.mdx | 6 ------ 2 files changed, 12 deletions(-) diff --git a/docs/key_modules/agents.md b/docs/key_modules/agents.md index 9a97f0d7b6..caba2f75d0 100644 --- a/docs/key_modules/agents.md +++ b/docs/key_modules/agents.md @@ -42,12 +42,6 @@ The `ChatAgent` is the primary implementation that handles conversations with la **`CriticAgent`** Specialized agent for evaluating and critiquing responses or solutions. Used in scenarios requiring quality assessment or validation. - **`DeductiveReasonerAgent`** - Focused on logical reasoning and deduction. Breaks down complex problems into smaller, manageable steps. - - **`EmbodiedAgent`** - Designed for embodied AI scenarios, capable of understanding and responding to physical world contexts. - **`KnowledgeGraphAgent`** Specialized in building and utilizing knowledge graphs for enhanced reasoning and information management. diff --git a/docs/mintlify/key_modules/agents.mdx b/docs/mintlify/key_modules/agents.mdx index 9a97f0d7b6..caba2f75d0 100644 --- a/docs/mintlify/key_modules/agents.mdx +++ b/docs/mintlify/key_modules/agents.mdx @@ -42,12 +42,6 @@ The `ChatAgent` is the primary implementation that handles conversations with la **`CriticAgent`** Specialized agent for evaluating and critiquing responses or solutions. Used in scenarios requiring quality assessment or validation. - **`DeductiveReasonerAgent`** - Focused on logical reasoning and deduction. Breaks down complex problems into smaller, manageable steps. - - **`EmbodiedAgent`** - Designed for embodied AI scenarios, capable of understanding and responding to physical world contexts. - **`KnowledgeGraphAgent`** Specialized in building and utilizing knowledge graphs for enhanced reasoning and information management.
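
Migration note (not part of the patches above): code that previously relied on the deleted `DeductiveReasonerAgent` or `EmbodiedAgent` can approximate the same behavior with the remaining `ChatAgent`. The sketch below is a minimal, illustrative example only; the system prompt is an assumption and does not reproduce the removed agent's internal prompt, and the starting/target states are reused from the deleted `examples/deductive_reasoner_agent/deduce_conditions_and_quality.py` example.

```python
# Minimal sketch: approximating the removed DeductiveReasonerAgent with the
# remaining ChatAgent. The system prompt is illustrative, not the deleted
# agent's actual prompt.
from camel.agents import ChatAgent

deductive_sys_msg = (
    "You are a deductive reasoner. Given a starting state A and a target "
    "state B, list the conditions C required to transition from A to B "
    "and assess the quality Q of that transition."
)

agent = ChatAgent(system_message=deductive_sys_msg)

# States reused from the deleted deduce_conditions_and_quality.py example.
starting_state = "The current empty website."
target_state = "A website with search capabilities."

response = agent.step(
    f"Starting state: {starting_state}\nTarget state: {target_state}"
)
print(response.msg.content)
```

For the code-execution workflow that `EmbodiedAgent` covered, a `ChatAgent` configured with a code-execution toolkit from `camel.toolkits` (if one is available in your CAMEL version) is a comparable substitute.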