[BUG] OpenAI's o1 Reasoning Model: 'system' Role Unsupported When Initializing Agents in Roleplaying #1833

Description

@kejinwu201

Required prerequisites

What version of camel are you using?

v0.2.27

System information

pip install

Python 3.11.4

Problem description

When using the camel library with an OpenAI o1 reasoning model (specifically o1-mini or o1-preview), the request fails because these models do not accept chat messages with the 'system' role.
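
For reference, the same 400 error appears to be reproducible outside camel with a bare OpenAI SDK call, which isolates the problem to the model's rejection of the 'system' role rather than anything camel-specific. A minimal sketch, assuming a valid `OPENAI_API_KEY` in the environment:

```python
from openai import OpenAI

client = OpenAI()

# Fails with openai.BadRequestError (400, code 'unsupported_value'):
# "'messages[0].role' does not support 'system' with this model."
client.chat.completions.create(
    model="o1-mini",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ],
)
```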

Reproducible example code

import os

import dotenv
from colorama import Fore

from camel.memories import (
    AgentMemory,
    ChatHistoryMemory,
    MemoryRecord,
    ScoreBasedContextCreator,
)
from camel.models import ModelFactory
from camel.societies import RolePlaying
from camel.toolkits import FunctionTool, HumanToolkit
from camel.types import ModelPlatformType, ModelType, RoleType, TaskType
from camel.utils import print_text_animated

human_toolkit = HumanToolkit()

dotenv.load_dotenv()

model = ModelFactory.create(
    model_platform=ModelPlatformType.GEMINI,
    model_type=ModelType.GEMINI_2_0_FLASH,
)
# The o1-mini model below is what triggers the failure.
reasoning_model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.O1_MINI,
)
task_prompt = """

"""
def main(model=None, chat_turn_limit=50) -> None:
    # Initialize the role-playing session on the trading-bot task with the
    # default model (GPT_4O_MINI).
    assistant_context_creator = ScoreBasedContextCreator(
        # Use the default model's token_counter, or pass in your own
        # model's token_counter instead.
        ModelFactory.create(
            model_platform=ModelPlatformType.DEFAULT,
            model_type=ModelType.DEFAULT,
        ).token_counter,
        token_limit=10000,  # token_limit is still passed via agent_kwargs.
    )
    # Set window_size on the chat-history memory.
    assistant_memory = ChatHistoryMemory(assistant_context_creator, window_size=999)

    user_context_creator = ScoreBasedContextCreator(
        # Same as above: the default model's token_counter.
        ModelFactory.create(
            model_platform=ModelPlatformType.DEFAULT,
            model_type=ModelType.DEFAULT,
        ).token_counter,
        token_limit=10000,
    )
    user_memory = ChatHistoryMemory(user_context_creator, window_size=999)

    role_play_session = RolePlaying(
        assistant_role_name="",
        assistant_agent_kwargs=dict(
            memory=assistant_memory,
            model=reasoning_model,
            message_window_size=999,
            token_limit=10000,
            tools=[*human_toolkit.get_tools()],
        ),
        user_role_name="",
        user_agent_kwargs=dict(
            memory=user_memory,
            model=model,
            message_window_size=999,
            token_limit=10000,
        ),
        task_prompt=task_prompt,
        # sys_msg_generator_kwargs was referenced here but never defined in
        # the snippet; commented out to keep the example runnable.
        # sys_msg_generator_kwargs=sys_msg_generator_kwargs,
        with_task_specify=False,
        with_critic_in_the_loop=False,
        with_task_planner=True,
        output_language="chinese,中文",
    )

    # Output the initial messages in different colors.
    print(
        Fore.GREEN
        + f"AI Assistant sys message:\n{role_play_session.assistant_sys_msg}\n"
    )
    print(
        Fore.BLUE + f"AI User sys message:\n{role_play_session.user_sys_msg}\n"
    )
    print(Fore.RED + f"Final task prompt:\n{role_play_session.task_prompt}\n")

    n = 0
    input_msg = role_play_session.init_chat()

    # Output responses step by step in different colors; keep printing until
    # the terminate content is detected or the turn limit is reached.
    while n < chat_turn_limit:
        n += 1
        assistant_response, user_response = role_play_session.step(input_msg)

        if assistant_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI Assistant terminated. Reason: "
                    f"{assistant_response.info['termination_reasons']}."
                )
            )
            break
        if user_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI User terminated. "
                    f"Reason: {user_response.info['termination_reasons']}."
                )
            )
            break

        print_text_animated(
            Fore.BLUE + f"AI User:\n\n{user_response.msg.content}\n"
        )
        print_text_animated(
            Fore.GREEN + "AI Assistant:\n\n"
            f"{assistant_response.msg.content}\n"
        )

        if "CAMEL_TASK_DONE" in user_response.msg.content:
            break

        input_msg = assistant_response.msg
if __name__ == "__main__":
    main()

Traceback

D:\python\Lib\site-packages\camel\models\openai_model.py:109: UserWarning: Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), which has certain limitations, reference: `https://platform.openai.com/docs/guides/reasoning`.
  warnings.warn(
2025-03-13 14:54:47,518 - camel.models.model_manager - ERROR - Error processing with model: <camel.models.openai_model.OpenAIModel object at 0x000002A1DB21A850>
2025-03-13 14:54:47,519 - camel.agents.chat_agent - ERROR - An error occurred while running model o1-mini, index: 0
Traceback (most recent call last):
  File "D:\python\Lib\site-packages\camel\agents\chat_agent.py", line 687, in _get_model_response
    response = self.model_backend.run(
               ^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\camel\models\model_manager.py", line 226, in run
    raise exc
  File "D:\python\Lib\site-packages\camel\models\model_manager.py", line 216, in run
    response = self.current_model.run(messages, response_format, tools)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\camel\models\base_model.py", line 50, in wrapped_run
    return original_run(self, messages, *args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\camel\models\base_model.py", line 191, in run
    return self._run(messages, response_format, tools)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\camel\models\openai_model.py", line 161, in _run
    return self._request_chat_completion(messages, tools)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\camel\models\openai_model.py", line 204, in _request_chat_completion
    return self._client.chat.completions.create(
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\openai\_utils\_utils.py", line 279, in wrapper
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\openai\resources\chat\completions.py", line 850, in create
    return self._post(
           ^^^^^^^^^^^
  File "D:\python\Lib\site-packages\openai\_base_client.py", line 1283, in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\openai\_base_client.py", line 960, in request
    return self._request(
           ^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\openai\_base_client.py", line 1064, in _request
    raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': {'message': "Unsupported value: 'messages[0].role' does not support 'system' with this model.", 'type': 'invalid_request_error', 'param': 'messages[0].role', 'code': 'unsupported_value'}}
Traceback (most recent call last):
  File "d:\asksia_DeepStudy_test\backend\test\roleplaying.py", line 137, in <module>
    main()
  File "d:\asksia_DeepStudy_test\backend\test\roleplaying.py", line 102, in main
    assistant_response, user_response = role_play_session.step(input_msg)
                                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\camel\societies\role_playing.py", line 553, in step
    assistant_response = self.assistant_agent.step(user_msg)
                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\camel\agents\chat_agent.py", line 498, in step
    response = self._get_model_response(
               ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\python\Lib\site-packages\camel\agents\chat_agent.py", line 705, in _get_model_response
    raise ModelProcessingError(
camel.models.model_manager.ModelProcessingError: Unable to process messages: the only provided model did not run successfully. Error: Error code: 400 - {'error': {'message': "Unsupported value: 'messages[0].role' does not support 'system' with this model.", 'type': 'invalid_request_error', 'param': 'messages[0].role', 'code': 'unsupported_value'}}

Expected behavior

Expected Behavior:

The camel library should successfully interact with the OpenAI model to generate responses based on the provided conversation history, including system messages if provided, without raising errors related to unsupported roles.

Actual Behavior:

Instead of generating a response, the camel library encounters a BadRequestError from the OpenAI API. The error message clearly states: "Unsupported value: 'messages[0].role' does not support 'system' with this model." This indicates that the OpenAI O1 models (o1-mini, o1-preview) do not recognize or process messages with the system role. Consequently, camel raises a ModelProcessingError because it cannot get a successful response from the model.
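
Until the library handles this internally, one possible client-side workaround is to remap the role before the request is sent, so the system prompt's content is preserved rather than dropped. A minimal sketch (the helper name `adapt_messages_for_o1` is hypothetical, not a camel API):

```python
def adapt_messages_for_o1(messages: list[dict]) -> list[dict]:
    # o1-mini / o1-preview reject the 'system' role, so rewrite any system
    # message as a 'user' message while keeping its content and position.
    return [
        {"role": "user", "content": m["content"]} if m["role"] == "system" else m
        for m in messages
    ]
```

camel could apply an equivalent remapping inside `OpenAIModel._request_chat_completion` (seen in the traceback above) whenever an o1 model type is configured.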

Additional context

No response

Metadata

Labels

P0 (Task with high level priority), bug (Something isn't working)