Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 6 additions & 9 deletions camel/agents/_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,14 +11,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from typing import Any, Dict, List, Optional, Union
from typing import Any, Dict, List, Optional

from openai import AsyncStream, Stream
from openai.types.chat import ChatCompletionChunk
from pydantic import BaseModel, ConfigDict

from camel.messages import BaseMessage
from camel.types import ChatCompletion


class ToolCallRequest(BaseModel):
Expand All @@ -33,11 +30,11 @@ class ModelResponse(BaseModel):
r"""The response from the model."""

model_config = ConfigDict(arbitrary_types_allowed=True)
response: Union[
ChatCompletion,
Stream[ChatCompletionChunk],
AsyncStream[ChatCompletionChunk],
]
# Phase 1: relax the annotation to decouple from provider schemas.
# Existing call sites do not rely on static typing here and tests
# often pass MagicMock; this change avoids tight coupling to
# ChatCompletion when adapters introduce unified responses.
response: Any
tool_call_requests: Optional[List[ToolCallRequest]]
output_messages: List[BaseMessage]
finish_reasons: List[str]
Expand Down
20 changes: 20 additions & 0 deletions camel/core/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""Core abstractions for CAMEL runtime.

This package hosts model-agnostic message types that can be adapted to
either legacy Chat Completions or the newer OpenAI Responses API.
"""

__all__: list[str] = []
163 changes: 163 additions & 0 deletions camel/core/messages.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""Model-agnostic message abstractions and converters.

Phase 1 introduces `CamelMessage` to decouple CAMEL from the legacy
OpenAI Chat Completions message schema while keeping behaviour identical
via adapter conversion.
"""

from __future__ import annotations

from typing import Any, Dict, List, Literal, Optional, cast

from pydantic import BaseModel, Field

from camel.messages import OpenAIMessage


class CamelContentPart(BaseModel):
    """One fragment of a message body.

    Deliberately small for Phase 1: only the fragment kinds already
    consumed through Chat Completions are modeled (plain text and image
    URLs). Tool-related fragments live at the response layer instead.

    Attributes:
        type: Discriminator for the fragment kind.
        payload: Kind-specific data (e.g. ``{"text": ...}`` or
            ``{"url": ..., "detail": ...}``).
    """

    type: Literal["text", "image_url"]
    payload: Dict[str, Any] = Field(default_factory=dict)


class CamelMessage(BaseModel):
    """Provider-neutral chat message used inside the CAMEL runtime.

    After conversion, the same schema maps onto both the legacy Chat
    Completions format and the newer Responses API.

    Attributes:
        role: Conversation role of the sender.
        content: Ordered list of content fragments.
        name: Optional participant name (non-tool roles only).
        tool_call_id: Identifier linking a tool result to its call.
        metadata: Free-form extra data carried alongside the message.
    """

    role: Literal["system", "user", "assistant", "tool", "developer"]
    content: List[CamelContentPart] = Field(default_factory=list)
    name: Optional[str] = None
    tool_call_id: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None


def openai_messages_to_camel(
    messages: List[OpenAIMessage],
) -> List[CamelMessage]:
    """Convert OpenAI ChatCompletion-style messages to `CamelMessage`.

    Notes:
        - Only ``text`` and ``image_url`` items are converted in Phase 1;
          other item types are silently dropped.
        - ``name`` is carried over when present; ``tool_call_id`` is kept
          only for tool-role messages.
    """
    converted: List[CamelMessage] = []
    for raw in messages:
        role = raw.get("role", "user")  # type: ignore[assignment]
        fragments: List[CamelContentPart] = []

        body = raw.get("content")
        if isinstance(body, str):
            # Whitespace-only strings yield no fragment at all.
            if body.strip():
                fragments.append(
                    CamelContentPart(type="text", payload={"text": body})
                )
        elif isinstance(body, list):
            for entry in body:
                kind = entry.get("type") if isinstance(entry, dict) else None
                if kind == "text":
                    fragments.append(
                        CamelContentPart(
                            type="text",
                            payload={"text": entry.get("text", "")},
                        )
                    )
                elif kind == "image_url":
                    img = entry.get("image_url", {})
                    fragments.append(
                        CamelContentPart(
                            type="image_url",
                            payload={
                                "url": img.get("url"),
                                "detail": img.get("detail"),
                            },
                        )
                    )

        converted.append(
            CamelMessage(
                role=cast(Any, role),  # mypy: literal narrowing from dict
                content=fragments,
                name=cast(Optional[str], raw.get("name", None)),
                tool_call_id=(
                    cast(Optional[str], raw.get("tool_call_id", None))
                    if role == "tool"
                    else None
                ),
            )
        )

    return converted


def camel_messages_to_openai(
    messages: List[CamelMessage],
) -> List[OpenAIMessage]:
    """Convert `CamelMessage` back to OpenAI ChatCompletion-style messages.

    Lossless for the text/image_url subset handled in Phase 1. Tool-role
    messages are flattened to a plain string ``content`` plus
    ``tool_call_id``; other roles use the hybrid content-list form.
    """
    out: List[OpenAIMessage] = []
    # Roles for which Chat Completions accepts an optional "name" field.
    named_roles = {"system", "user", "assistant", "developer"}

    for message in messages:
        if message.role == "tool":
            # Tool messages expect string content + tool_call_id.
            texts: List[str] = []
            for frag in message.content:
                if frag.type == "text":
                    texts.append(frag.payload.get("text", ""))
            payload: Dict[str, Any] = {
                "role": "tool",
                "content": "\n".join(t for t in texts if t),
            }
            if message.tool_call_id:
                payload["tool_call_id"] = message.tool_call_id
            out.append(cast(OpenAIMessage, payload))
            continue

        # Non-tool roles: rebuild the hybrid content list.
        parts: List[Dict[str, Any]] = []
        for frag in message.content:
            if frag.type == "text":
                parts.append(
                    {"type": "text", "text": frag.payload.get("text", "")}
                )
            elif frag.type == "image_url":
                parts.append(
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": frag.payload.get("url"),
                            # Missing/None detail falls back to "auto".
                            "detail": frag.payload.get("detail") or "auto",
                        },
                    }
                )

        payload = {"role": message.role, "content": parts or ""}
        if message.name and message.role in named_roles:
            payload["name"] = message.name
        out.append(cast(OpenAIMessage, payload))

    return out
6 changes: 6 additions & 0 deletions camel/responses/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""Unified response abstractions and adapters.

This package contains agent-visible response containers and provider-agnostic
response models/adapters used across CAMEL.
"""

from .agent_responses import ChatAgentResponse

__all__ = [
Expand Down
16 changes: 16 additions & 0 deletions camel/responses/adapters/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""Response adapters from provider-specific schemas to CAMEL types."""

__all__: list[str] = []
119 changes: 119 additions & 0 deletions camel/responses/adapters/chat_completions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""Adapters for mapping OpenAI Chat Completions to CAMEL abstractions."""

from __future__ import annotations

from typing import Any, Dict, List, Optional

from camel.messages.base import BaseMessage
from camel.responses.model_response import (
CamelModelResponse,
CamelToolCall,
CamelUsage,
)
from camel.types import ChatCompletion, RoleType


def _choice_tool_calls_to_camel(
    choice_msg: Any,
) -> Optional[List[CamelToolCall]]:
    """Extract tool calls from a Chat Completions choice message.

    Handles both OpenAI SDK objects (attribute access) and plain dicts
    (e.g. responses synthesized via ``ChatCompletion.construct(...)``),
    so tool call ids/names are preserved in either representation.

    Args:
        choice_msg: The ``message`` field of a completion choice.

    Returns:
        The converted tool calls, or ``None`` when there are none.
    """
    import json

    def _read(obj: Any, key: str, default: Any = None) -> Any:
        # Tool calls may arrive as dicts rather than SDK objects; using
        # only getattr would silently drop id/name/arguments for dicts.
        if isinstance(obj, dict):
            return obj.get(key, default)
        return getattr(obj, key, default)

    tool_calls = _read(choice_msg, "tool_calls")
    if not tool_calls:
        return None

    result: List[CamelToolCall] = []
    for tc in tool_calls:
        func = _read(tc, "function")
        name = _read(func, "name") if func else None
        args_str = _read(func, "arguments", "{}") if func else "{}"
        try:
            args = json.loads(args_str) if isinstance(args_str, str) else {}
        except Exception:
            # Malformed JSON arguments degrade to an empty dict rather
            # than failing the whole response adaptation.
            args = {}
        result.append(
            CamelToolCall(
                id=_read(tc, "id", "") or "",
                name=name or "",
                args=args,
            )
        )
    return result


def _usage_to_dict(usage_obj: Any) -> Dict[str, Any]:
    """Best-effort conversion of a usage object to a plain dict.

    Accepts ``None``, plain dicts (from ``ChatCompletion.construct``),
    pydantic models (``model_dump``), and dataclasses; anything else
    collapses to an empty dict rather than raising.
    """
    if usage_obj is None:
        return {}
    if isinstance(usage_obj, dict):
        return usage_obj
    dump = getattr(usage_obj, "model_dump", None)
    if callable(dump):
        try:
            return cast(Dict[str, Any], dump())
        except Exception:
            pass
    try:
        import dataclasses

        return dataclasses.asdict(usage_obj)  # type: ignore[arg-type]
    except Exception:
        return {}


def adapt_chat_to_camel_response(
    response: ChatCompletion,
) -> CamelModelResponse:
    """Convert an OpenAI ChatCompletion into a CamelModelResponse.

    Performs the minimal mapping needed in Phase 1 and keeps the
    original response accessible via the ``raw`` field.

    Args:
        response: The provider response to adapt.

    Returns:
        The provider-agnostic response container.
    """
    output_messages: List[BaseMessage] = []
    finish_reasons: List[str] = []
    tool_call_requests: Optional[List[CamelToolCall]] = None

    for choice in response.choices:
        finish_reasons.append(str(choice.finish_reason))

        msg = choice.message
        content = getattr(msg, "content", None)
        has_text = content is not None and str(content).strip() != ""
        # Skip choices that carry neither content nor tool calls.
        if not has_text and not getattr(msg, "tool_calls", None):
            continue

        output_messages.append(
            BaseMessage(
                role_name="assistant",
                role_type=RoleType.ASSISTANT,
                meta_dict={},
                content=content or "",
                parsed=getattr(msg, "parsed", None),
            )
        )

        # Collect tool calls from the first non-empty choice only
        # (aligns with existing usage).
        if tool_call_requests is None:
            tool_call_requests = _choice_tool_calls_to_camel(msg)

    usage_raw = _usage_to_dict(getattr(response, "usage", None))
    usage = CamelUsage(
        input_tokens=usage_raw.get("prompt_tokens"),
        output_tokens=usage_raw.get("completion_tokens"),
        total_tokens=usage_raw.get("total_tokens"),
        raw=usage_raw or None,
    )

    return CamelModelResponse(
        id=getattr(response, "id", ""),
        model=getattr(response, "model", None),
        created=getattr(response, "created", None),
        output_messages=output_messages,
        tool_call_requests=tool_call_requests,
        finish_reasons=finish_reasons,
        usage=usage,
        raw=response,
    )
Comment on lines 168 to 178
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the new adapter doesn't keep the logprobs information. if this is not by design, it must be addressed.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for catching that, I have added logprobs

Loading
Loading