-
Notifications
You must be signed in to change notification settings - Fork 1.7k
feat: add CAMEL abstraction for future support of new API style #3328
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from 1 commit
1b6ee86
ce19def
f8b8d8e
389cb6e
3196cfa
00dc3ff
450c4ed
bd01333
5b8acca
858efd1
cbb386c
4630c7c
b6b1f00
3dd2014
0473402
abdb22a
8ea658f
a3ccb85
fd78ecf
2fd72a0
05e27a5
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,20 @@ | ||
| # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= | ||
| """Core abstractions for CAMEL runtime. | ||
|
|
||
| This package hosts model-agnostic message types that can be adapted to | ||
| either legacy Chat Completions or the newer OpenAI Responses API. | ||
| """ | ||
|
|
||
| __all__: list[str] = [] |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,163 @@ | ||
| # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= | ||
| """Model-agnostic message abstractions and converters. | ||
|
|
||
| Phase 1 introduces `CamelMessage` to decouple CAMEL from the legacy | ||
| OpenAI Chat Completions message schema while keeping behaviour identical | ||
| via adapter conversion. | ||
| """ | ||
|
|
||
| from __future__ import annotations | ||
|
|
||
| from typing import Any, Dict, List, Literal, Optional, cast | ||
|
|
||
| from pydantic import BaseModel, Field | ||
|
|
||
| from camel.messages import OpenAIMessage | ||
|
|
||
|
|
||
class CamelContentPart(BaseModel):
    """A single content fragment.

    This is intentionally minimal for Phase 1. It currently models the
    fragments we already consume through Chat Completions: text and image.
    Tool-related parts are represented at the response layer for now.
    """

    # Discriminator for the fragment kind; only "text" and "image_url"
    # are supported in Phase 1.
    type: Literal["text", "image_url"]
    # Kind-specific data, e.g. {"text": ...} for text parts or
    # {"url": ..., "detail": ...} for image parts.
    payload: Dict[str, Any] = Field(default_factory=dict)
|
|
||
|
|
||
class CamelMessage(BaseModel):
    """A model-agnostic chat message used by CAMEL runtime.

    The schema is compatible with both legacy Chat Completions and the
    newer Responses API after conversion.
    """

    # Sender role; mirrors the OpenAI role vocabulary.
    role: Literal["system", "user", "assistant", "tool", "developer"]
    # Ordered content fragments (text/image) making up the message body.
    content: List[CamelContentPart] = Field(default_factory=list)
    # Optional participant name; converters attach it only for
    # system/user/assistant/developer roles.
    name: Optional[str] = None
    # Present only for tool messages; links the result to its tool call.
    tool_call_id: Optional[str] = None
    # Free-form metadata; not emitted when converting back to OpenAI
    # messages in Phase 1.
    metadata: Optional[Dict[str, Any]] = None
|
|
||
|
|
||
def openai_messages_to_camel(
    messages: List[OpenAIMessage],
) -> List[CamelMessage]:
    """Convert OpenAI ChatCompletion-style messages to `CamelMessage`.

    Notes:
        - Only text and image_url items are converted in Phase 1.
        - Other fields are carried over when present (name, tool_call_id).
    """
    converted: List[CamelMessage] = []
    for raw in messages:
        msg_role = raw.get("role", "user")  # type: ignore[assignment]
        fragments: List[CamelContentPart] = []

        raw_content = raw.get("content")
        if isinstance(raw_content, str):
            # Plain-string content becomes a single text fragment;
            # whitespace-only strings are dropped.
            if raw_content.strip():
                fragments.append(
                    CamelContentPart(
                        type="text", payload={"text": raw_content}
                    )
                )
        elif isinstance(raw_content, list):
            # Hybrid content list: keep only the text/image_url subset.
            for entry in raw_content:
                kind = entry.get("type") if isinstance(entry, dict) else None
                if kind == "text":
                    fragments.append(
                        CamelContentPart(
                            type="text",
                            payload={"text": entry.get("text", "")},
                        )
                    )
                elif kind == "image_url":
                    img = entry.get("image_url", {})
                    fragments.append(
                        CamelContentPart(
                            type="image_url",
                            payload={
                                "url": img.get("url"),
                                "detail": img.get("detail"),
                            },
                        )
                    )

        converted.append(
            CamelMessage(
                role=cast(Any, msg_role),  # narrow role literal from dict
                content=fragments,
                name=cast(Optional[str], raw.get("name", None)),
                # tool_call_id is only meaningful on tool messages.
                tool_call_id=(
                    cast(Optional[str], raw.get("tool_call_id", None))
                    if msg_role == "tool"
                    else None
                ),
            )
        )

    return converted
|
|
||
|
|
||
def camel_messages_to_openai(
    messages: List[CamelMessage],
) -> List[OpenAIMessage]:
    """Convert `CamelMessage` back to OpenAI ChatCompletion-style messages.

    This is lossless for the text/image_url subset used in Phase 1.
    """
    out: List[OpenAIMessage] = []
    for msg in messages:
        if msg.role == "tool":
            # Tool messages expect plain string content + tool_call_id.
            texts = [
                part.payload.get("text", "")
                for part in msg.content
                if part.type == "text"
            ]
            tool_dict: Dict[str, Any] = {
                "role": "tool",
                "content": "\n".join(t for t in texts if t),
            }
            if msg.tool_call_id:
                tool_dict["tool_call_id"] = msg.tool_call_id
            out.append(cast(OpenAIMessage, tool_dict))
            continue

        # Non-tool roles: emit a hybrid content list of typed parts.
        parts: List[Dict[str, Any]] = []
        for part in msg.content:
            if part.type == "text":
                parts.append(
                    {"type": "text", "text": part.payload.get("text", "")}
                )
            elif part.type == "image_url":
                parts.append(
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": part.payload.get("url"),
                            "detail": part.payload.get("detail") or "auto",
                        },
                    }
                )

        # Empty content degrades to "" to stay schema-compatible.
        msg_dict: Dict[str, Any] = {"role": msg.role, "content": parts or ""}
        if msg.name and msg.role in {
            "system",
            "user",
            "assistant",
            "developer",
        }:
            msg_dict["name"] = msg.name
        out.append(cast(OpenAIMessage, msg_dict))

    return out
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,16 @@ | ||
| # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= | ||
| """Response adapters from provider-specific schemas to CAMEL types.""" | ||
|
|
||
| __all__: list[str] = [] |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,119 @@ | ||
| # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= | ||
| """Adapters for mapping OpenAI Chat Completions to CAMEL abstractions.""" | ||
|
|
||
| from __future__ import annotations | ||
|
|
||
| from typing import Any, Dict, List, Optional | ||
|
|
||
| from camel.messages.base import BaseMessage | ||
| from camel.responses.model_response import ( | ||
| CamelModelResponse, | ||
| CamelToolCall, | ||
| CamelUsage, | ||
| ) | ||
| from camel.types import ChatCompletion, RoleType | ||
|
|
||
|
|
||
def _choice_tool_calls_to_camel(
    choice_msg: Any,
) -> Optional[List[CamelToolCall]]:
    """Extract tool calls from a chat choice message as `CamelToolCall`s.

    Handles both OpenAI SDK objects (attribute access) and plain dicts
    (key access): call sites that synthesize responses via
    ``ChatCompletion.construct(...)`` can carry dict-shaped tool calls,
    and reading them with ``getattr`` alone would silently yield
    ``CamelToolCall(id="", name="", args={})``, losing the id/name that
    downstream routing needs.

    Returns:
        The converted tool calls, or ``None`` when there are none.
    """
    import json

    def _get(obj: Any, key: str, default: Any = None) -> Any:
        # Uniform accessor over SDK objects and plain dicts.
        if isinstance(obj, dict):
            return obj.get(key, default)
        return getattr(obj, key, default)

    tool_calls = _get(choice_msg, "tool_calls")
    if not tool_calls:
        return None

    result: List[CamelToolCall] = []
    for tc in tool_calls:
        func = _get(tc, "function")
        name = _get(func, "name") if func is not None else None
        args_str = _get(func, "arguments", "{}") if func is not None else "{}"
        try:
            args = json.loads(args_str) if isinstance(args_str, str) else {}
        except Exception:
            # Malformed JSON arguments degrade to an empty dict rather
            # than failing the whole response adaptation.
            args = {}
        result.append(
            CamelToolCall(
                id=_get(tc, "id", "") or "", name=name or "", args=args
            )
        )
    return result
|
|
||
|
|
||
def _usage_to_dict(usage_obj: Any) -> Dict[str, Any]:
    """Best-effort conversion of a usage object into a plain dict.

    Tries pydantic ``model_dump()`` first, then ``dataclasses.asdict``;
    falls back to an empty dict so usage extraction never raises.
    """
    if usage_obj is None:
        return {}
    try:
        # Pydantic model -> dict
        return usage_obj.model_dump()
    except Exception:
        pass
    try:
        import dataclasses

        return dataclasses.asdict(usage_obj)
    except Exception:
        return {}


def adapt_chat_to_camel_response(
    response: ChatCompletion,
) -> CamelModelResponse:
    """Convert an OpenAI ChatCompletion into a CamelModelResponse.

    This performs the minimal mapping needed in Phase 1 (messages, tool
    calls, finish reasons, usage) and keeps the original response
    accessible via the `raw` field.

    Args:
        response: The provider ChatCompletion to adapt.

    Returns:
        The adapted CamelModelResponse.
    """
    output_messages: List[BaseMessage] = []
    finish_reasons: List[str] = []
    tool_call_requests: Optional[List[CamelToolCall]] = None

    for choice in response.choices:
        finish_reasons.append(str(choice.finish_reason))

        msg = choice.message
        content = getattr(msg, "content", None)
        tool_calls = getattr(msg, "tool_calls", None)
        # Skip choices that carry neither content nor tool calls.
        if (content is None or str(content).strip() == "") and not tool_calls:
            continue

        output_messages.append(
            BaseMessage(
                role_name="assistant",
                role_type=RoleType.ASSISTANT,
                meta_dict={},
                content=content or "",
                parsed=getattr(msg, "parsed", None),
            )
        )

        # Collect tool calls from the first non-empty choice only
        # (align with existing usage)
        if tool_call_requests is None:
            tool_call_requests = _choice_tool_calls_to_camel(msg)

    usage_raw = _usage_to_dict(getattr(response, "usage", None))
    usage = CamelUsage(
        input_tokens=usage_raw.get("prompt_tokens"),
        output_tokens=usage_raw.get("completion_tokens"),
        total_tokens=usage_raw.get("total_tokens"),
        raw=usage_raw or None,
    )

    return CamelModelResponse(
        id=getattr(response, "id", ""),
        model=getattr(response, "model", None),
        created=getattr(response, "created", None),
        output_messages=output_messages,
        tool_call_requests=tool_call_requests,
        finish_reasons=finish_reasons,
        usage=usage,
        raw=response,
    )
|
Comment on lines
168
to
178
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The new adapter doesn't keep the logprobs information; if this is not by design, it must be addressed.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Thanks for catching that, I have added logprobs |
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The helper
`_choice_tool_calls_to_camel` only reads tool call fields with `getattr`, which works for OpenAI SDK objects but silently drops data when the `tool_calls` list contains plain dicts. Many parts of the codebase synthesize `ChatCompletion` instances via `ChatCompletion.construct(...)` and pass dictionaries (see tests and model wrappers), so calling `adapt_chat_to_camel_response` on those objects yields `CamelToolCall(id="", name="", args={})`. Downstream consumers cannot route tool calls without the id or name. The adapter should also handle `dict` inputs (e.g. via `tc.get("id")`) to retain the tool call metadata. Useful? React with 👍 / 👎.