Commit abdb22a
add log probs
1 parent 0473402

File tree: 6 files changed (+89, -4 lines)

camel/models/openai_responses_model.py
Lines changed: 2 additions & 4 deletions

@@ -119,9 +119,7 @@ def _run(
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[
-        ChatCompletion, ChatCompletion
-    ]:  # unused legacy types in signature
+    ) -> ChatCompletion:  # unused legacy types in signature
         # Update trace
         agent_session_id = get_current_agent_session_id()
         if agent_session_id:
@@ -194,7 +192,7 @@ async def _arun(
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, ChatCompletion]:
+    ) -> ChatCompletion:
         agent_session_id = get_current_agent_session_id()
         if agent_session_id:
             update_langfuse_trace(

camel/responses/adapters/chat_completions.py
Lines changed: 8 additions & 0 deletions

@@ -100,9 +100,11 @@ def adapt_chat_to_camel_response(
     output_messages: List[BaseMessage] = []
     finish_reasons: List[str] = []
     tool_call_requests: Optional[List[CamelToolCall]] = None
+    logprobs_list: List[Any] = []

     for _, choice in enumerate(response.choices):
         finish_reasons.append(str(choice.finish_reason))
+        logprobs_list.append(getattr(choice, "logprobs", None))

         msg = choice.message
         # Skip empty (no content and no tool calls)
@@ -126,6 +128,11 @@ def adapt_chat_to_camel_response(
         if tool_call_requests is None:
             tool_call_requests = _choice_tool_calls_to_camel(msg)

+    # Preserve logprobs if caller requested them
+    logprobs: Optional[List[Any]] = (
+        logprobs_list if any(lp is not None for lp in logprobs_list) else None
+    )
+
     usage_raw: Dict[str, Any] = {}
     usage_obj: Optional[Any] = getattr(response, "usage", None)
     if usage_obj is not None:
@@ -166,5 +173,6 @@ def adapt_chat_to_camel_response(
         tool_call_requests=tool_call_requests,
         finish_reasons=finish_reasons,
         usage=usage,
+        logprobs=logprobs,
         raw=response,
     )
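For orientation, a minimal consumption sketch. The client setup, model name, and prompt below are illustrative and not part of this commit; only the adapter and field names come from the diff and tests here:

from openai import OpenAI

from camel.responses.adapters.chat_completions import (
    adapt_chat_to_camel_response,
)

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hi"}],
    logprobs=True,  # ask the provider for per-token logprobs
    top_logprobs=2,  # and the top-2 alternatives per token
)

cmr = adapt_chat_to_camel_response(completion)
if cmr.logprobs is not None:
    # One entry per choice; on this path each entry is the provider's
    # ChoiceLogprobs object, so token data lives under .content.
    for token_lp in cmr.logprobs[0].content or []:
        print(token_lp.token, token_lp.logprob)

Because the adapter only materializes the field when at least one choice actually carried logprobs, callers that never set logprobs=True keep getting logprobs=None.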

camel/responses/adapters/responses_adapter.py
Lines changed: 10 additions & 0 deletions

@@ -45,6 +45,7 @@ def responses_to_camel_response(

     audio_bytes: Optional[bytes] = None
     audio_transcript: Optional[str] = None
+    logprobs_list: List[Any] = []

     text = getattr(resp, "output_text", None)
     parts: List[str] = []
@@ -64,6 +65,13 @@ def responses_to_camel_response(
                 val = chunk.get("text") or chunk.get("output_text")
                 if val:
                     parts.append(str(val))
+                lp = (
+                    chunk.get("logprobs")
+                    if isinstance(chunk, dict)
+                    else getattr(chunk, "logprobs", None)
+                )
+                if lp is not None:
+                    logprobs_list.append(lp)
             elif chunk_type == "output_audio":
                 audio = chunk.get("audio")
                 if isinstance(audio, dict):
@@ -184,6 +192,7 @@ def responses_to_camel_response(
         total_tokens=usage_dict.get("total_tokens"),
         raw=usage_raw or None,
     )
+    logprobs: Optional[List[Any]] = logprobs_list if logprobs_list else None

     return CamelModelResponse(
         id=getattr(resp, "id", ""),
@@ -193,6 +202,7 @@ def responses_to_camel_response(
         tool_call_requests=tool_call_requests if tool_call_requests else None,
         finish_reasons=["stop"],
         usage=usage,
+        logprobs=logprobs,
         raw=resp,
     )
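On this path the adapter collects one list per output_text chunk, so the resulting camel_response.logprobs is a list of lists of entry dicts (the keys mirror the mock in the test further down). A hypothetical helper, purely illustrative, showing how that shape can be aggregated:

import math
from typing import Any, List, Optional

def mean_token_probability(logprobs: Optional[List[Any]]) -> Optional[float]:
    # Average exp(logprob) over every token entry in every chunk.
    if not logprobs:
        return None
    probs = [
        math.exp(entry["logprob"])
        for chunk_entries in logprobs  # one list per output_text chunk
        for entry in chunk_entries  # dicts like {"token": ..., "logprob": ...}
    ]
    return sum(probs) / len(probs) if probs else None

# mean_token_probability(camel_response.logprobs) -> ~0.905 for a single -0.1 entry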

camel/responses/model_response.py
Lines changed: 2 additions & 0 deletions

@@ -59,6 +59,8 @@ class CamelModelResponse(BaseModel):
     tool_call_requests: Optional[List[CamelToolCall]] = None
     finish_reasons: List[str] = Field(default_factory=list)
     usage: CamelUsage = Field(default_factory=CamelUsage)
+    # Optional logprobs aligned to provider choices (if requested)
+    logprobs: Optional[List[Any]] = None

     # Keep a handle to the original provider response for debugging/tests
     raw: Any = None
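The field is typed Optional[List[Any]] deliberately: the two adapters store different shapes. Judging from the tests added in this commit, the Chat Completions path keeps one ChoiceLogprobs object per choice, while the Responses path keeps one list of entry dicts per text chunk:

# Access patterns, inferred from the two tests below:
#   Chat Completions path: cmr.logprobs[0].content[0].token       -> "Hi"
#   Responses API path:    camel_response.logprobs[0][0]["token"] -> "Hi"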

test/models/test_openai_responses_model.py
Lines changed: 31 additions & 0 deletions

@@ -139,6 +139,37 @@ def test_responses_to_camel_response_with_audio():
     assert camel_response.output_messages[0].audio_transcript == "Hello world"


+def test_responses_to_camel_response_keeps_logprobs():
+    mock_response = MagicMock()
+    mock_response.id = "resp_124"
+    mock_response.model = "gpt-4o-mini"
+    mock_response.created = 1234567891
+
+    logprob_entry = {
+        "token": "Hi",
+        "bytes": [72, 105],
+        "logprob": -0.1,
+        "top_logprobs": [],
+    }
+    mock_text_chunk = {
+        "type": "output_text",
+        "text": "Hi there",
+        "logprobs": [logprob_entry],
+    }
+
+    mock_item = MagicMock()
+    mock_item.content = [mock_text_chunk]
+
+    mock_response.output = [mock_item]
+    mock_response.output_text = None
+    mock_response.usage = None
+
+    camel_response = responses_to_camel_response(mock_response)
+
+    assert camel_response.logprobs is not None
+    assert camel_response.logprobs[0][0]["token"] == "Hi"
+
+
 def test_openai_messages_to_camel_with_audio():
     from camel.core.messages import openai_messages_to_camel

test/responses/test_chat_adapter.py
Lines changed: 36 additions & 0 deletions

@@ -14,6 +14,12 @@

 import json

+from openai.types.chat.chat_completion import ChoiceLogprobs
+from openai.types.chat.chat_completion_token_logprob import (
+    ChatCompletionTokenLogprob,
+    TopLogprob,
+)
+
 from camel.responses.adapters.chat_completions import (
     adapt_chat_to_camel_response,
 )
@@ -96,3 +102,33 @@ def test_adapt_tool_calls_if_present():
     )
     tc = cmr.tool_call_requests[0]
     assert tc.id == "call_1" and tc.name == "search" and tc.args == {"q": "x"}
+
+
+def test_adapt_chat_to_camel_response_preserves_logprobs():
+    top_lp = TopLogprob.construct(token="Hi", bytes=None, logprob=-0.1)
+    token_lp = ChatCompletionTokenLogprob.construct(
+        token="Hi", bytes=None, logprob=-0.1, top_logprobs=[top_lp]
+    )
+    logprobs = ChoiceLogprobs.construct(content=[token_lp])
+
+    choice = dict(
+        index=0,
+        message=ChatCompletionMessage.construct(
+            role="assistant", content="Hi", tool_calls=None
+        ),
+        finish_reason="stop",
+        logprobs=logprobs,
+    )
+
+    cc = ChatCompletion.construct(
+        id="chatcmpl-test-003",
+        choices=[choice],
+        created=1730000002,
+        model="gpt-4o-mini",
+        object="chat.completion",
+        usage=None,
+    )
+
+    cmr = adapt_chat_to_camel_response(cc)
+    assert cmr.logprobs is not None
+    assert cmr.logprobs[0].content[0].token == "Hi"
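Assuming a standard pytest setup, both new tests can be exercised in isolation with a keyword filter:

pytest test/models/test_openai_responses_model.py test/responses/test_chat_adapter.py -k logprobs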
