From 46be3c4b896c69832ce32b70dc5b7b7c37e817b1 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 9 Jan 2026 13:11:40 -0500 Subject: [PATCH 01/33] fix(ag-ui): preserve thinking signatures for Anthropic extended thinking Add support for preserving thinking metadata (signature, provider_name, etc.) through AG-UI round-trips, enabling multi-turn conversations with Anthropic's extended thinking models. Fixes #3911 Co-Authored-By: Claude Opus 4.5 --- .../pydantic_ai/ui/ag_ui/_adapter.py | 15 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 20 ++- tests/test_ag_ui.py | 138 ++++++++++++++++++ 3 files changed, 169 insertions(+), 4 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index a8f372a70c..3526b661e7 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -22,6 +22,7 @@ ModelMessage, SystemPromptPart, TextPart, + ThinkingPart, ToolCallPart, ToolReturnPart, UserPromptPart, @@ -228,7 +229,17 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no ) ) - case ActivityMessage(): - pass + case ActivityMessage() as activity_msg: + if activity_msg.activity_type == 'pydantic_ai_thinking': + content = activity_msg.content + builder.add( + ThinkingPart( + content=content.get('content', ''), + id=content.get('id'), + signature=content.get('signature'), + provider_name=content.get('provider_name'), + provider_details=content.get('provider_details'), + ) + ) return builder.messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index f620ca6a61..09dc8ab4c1 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -9,7 +9,7 @@ import json from collections.abc import AsyncIterator, Iterable from dataclasses import 
dataclass, field -from typing import Final +from typing import Any, Final from ..._utils import now_utc from ...messages import ( @@ -171,7 +171,23 @@ async def handle_thinking_end( self._thinking_text = False if not followed_by_thinking: - yield ThinkingEndEvent(type=EventType.THINKING_END) + pydantic_ai_meta: dict[str, Any] = {} + if part.id is not None: + pydantic_ai_meta['id'] = part.id + if part.signature is not None: + pydantic_ai_meta['signature'] = part.signature + if part.provider_name is not None: + pydantic_ai_meta['provider_name'] = part.provider_name + if part.provider_details is not None: + pydantic_ai_meta['provider_details'] = part.provider_details + + raw_event = {'pydantic_ai': pydantic_ai_meta} if pydantic_ai_meta else None + + yield ThinkingEndEvent( + type=EventType.THINKING_END, + raw_event=raw_event, + encryptedContent=part.signature, # pyright: ignore[reportCallIssue] + ) def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: return self._handle_tool_call_start(part) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 949e7a3932..649d7ba604 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -34,6 +34,7 @@ SystemPromptPart, TextPart, TextPartDelta, + ThinkingPart, ToolCallPart, ToolCallPartDelta, ToolReturn, @@ -1109,6 +1110,143 @@ async def stream_function( ) +async def test_thinking_with_signature() -> None: + """Test that ThinkingEndEvent includes metadata (signature, provider_name, etc).""" + + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaThinkingCalls | str]: + yield {0: DeltaThinkingPart(content='Thinking deeply', signature='sig_abc123')} + yield 'Here is my response' + + agent = Agent(model=FunctionModel(stream_function=stream_function)) + + run_input = create_input( + UserMessage(id='msg_1', content='Think about something'), + ) + + events = await run_and_collect_events(agent, run_input) + + assert events 
== snapshot( + [ + { + 'type': 'RUN_STARTED', + 'timestamp': IsInt(), + 'threadId': (thread_id := IsSameStr()), + 'runId': (run_id := IsSameStr()), + }, + {'type': 'THINKING_START', 'timestamp': IsInt()}, + {'type': 'THINKING_TEXT_MESSAGE_START', 'timestamp': IsInt()}, + {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'timestamp': IsInt(), 'delta': 'Thinking deeply'}, + {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, + { + 'type': 'THINKING_END', + 'timestamp': IsInt(), + 'rawEvent': {'pydantic_ai': {'signature': 'sig_abc123', 'provider_name': 'function'}}, + 'encryptedContent': 'sig_abc123', + }, + { + 'type': 'TEXT_MESSAGE_START', + 'timestamp': IsInt(), + 'messageId': (message_id := IsSameStr()), + 'role': 'assistant', + }, + { + 'type': 'TEXT_MESSAGE_CONTENT', + 'timestamp': IsInt(), + 'messageId': message_id, + 'delta': 'Here is my response', + }, + {'type': 'TEXT_MESSAGE_END', 'timestamp': IsInt(), 'messageId': message_id}, + {'type': 'RUN_FINISHED', 'timestamp': IsInt(), 'threadId': thread_id, 'runId': run_id}, + ] + ) + + +def test_activity_message_thinking_roundtrip() -> None: + """Test that ActivityMessage with pydantic_ai_thinking converts to ThinkingPart.""" + messages = AGUIAdapter.load_messages( + [ + ActivityMessage( + id='activity-1', + activity_type='pydantic_ai_thinking', + content={ + 'content': 'Let me think about this...', + 'id': 'thinking-1', + 'signature': 'sig_abc123', + 'provider_name': 'anthropic', + 'provider_details': {'some': 'details'}, + }, + ), + AssistantMessage(id='msg-1', content='Here is my response'), + ] + ) + + assert messages == snapshot( + [ + ModelResponse( + parts=[ + ThinkingPart( + content='Let me think about this...', + id='thinking-1', + signature='sig_abc123', + provider_name='anthropic', + provider_details={'some': 'details'}, + ), + TextPart(content='Here is my response'), + ], + timestamp=IsDatetime(), + ) + ] + ) + + +async def test_thinking_end_event_with_all_metadata() -> None: + """Test that 
ThinkingEndEvent includes all metadata fields (id, signature, provider_name, provider_details).""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE) + + part = ThinkingPart( + content='Thinking content', + id='thinking-123', + signature='sig_xyz', + provider_name='anthropic', + provider_details={'model': 'claude-sonnet-4-5'}, + ) + + events = [e async for e in event_stream.handle_thinking_end(part, followed_by_thinking=False)] + + assert len(events) == 1 + event = events[0] + assert event.type.value == 'THINKING_END' + assert event.raw_event == { + 'pydantic_ai': { + 'id': 'thinking-123', + 'signature': 'sig_xyz', + 'provider_name': 'anthropic', + 'provider_details': {'model': 'claude-sonnet-4-5'}, + } + } + # Extra field for draft spec compatibility + assert getattr(event, 'encryptedContent') == 'sig_xyz' + + +def test_activity_message_other_types_ignored() -> None: + """Test that ActivityMessage with other activity types are ignored.""" + messages = AGUIAdapter.load_messages( + [ + ActivityMessage( + id='activity-1', + activity_type='some_other_activity', + content={'foo': 'bar'}, + ), + AssistantMessage(id='msg-1', content='Response'), + ] + ) + + assert messages == snapshot([ModelResponse(parts=[TextPart(content='Response')], timestamp=IsDatetime())]) + + async def test_tool_local_then_ag_ui() -> None: """Test mixed local and AG-UI tool calls.""" From fcace3f62e83aafeec5943005604db51eb388bc4 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 9 Jan 2026 14:12:35 -0500 Subject: [PATCH 02/33] coverage Co-Authored-By: Claude Opus 4.5 --- .../pydantic_ai/ui/ag_ui/_adapter.py | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 3526b661e7..28e7b2a7ff 100644 --- 
a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -230,16 +230,17 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no ) case ActivityMessage() as activity_msg: - if activity_msg.activity_type == 'pydantic_ai_thinking': - content = activity_msg.content - builder.add( - ThinkingPart( - content=content.get('content', ''), - id=content.get('id'), - signature=content.get('signature'), - provider_name=content.get('provider_name'), - provider_details=content.get('provider_details'), - ) + if activity_msg.activity_type != 'pydantic_ai_thinking': + continue + content = activity_msg.content + builder.add( + ThinkingPart( + content=content.get('content', ''), + id=content.get('id'), + signature=content.get('signature'), + provider_name=content.get('provider_name'), + provider_details=content.get('provider_details'), ) + ) return builder.messages From b97a6bd614b47a88d40fa9c0c2ea6a81d914848c Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 14 Jan 2026 13:15:29 -0500 Subject: [PATCH 03/33] add exhaustive check --- pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 28e7b2a7ff..3e5507c144 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -11,6 +11,8 @@ cast, ) +from typing_extensions import assert_never + from ... 
import ExternalToolset, ToolDefinition from ...messages import ( AudioUrl, @@ -163,8 +165,8 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no else: # pragma: no cover raise ValueError('BinaryInputContent must have either a `url` or `data` field.') user_prompt_content.append(binary_part) - case _: # pragma: no cover - raise ValueError(f'Unsupported user message part type: {type(part)}') + case _: + assert_never(part) if user_prompt_content: # pragma: no branch content_to_add = ( @@ -243,4 +245,7 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no ) ) + case _: # pragma: no cover + raise ValueError(f'Unsupported message type: {type(msg)}') + return builder.messages From d8dff4f8ac4e0901809ca1dae8483d4a1fc9ef9f Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:06:27 -0500 Subject: [PATCH 04/33] small refac --- .../pydantic_ai/ui/ag_ui/_event_stream.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 09dc8ab4c1..e44bd4a7c0 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -171,21 +171,14 @@ async def handle_thinking_end( self._thinking_text = False if not followed_by_thinking: - pydantic_ai_meta: dict[str, Any] = {} - if part.id is not None: - pydantic_ai_meta['id'] = part.id - if part.signature is not None: - pydantic_ai_meta['signature'] = part.signature - if part.provider_name is not None: - pydantic_ai_meta['provider_name'] = part.provider_name - if part.provider_details is not None: - pydantic_ai_meta['provider_details'] = part.provider_details - - raw_event = {'pydantic_ai': pydantic_ai_meta} if pydantic_ai_meta else None + _args = ['id', 'signature', 'provider_name', 'provider_details'] + 
pydantic_ai_meta: dict[str, Any] = { + arg: getattr(part, arg) for arg in _args if getattr(part, arg) is not None + } yield ThinkingEndEvent( type=EventType.THINKING_END, - raw_event=raw_event, + raw_event={'pydantic_ai': pydantic_ai_meta} if pydantic_ai_meta else None, encryptedContent=part.signature, # pyright: ignore[reportCallIssue] ) From d27302f1694ab43413e6e799ae71e71cea78428b Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:08:13 -0500 Subject: [PATCH 05/33] small refac test --- tests/test_ag_ui.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 649d7ba604..59cea03473 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1216,19 +1216,22 @@ async def test_thinking_end_event_with_all_metadata() -> None: events = [e async for e in event_stream.handle_thinking_end(part, followed_by_thinking=False)] - assert len(events) == 1 - event = events[0] - assert event.type.value == 'THINKING_END' - assert event.raw_event == { - 'pydantic_ai': { - 'id': 'thinking-123', - 'signature': 'sig_xyz', - 'provider_name': 'anthropic', - 'provider_details': {'model': 'claude-sonnet-4-5'}, - } - } - # Extra field for draft spec compatibility - assert getattr(event, 'encryptedContent') == 'sig_xyz' + assert events == snapshot( + [ + { + 'type': 'THINKING_END', + 'raw_event': { + 'pydantic_ai': { + 'id': 'thinking-123', + 'signature': 'sig_xyz', + 'provider_name': 'anthropic', + 'provider_details': {'model': 'claude-sonnet-4-5'}, + } + }, + 'encryptedContent': 'sig_xyz', + } + ] + ) def test_activity_message_other_types_ignored() -> None: From 77ba04663d40875e682bd6dc53da262ee2675d8d Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Thu, 15 Jan 2026 20:57:47 -0500 Subject: [PATCH 06/33] fix test --- tests/test_ag_ui.py | 30 ++++++++++++++---------------- 1 file 
changed, 14 insertions(+), 16 deletions(-) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 59cea03473..8eddc0e540 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1216,22 +1216,20 @@ async def test_thinking_end_event_with_all_metadata() -> None: events = [e async for e in event_stream.handle_thinking_end(part, followed_by_thinking=False)] - assert events == snapshot( - [ - { - 'type': 'THINKING_END', - 'raw_event': { - 'pydantic_ai': { - 'id': 'thinking-123', - 'signature': 'sig_xyz', - 'provider_name': 'anthropic', - 'provider_details': {'model': 'claude-sonnet-4-5'}, - } - }, - 'encryptedContent': 'sig_xyz', - } - ] - ) + # Can't use snapshot here: inline_snapshot can't access pydantic extra fields via getattr + assert len(events) == 1 + event = events[0] + assert event.type.value == 'THINKING_END' + assert event.raw_event == { + 'pydantic_ai': { + 'id': 'thinking-123', + 'signature': 'sig_xyz', + 'provider_name': 'anthropic', + 'provider_details': {'model': 'claude-sonnet-4-5'}, + } + } + assert event.__pydantic_extra__ is not None + assert event.__pydantic_extra__['encryptedContent'] == 'sig_xyz' def test_activity_message_other_types_ignored() -> None: From 2956cc38844d5d7055ca09fc24e3d6c59f2cc7b6 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 16 Jan 2026 17:39:53 -0500 Subject: [PATCH 07/33] address comments and add sources --- .../pydantic_ai/ui/ag_ui/_adapter.py | 2 + .../pydantic_ai/ui/ag_ui/_event_stream.py | 28 +++++--- tests/test_ag_ui.py | 65 +++++++++++++------ 3 files changed, 67 insertions(+), 28 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 3e5507c144..9a1eb5ed40 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -232,6 +232,8 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no ) 
case ActivityMessage() as activity_msg: + # Round-trip from ActivitySnapshotEvent emitted by _event_stream.py. + # See: https://docs.ag-ui.com/concepts/messages#activitymessage if activity_msg.activity_type != 'pydantic_ai_thinking': continue content = activity_msg.content diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index e44bd4a7c0..29b74de350 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -10,6 +10,7 @@ from collections.abc import AsyncIterator, Iterable from dataclasses import dataclass, field from typing import Any, Final +from uuid import uuid4 from ..._utils import now_utc from ...messages import ( @@ -31,6 +32,7 @@ try: from ag_ui.core import ( + ActivitySnapshotEvent, BaseEvent, EventType, RunAgentInput, @@ -171,15 +173,23 @@ async def handle_thinking_end( self._thinking_text = False if not followed_by_thinking: - _args = ['id', 'signature', 'provider_name', 'provider_details'] - pydantic_ai_meta: dict[str, Any] = { - arg: getattr(part, arg) for arg in _args if getattr(part, arg) is not None - } - - yield ThinkingEndEvent( - type=EventType.THINKING_END, - raw_event={'pydantic_ai': pydantic_ai_meta} if pydantic_ai_meta else None, - encryptedContent=part.signature, # pyright: ignore[reportCallIssue] + yield ThinkingEndEvent(type=EventType.THINKING_END) + + # Emit ActivitySnapshotEvent to preserve thinking metadata for round-trip. + # Frontends receive this and send it back as ActivityMessage, which _adapter.py + # converts back to ThinkingPart. This preserves signature/id needed by providers + # like Anthropic for extended thinking. 
+ # See: https://docs.ag-ui.com/concepts/events#activity-events + content: dict[str, Any] = {'content': part.content} + for field in ('id', 'signature', 'provider_name', 'provider_details'): + value = getattr(part, field) + if value is not None: + content[field] = value + + yield ActivitySnapshotEvent( + activity_type='pydantic_ai_thinking', + message_id=part.id or f'thinking-{uuid4().hex[:8]}', + content=content, ) def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 8eddc0e540..e2543267c2 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -63,6 +63,7 @@ with try_import() as imports_successful: from ag_ui.core import ( ActivityMessage, + ActivitySnapshotEvent, AssistantMessage, BaseEvent, BinaryInputContent, @@ -75,6 +76,7 @@ StateSnapshotEvent, SystemMessage, TextInputContent, + ThinkingEndEvent, Tool, ToolCall, ToolMessage, @@ -1061,6 +1063,14 @@ async def stream_function( }, {'type': 'THINKING_START', 'timestamp': IsInt()}, {'type': 'THINKING_END', 'timestamp': IsInt()}, + { + 'type': 'ACTIVITY_SNAPSHOT', + 'timestamp': IsInt(), + 'activityType': 'pydantic_ai_thinking', + 'messageId': IsStr(), + 'content': {'content': ''}, + 'replace': True, + }, { 'type': 'TEXT_MESSAGE_START', 'timestamp': IsInt(), @@ -1100,6 +1110,14 @@ async def stream_function( }, {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, {'type': 'THINKING_END', 'timestamp': IsInt()}, + { + 'type': 'ACTIVITY_SNAPSHOT', + 'timestamp': IsInt(), + 'activityType': 'pydantic_ai_thinking', + 'messageId': IsStr(), + 'content': {'content': 'Thinking about the universe'}, + 'replace': True, + }, { 'type': 'RUN_FINISHED', 'timestamp': IsInt(), @@ -1111,7 +1129,7 @@ async def stream_function( async def test_thinking_with_signature() -> None: - """Test that ThinkingEndEvent includes metadata (signature, provider_name, etc).""" + """Test that ActivitySnapshotEvent is emitted 
after ThinkingEndEvent with metadata.""" async def stream_function( messages: list[ModelMessage], agent_info: AgentInfo @@ -1139,11 +1157,18 @@ async def stream_function( {'type': 'THINKING_TEXT_MESSAGE_START', 'timestamp': IsInt()}, {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'timestamp': IsInt(), 'delta': 'Thinking deeply'}, {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, + {'type': 'THINKING_END', 'timestamp': IsInt()}, { - 'type': 'THINKING_END', + 'type': 'ACTIVITY_SNAPSHOT', 'timestamp': IsInt(), - 'rawEvent': {'pydantic_ai': {'signature': 'sig_abc123', 'provider_name': 'function'}}, - 'encryptedContent': 'sig_abc123', + 'activityType': 'pydantic_ai_thinking', + 'messageId': IsStr(), + 'content': { + 'content': 'Thinking deeply', + 'signature': 'sig_abc123', + 'provider_name': 'function', + }, + 'replace': True, }, { 'type': 'TEXT_MESSAGE_START', @@ -1202,7 +1227,7 @@ def test_activity_message_thinking_roundtrip() -> None: async def test_thinking_end_event_with_all_metadata() -> None: - """Test that ThinkingEndEvent includes all metadata fields (id, signature, provider_name, provider_details).""" + """Test that ActivitySnapshotEvent includes all metadata fields (id, signature, provider_name, provider_details).""" run_input = create_input(UserMessage(id='msg_1', content='test')) event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE) @@ -1216,20 +1241,22 @@ async def test_thinking_end_event_with_all_metadata() -> None: events = [e async for e in event_stream.handle_thinking_end(part, followed_by_thinking=False)] - # Can't use snapshot here: inline_snapshot can't access pydantic extra fields via getattr - assert len(events) == 1 - event = events[0] - assert event.type.value == 'THINKING_END' - assert event.raw_event == { - 'pydantic_ai': { - 'id': 'thinking-123', - 'signature': 'sig_xyz', - 'provider_name': 'anthropic', - 'provider_details': {'model': 'claude-sonnet-4-5'}, - } - } - assert event.__pydantic_extra__ is not None - assert 
event.__pydantic_extra__['encryptedContent'] == 'sig_xyz' + assert events == snapshot( + [ + ThinkingEndEvent(), + ActivitySnapshotEvent( + message_id='thinking-123', + activity_type='pydantic_ai_thinking', + content={ + 'content': 'Thinking content', + 'id': 'thinking-123', + 'signature': 'sig_xyz', + 'provider_name': 'anthropic', + 'provider_details': {'model': 'claude-sonnet-4-5'}, + }, + ), + ] + ) def test_activity_message_other_types_ignored() -> None: From ae791a4a5bcfcd0b10b07360c2d6f53f86a33747 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 23 Jan 2026 14:02:14 -0500 Subject: [PATCH 08/33] full roundtrip + multimodal and tool return support --- .../pydantic_ai/ui/ag_ui/_adapter.py | 219 +++++++++++++++++- tests/test_ag_ui.py | 182 +++++++++++++++ 2 files changed, 391 insertions(+), 10 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 9a1eb5ed40..7ed571a4c0 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -2,6 +2,7 @@ from __future__ import annotations +import uuid from base64 import b64decode from collections.abc import Mapping, Sequence from functools import cached_property @@ -19,9 +20,14 @@ BinaryContent, BuiltinToolCallPart, BuiltinToolReturnPart, + CachePoint, DocumentUrl, + FilePart, ImageUrl, ModelMessage, + ModelRequest, + ModelResponse, + RetryPromptPart, SystemPromptPart, TextPart, ThinkingPart, @@ -41,11 +47,13 @@ BaseEvent, BinaryInputContent, DeveloperMessage, + FunctionCall, Message, RunAgentInput, SystemMessage, TextInputContent, Tool as AGUITool, + ToolCall, ToolMessage, UserMessage, ) @@ -93,6 +101,27 @@ def label(self) -> str: return 'the AG-UI frontend tools' # pragma: no cover +def _new_message_id() -> str: + """Generate a new unique message ID.""" + return str(uuid.uuid4()) + + +def _user_content_to_input( + item: str | 
ImageUrl | VideoUrl | AudioUrl | DocumentUrl | BinaryContent | CachePoint, +) -> TextInputContent | BinaryInputContent | None: + """Convert a user content item to AG-UI input content.""" + if isinstance(item, str): + return TextInputContent(type='text', text=item) + elif isinstance(item, (ImageUrl, VideoUrl, AudioUrl, DocumentUrl)): + return BinaryInputContent(type='binary', url=item.url, mime_type=item.media_type or '') + elif isinstance(item, BinaryContent): + return BinaryInputContent(type='binary', data=item.base64, mime_type=item.media_type) + elif isinstance(item, CachePoint): + return None + else: + assert_never(item) + + class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, OutputDataT]): """UI adapter for the Agent-User Interaction (AG-UI) protocol.""" @@ -234,20 +263,190 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no case ActivityMessage() as activity_msg: # Round-trip from ActivitySnapshotEvent emitted by _event_stream.py. 
# See: https://docs.ag-ui.com/concepts/messages#activitymessage - if activity_msg.activity_type != 'pydantic_ai_thinking': - continue content = activity_msg.content - builder.add( - ThinkingPart( - content=content.get('content', ''), - id=content.get('id'), - signature=content.get('signature'), - provider_name=content.get('provider_name'), - provider_details=content.get('provider_details'), + if activity_msg.activity_type == 'pydantic_ai_thinking': + builder.add( + ThinkingPart( + content=content.get('content', ''), + id=content.get('id'), + signature=content.get('signature'), + provider_name=content.get('provider_name'), + provider_details=content.get('provider_details'), + ) + ) + elif activity_msg.activity_type == 'pydantic_ai_file': + builder.add( + FilePart( + content=BinaryContent.from_data_uri(content.get('url', '')), + id=content.get('id'), + provider_name=content.get('provider_name'), + provider_details=content.get('provider_details'), + ) ) - ) case _: # pragma: no cover raise ValueError(f'Unsupported message type: {type(msg)}') return builder.messages + + @staticmethod + def _dump_request_parts(msg: ModelRequest) -> tuple[list[Message], dict[str, str]]: + """Convert a `ModelRequest` into AG-UI messages. + + Returns: + A tuple of (messages, tool_call_id_to_name mapping). 
+ """ + result: list[Message] = [] + tool_call_names: dict[str, str] = {} + system_content: list[str] = [] + user_content: list[TextInputContent | BinaryInputContent] = [] + + for part in msg.parts: + if isinstance(part, SystemPromptPart): + system_content.append(part.content) + elif isinstance(part, UserPromptPart): + if isinstance(part.content, str): + user_content.append(TextInputContent(type='text', text=part.content)) + else: + for item in part.content: + converted = _user_content_to_input(item) + if converted is not None: + user_content.append(converted) + elif isinstance(part, ToolReturnPart): + tool_call_names[part.tool_call_id] = part.tool_name + result.append( + ToolMessage( + id=_new_message_id(), + content=part.model_response_str(), + tool_call_id=part.tool_call_id, + ) + ) + elif isinstance(part, RetryPromptPart): + if part.tool_name: + tool_call_names[part.tool_call_id] = part.tool_name + result.append( + ToolMessage( + id=_new_message_id(), + content=part.model_response(), + tool_call_id=part.tool_call_id, + error=part.model_response(), + ) + ) + else: + user_content.append(TextInputContent(type='text', text=part.model_response())) + else: + assert_never(part) + + messages: list[Message] = [] + if system_content: + messages.append(SystemMessage(id=_new_message_id(), content='\n'.join(system_content))) + if user_content: + # Simplify to plain string if only single text item + if len(user_content) == 1 and isinstance(user_content[0], TextInputContent): + messages.append(UserMessage(id=_new_message_id(), content=user_content[0].text)) + else: + messages.append(UserMessage(id=_new_message_id(), content=user_content)) + messages.extend(result) + return messages, tool_call_names + + @staticmethod + def _dump_response_parts(msg: ModelResponse) -> list[Message]: + """Convert a `ModelResponse` into AG-UI messages.""" + result: list[Message] = [] + text_content: list[str] = [] + tool_calls_list: list[ToolCall] = [] + builtin_tool_returns: 
list[BuiltinToolReturnPart] = [] + + for part in msg.parts: + if isinstance(part, TextPart): + text_content.append(part.content) + elif isinstance(part, ThinkingPart): + thinking_content: dict[str, Any] = {'content': part.content} + for attr in ['id', 'signature', 'provider_name', 'provider_details']: + if getattr(part, attr) is not None: + thinking_content[attr] = getattr(part, attr) + result.append( + ActivityMessage( + id=_new_message_id(), + activity_type='pydantic_ai_thinking', + content=thinking_content, + ) + ) + elif isinstance(part, ToolCallPart): + tool_calls_list.append( + ToolCall( + id=part.tool_call_id, + function=FunctionCall(name=part.tool_name, arguments=part.args_as_json_str()), + ) + ) + elif isinstance(part, BuiltinToolCallPart): + prefixed_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', part.tool_call_id]) + tool_calls_list.append( + ToolCall( + id=prefixed_id, + function=FunctionCall(name=part.tool_name, arguments=part.args_as_json_str()), + ) + ) + elif isinstance(part, BuiltinToolReturnPart): + builtin_tool_returns.append(part) + elif isinstance(part, FilePart): + file_content: dict[str, Any] = { + 'url': part.content.data_uri, + 'media_type': part.content.media_type, + } + for attr in ['id', 'provider_name', 'provider_details']: + if getattr(part, attr) is not None: + file_content[attr] = getattr(part, attr) + result.append( + ActivityMessage( + id=_new_message_id(), + activity_type='pydantic_ai_file', + content=file_content, + ) + ) + else: + assert_never(part) + + if text_content or tool_calls_list: + result.append( + AssistantMessage( + id=_new_message_id(), + content='\n'.join(text_content) if text_content else None, + tool_calls=tool_calls_list if tool_calls_list else None, + ) + ) + + for part in builtin_tool_returns: + prefixed_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', part.tool_call_id]) + result.append( + ToolMessage( + id=_new_message_id(), + content=part.model_response_str(), + 
tool_call_id=prefixed_id, + ) + ) + + return result + + @classmethod + def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[Message]: + """Transform Pydantic AI messages into AG-UI messages. + + Args: + messages: A sequence of ModelMessage objects to convert. + + Returns: + A list of AG-UI Message objects. + """ + result: list[Message] = [] + + for msg in messages: + if isinstance(msg, ModelRequest): + request_messages, _ = cls._dump_request_parts(msg) + result.extend(request_messages) + elif isinstance(msg, ModelResponse): + result.extend(cls._dump_response_parts(msg)) + else: + assert_never(msg) + + return result diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index e2543267c2..0fcf12f339 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -19,9 +19,11 @@ from pydantic_ai import ( AudioUrl, BinaryContent, + BinaryImage, BuiltinToolCallPart, BuiltinToolReturnPart, DocumentUrl, + FilePart, FunctionToolCallEvent, FunctionToolResultEvent, ImageUrl, @@ -1275,6 +1277,186 @@ def test_activity_message_other_types_ignored() -> None: assert messages == snapshot([ModelResponse(parts=[TextPart(content='Response')], timestamp=IsDatetime())]) +def _sync_part_timestamps(original_part: Any, new_part: Any) -> None: + """Sync timestamp attribute if both parts have it.""" + if hasattr(new_part, 'timestamp') and hasattr(original_part, 'timestamp'): + object.__setattr__(new_part, 'timestamp', original_part.timestamp) + + +def _sync_timestamps(original: list[ModelMessage], reloaded: list[ModelMessage]) -> None: + """Sync timestamps between original and reloaded messages for comparison.""" + for o, n in zip(original, reloaded): + if isinstance(n, ModelResponse) and isinstance(o, ModelResponse): + n.timestamp = o.timestamp + for op, np in zip(o.parts, n.parts): + _sync_part_timestamps(op, np) + elif isinstance(n, ModelRequest) and isinstance(o, ModelRequest): + for op, np in zip(o.parts, n.parts): + _sync_part_timestamps(op, np) + + +def 
test_dump_load_roundtrip_basic() -> None: + """Test that load_messages(dump_messages(msgs)) preserves basic messages.""" + original: list[ModelMessage] = [ + ModelRequest(parts=[SystemPromptPart(content='You are helpful'), UserPromptPart(content='Hello')]), + ModelResponse(parts=[TextPart(content='Hi!')]), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == original + + +def test_dump_load_roundtrip_thinking() -> None: + """Test full round-trip for thinking parts with all metadata.""" + original: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Think about this')]), + ModelResponse( + parts=[ + ThinkingPart( + content='Deep thoughts...', + id='think-001', + signature='sig_xyz', + provider_name='anthropic', + provider_details={'model': 'claude-sonnet-4-5'}, + ), + TextPart(content='Conclusion'), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == original + + +def test_dump_load_roundtrip_tools() -> None: + """Test full round-trip for tool calls and returns.""" + original: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Call tool')]), + ModelResponse(parts=[ToolCallPart(tool_name='my_tool', tool_call_id='call_abc', args='{"x": 1}')]), + ModelRequest(parts=[ToolReturnPart(tool_name='my_tool', tool_call_id='call_abc', content='result')]), + ModelResponse(parts=[TextPart(content='Done')]), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == original + + +def test_dump_load_roundtrip_multiple_thinking_parts() -> None: + """Test round-trip preserves multiple ThinkingParts with their metadata.""" + original: list[ModelMessage] = [ + 
ModelRequest(parts=[UserPromptPart(content='Think hard')]), + ModelResponse( + parts=[ + ThinkingPart(content='First thought', id='think-1', signature='sig_1'), + ThinkingPart(content='Second thought', id='think-2', signature='sig_2'), + TextPart(content='Final answer'), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == original + + +def test_dump_load_roundtrip_binary_content() -> None: + """Test round-trip for binary content in user prompts (images, documents, etc.).""" + original: list[ModelMessage] = [ + ModelRequest( + parts=[ + UserPromptPart( + content=[ + 'Describe this image', + ImageUrl(url='https://example.com/image.png', media_type='image/png'), + BinaryContent(data=b'raw image data', media_type='image/jpeg'), + ] + ), + ] + ), + ModelResponse(parts=[TextPart(content='I see an image.')]), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == original + + +def test_dump_load_roundtrip_file_part() -> None: + """Test round-trip for FilePart in model responses. + + Note: BinaryImage is used because from_data_uri() returns BinaryImage for image/* media types. 
+ """ + file_data = b'generated file content' + original: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Generate an image')]), + ModelResponse( + parts=[ + FilePart( + content=BinaryImage(data=file_data, media_type='image/png'), + id='file-001', + provider_name='openai', + provider_details={'model': 'gpt-image'}, + ), + TextPart(content='Here is your generated image.'), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == original + + +def test_dump_load_roundtrip_builtin_tool_return() -> None: + """Test round-trip for builtin tool calls with their return values. + + Note: The round-trip reorders parts within ModelResponse because AG-UI's AssistantMessage + has separate content and tool_calls fields. TextPart comes first (from content), then + BuiltinToolCallPart (from tool_calls), then BuiltinToolReturnPart (from subsequent ToolMessage). + """ + original: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Search for info')]), + ModelResponse( + parts=[ + TextPart(content='Based on the search...'), + BuiltinToolCallPart( + tool_name='web_search', + tool_call_id='call_123', + args='{"query": "test"}', + provider_name='anthropic', + ), + BuiltinToolReturnPart( + tool_name='web_search', + tool_call_id='call_123', + content='Search results here', + provider_name='anthropic', + ), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == original + + async def test_tool_local_then_ag_ui() -> None: """Test mixed local and AG-UI tool calls.""" From a713e0fcd6f35059b6a68c14e9a4861a4865151d Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 23 Jan 2026 14:18:53 -0500 Subject: [PATCH 09/33] coverage --- tests/test_ag_ui.py | 111 
++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 0fcf12f339..981582042e 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -22,6 +22,7 @@ BinaryImage, BuiltinToolCallPart, BuiltinToolReturnPart, + CachePoint, DocumentUrl, FilePart, FunctionToolCallEvent, @@ -33,6 +34,7 @@ PartDeltaEvent, PartEndEvent, PartStartEvent, + RetryPromptPart, SystemPromptPart, TextPart, TextPartDelta, @@ -1457,6 +1459,115 @@ def test_dump_load_roundtrip_builtin_tool_return() -> None: assert reloaded == original +def test_dump_load_roundtrip_cache_point() -> None: + """Test that CachePoint is filtered out during round-trip (it's metadata only).""" + original: list[ModelMessage] = [ + ModelRequest( + parts=[ + UserPromptPart(content=['Hello', CachePoint(), 'world']), + ] + ), + ModelResponse(parts=[TextPart(content='Hi!')]), + ] + expected: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content=['Hello', 'world'])]), + ModelResponse(parts=[TextPart(content='Hi!')]), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(expected, reloaded) + + assert reloaded == expected + + +def test_dump_load_roundtrip_retry_prompt_with_tool() -> None: + """Test round-trip for RetryPromptPart with tool_name (converted to ToolMessage with error).""" + original: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Call tool')]), + ModelResponse(parts=[ToolCallPart(tool_name='my_tool', tool_call_id='call_1', args='{}')]), + ModelRequest( + parts=[ + RetryPromptPart( + tool_name='my_tool', + tool_call_id='call_1', + content='Invalid args', + ) + ] + ), + ModelResponse(parts=[TextPart(content='OK')]), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + # RetryPromptPart becomes ToolReturnPart on reload (same 
tool_call_id mapping) + assert len(reloaded) == 4 + assert isinstance(reloaded[2], ModelRequest) + retry_part = reloaded[2].parts[0] + assert isinstance(retry_part, ToolReturnPart) + assert retry_part.tool_name == 'my_tool' + assert retry_part.tool_call_id == 'call_1' + + +def test_dump_load_roundtrip_retry_prompt_without_tool() -> None: + """Test round-trip for RetryPromptPart without tool_name (converted to UserMessage).""" + original: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Do something')]), + ModelResponse(parts=[TextPart(content='Done')]), + ModelRequest(parts=[RetryPromptPart(content='Please try again')]), + ModelResponse(parts=[TextPart(content='OK')]), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + # RetryPromptPart without tool becomes UserPromptPart on reload + # Content is formatted by RetryPromptPart.model_response() + assert len(reloaded) == 4 + assert isinstance(reloaded[2], ModelRequest) + retry_part = reloaded[2].parts[0] + assert isinstance(retry_part, UserPromptPart) + assert 'Please try again' in str(retry_part.content) + + +def test_dump_load_roundtrip_file_part_minimal() -> None: + """Test round-trip for FilePart without optional attributes (id, provider_name, provider_details).""" + file_data = b'minimal file' + original: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Generate')]), + ModelResponse( + parts=[ + FilePart(content=BinaryImage(data=file_data, media_type='image/png')), + TextPart(content='Done'), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == original + + +def test_dump_load_roundtrip_file_part_only() -> None: + """Test round-trip for response with only FilePart (no text, no tool calls).""" + file_data = b'only file' + original: list[ModelMessage] = [ 
+ ModelRequest(parts=[UserPromptPart(content='Generate image only')]), + ModelResponse(parts=[FilePart(content=BinaryImage(data=file_data, media_type='image/png'))]), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == original + + async def test_tool_local_then_ag_ui() -> None: """Test mixed local and AG-UI tool calls.""" From 6aa6821d70ad45a3f0cff68c67f4e93ee2d2a1bc Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 23 Jan 2026 14:56:22 -0500 Subject: [PATCH 10/33] coverage --- tests/test_ag_ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 981582042e..ad8b38eaaa 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1287,7 +1287,7 @@ def _sync_part_timestamps(original_part: Any, new_part: Any) -> None: def _sync_timestamps(original: list[ModelMessage], reloaded: list[ModelMessage]) -> None: """Sync timestamps between original and reloaded messages for comparison.""" - for o, n in zip(original, reloaded): + for o, n in zip(original, reloaded): # pragma: no branch if isinstance(n, ModelResponse) and isinstance(o, ModelResponse): n.timestamp = o.timestamp for op, np in zip(o.parts, n.parts): From 58bea1f70e07d4ae5ada966774e7e665be9f5d9b Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 23 Jan 2026 15:56:48 -0500 Subject: [PATCH 11/33] coverage --- tests/test_ag_ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index ad8b38eaaa..aa8ee76a1c 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1287,12 +1287,12 @@ def _sync_part_timestamps(original_part: Any, new_part: Any) -> None: def _sync_timestamps(original: list[ModelMessage], reloaded: list[ModelMessage]) -> None: """Sync timestamps between
original and reloaded messages for comparison.""" - for o, n in zip(original, reloaded): # pragma: no branch + for o, n in zip(original, reloaded): if isinstance(n, ModelResponse) and isinstance(o, ModelResponse): n.timestamp = o.timestamp for op, np in zip(o.parts, n.parts): _sync_part_timestamps(op, np) - elif isinstance(n, ModelRequest) and isinstance(o, ModelRequest): + elif isinstance(n, ModelRequest) and isinstance(o, ModelRequest): # pragma: no branch for op, np in zip(o.parts, n.parts): _sync_part_timestamps(op, np) From 2d5b5d923f3d6bc8b72e92b165c3e23d879785e5 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 27 Feb 2026 11:15:48 -0500 Subject: [PATCH 12/33] bump agui --- .github/workflows/ci.yml | 8 +- Makefile | 20 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 52 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 72 +-- pydantic_ai_slim/pyproject.toml | 12 +- tests/test_ag_ui.py | 168 ++++--- uv.lock | 466 +++++++++++------- 7 files changed, 480 insertions(+), 318 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0f29afeaff..e761bb6ba7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,7 +30,7 @@ jobs: cache-suffix: lint - name: Install dependencies - run: uv sync --all-extras --all-packages --group lint + run: uv sync --all-extras --no-extra outlines-vllm-offline --all-packages --group lint - uses: pre-commit/action@v3.0.0 with: @@ -113,7 +113,7 @@ jobs: - name: standard command: "" - name: all-extras - command: "--all-extras" + command: "--all-extras --no-extra outlines-vllm-offline" env: CI: true COVERAGE_PROCESS_START: ./pyproject.toml @@ -194,7 +194,7 @@ jobs: - run: unset UV_FROZEN - - run: uv run --all-extras --resolution lowest-direct coverage run -m pytest --durations=100 -n auto --dist=loadgroup + - run: uv run --all-extras --no-extra outlines-vllm-offline --resolution lowest-direct coverage run -m pytest --durations=100 -n auto 
--dist=loadgroup env: COVERAGE_FILE: .coverage/.coverage.${{matrix.python-version}}-lowest-versions @@ -232,7 +232,7 @@ jobs: restore-keys: | hf-${{ runner.os }}- - - run: uv run --all-extras python tests/import_examples.py + - run: uv run --all-extras --no-extra outlines-vllm-offline python tests/import_examples.py coverage: runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index 067ad11dec..51fa6627c8 100644 --- a/Makefile +++ b/Makefile @@ -10,19 +10,19 @@ .PHONY: install install: .uv .pre-commit ## Install the package, dependencies, and pre-commit for local development - uv sync --frozen --all-extras --all-packages --group lint --group docs + uv sync --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs pre-commit install --install-hooks .PHONY: install-all-python install-all-python: ## Install and synchronize an interpreter for every python version - UV_PROJECT_ENVIRONMENT=.venv310 uv sync --python 3.10 --frozen --all-extras --all-packages --group lint --group docs - UV_PROJECT_ENVIRONMENT=.venv311 uv sync --python 3.11 --frozen --all-extras --all-packages --group lint --group docs - UV_PROJECT_ENVIRONMENT=.venv312 uv sync --python 3.12 --frozen --all-extras --all-packages --group lint --group docs - UV_PROJECT_ENVIRONMENT=.venv313 uv sync --python 3.13 --frozen --all-extras --all-packages --group lint --group docs + UV_PROJECT_ENVIRONMENT=.venv310 uv sync --python 3.10 --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs + UV_PROJECT_ENVIRONMENT=.venv311 uv sync --python 3.11 --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs + UV_PROJECT_ENVIRONMENT=.venv312 uv sync --python 3.12 --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs + UV_PROJECT_ENVIRONMENT=.venv313 uv sync --python 3.13 --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs 
.PHONY: sync sync: .uv ## Update local packages and uv.lock - uv sync --all-extras --all-packages --group lint --group docs + uv sync --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs .PHONY: format format: ## Format the code @@ -57,10 +57,10 @@ test: ## Run tests without coverage (fast, for local dev) .PHONY: test-all-python test-all-python: ## Run tests on Python 3.10 to 3.13 - COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv310 uv run --python 3.10 --all-extras --all-packages coverage run -p -m pytest - COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv311 uv run --python 3.11 --all-extras --all-packages coverage run -p -m pytest - COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv312 uv run --python 3.12 --all-extras --all-packages coverage run -p -m pytest - COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv313 uv run --python 3.13 --all-extras --all-packages coverage run -p -m pytest + COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv310 uv run --python 3.10 --all-extras --no-extra outlines-vllm-offline --all-packages coverage run -p -m pytest + COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv311 uv run --python 3.11 --all-extras --no-extra outlines-vllm-offline --all-packages coverage run -p -m pytest + COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv312 uv run --python 3.12 --all-extras --no-extra outlines-vllm-offline --all-packages coverage run -p -m pytest + COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv313 uv run --python 3.13 --all-extras --no-extra outlines-vllm-offline --all-packages coverage run -p -m pytest @uv run coverage combine @uv run coverage report diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 7ed571a4c0..88401d2615 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -2,6 +2,7 @@ from __future__ import annotations +import json import uuid from base64 import b64decode from collections.abc import Mapping, Sequence @@ -49,6 +50,7 @@ 
DeveloperMessage, FunctionCall, Message, + ReasoningMessage, RunAgentInput, SystemMessage, TextInputContent, @@ -260,21 +262,23 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no ) ) + case ReasoningMessage() as reasoning_msg: + metadata: dict[str, Any] = ( + json.loads(reasoning_msg.encrypted_value) if reasoning_msg.encrypted_value else {} + ) + builder.add( + ThinkingPart( + content=reasoning_msg.content, + id=metadata.get('id'), + signature=metadata.get('signature'), + provider_name=metadata.get('provider_name'), + provider_details=metadata.get('provider_details'), + ) + ) + case ActivityMessage() as activity_msg: - # Round-trip from ActivitySnapshotEvent emitted by _event_stream.py. - # See: https://docs.ag-ui.com/concepts/messages#activitymessage content = activity_msg.content - if activity_msg.activity_type == 'pydantic_ai_thinking': - builder.add( - ThinkingPart( - content=content.get('content', ''), - id=content.get('id'), - signature=content.get('signature'), - provider_name=content.get('provider_name'), - provider_details=content.get('provider_details'), - ) - ) - elif activity_msg.activity_type == 'pydantic_ai_file': + if activity_msg.activity_type == 'pydantic_ai_file': builder.add( FilePart( content=BinaryContent.from_data_uri(content.get('url', '')), @@ -284,8 +288,8 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no ) ) - case _: # pragma: no cover - raise ValueError(f'Unsupported message type: {type(msg)}') + case _: + assert_never(msg) return builder.messages @@ -361,15 +365,15 @@ def _dump_response_parts(msg: ModelResponse) -> list[Message]: if isinstance(part, TextPart): text_content.append(part.content) elif isinstance(part, ThinkingPart): - thinking_content: dict[str, Any] = {'content': part.content} - for attr in ['id', 'signature', 'provider_name', 'provider_details']: - if getattr(part, attr) is not None: - thinking_content[attr] = getattr(part, attr) + encrypted: dict[str, Any] 
= {} + for attr in ('id', 'signature', 'provider_name', 'provider_details'): + if (value := getattr(part, attr)) is not None: + encrypted[attr] = value result.append( - ActivityMessage( + ReasoningMessage( id=_new_message_id(), - activity_type='pydantic_ai_thinking', - content=thinking_content, + content=part.content, + encrypted_value=json.dumps(encrypted) if encrypted else None, ) ) elif isinstance(part, ToolCallPart): @@ -395,8 +399,8 @@ def _dump_response_parts(msg: ModelResponse) -> list[Message]: 'media_type': part.content.media_type, } for attr in ['id', 'provider_name', 'provider_details']: - if getattr(part, attr) is not None: - file_content[attr] = getattr(part, attr) + if (value := getattr(part, attr)) is not None: + file_content[attr] = value result.append( ActivityMessage( id=_new_message_id(), diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 8fe4c5714c..cd94621f6f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -32,9 +32,14 @@ try: from ag_ui.core import ( - ActivitySnapshotEvent, BaseEvent, EventType, + ReasoningEncryptedValueEvent, + ReasoningEndEvent, + ReasoningMessageContentEvent, + ReasoningMessageEndEvent, + ReasoningMessageStartEvent, + ReasoningStartEvent, RunAgentInput, RunErrorEvent, RunFinishedEvent, @@ -42,11 +47,6 @@ TextMessageContentEvent, TextMessageEndEvent, TextMessageStartEvent, - ThinkingEndEvent, - ThinkingStartEvent, - ThinkingTextMessageContentEvent, - ThinkingTextMessageEndEvent, - ThinkingTextMessageStartEvent, ToolCallArgsEvent, ToolCallEndEvent, ToolCallResultEvent, @@ -74,7 +74,8 @@ class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): """UI event stream transformer for the Agent-User Interaction (AG-UI) protocol.""" - _thinking_text: bool = False + _reasoning_message_id: str | None = None + _reasoning_text: bool = False 
_builtin_tool_call_ids: dict[str, str] = field(default_factory=dict[str, str]) _error: bool = False @@ -148,49 +149,48 @@ async def handle_thinking_start( self, part: ThinkingPart, follows_thinking: bool = False ) -> AsyncIterator[BaseEvent]: if not follows_thinking: - yield ThinkingStartEvent(type=EventType.THINKING_START) + self._reasoning_message_id = str(uuid4()) + yield ReasoningStartEvent(message_id=self._reasoning_message_id) if part.content: - yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) - self._thinking_text = True + yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id or '', role='assistant') + yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id or '', delta=part.content) + self._reasoning_text = True async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: if not delta.content_delta: return # pragma: no cover - if not self._thinking_text: - yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - self._thinking_text = True + if not self._reasoning_text: + yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id or '', role='assistant') + self._reasoning_text = True - yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta) + yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id or '', delta=delta.content_delta) async def handle_thinking_end( self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[BaseEvent]: - if self._thinking_text: - yield ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) - self._thinking_text = False + message_id = self._reasoning_message_id or '' + + if self._reasoning_text: + yield ReasoningMessageEndEvent(message_id=message_id) + self._reasoning_text = False if not 
followed_by_thinking: - yield ThinkingEndEvent(type=EventType.THINKING_END) - - # Emit ActivitySnapshotEvent to preserve thinking metadata for round-trip. - # Frontends receive this and send it back as ActivityMessage, which _adapter.py - # converts back to ThinkingPart. This preserves signature/id needed by providers - # like Anthropic for extended thinking. - # See: https://docs.ag-ui.com/concepts/events#activity-events - content: dict[str, Any] = {'content': part.content} - for field in ('id', 'signature', 'provider_name', 'provider_details'): - value = getattr(part, field) - if value is not None: - content[field] = value - - yield ActivitySnapshotEvent( - activity_type='pydantic_ai_thinking', - message_id=part.id or f'thinking-{uuid4().hex[:8]}', - content=content, - ) + encrypted: dict[str, Any] = {} + for attr in ('id', 'signature', 'provider_name', 'provider_details'): + if (value := getattr(part, attr)) is not None: + encrypted[attr] = value + + if encrypted: + yield ReasoningEncryptedValueEvent( + subtype='message', + entity_id=message_id, + encrypted_value=json.dumps(encrypted), + ) + + yield ReasoningEndEvent(message_id=message_id) + self._reasoning_message_id = None def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: return self._handle_tool_call_start(part) diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml index 3483ecd22e..2d59ed4327 100644 --- a/pydantic_ai_slim/pyproject.toml +++ b/pydantic_ai_slim/pyproject.toml @@ -118,7 +118,7 @@ ui = ["starlette>=0.45.3"] # A2A a2a = ["fasta2a>=0.4.1"] # AG-UI -ag-ui = ["ag-ui-protocol>=0.1.10", "starlette>=0.45.3"] +ag-ui = ["ag-ui-protocol>=0.1.13", "starlette>=0.45.3"] # Web web = ["starlette>=0.45.3", "httpx>=0.27.0", "uvicorn>=0.38.0"] # Retries @@ -139,5 +139,15 @@ pai = "pydantic_ai._cli:cli_exit" # TODO remove this when clai has been out for [tool.hatch.build.targets.wheel] packages = ["pydantic_ai"] +# Remove when 
https://github.com/vllm-project/vllm/pull/30566 is merged. +# Check compatibility with `uv lock --upgrade-package vllm --upgrade-package transformers`. +[tool.uv] +conflicts = [ + [ + { extra = "huggingface" }, + { extra = "outlines-vllm-offline" }, + ], +] + [tool.uv.sources] pydantic-graph = { workspace = true } diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 3c9b589098..4aa51cde7b 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -12,7 +12,6 @@ import httpx import pytest from asgi_lifespan import LifespanManager -from dirty_equals import IsStr from pydantic import BaseModel from pydantic_ai import ( @@ -62,12 +61,11 @@ from pydantic_ai.tools import AgentDepsT, ToolDefinition from ._inline_snapshot import snapshot -from .conftest import IsDatetime, IsInt, IsSameStr, try_import +from .conftest import IsDatetime, IsInt, IsSameStr, IsStr, try_import with try_import() as imports_successful: from ag_ui.core import ( ActivityMessage, - ActivitySnapshotEvent, AssistantMessage, BaseEvent, BinaryInputContent, @@ -76,11 +74,11 @@ EventType, FunctionCall, Message, + ReasoningMessage, RunAgentInput, StateSnapshotEvent, SystemMessage, TextInputContent, - ThinkingEndEvent, Tool, ToolCall, ToolMessage, @@ -1065,16 +1063,8 @@ async def stream_function( 'threadId': (thread_id := IsSameStr()), 'runId': (run_id := IsSameStr()), }, - {'type': 'THINKING_START', 'timestamp': IsInt()}, - {'type': 'THINKING_END', 'timestamp': IsInt()}, - { - 'type': 'ACTIVITY_SNAPSHOT', - 'timestamp': IsInt(), - 'activityType': 'pydantic_ai_thinking', - 'messageId': IsStr(), - 'content': {'content': ''}, - 'replace': True, - }, + {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': IsStr()}, + {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': IsStr()}, { 'type': 'TEXT_MESSAGE_START', 'timestamp': IsInt(), @@ -1094,34 +1084,53 @@ async def stream_function( 'delta': ' and some more', }, {'type': 'TEXT_MESSAGE_END', 'timestamp': IsInt(), 'messageId': 
message_id}, - {'type': 'THINKING_START', 'timestamp': IsInt()}, - {'type': 'THINKING_TEXT_MESSAGE_START', 'timestamp': IsInt()}, - {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'timestamp': IsInt(), 'delta': 'Thinking '}, - {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'timestamp': IsInt(), 'delta': 'about the weather'}, - {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, - {'type': 'THINKING_TEXT_MESSAGE_START', 'timestamp': IsInt()}, + {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (reasoning_id := IsSameStr())}, + { + 'type': 'REASONING_MESSAGE_START', + 'timestamp': IsInt(), + 'messageId': reasoning_id, + 'role': 'assistant', + }, + { + 'type': 'REASONING_MESSAGE_CONTENT', + 'timestamp': IsInt(), + 'messageId': reasoning_id, + 'delta': 'Thinking ', + }, { - 'type': 'THINKING_TEXT_MESSAGE_CONTENT', + 'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': IsInt(), + 'messageId': reasoning_id, + 'delta': 'about the weather', + }, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, + { + 'type': 'REASONING_MESSAGE_START', + 'timestamp': IsInt(), + 'messageId': reasoning_id, + 'role': 'assistant', + }, + { + 'type': 'REASONING_MESSAGE_CONTENT', + 'timestamp': IsInt(), + 'messageId': reasoning_id, 'delta': 'Thinking about the meaning of life', }, - {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, - {'type': 'THINKING_TEXT_MESSAGE_START', 'timestamp': IsInt()}, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, { - 'type': 'THINKING_TEXT_MESSAGE_CONTENT', + 'type': 'REASONING_MESSAGE_START', 'timestamp': IsInt(), - 'delta': 'Thinking about the universe', + 'messageId': reasoning_id, + 'role': 'assistant', }, - {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, - {'type': 'THINKING_END', 'timestamp': IsInt()}, { - 'type': 'ACTIVITY_SNAPSHOT', + 'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': IsInt(), - 'activityType': 'pydantic_ai_thinking', - 'messageId': 
IsStr(), - 'content': {'content': 'Thinking about the universe'}, - 'replace': True, + 'messageId': reasoning_id, + 'delta': 'Thinking about the universe', }, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, + {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, { 'type': 'RUN_FINISHED', 'timestamp': IsInt(), @@ -1133,7 +1142,7 @@ async def stream_function( async def test_thinking_with_signature() -> None: - """Test that ActivitySnapshotEvent is emitted after ThinkingEndEvent with metadata.""" + """Test that ReasoningEncryptedValueEvent is emitted with thinking metadata.""" async def stream_function( messages: list[ModelMessage], agent_info: AgentInfo @@ -1157,23 +1166,28 @@ async def stream_function( 'threadId': (thread_id := IsSameStr()), 'runId': (run_id := IsSameStr()), }, - {'type': 'THINKING_START', 'timestamp': IsInt()}, - {'type': 'THINKING_TEXT_MESSAGE_START', 'timestamp': IsInt()}, - {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'timestamp': IsInt(), 'delta': 'Thinking deeply'}, - {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, - {'type': 'THINKING_END', 'timestamp': IsInt()}, + {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (reasoning_id := IsSameStr())}, { - 'type': 'ACTIVITY_SNAPSHOT', + 'type': 'REASONING_MESSAGE_START', 'timestamp': IsInt(), - 'activityType': 'pydantic_ai_thinking', - 'messageId': IsStr(), - 'content': { - 'content': 'Thinking deeply', - 'signature': 'sig_abc123', - 'provider_name': 'function', - }, - 'replace': True, + 'messageId': reasoning_id, + 'role': 'assistant', }, + { + 'type': 'REASONING_MESSAGE_CONTENT', + 'timestamp': IsInt(), + 'messageId': reasoning_id, + 'delta': 'Thinking deeply', + }, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, + { + 'type': 'REASONING_ENCRYPTED_VALUE', + 'timestamp': IsInt(), + 'subtype': 'message', + 'entityId': reasoning_id, + 'encryptedValue': IsStr(), + }, + {'type': 
'REASONING_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, { 'type': 'TEXT_MESSAGE_START', 'timestamp': IsInt(), @@ -1192,20 +1206,21 @@ async def stream_function( ) -def test_activity_message_thinking_roundtrip() -> None: - """Test that ActivityMessage with pydantic_ai_thinking converts to ThinkingPart.""" +def test_reasoning_message_thinking_roundtrip() -> None: + """Test that ReasoningMessage converts to ThinkingPart with metadata from encrypted_value.""" messages = AGUIAdapter.load_messages( [ - ActivityMessage( - id='activity-1', - activity_type='pydantic_ai_thinking', - content={ - 'content': 'Let me think about this...', - 'id': 'thinking-1', - 'signature': 'sig_abc123', - 'provider_name': 'anthropic', - 'provider_details': {'some': 'details'}, - }, + ReasoningMessage( + id='reasoning-1', + content='Let me think about this...', + encrypted_value=json.dumps( + { + 'id': 'thinking-1', + 'signature': 'sig_abc123', + 'provider_name': 'anthropic', + 'provider_details': {'some': 'details'}, + } + ), ), AssistantMessage(id='msg-1', content='Here is my response'), ] @@ -1230,8 +1245,8 @@ def test_activity_message_thinking_roundtrip() -> None: ) -async def test_thinking_end_event_with_all_metadata() -> None: - """Test that ActivitySnapshotEvent includes all metadata fields (id, signature, provider_name, provider_details).""" +async def test_reasoning_events_with_all_metadata() -> None: + """Test that REASONING_* events emit encryptedValue with all metadata fields.""" run_input = create_input(UserMessage(id='msg_1', content='test')) event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE) @@ -1243,22 +1258,25 @@ async def test_thinking_end_event_with_all_metadata() -> None: provider_details={'model': 'claude-sonnet-4-5'}, ) - events = [e async for e in event_stream.handle_thinking_end(part, followed_by_thinking=False)] + events: list[BaseEvent] = [] + async for e in event_stream.handle_thinking_start(part, follows_thinking=False): + events.append(e) 
+ async for e in event_stream.handle_thinking_end(part, followed_by_thinking=False): + events.append(e) - assert events == snapshot( + assert [e.model_dump(exclude_none=True) for e in events] == snapshot( [ - ThinkingEndEvent(), - ActivitySnapshotEvent( - message_id='thinking-123', - activity_type='pydantic_ai_thinking', - content={ - 'content': 'Thinking content', - 'id': 'thinking-123', - 'signature': 'sig_xyz', - 'provider_name': 'anthropic', - 'provider_details': {'model': 'claude-sonnet-4-5'}, - }, - ), + {'type': 'REASONING_START', 'message_id': IsStr()}, + {'type': 'REASONING_MESSAGE_START', 'message_id': IsStr(), 'role': 'assistant'}, + {'type': 'REASONING_MESSAGE_CONTENT', 'message_id': IsStr(), 'delta': 'Thinking content'}, + {'type': 'REASONING_MESSAGE_END', 'message_id': IsStr()}, + { + 'type': 'REASONING_ENCRYPTED_VALUE', + 'subtype': 'message', + 'entity_id': IsStr(), + 'encrypted_value': '{"id": "thinking-123", "signature": "sig_xyz", "provider_name": "anthropic", "provider_details": {"model": "claude-sonnet-4-5"}}', + }, + {'type': 'REASONING_END', 'message_id': IsStr()}, ] ) diff --git a/uv.lock b/uv.lock index 61104748b2..da108105df 100644 --- a/uv.lock +++ b/uv.lock @@ -7,6 +7,10 @@ resolution-markers = [ "python_full_version == '3.11.*'", "python_full_version < '3.11'", ] +conflicts = [[ + { package = "pydantic-ai-slim", extra = "huggingface" }, + { package = "pydantic-ai-slim", extra = "outlines-vllm-offline" }, +]] [manifest] members = [ @@ -23,7 +27,8 @@ name = "accelerate" version = "1.12.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", 
version = "1.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "numpy" }, { name = "packaging" }, { name = "psutil" }, @@ -38,14 +43,14 @@ wheels = [ [[package]] name = "ag-ui-protocol" -version = "0.1.10" +version = "0.1.13" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/67/bb/5a5ec893eea5805fb9a3db76a9888c3429710dfb6f24bbb37568f2cf7320/ag_ui_protocol-0.1.10.tar.gz", hash = "sha256:3213991c6b2eb24bb1a8c362ee270c16705a07a4c5962267a083d0959ed894f4", size = 6945, upload-time = "2025-11-06T15:17:17.068Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/b5/fc0b65b561d00d88811c8a7d98ee735833f81554be244340950e7b65820c/ag_ui_protocol-0.1.13.tar.gz", hash = "sha256:811d7d7dcce4783dec252918f40b717ebfa559399bf6b071c4ba47c0c1e21bcb", size = 5671, upload-time = "2026-02-19T18:40:38.602Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8f/78/eb55fabaab41abc53f52c0918a9a8c0f747807e5306273f51120fd695957/ag_ui_protocol-0.1.10-py3-none-any.whl", hash = "sha256:c81e6981f30aabdf97a7ee312bfd4df0cd38e718d9fc10019c7d438128b93ab5", size = 7889, upload-time = "2025-11-06T15:17:15.325Z" }, + { url = "https://files.pythonhosted.org/packages/cd/9f/b833c1ab1999da35ebad54841ae85d2c2764c931da9a6f52d8541b6901b2/ag_ui_protocol-0.1.13-py3-none-any.whl", hash = "sha256:1393fa894c1e8416efe184168a50689e760d05b32f4646eebb8ff423dddf8e8f", size = 8053, upload-time = "2026-02-19T18:40:37.27Z" }, ] [[package]] @@ -73,7 +78,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, { name = "aiosignal" }, - { name = "async-timeout", marker = "python_full_version < '3.11'" }, + { name = "async-timeout", marker = "python_full_version < '3.11' or (extra == 
'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "attrs" }, { name = "frozenlist" }, { name = "multidict" }, @@ -201,7 +206,7 @@ version = "1.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "frozenlist" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } wheels = [ @@ -227,7 +232,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mako" }, { name = "sqlalchemy" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/02/a6/74c8cadc2882977d80ad756a13857857dbcf9bd405bc80b662eb10651282/alembic-1.17.2.tar.gz", hash = "sha256:bbe9751705c5e0f14877f02d46c53d10885e377e3d90eda810a016f9baa19e8e", size = 1988064, upload-time = "2025-11-14T20:35:04.057Z" } @@ -294,10 +299,10 @@ name = "anyio" version = "4.8.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "idna" }, { name = "sniffio" }, - { 
name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126, upload-time = "2025-01-05T13:13:11.095Z" } wheels = [ @@ -350,7 +355,7 @@ dependencies = [ { name = "pyyaml" }, { name = "requests" }, { name = "requests-oauthlib" }, - { name = "tzdata", marker = "sys_platform == 'win32'" }, + { name = "tzdata", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a9/a7/bb182d81f35c3fe405505f0976da4b74f942cfdd53c7193b0fe50412aa27/apprise-1.9.6.tar.gz", hash = "sha256:4206be9cb5694a3d08dd8e0393bbb9b36212ac3a7769c2633620055e75c6caef", size = 1921714, upload-time = "2025-12-07T19:24:30.587Z" } wheels = [ @@ -383,7 +388,7 @@ name = "asgiref" version = "3.11.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/76/b9/4db2509eabd14b4a8c71d1b24c8d5734c52b8560a7b1e1a8b56c8d25568b/asgiref-3.11.0.tar.gz", hash = "sha256:13acff32519542a1736223fb79a715acdebe24286d98e8b164a73085f40da2c4", size = 37969, upload-time = "2025-11-19T15:32:20.106Z" } wheels = [ @@ -425,7 +430,7 @@ name = "asyncpg" version = "0.31.0" source = { registry = 
"https://pypi.org/simple" } dependencies = [ - { name = "async-timeout", marker = "python_full_version < '3.11'" }, + { name = "async-timeout", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/fe/cc/d18065ce2380d80b1bcce927c24a2642efd38918e33fd724bc4bca904877/asyncpg-0.31.0.tar.gz", hash = "sha256:c989386c83940bfbd787180f2b1519415e2d3d6277a70d9d0f0145ac73500735", size = 993667, upload-time = "2025-11-24T23:27:00.812Z" } wheels = [ @@ -621,8 +626,8 @@ dependencies = [ { name = "pathspec" }, { name = "platformdirs" }, { name = "pytokens" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c4/d9/07b458a3f1c525ac392b5edc6b191ff140b596f9d77092429417a54e249d/black-25.12.0.tar.gz", hash = "sha256:8d3dd9cea14bff7ddc0eb243c811cdb1a011ebb4800a5f0335a01a68654796a7", size = 659264, upload-time = "2025-12-08T01:40:52.501Z" } wheels = [ @@ -770,7 +775,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore-stubs" }, { name = "types-s3transfer" }, - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = "typing-extensions", marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/ac/42/2a73afec394eec6350d59c4deb4bda2639f7fc0ca8dfb2a41dcc4115f07e/boto3_stubs-1.42.14.tar.gz", hash = "sha256:b06c4be79348573fa03fc7fbe4bd82ebbc7e1e27cf208c8f5ab7bfcb75f55c05", size = 101097, upload-time = "2025-12-19T20:41:44.497Z" } wheels = [ @@ -981,7 +986,7 @@ name = "cffi" version = "2.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pycparser", marker = "implementation_name != 'PyPy'" }, + { name = "pycparser", marker = "implementation_name != 'PyPy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } wheels = [ @@ -1171,7 +1176,7 @@ name = "click" version = "8.1.8" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } wheels = [ @@ -1223,7 +1228,7 @@ dependencies = [ { name = "loguru", marker = "python_full_version < '3.12'" }, { name = "pydantic", marker = "python_full_version < '3.12'" }, { name = "torch", marker = "python_full_version < '3.12'" }, - { name = "transformers", marker = "python_full_version < '3.12'" }, + { name = "transformers", version = "4.57.6", source = { registry = 
"https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/fc/65/88dd1c58fb9d0ded51b5c86471b937a1525f91fad2211a6f051dc1ea822d/compressed_tensors-0.13.0.tar.gz", hash = "sha256:23893824d3498ea3f1a829f14a8fa85f9a5e76a34c711a038b8d7c619ca9a67c", size = 200995, upload-time = "2025-12-16T16:03:55.397Z" } wheels = [ @@ -1340,7 +1345,7 @@ wheels = [ [package.optional-dependencies] toml = [ - { name = "tomli", marker = "python_full_version <= '3.11'" }, + { name = "tomli", marker = "python_full_version <= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] [[package]] @@ -1348,8 +1353,8 @@ name = "cryptography" version = "46.0.5" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "cffi", marker = "platform_python_implementation != 'PyPy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/60/04/ee2a9e8542e4fa2773b81771ff8349ff19cdd56b7258a0cc442639052edb/cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d", size = 750064, upload-time = "2026-02-10T19:18:38.255Z" } wheels = [ @@ -1496,8 +1501,8 @@ dependencies = [ { name = "docstring-parser" }, { name = "rich" }, { name = "rich-rst" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or 
(extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/40/99/e1b75193ee23bd10a05a3b90c065d419b1c8c18f61cae6b8218c7158f792/cyclopts-4.4.1.tar.gz", hash = "sha256:368a404926b46a49dc328a33ccd7e55ba879296a28e64a42afe2f6667704cecf", size = 159245, upload-time = "2025-12-21T13:59:02.266Z" } wheels = [ @@ -1513,7 +1518,8 @@ dependencies = [ { name = "filelock" }, { name = "fsspec", extra = ["http"] }, { name = "httpx" }, - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "multiprocess" }, { name = "numpy" }, { name = "packaging" }, @@ -1678,7 +1684,7 @@ name = "docker" version = "7.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "pywin32", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "requests" }, { name = "urllib3" }, ] @@ -1811,7 +1817,7 @@ name = "exceptiongroup" version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = 
"python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ @@ -1852,7 +1858,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "redis" }, { name = "sortedcontainers" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/5f/f9/57464119936414d60697fcbd32f38909bb5688b616ae13de6e98384433e0/fakeredis-2.33.0.tar.gz", hash = "sha256:d7bc9a69d21df108a6451bbffee23b3eba432c21a654afc7ff2d295428ec5770", size = 175187, upload-time = "2025-12-16T19:45:52.269Z" } wheels = [ @@ -1896,13 +1902,13 @@ wheels = [ [package.optional-dependencies] standard = [ { name = "email-validator", marker = "python_full_version < '3.12'" }, - { name = "fastapi-cli", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "fastapi-cli", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "httpx", marker = "python_full_version < '3.12'" }, { name = "jinja2", marker = "python_full_version < '3.12'" }, { name = "pydantic-extra-types", marker = "python_full_version < '3.12'" }, { name = "pydantic-settings", marker = 
"python_full_version < '3.12'" }, { name = "python-multipart", marker = "python_full_version < '3.12'" }, - { name = "uvicorn", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "uvicorn", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] [[package]] @@ -1913,7 +1919,7 @@ dependencies = [ { name = "rich-toolkit", marker = "python_full_version < '3.12'" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typer", marker = "python_full_version < '3.12'" }, - { name = "uvicorn", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "uvicorn", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/99/75/9407a6b452be4c988feacec9c9d2f58d8f315162a6c7258d5a649d933ebe/fastapi_cli-0.0.16.tar.gz", hash = "sha256:e8a2a1ecf7a4e062e3b2eec63ae34387d1e142d4849181d936b23c4bdfe29073", size = 19447, upload-time = "2025-11-10T19:01:07.856Z" } wheels = [ @@ -1923,7 +1929,7 @@ wheels = [ [package.optional-dependencies] standard = [ { name = "fastapi-cloud-cli", marker = "python_full_version < '3.12'" }, - { name = "uvicorn", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "uvicorn", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] [[package]] @@ -1933,12 +1939,12 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "fastar", 
marker = "python_full_version < '3.12'" }, { name = "httpx", marker = "python_full_version < '3.12'" }, - { name = "pydantic", extra = ["email"], marker = "python_full_version < '3.12'" }, + { name = "pydantic", extra = ["email"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "rich-toolkit", marker = "python_full_version < '3.12'" }, { name = "rignore", marker = "python_full_version < '3.12'" }, { name = "sentry-sdk", marker = "python_full_version < '3.12'" }, { name = "typer", marker = "python_full_version < '3.12'" }, - { name = "uvicorn", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "uvicorn", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/cf/0d/3b0d2991f481c122c552b4ae38a8b400a75ab0edbc85536f2a6224f72da2/fastapi_cloud_cli-0.7.0.tar.gz", hash = "sha256:8b025944475c3d53262105886dfe051f46383e4f287787a46892b524922ac0b6", size = 30906, upload-time = "2025-12-16T12:51:49.082Z" } wheels = [ @@ -2146,6 +2152,7 @@ version = "0.8.3" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/73/b1/1c3d635d955f2b4bf34d45abf8f35492e04dbd7804e94ce65d9f928ef3ec/fastrlock-0.8.3.tar.gz", hash = "sha256:4af6734d92eaa3ab4373e6c9a1dd0d5ad1304e172b1521733c6c3b3d73c8fa5d", size = 79327, upload-time = "2024-12-17T11:03:39.638Z" } wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/02/3f771177380d8690812d5b2b7736dc6b6c8cd1c317e4572e65f823eede08/fastrlock-0.8.3-cp310-cp310-macosx_11_0_universal2.whl", hash = 
"sha256:cc5fa9166e05409f64a804d5b6d01af670979cdb12cd2594f555cb33cdc155bd", size = 55094, upload-time = "2024-12-17T11:01:49.721Z" }, { url = "https://files.pythonhosted.org/packages/be/b4/aae7ed94b8122c325d89eb91336084596cebc505dc629b795fcc9629606d/fastrlock-0.8.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:7a77ebb0a24535ef4f167da2c5ee35d9be1e96ae192137e9dc3ff75b8dfc08a5", size = 48220, upload-time = "2024-12-17T11:01:51.071Z" }, { url = "https://files.pythonhosted.org/packages/96/87/9807af47617fdd65c68b0fcd1e714542c1d4d3a1f1381f591f1aa7383a53/fastrlock-0.8.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:d51f7fb0db8dab341b7f03a39a3031678cf4a98b18533b176c533c122bfce47d", size = 49551, upload-time = "2024-12-17T11:01:52.316Z" }, { url = "https://files.pythonhosted.org/packages/9d/12/e201634810ac9aee59f93e3953cb39f98157d17c3fc9d44900f1209054e9/fastrlock-0.8.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:767ec79b7f6ed9b9a00eb9ff62f2a51f56fdb221c5092ab2dadec34a9ccbfc6e", size = 49398, upload-time = "2024-12-17T11:01:53.514Z" }, @@ -2153,6 +2160,7 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b5/9e/1ae90829dd40559ab104e97ebe74217d9da794c4bb43016da8367ca7a596/fastrlock-0.8.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:92577ff82ef4a94c5667d6d2841f017820932bc59f31ffd83e4a2c56c1738f90", size = 52495, upload-time = "2024-12-17T11:01:57.76Z" }, { url = "https://files.pythonhosted.org/packages/e5/8c/5e746ee6f3d7afbfbb0d794c16c71bfd5259a4e3fb1dda48baf31e46956c/fastrlock-0.8.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3df8514086e16bb7c66169156a8066dc152f3be892c7817e85bf09a27fa2ada2", size = 51972, upload-time = "2024-12-17T11:02:01.384Z" }, { url = 
"https://files.pythonhosted.org/packages/76/a7/8b91068f00400931da950f143fa0f9018bd447f8ed4e34bed3fe65ed55d2/fastrlock-0.8.3-cp310-cp310-win_amd64.whl", hash = "sha256:001fd86bcac78c79658bac496e8a17472d64d558cd2227fdc768aa77f877fe40", size = 30946, upload-time = "2024-12-17T11:02:03.491Z" }, + { url = "https://files.pythonhosted.org/packages/90/9e/647951c579ef74b6541493d5ca786d21a0b2d330c9514ba2c39f0b0b0046/fastrlock-0.8.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:f68c551cf8a34b6460a3a0eba44bd7897ebfc820854e19970c52a76bf064a59f", size = 55233, upload-time = "2024-12-17T11:02:04.795Z" }, { url = "https://files.pythonhosted.org/packages/be/91/5f3afba7d14b8b7d60ac651375f50fff9220d6ccc3bef233d2bd74b73ec7/fastrlock-0.8.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:55d42f6286b9d867370af4c27bc70d04ce2d342fe450c4a4fcce14440514e695", size = 48911, upload-time = "2024-12-17T11:02:06.173Z" }, { url = "https://files.pythonhosted.org/packages/d5/7a/e37bd72d7d70a8a551b3b4610d028bd73ff5d6253201d5d3cf6296468bee/fastrlock-0.8.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:bbc3bf96dcbd68392366c477f78c9d5c47e5d9290cb115feea19f20a43ef6d05", size = 50357, upload-time = "2024-12-17T11:02:07.418Z" }, { url = "https://files.pythonhosted.org/packages/0d/ef/a13b8bab8266840bf38831d7bf5970518c02603d00a548a678763322d5bf/fastrlock-0.8.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:77ab8a98417a1f467dafcd2226718f7ca0cf18d4b64732f838b8c2b3e4b55cb5", size = 50222, upload-time = "2024-12-17T11:02:08.745Z" }, @@ -2160,11 +2168,13 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c0/8f/65907405a8cdb2fc8beaf7d09a9a07bb58deff478ff391ca95be4f130b70/fastrlock-0.8.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c9d459ce344c21ff03268212a1845aa37feab634d242131bc16c2a2355d5f65", size = 53362, upload-time = 
"2024-12-17T11:02:12.476Z" }, { url = "https://files.pythonhosted.org/packages/ec/b9/ae6511e52738ba4e3a6adb7c6a20158573fbc98aab448992ece25abb0b07/fastrlock-0.8.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33e6fa4af4f3af3e9c747ec72d1eadc0b7ba2035456c2afb51c24d9e8a56f8fd", size = 52836, upload-time = "2024-12-17T11:02:13.74Z" }, { url = "https://files.pythonhosted.org/packages/88/3e/c26f8192c93e8e43b426787cec04bb46ac36e72b1033b7fe5a9267155fdf/fastrlock-0.8.3-cp311-cp311-win_amd64.whl", hash = "sha256:5e5f1665d8e70f4c5b4a67f2db202f354abc80a321ce5a26ac1493f055e3ae2c", size = 31046, upload-time = "2024-12-17T11:02:15.033Z" }, + { url = "https://files.pythonhosted.org/packages/00/df/56270f2e10c1428855c990e7a7e5baafa9e1262b8e789200bd1d047eb501/fastrlock-0.8.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:8cb2cf04352ea8575d496f31b3b88c42c7976e8e58cdd7d1550dfba80ca039da", size = 55727, upload-time = "2024-12-17T11:02:17.26Z" }, { url = "https://files.pythonhosted.org/packages/57/21/ea1511b0ef0d5457efca3bf1823effb9c5cad4fc9dca86ce08e4d65330ce/fastrlock-0.8.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:85a49a1f1e020097d087e1963e42cea6f307897d5ebe2cb6daf4af47ffdd3eed", size = 52201, upload-time = "2024-12-17T11:02:19.512Z" }, { url = "https://files.pythonhosted.org/packages/80/07/cdecb7aa976f34328372f1c4efd6c9dc1b039b3cc8d3f38787d640009a25/fastrlock-0.8.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5f13ec08f1adb1aa916c384b05ecb7dbebb8df9ea81abd045f60941c6283a670", size = 53924, upload-time = "2024-12-17T11:02:20.85Z" }, { url = "https://files.pythonhosted.org/packages/88/6d/59c497f8db9a125066dd3a7442fab6aecbe90d6fec344c54645eaf311666/fastrlock-0.8.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0ea4e53a04980d646def0f5e4b5e8bd8c7884288464acab0b37ca0c65c482bfe", size = 52140, upload-time = "2024-12-17T11:02:22.263Z" }, { url = 
"https://files.pythonhosted.org/packages/62/04/9138943c2ee803d62a48a3c17b69de2f6fa27677a6896c300369e839a550/fastrlock-0.8.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:38340f6635bd4ee2a4fb02a3a725759fe921f2ca846cb9ca44531ba739cc17b4", size = 53261, upload-time = "2024-12-17T11:02:24.418Z" }, { url = "https://files.pythonhosted.org/packages/e2/4b/db35a52589764c7745a613b6943bbd018f128d42177ab92ee7dde88444f6/fastrlock-0.8.3-cp312-cp312-win_amd64.whl", hash = "sha256:da06d43e1625e2ffddd303edcd6d2cd068e1c486f5fd0102b3f079c44eb13e2c", size = 31235, upload-time = "2024-12-17T11:02:25.708Z" }, + { url = "https://files.pythonhosted.org/packages/92/74/7b13d836c3f221cff69d6f418f46c2a30c4b1fe09a8ce7db02eecb593185/fastrlock-0.8.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:5264088185ca8e6bc83181dff521eee94d078c269c7d557cc8d9ed5952b7be45", size = 54157, upload-time = "2024-12-17T11:02:29.196Z" }, { url = "https://files.pythonhosted.org/packages/06/77/f06a907f9a07d26d0cca24a4385944cfe70d549a2c9f1c3e3217332f4f12/fastrlock-0.8.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a98ba46b3e14927550c4baa36b752d0d2f7387b8534864a8767f83cce75c160", size = 50954, upload-time = "2024-12-17T11:02:32.12Z" }, { url = "https://files.pythonhosted.org/packages/f9/4e/94480fb3fd93991dd6f4e658b77698edc343f57caa2870d77b38c89c2e3b/fastrlock-0.8.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbdea6deeccea1917c6017d353987231c4e46c93d5338ca3e66d6cd88fbce259", size = 52535, upload-time = "2024-12-17T11:02:33.402Z" }, { url = "https://files.pythonhosted.org/packages/7d/a7/ee82bb55b6c0ca30286dac1e19ee9417a17d2d1de3b13bb0f20cefb86086/fastrlock-0.8.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c6e5bfecbc0d72ff07e43fed81671747914d6794e0926700677ed26d894d4f4f", size = 50942, upload-time = "2024-12-17T11:02:34.688Z" }, @@ -2477,14 +2487,15 @@ source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "aiofiles" }, { name = "anyio" }, - { name = "audioop-lts", marker = "python_full_version >= '3.13'" }, + { name = "audioop-lts", marker = "python_full_version >= '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "brotli" }, { name = "fastapi" }, { name = "ffmpy" }, { name = "gradio-client" }, { name = "groovy" }, { name = "httpx" }, - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "jinja2" }, { name = "markupsafe" }, { name = "numpy" }, @@ -2516,7 +2527,8 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "fsspec" }, { name = "httpx" }, - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "packaging" }, { name = "typing-extensions" }, ] @@ -2864,8 +2876,8 @@ wheels = [ 
[package.optional-dependencies] brotli = [ - { name = "brotli", marker = "platform_python_implementation == 'CPython'" }, - { name = "brotlicffi", marker = "platform_python_implementation != 'CPython'" }, + { name = "brotli", marker = "platform_python_implementation == 'CPython' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "brotlicffi", marker = "platform_python_implementation != 'CPython' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] http2 = [ { name = "h2" }, @@ -2883,21 +2895,50 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, ] +[[package]] +name = "huggingface-hub" +version = "0.36.2" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] +dependencies = [ + { name = "filelock", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "fsspec", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "hf-xet", marker = "(python_full_version < '3.12' and platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (python_full_version < '3.12' and platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (python_full_version < '3.12' 
and platform_machine == 'arm64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (python_full_version < '3.12' and platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'arm64' and platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "packaging", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "pyyaml", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "requests", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tqdm", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and 
extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "typing-extensions", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/b7/8cb61d2eece5fb05a83271da168186721c450eb74e3c31f7ef3169fa475b/huggingface_hub-0.36.2.tar.gz", hash = "sha256:1934304d2fb224f8afa3b87007d58501acfda9215b334eed53072dd5e815ff7a", size = 649782, upload-time = "2026-02-06T09:24:13.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/af/48ac8483240de756d2438c380746e7130d1c6f75802ef22f3c6d49982787/huggingface_hub-0.36.2-py3-none-any.whl", hash = "sha256:48f0c8eac16145dfce371e9d2d7772854a4f591bcb56c9cf548accf531d54270", size = 566395, upload-time = "2026-02-06T09:24:11.133Z" }, +] + [[package]] name = "huggingface-hub" version = "1.3.4" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13'", + "python_full_version == '3.12.*'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] dependencies = [ - { name = "filelock" }, - { name = "fsspec" }, - { name = "hf-xet", marker = "platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, - { name = "httpx" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "shellingham" }, - { name = "tqdm" }, - { name = "typer-slim" }, - { name = "typing-extensions" }, + { name = "filelock", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "fsspec", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "hf-xet", marker = "(python_full_version >= '3.12' and platform_machine == 'AMD64') or (python_full_version >= '3.12' and platform_machine == 'aarch64') or (python_full_version >= '3.12' and platform_machine == 'amd64') or (python_full_version >= '3.12' and platform_machine == 'arm64') or (python_full_version >= '3.12' and platform_machine == 'x86_64') or (platform_machine != 'AMD64' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'arm64' and platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'AMD64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'AMD64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'aarch64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'amd64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'arm64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'x86_64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "httpx", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "packaging", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = 
"pyyaml", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "shellingham", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tqdm", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typer-slim", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typing-extensions", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/af/25/74af9d16cd59ae15b12467a79a84aa0fe24be4aba68fc4da0c1864d49c17/huggingface_hub-1.3.4.tar.gz", hash = "sha256:c20d5484a611b7b7891d272e8fc9f77d5de025b0480bdacfa858efb3780b455f", size = 627683, upload-time = "2026-01-26T14:05:10.656Z" } wheels = [ @@ -3052,7 +3093,7 @@ dependencies = [ { name = "executing" }, { name = "pytest" }, { name = "rich" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/1c/b1/52b5ee59f73ed31d5fe21b10881bf2d121d07d54b23c0b6b74186792e620/inline_snapshot-0.31.1.tar.gz", hash = "sha256:4ea5ed70aa1d652713bbfd750606b94bd8a42483f7d3680433b3e92994495f64", size = 2606338, upload-time = "2025-11-07T07:36:18.932Z" } wheels = [ @@ -3094,7 +3135,7 @@ name = "jaraco-context" version = "6.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - 
{ name = "backports-tarfile", marker = "python_full_version < '3.12'" }, + { name = "backports-tarfile", marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/cb/9c/a788f5bb29c61e456b8ee52ce76dbdd32fd72cd73dd67bc95f42c7a8d13c/jaraco_context-6.1.0.tar.gz", hash = "sha256:129a341b0a85a7db7879e22acd66902fda67882db771754574338898b2d5d86f", size = 15850, upload-time = "2026-01-13T02:53:53.847Z" } wheels = [ @@ -3342,13 +3383,13 @@ name = "keyring" version = "25.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "importlib-metadata", marker = "python_full_version < '3.12'" }, + { name = "importlib-metadata", marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "jaraco-classes" }, { name = "jaraco-context" }, { name = "jaraco-functools" }, - { name = "jeepney", marker = "sys_platform == 'linux'" }, - { name = "pywin32-ctypes", marker = "sys_platform == 'win32'" }, - { name = "secretstorage", marker = "sys_platform == 'linux'" }, + { name = "jeepney", marker = "sys_platform == 'linux' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "pywin32-ctypes", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "secretstorage", marker = "sys_platform == 'linux' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/43/4b/674af6ef2f97d56f0ab5153bf0bfa28ccb6c3ed4d1babf4305449668807b/keyring-25.7.0.tar.gz", hash = 
"sha256:fe01bd85eb3f8fb3dd0405defdeac9a5b4f6f0439edbb3149577f244a2e8245b", size = 63516, upload-time = "2025-11-16T16:26:09.482Z" } wheels = [ @@ -3392,7 +3433,7 @@ version = "0.6.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, - { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, + { name = "orjson", marker = "platform_python_implementation != 'PyPy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "packaging" }, { name = "pydantic" }, { name = "requests" }, @@ -3505,9 +3546,12 @@ version = "1.3.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/95/48/3f7a9d3ff1b36bba92b5107a3a21286821227afe9ea464736133994d61fb/llguidance-1.3.0.tar.gz", hash = "sha256:861249afd51dc325646834462ea827e57a5c2b2042e108e6aae7059fdad9104d", size = 1070460, upload-time = "2025-10-20T19:58:44.164Z" } wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/33/be5acb85cd8cdc4afde33d9c234eece9f318e087920255af3c05864cd3e7/llguidance-1.3.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:f7685222660a762e481ac633d49cc559c64980fe2ee59c8f932a5bb5cbc0c2c2", size = 3220647, upload-time = "2025-10-20T19:58:42.542Z" }, { url = "https://files.pythonhosted.org/packages/82/e6/b48bda5b15efeaeb62bd0dba8fc6a01d4ae5457a85dbb5d18632385fe15c/llguidance-1.3.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:098030ff0687261a3f1bd54cf21fe951fc861d56d37a0671250dd36677eaf224", size = 3099830, upload-time = "2025-10-20T19:58:40.826Z" }, { url = "https://files.pythonhosted.org/packages/aa/11/44389d3d1526d7a5c38ffd587a5ebc61d7bee443ac1dea95f2089ad58f5f/llguidance-1.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f6caca5d78db7f76e1fbb0fff8607b861c32d47fa3d5dee2fc49de27ee269df", size = 2835242, upload-time = "2025-10-20T19:58:34.518Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/ca/53ea256396405e4dee70d5a4a35e18543408e18bb16b251d6ca6b5d80310/llguidance-1.3.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0612bb3f034d2487b6e8f9561f02a94a6039d88273bf0c5c539a3bd3895e47d2", size = 3297480, upload-time = "2025-10-20T19:58:37.033Z" }, { url = "https://files.pythonhosted.org/packages/83/a8/1ff2bedb8f9acb46a2d2d603415d272bb622c142ea86f5b95445cc6e366c/llguidance-1.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc17e9dd602c3879bf91664a64bf72f54c74dbfbeb24ccfab6a5fe435b12f7aa", size = 3033133, upload-time = "2025-10-20T19:58:38.721Z" }, + { url = "https://files.pythonhosted.org/packages/d7/a7/9b8086c0cfdddf3f6d47b173a404fa7ac46272f7affbee082c36740f4f1c/llguidance-1.3.0-cp39-abi3-win32.whl", hash = "sha256:2f6f558485a43e273fc5c6c974a9a3ace5d5e170076db9b40e0560e41c3ff18f", size = 2598109, upload-time = "2025-10-20T19:58:47.656Z" }, { url = "https://files.pythonhosted.org/packages/5a/7e/809349638231f469b9056c0e1bfd924d5ef5558b3b3ec72d093b6fad33b1/llguidance-1.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:1d1cd1c8618d1a13605d3e057c978651e551c8c469b481ee4041f1d6c436002d", size = 2789946, upload-time = "2025-10-20T19:58:45.958Z" }, ] @@ -3601,7 +3645,7 @@ dependencies = [ { name = "opentelemetry-sdk" }, { name = "protobuf" }, { name = "rich" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e2/60/b8040db3598a55da64c45e3e689f2baa87389a4648a6f46ba80be3329f23/logfire-4.16.0.tar.gz", hash = "sha256:03a3ab8fdc13399309cb55d69cba7a6fcbad3526cfad85fc4f72e7d75e22b654", size = 550759, upload-time = "2025-12-04T16:16:39.477Z" } @@ -3940,12 +3984,12 @@ dependencies = [ { name = 
"pydantic-settings" }, { name = "pyjwt", extra = ["crypto"] }, { name = "python-multipart" }, - { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "pywin32", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "sse-starlette" }, { name = "starlette" }, { name = "typing-extensions" }, { name = "typing-inspection" }, - { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/d5/2d/649d80a0ecf6a1f82632ca44bec21c0461a9d9fc8934d38cb5b319f2db5e/mcp-1.25.0.tar.gz", hash = "sha256:56310361ebf0364e2d438e5b45f7668cbb124e158bb358333cd06e49e83a6802", size = 605387, upload-time = "2025-12-19T10:19:56.985Z" } wheels = [ @@ -3964,7 +4008,7 @@ version = "0.7.22" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown-it-py" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/fc/eb/b5cbf2484411af039a3d4aeb53a5160fae25dd8c84af6a4243bc2f3fedb3/mdformat-0.7.22.tar.gz", hash = "sha256:eef84fa8f233d3162734683c2a8a6222227a229b9206872e6139658d99acb1ea", size = 34610, upload-time = "2025-01-30T18:00:51.418Z" } wheels = [ @@ -4011,7 +4055,7 @@ dependencies = [ { name = "numpy", marker = "python_full_version < '3.12'" }, { name = "pillow", marker = "python_full_version < '3.12'" }, { name = "pydantic", marker = "python_full_version < '3.12'" }, - { name = "pydantic-extra-types", extra = ["pycountry"], marker = "python_full_version < '3.12'" }, 
+ { name = "pydantic-extra-types", extra = ["pycountry"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "requests", marker = "python_full_version < '3.12'" }, { name = "tiktoken", marker = "python_full_version < '3.12'" }, { name = "typing-extensions", marker = "python_full_version < '3.12'" }, @@ -4050,7 +4094,7 @@ version = "1.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, - { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "ghp-import" }, { name = "jinja2" }, { name = "markdown" }, @@ -4197,7 +4241,7 @@ dependencies = [ { name = "griffe" }, { name = "mkdocs-autorefs" }, { name = "mkdocstrings" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/24/75/d30af27a2906f00eb90143470272376d728521997800f5dce5b340ba35bc/mkdocstrings_python-2.0.1.tar.gz", hash = "sha256:843a562221e6a471fefdd4b45cc6c22d2607ccbad632879234fa9692e9cf7732", size = 199345, upload-time = "2025-12-03T14:26:11.755Z" } wheels = [ @@ -4209,24 +4253,34 @@ name = "mlx" version = "0.30.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "mlx-metal", marker = "sys_platform == 'darwin'" }, + { name = "mlx-metal", marker = "sys_platform == 'darwin' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] wheels = [ { url 
= "https://files.pythonhosted.org/packages/cd/8d/16a34feb957ac33525b9b787b5132053a44bc94d1bf40c18639f6e05cd2a/mlx-0.30.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:391c650f0578ce359c8cffddb204b42798b622f9ee2b57b865d87716c00db536", size = 592926, upload-time = "2025-12-18T01:55:28.757Z" }, { url = "https://files.pythonhosted.org/packages/34/e6/0661455f5f4bd9de257874b28a96a33699d36a1e17ccde821341c0ac1c0e/mlx-0.30.1-cp310-cp310-macosx_15_0_arm64.whl", hash = "sha256:42fefcad72d7488c65649e152a1b28f00c2033d38121afa45ce65ae16ec6b988", size = 592926, upload-time = "2025-12-18T01:55:30.141Z" }, { url = "https://files.pythonhosted.org/packages/d8/37/a322af7dba9101064b5e858d1208e0e66cd83be7d060d14fa03ace37d52e/mlx-0.30.1-cp310-cp310-macosx_26_0_arm64.whl", hash = "sha256:a9db94e7e080672cc0dda9a5f121aebe0d49f7a8cb46706ecfd8b8ce7d99d541", size = 566952, upload-time = "2025-12-18T00:15:50.075Z" }, + { url = "https://files.pythonhosted.org/packages/c9/46/f0005d07fe5687bbf4efc15b468d27f2923f486b07a625d35c7d3cbb4962/mlx-0.30.1-cp310-cp310-manylinux_2_35_aarch64.whl", hash = "sha256:44b2142896c8dd8ab057dd785faf92fa83f3697b4b6bb01ff7515df12b6de666", size = 658049, upload-time = "2025-12-18T01:55:31.748Z" }, + { url = "https://files.pythonhosted.org/packages/cb/95/cc47c4607cc78f55ce3081ade9161961795c15c049bf219f27a393f85767/mlx-0.30.1-cp310-cp310-manylinux_2_35_x86_64.whl", hash = "sha256:37ea97b3c4bd71b19d87c6ef2c9e681e11f37908d8381fc2b785d2509b0681df", size = 692336, upload-time = "2025-12-18T01:55:33.224Z" }, { url = "https://files.pythonhosted.org/packages/07/14/74acbd677ececd17a44dafda1b472aebacef54f60ff9a41a801f711de9a7/mlx-0.30.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:acfd7d1b8e5b9fa1b7e9fab4cc5ba6a492c559fbb1c5aeab16c1d7a148ab4f1b", size = 593048, upload-time = "2025-12-18T01:55:34.9Z" }, { url = "https://files.pythonhosted.org/packages/58/8c/5309848afb9c53d363f59b88ae5811de248e2817e91aeadf007e2ac8d22b/mlx-0.30.1-cp311-cp311-macosx_15_0_arm64.whl", hash = 
"sha256:b62030471272d1835b8137164bd43d863cc93ff1d67ec4f1f87bb4c8613dd5a6", size = 593043, upload-time = "2025-12-18T01:55:36.839Z" }, { url = "https://files.pythonhosted.org/packages/e8/5a/0039815a930f0193e2cffb27c57dc6971004bce0086c2bbbdb10395c272c/mlx-0.30.1-cp311-cp311-macosx_26_0_arm64.whl", hash = "sha256:0489cd340f2d262cb3aaad4368e40e84b152e182e4cea37ba018e56c72e1d020", size = 567014, upload-time = "2025-12-18T00:15:51.731Z" }, + { url = "https://files.pythonhosted.org/packages/de/c7/6bdb5497c1f5ed3e33afa7785761ad87fd3436c071805d9a93c905943f04/mlx-0.30.1-cp311-cp311-manylinux_2_35_aarch64.whl", hash = "sha256:fbdcfc3ed556a7e701a8eb67da299e2a25f52615193833ca6374decca3be5bf4", size = 658930, upload-time = "2025-12-18T01:55:38.441Z" }, + { url = "https://files.pythonhosted.org/packages/91/02/2d86a1c116e951eb4d88fe313c321e23628ce7404712e1258cacf925a8b8/mlx-0.30.1-cp311-cp311-manylinux_2_35_x86_64.whl", hash = "sha256:68ec854e7b5f89454e67d6c2fa7bb416b8afb148003ccd775904ec6ec4744818", size = 692484, upload-time = "2025-12-18T01:55:40.254Z" }, { url = "https://files.pythonhosted.org/packages/3a/4b/ad57b2f0ede3f0d009c0e3e1270c219bd18f9025388855ee149680cffa20/mlx-0.30.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:deaef3ecd2f99930867a29de748e3bffa9cc7e4dfa834f2501c37ed29aece1cc", size = 593397, upload-time = "2025-12-18T01:55:41.814Z" }, { url = "https://files.pythonhosted.org/packages/ef/14/7fa03a0f66ac3cfb2fd6752178a1488f13c7233fff26eed0f832d961db35/mlx-0.30.1-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:86ccdcda0b5ea4768b87da25beae5b83ac7cc802506116b6845cea6f450e2377", size = 593397, upload-time = "2025-12-18T01:55:43Z" }, { url = "https://files.pythonhosted.org/packages/9c/c8/9f1343dbe2381f9653df4e0a62dc8bf38f575a2553dc2aa6916de32d2a85/mlx-0.30.1-cp312-cp312-macosx_26_0_arm64.whl", hash = "sha256:a625cb434b2acc5674fe10683374641dab9671fb354ae7c2c67a1fb0405eeb37", size = 567576, upload-time = "2025-12-18T00:15:55.114Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/ff/485ed9c99c18ef89ac987178c0a526cb4148ba38b14838d315311d9d76a8/mlx-0.30.1-cp312-cp312-manylinux_2_35_aarch64.whl", hash = "sha256:ccc1ff3aca8fb1073c7dcd1274cebe48ae75f852d14b16c7db8228fbbad594dd", size = 643654, upload-time = "2025-12-18T01:55:44.165Z" }, + { url = "https://files.pythonhosted.org/packages/8a/d3/54d3bf5e404c3b6424b49c505dc8b3c06c6bb498fe720195b1fafbd69b5e/mlx-0.30.1-cp312-cp312-manylinux_2_35_x86_64.whl", hash = "sha256:55ed7fc4b389d6e49dac6d34a97b41e61cbe3662ac29c3d29cf612e6b2ed9827", size = 687305, upload-time = "2025-12-18T01:55:45.526Z" }, { url = "https://files.pythonhosted.org/packages/f9/fd/c6f56cd87d48763ed63655ace627c06db9819eae7d43d132f40d4965947a/mlx-0.30.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:743520758bc8261b2ed8f3b3dc96e4e9236769dd8f61fb17877c5e44037e2058", size = 593366, upload-time = "2025-12-18T01:55:46.786Z" }, { url = "https://files.pythonhosted.org/packages/dc/53/96d8c48b21f91c4216b6d2ef6dfc10862e5fb0b811a2aaf02c96c78601de/mlx-0.30.1-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:fc9745bc1860ca60128e3a6d36157da06d936e2b4007a4dcba990b40202f598f", size = 593368, upload-time = "2025-12-18T01:55:48.363Z" }, { url = "https://files.pythonhosted.org/packages/70/ce/476c3b7d3a4153bd0e1c5af1f1b6c09a804b652bbed34072404b322c22e0/mlx-0.30.1-cp313-cp313-macosx_26_0_arm64.whl", hash = "sha256:a1480399c67bb327a66c5527b73915132e3fcaae3bce9634e5c81ccad9f43229", size = 567561, upload-time = "2025-12-18T00:15:56.153Z" }, + { url = "https://files.pythonhosted.org/packages/33/41/7ad1e639fd7dd1cf01a62c1c5b051024a859888c27504996e9d8380e6754/mlx-0.30.1-cp313-cp313-manylinux_2_35_aarch64.whl", hash = "sha256:8e19850a4236a8e174f851f5789b8b62a8eb74f5a8fa49ad8ba286c5ddb5f9bf", size = 643122, upload-time = "2025-12-18T01:55:49.607Z" }, + { url = 
"https://files.pythonhosted.org/packages/d0/dc/72d3737c5b0662eb5e785d353dbc5e34d793d27b09b99e39993ee051bd19/mlx-0.30.1-cp313-cp313-manylinux_2_35_x86_64.whl", hash = "sha256:1c8ed5bcd9f1910fca209e95859ac737e60b3e1954181b820fa269158f81049a", size = 687254, upload-time = "2025-12-18T01:55:51.239Z" }, { url = "https://files.pythonhosted.org/packages/9b/cc/523448996247bb05d9d68e23bccf3dafdda660befb9330f6bd5fa13361e8/mlx-0.30.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:d34cc2c25b0ee41c1349f14650db760e282685339858e305453f62405c12bc1b", size = 596006, upload-time = "2025-12-18T01:55:52.463Z" }, { url = "https://files.pythonhosted.org/packages/23/0e/f9f2f9659c34c87be8f4167f6a1d6ed7e826f4889d20eecd4c0d8122f0e9/mlx-0.30.1-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:4e47d301e9095b87f0bda8827bfd6ffe744223aba5cee8f28e25894d647f5823", size = 596008, upload-time = "2025-12-18T01:55:54.02Z" }, { url = "https://files.pythonhosted.org/packages/56/a7/49e41fb141de95b6a376091a963c737839c9cda04e423c67f57460a50458/mlx-0.30.1-cp314-cp314-macosx_26_0_arm64.whl", hash = "sha256:cfba13e2a52255d663a1ad62f0f83eb3991e42147edf9a8d38cdd224e48ca49b", size = 570406, upload-time = "2025-12-18T00:15:57.177Z" }, + { url = "https://files.pythonhosted.org/packages/73/99/a43cb112167cf865c069f5e108ae42f5314663930ff3dd86c2d23d984191/mlx-0.30.1-cp314-cp314-manylinux_2_35_aarch64.whl", hash = "sha256:bebfec377208eb29cc88aa86c897c7446aa0984838669e138f273f9225d627ff", size = 646461, upload-time = "2025-12-18T01:55:55.285Z" }, + { url = "https://files.pythonhosted.org/packages/d4/ff/1e1968f107b4221a98dc26832586b1f646b27ddf3e55c95051c09d751f0a/mlx-0.30.1-cp314-cp314-manylinux_2_35_x86_64.whl", hash = "sha256:d18012d5cf0f013bc4a405cfd1e9d2d28e798f4d2dc4f15aa0fbffff73c02ba2", size = 687114, upload-time = "2025-12-18T01:55:56.506Z" }, ] [[package]] @@ -4235,12 +4289,13 @@ version = "0.29.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jinja2" }, - { name = "mlx", 
marker = "sys_platform == 'darwin'" }, + { name = "mlx", marker = "sys_platform == 'darwin' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "numpy" }, { name = "protobuf" }, { name = "pyyaml" }, { name = "sentencepiece" }, - { name = "transformers" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "5.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e3/62/f46e1355256a114808517947f8e83ad6be310c7288c551db0fa678f47923/mlx_lm-0.29.1.tar.gz", hash = "sha256:b99180d8f33d33a077b814e550bfb2d8a59ae003d668fd1f4b3fff62a381d34b", size = 232302, upload-time = "2025-12-16T16:58:27.959Z" } wheels = [ @@ -4439,7 +4494,7 @@ name = "multidict" version = "6.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } wheels = [ @@ -4600,10 +4655,10 @@ name = "mypy" version = "1.19.1" source = { registry = 
"https://pypi.org/simple" } dependencies = [ - { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, + { name = "librt", marker = "platform_python_implementation != 'PyPy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "mypy-extensions" }, { name = "pathspec" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/f5/db/4efed9504bc01309ab9c2da7e352cc223569f05478012b5d9ece38fd44d2/mypy-1.19.1.tar.gz", hash = "sha256:19d88bb05303fe63f71dd2c6270daca27cb9401c4ca8255fe50d1d920e0eb9ba", size = 3582404, upload-time = "2025-12-15T05:03:48.42Z" } @@ -4646,7 +4701,7 @@ name = "mypy-boto3-bedrock-runtime" version = "1.42.3" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = "typing-extensions", marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/1b/95/cb46d84a7a1408e14cac8a8dbbb24a612e438dd10b5f284fb5e01deece3a/mypy_boto3_bedrock_runtime-1.42.3.tar.gz", hash = "sha256:15686cf925719f14bc0d6c85530808736005fb431f007e37d40e10daff4032cc", size = 29476, upload-time = "2025-12-04T20:56:45.423Z" } wheels = [ @@ -4744,8 +4799,8 @@ resolution-markers = [ "python_full_version < '3.11'", ] dependencies = [ - { name = "llvmlite", version = "0.44.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, - { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "llvmlite", version = "0.44.0", 
source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "numpy", marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/1c/a0/e21f57604304aa03ebb8e098429222722ad99176a4f979d34af1d1ee80da/numba-0.61.2.tar.gz", hash = "sha256:8750ee147940a6637b80ecf7f95062185ad8726c8c28a2295b8ec1160a196f7d", size = 2820615, upload-time = "2025-04-09T02:58:07.659Z" } wheels = [ @@ -4780,8 +4835,8 @@ resolution-markers = [ "python_full_version == '3.12.*'", ] dependencies = [ - { name = "llvmlite", version = "0.46.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, - { name = "numpy", marker = "python_full_version >= '3.12'" }, + { name = "llvmlite", version = "0.46.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "numpy", marker = "python_full_version >= '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/dc/60/0145d479b2209bd8fdae5f44201eceb8ce5a23e0ed54c71f57db24618665/numba-0.63.1.tar.gz", hash = "sha256:b320aa675d0e3b17b40364935ea52a7b1c670c9037c39cf92c49502a75902f4b", size = 2761666, upload-time = "2025-12-10T02:57:39.002Z" } wheels = [ @@ -4874,7 +4929,9 @@ name = "nvidia-cublas-cu12" version = "12.8.4.1" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = 
"https://files.pythonhosted.org/packages/29/99/db44d685f0e257ff0e213ade1964fc459b4a690a73293220e98feb3307cf/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:b86f6dd8935884615a0683b663891d43781b819ac4f2ba2b0c9604676af346d0", size = 590537124, upload-time = "2025-03-07T01:43:53.556Z" }, { url = "https://files.pythonhosted.org/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142", size = 594346921, upload-time = "2025-03-07T01:44:31.254Z" }, + { url = "https://files.pythonhosted.org/packages/70/61/7d7b3c70186fb651d0fbd35b01dbfc8e755f69fd58f817f3d0f642df20c3/nvidia_cublas_cu12-12.8.4.1-py3-none-win_amd64.whl", hash = "sha256:47e9b82132fa8d2b4944e708049229601448aaad7e6f296f630f2d1a32de35af", size = 567544208, upload-time = "2025-03-07T01:53:30.535Z" }, ] [[package]] @@ -4882,7 +4939,9 @@ name = "nvidia-cuda-cupti-cu12" version = "12.8.90" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/1f/b3bd73445e5cb342727fd24fe1f7b748f690b460acadc27ea22f904502c8/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4412396548808ddfed3f17a467b104ba7751e6b58678a4b840675c56d21cf7ed", size = 9533318, upload-time = "2025-03-07T01:40:10.421Z" }, { url = "https://files.pythonhosted.org/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182", size = 10248621, upload-time = "2025-03-07T01:40:21.213Z" }, + { url = "https://files.pythonhosted.org/packages/41/bc/83f5426095d93694ae39fe1311431b5d5a9bb82e48bf0dd8e19be2765942/nvidia_cuda_cupti_cu12-12.8.90-py3-none-win_amd64.whl", hash = 
"sha256:bb479dcdf7e6d4f8b0b01b115260399bf34154a1a2e9fe11c85c517d87efd98e", size = 7015759, upload-time = "2025-03-07T01:51:11.355Z" }, ] [[package]] @@ -4891,6 +4950,8 @@ version = "12.8.93" source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994", size = 88040029, upload-time = "2025-03-07T01:42:13.562Z" }, + { url = "https://files.pythonhosted.org/packages/eb/d1/e50d0acaab360482034b84b6e27ee83c6738f7d32182b987f9c7a4e32962/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc1fec1e1637854b4c0a65fb9a8346b51dd9ee69e61ebaccc82058441f15bce8", size = 43106076, upload-time = "2025-03-07T01:41:59.817Z" }, + { url = "https://files.pythonhosted.org/packages/45/51/52a3d84baa2136cc8df15500ad731d74d3a1114d4c123e043cb608d4a32b/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-win_amd64.whl", hash = "sha256:7a4b6b2904850fe78e0bd179c4b655c404d4bb799ef03ddc60804247099ae909", size = 73586838, upload-time = "2025-03-07T01:52:13.483Z" }, ] [[package]] @@ -4898,7 +4959,9 @@ name = "nvidia-cuda-runtime-cu12" version = "12.8.90" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/75/f865a3b236e4647605ea34cc450900854ba123834a5f1598e160b9530c3a/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:52bf7bbee900262ffefe5e9d5a2a69a30d97e2bc5bb6cc866688caa976966e3d", size = 965265, upload-time = "2025-03-07T01:39:43.533Z" }, { url = "https://files.pythonhosted.org/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90", size = 954765, upload-time = "2025-03-07T01:40:01.615Z" }, + { url = "https://files.pythonhosted.org/packages/30/a5/a515b7600ad361ea14bfa13fb4d6687abf500adc270f19e89849c0590492/nvidia_cuda_runtime_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:c0c6027f01505bfed6c3b21ec546f69c687689aad5f1a377554bc6ca4aa993a8", size = 944318, upload-time = "2025-03-07T01:51:01.794Z" }, ] [[package]] @@ -4909,7 +4972,9 @@ dependencies = [ { name = "nvidia-cublas-cu12" }, ] wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/41/e79269ce215c857c935fd86bcfe91a451a584dfc27f1e068f568b9ad1ab7/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:c9132cc3f8958447b4910a1720036d9eff5928cc3179b0a51fb6d167c6cc87d8", size = 705026878, upload-time = "2025-06-06T21:52:51.348Z" }, { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" }, + { url = "https://files.pythonhosted.org/packages/3d/90/0bd6e586701b3a890fd38aa71c387dab4883d619d6e5ad912ccbd05bfd67/nvidia_cudnn_cu12-9.10.2.21-py3-none-win_amd64.whl", hash = "sha256:c6288de7d63e6cf62988f0923f96dc339cea362decb1bf5b3141883392a7d65e", size = 692992268, upload-time = "2025-06-06T21:55:18.114Z" }, ] [[package]] @@ -4942,7 +5007,9 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ + { url = "https://files.pythonhosted.org/packages/60/bc/7771846d3a0272026c416fbb7e5f4c1f146d6d80704534d0b187dd6f4800/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:848ef7224d6305cdb2a4df928759dca7b1201874787083b6e7550dd6765ce69a", size = 193109211, upload-time = "2025-03-07T01:44:56.873Z" }, { url = 
"https://files.pythonhosted.org/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74", size = 193118695, upload-time = "2025-03-07T01:45:27.821Z" }, + { url = "https://files.pythonhosted.org/packages/7d/ec/ce1629f1e478bb5ccd208986b5f9e0316a78538dd6ab1d0484f012f8e2a1/nvidia_cufft_cu12-11.3.3.83-py3-none-win_amd64.whl", hash = "sha256:7a64a98ef2a7c47f905aaf8931b69a3a43f27c55530c698bb2ed7c75c0b42cb7", size = 192216559, upload-time = "2025-03-07T01:53:57.106Z" }, ] [[package]] @@ -4951,6 +5018,7 @@ version = "1.13.1.3" source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc", size = 1197834, upload-time = "2025-03-07T01:45:50.723Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f5/5607710447a6fe9fd9b3283956fceeee8a06cda1d2f56ce31371f595db2a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:4beb6d4cce47c1a0f1013d72e02b0994730359e17801d395bdcbf20cfb3bb00a", size = 1120705, upload-time = "2025-03-07T01:45:41.434Z" }, ] [[package]] @@ -4958,7 +5026,9 @@ name = "nvidia-curand-cu12" version = "10.3.9.90" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/45/5e/92aa15eca622a388b80fbf8375d4760738df6285b1e92c43d37390a33a9a/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dfab99248034673b779bc6decafdc3404a8a6f502462201f2f31f11354204acd", size = 63625754, upload-time = "2025-03-07T01:46:10.735Z" }, { url = 
"https://files.pythonhosted.org/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9", size = 63619976, upload-time = "2025-03-07T01:46:23.323Z" }, + { url = "https://files.pythonhosted.org/packages/b9/75/70c05b2f3ed5be3bb30b7102b6eb78e100da4bbf6944fd6725c012831cab/nvidia_curand_cu12-10.3.9.90-py3-none-win_amd64.whl", hash = "sha256:f149a8ca457277da854f89cf282d6ef43176861926c7ac85b2a0fbd237c587ec", size = 62765309, upload-time = "2025-03-07T01:54:20.478Z" }, ] [[package]] @@ -4971,7 +5041,9 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/32/f7cd6ce8a7690544d084ea21c26e910a97e077c9b7f07bf5de623ee19981/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:db9ed69dbef9715071232caa9b69c52ac7de3a95773c2db65bdba85916e4e5c0", size = 267229841, upload-time = "2025-03-07T01:46:54.356Z" }, { url = "https://files.pythonhosted.org/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450", size = 267506905, upload-time = "2025-03-07T01:47:16.273Z" }, + { url = "https://files.pythonhosted.org/packages/13/c0/76ca8551b8a84146ffa189fec81c26d04adba4bc0dbe09cd6e6fd9b7de04/nvidia_cusolver_cu12-11.7.3.90-py3-none-win_amd64.whl", hash = "sha256:4a550db115fcabc4d495eb7d39ac8b58d4ab5d8e63274d3754df1c0ad6a22d34", size = 256720438, upload-time = "2025-03-07T01:54:39.898Z" }, ] [[package]] @@ -4982,7 +5054,9 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ + { url = 
"https://files.pythonhosted.org/packages/bc/f7/cd777c4109681367721b00a106f491e0d0d15cfa1fd59672ce580ce42a97/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b6c161cb130be1a07a27ea6923df8141f3c295852f4b260c65f18f3e0a091dc", size = 288117129, upload-time = "2025-03-07T01:47:40.407Z" }, { url = "https://files.pythonhosted.org/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b", size = 288216466, upload-time = "2025-03-07T01:48:13.779Z" }, + { url = "https://files.pythonhosted.org/packages/62/07/f3b2ad63f8e3d257a599f422ae34eb565e70c41031aecefa3d18b62cabd1/nvidia_cusparse_cu12-12.5.8.93-py3-none-win_amd64.whl", hash = "sha256:9a33604331cb2cac199f2e7f5104dfbb8a5a898c367a53dfda9ff2acb6b6b4dd", size = 284937404, upload-time = "2025-03-07T01:55:07.742Z" }, ] [[package]] @@ -4990,7 +5064,9 @@ name = "nvidia-cusparselt-cu12" version = "0.7.1" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/73/b9/598f6ff36faaece4b3c50d26f50e38661499ff34346f00e057760b35cc9d/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8878dce784d0fac90131b6817b607e803c36e629ba34dc5b433471382196b6a5", size = 283835557, upload-time = "2025-02-26T00:16:54.265Z" }, { url = "https://files.pythonhosted.org/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623", size = 287193691, upload-time = "2025-02-26T00:15:44.104Z" }, + { url = "https://files.pythonhosted.org/packages/2f/d8/a6b0d0d0c2435e9310f3e2bb0d9c9dd4c33daef86aa5f30b3681defd37ea/nvidia_cusparselt_cu12-0.7.1-py3-none-win_amd64.whl", hash = 
"sha256:f67fbb5831940ec829c9117b7f33807db9f9678dc2a617fbe781cac17b4e1075", size = 271020911, upload-time = "2025-02-26T00:14:47.204Z" }, ] [[package]] @@ -5038,6 +5114,7 @@ name = "nvidia-nccl-cu12" version = "2.27.5" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/1c/857979db0ef194ca5e21478a0612bcdbbe59458d7694361882279947b349/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:31432ad4d1fb1004eb0c56203dc9bc2178a1ba69d1d9e02d64a6938ab5e40e7a", size = 322400625, upload-time = "2025-06-26T04:11:04.496Z" }, { url = "https://files.pythonhosted.org/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457", size = 322348229, upload-time = "2025-06-26T04:11:28.385Z" }, ] @@ -5047,6 +5124,8 @@ version = "12.8.93" source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88", size = 39254836, upload-time = "2025-03-07T01:49:55.661Z" }, + { url = "https://files.pythonhosted.org/packages/2a/a2/8cee5da30d13430e87bf99bb33455d2724d0a4a9cb5d7926d80ccb96d008/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:adccd7161ace7261e01bb91e44e88da350895c270d23f744f0820c818b7229e7", size = 38386204, upload-time = "2025-03-07T01:49:43.612Z" }, + { url = "https://files.pythonhosted.org/packages/ed/d7/34f02dad2e30c31b10a51f6b04e025e5dd60e5f936af9045a9b858a05383/nvidia_nvjitlink_cu12-12.8.93-py3-none-win_amd64.whl", hash = 
"sha256:bd93fbeeee850917903583587f4fc3a4eafa022e34572251368238ab5e6bd67f", size = 268553710, upload-time = "2025-03-07T01:56:24.13Z" }, ] [[package]] @@ -5054,6 +5133,7 @@ name = "nvidia-nvshmem-cu12" version = "3.3.20" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/92/9d/3dd98852568fb845ec1f7902c90a22b240fe1cbabda411ccedf2fd737b7b/nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b0b960da3842212758e4fa4696b94f129090b30e5122fea3c5345916545cff0", size = 124484616, upload-time = "2025-08-04T20:24:59.172Z" }, { url = "https://files.pythonhosted.org/packages/3b/6c/99acb2f9eb85c29fc6f3a7ac4dccfd992e22666dd08a642b303311326a97/nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d00f26d3f9b2e3c3065be895e3059d6479ea5c638a3f38c9fec49b1b9dd7c1e5", size = 124657145, upload-time = "2025-08-04T20:25:19.995Z" }, ] @@ -5062,7 +5142,9 @@ name = "nvidia-nvtx-cu12" version = "12.8.90" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/10/c0/1b303feea90d296f6176f32a2a70b5ef230f9bdeb3a72bddb0dc922dc137/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d7ad891da111ebafbf7e015d34879f7112832fc239ff0d7d776b6cb685274615", size = 91161, upload-time = "2025-03-07T01:42:23.922Z" }, { url = "https://files.pythonhosted.org/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f", size = 89954, upload-time = "2025-03-07T01:42:44.131Z" }, + { url = "https://files.pythonhosted.org/packages/9f/99/4c9c0c329bf9fc125008c3b54c7c94c0023518d06fc025ae36431375e1fe/nvidia_nvtx_cu12-12.8.90-py3-none-win_amd64.whl", hash = 
"sha256:619c8304aedc69f02ea82dd244541a83c3d9d40993381b3b590f1adaed3db41e", size = 56492, upload-time = "2025-03-07T01:52:24.69Z" }, ] [[package]] @@ -5461,10 +5543,11 @@ wheels = [ [package.optional-dependencies] llamacpp = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "llama-cpp-python" }, - { name = "numba", version = "0.61.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, - { name = "numba", version = "0.63.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, + { name = "numba", version = "0.61.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "numba", version = "0.63.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] mlxlm = [ { name = "datasets" }, @@ -5478,7 +5561,8 @@ transformers = [ { name = "accelerate" }, { name = "datasets" }, { name = "setuptools" }, - { name = "transformers" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "5.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] [[package]] @@ -5641,8 +5725,8 @@ name = "pendulum" version = "3.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "python-dateutil", marker = "python_full_version < '3.13'" }, - { name = "tzdata", marker = "python_full_version < '3.13'" }, + { name = "python-dateutil", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tzdata", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/23/7c/009c12b86c7cc6c403aec80f8a4308598dfc5995e5c523a5491faaa3952e/pendulum-3.1.0.tar.gz", hash = "sha256:66f96303560f41d097bee7d2dc98ffca716fbb3a832c4b3062034c2d45865015", size = 85930, upload-time = "2025-04-19T14:30:01.675Z" } wheels = [ @@ -5863,7 +5947,7 @@ dependencies = [ { name = "orjson" }, { name = "packaging" }, { name = "pathspec" }, - { name = "pendulum", marker = "python_full_version < '3.13'" }, + { name = "pendulum", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "pluggy" }, { name = "prometheus-client" }, { name = "pydantic" }, @@ -5879,7 +5963,7 @@ dependencies = [ { name = "rfc3339-validator" }, { name = "rich" }, { name = "ruamel-yaml" }, - { name = "ruamel-yaml-clib", marker = "platform_python_implementation == 
'CPython'" }, + { name = "ruamel-yaml-clib", marker = "platform_python_implementation == 'CPython' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "semver" }, { name = "sniffio" }, { name = "sqlalchemy", extra = ["asyncio"] }, @@ -5888,7 +5972,7 @@ dependencies = [ { name = "typing-extensions" }, { name = "uvicorn" }, { name = "websockets" }, - { name = "whenever", marker = "python_full_version >= '3.13'" }, + { name = "whenever", marker = "python_full_version >= '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/5b/46/139cfabbc729f13d4b6df74b56c01ddcbb1118936b312b2ef82e9826d8bc/prefect-3.6.13.tar.gz", hash = "sha256:ee0b39fa390c204ccb3762be00a729edd45c5aa54e0245951f8682f92bfb016b", size = 10811400, upload-time = "2026-01-23T04:17:29.594Z" } wheels = [ @@ -6105,8 +6189,8 @@ name = "psycopg" version = "3.3.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, - { name = "tzdata", marker = "sys_platform == 'win32'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tzdata", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e0/1a/7d9ef4fdc13ef7f15b934c393edc97a35c281bb7d3c3329fbfcbe915a7c2/psycopg-3.3.2.tar.gz", hash = "sha256:707a67975ee214d200511177a6a80e56e654754c9afca06a7194ea6bbfde9ca7", size = 165630, upload-time = "2025-12-06T17:34:53.899Z" } wheels = [ @@ -6115,7 +6199,7 @@ wheels = [ [package.optional-dependencies] binary = [ - { name = 
"psycopg-binary", marker = "implementation_name != 'pypy'" }, + { name = "psycopg-binary", marker = "implementation_name != 'pypy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] [[package]] @@ -6523,7 +6607,8 @@ email = [ name = "pydantic-ai" source = { editable = "." } dependencies = [ - { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "fastmcp", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "ui", "vertexai", "xai"] }, + { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "fastmcp", "google", "groq", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "ui", "vertexai", "xai"] }, + { name = "pydantic-ai-slim", extra = ["huggingface"], marker = "extra == 'extra-16-pydantic-ai-slim-huggingface'" }, ] [package.optional-dependencies] @@ -6540,7 +6625,7 @@ outlines-llamacpp = [ { name = "pydantic-ai-slim", extra = ["outlines-llamacpp"] }, ] outlines-mlxlm = [ - { name = "pydantic-ai-slim", extra = ["outlines-mlxlm"], marker = "platform_machine == 'arm64' and sys_platform == 'darwin'" }, + { name = "pydantic-ai-slim", extra = ["outlines-mlxlm"], marker = "(platform_machine == 'arm64' and sys_platform == 'darwin') or (platform_machine != 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'darwin' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-sglang = [ { name = "pydantic-ai-slim", extra = ["outlines-sglang"] }, @@ -6549,7 +6634,7 @@ outlines-transformers = [ { name = "pydantic-ai-slim", extra = ["outlines-transformers"] }, ] outlines-vllm-offline = [ - { name = "pydantic-ai-slim", extra = ["outlines-vllm-offline"] }, + { name = "pydantic-ai-slim", extra = 
["outlines-vllm-offline"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] prefect = [ { name = "pydantic-ai-slim", extra = ["prefect"] }, @@ -6712,7 +6797,7 @@ requires-dist = [ name = "pydantic-ai-slim" source = { editable = "pydantic_ai_slim" } dependencies = [ - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "genai-prices" }, { name = "griffelib" }, { name = "httpx" }, @@ -6743,7 +6828,7 @@ cli = [ { name = "rich" }, ] cohere = [ - { name = "cohere", marker = "sys_platform != 'emscripten'" }, + { name = "cohere", marker = "sys_platform != 'emscripten' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] dbos = [ { name = "dbos" }, @@ -6767,7 +6852,7 @@ groq = [ { name = "groq" }, ] huggingface = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "1.3.4", source = { registry = "https://pypi.org/simple" } }, ] logfire = [ { name = "logfire", extra = ["httpx"] }, @@ -6786,20 +6871,21 @@ openrouter = [ { name = "openai" }, ] outlines-llamacpp = [ - { name = "outlines", extra = ["llamacpp"], marker = "python_full_version < '3.14'" }, + { name = "outlines", extra = ["llamacpp"], marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-mlxlm = [ - { name = "outlines", extra = ["mlxlm"], marker = "python_full_version < '3.14' and platform_machine == 'arm64' and sys_platform == 'darwin'" }, + { name = "outlines", extra = ["mlxlm"], marker = "(python_full_version < '3.14' and platform_machine == 'arm64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and extra == 'extra-16-pydantic-ai-slim-huggingface' and 
extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine != 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'darwin' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-sglang = [ - { name = "outlines", extra = ["sglang"], marker = "python_full_version < '3.14'" }, - { name = "pillow", marker = "python_full_version < '3.14'" }, + { name = "outlines", extra = ["sglang"], marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "pillow", marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-transformers = [ - { name = "outlines", extra = ["transformers"], marker = "(python_full_version < '3.14' and platform_machine != 'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin')" }, - { name = "pillow", marker = "python_full_version < '3.14'" }, - { name = "torch", marker = "(python_full_version < '3.14' and platform_machine != 'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin')" }, - { name = "transformers", marker = "python_full_version < '3.14'" }, + { name = "outlines", extra = ["transformers"], marker = "(python_full_version < '3.14' and platform_machine != 'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "pillow", marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "torch", marker = "(python_full_version < '3.14' and platform_machine != 
'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "5.0.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.12' and python_full_version < '3.14') or (python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (python_full_version < '3.12' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (python_full_version >= '3.14' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-vllm-offline = [ { name = "outlines", marker = "python_full_version < '3.14'" }, @@ -6813,7 +6899,7 @@ retries = [ { name = "tenacity" }, ] sentence-transformers = [ - { name = "sentence-transformers", marker = "python_full_version < '3.14'" }, + { name = "sentence-transformers", marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] tavily = [ { name = "tavily-python" }, @@ -6829,7 +6915,7 @@ vertexai = [ { name = "requests" }, ] voyageai = [ - { name = "voyageai", marker = "python_full_version < '3.14'" }, + { name = "voyageai", marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] web = [ { name = "httpx" }, @@ -6842,7 +6928,7 @@ xai = [ [package.metadata] requires-dist = [ - { name = "ag-ui-protocol", marker = "extra == 'ag-ui'", 
specifier = ">=0.1.10" }, + { name = "ag-ui-protocol", marker = "extra == 'ag-ui'", specifier = ">=0.1.13" }, { name = "anthropic", marker = "extra == 'anthropic'", specifier = ">=0.80.0" }, { name = "argcomplete", marker = "extra == 'cli'", specifier = ">=3.5.0" }, { name = "boto3", marker = "extra == 'bedrock'", specifier = ">=1.42.14" }, @@ -7117,7 +7203,7 @@ version = "0.16.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cloudpickle" }, - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "fakeredis", extra = ["lua"] }, { name = "opentelemetry-api" }, { name = "opentelemetry-exporter-prometheus" }, @@ -7207,13 +7293,13 @@ name = "pytest" version = "9.0.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "iniconfig" }, { name = "packaging" }, { name = "pluggy" }, { name = "pygments" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = 
"sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" } wheels = [ @@ -7585,7 +7671,7 @@ name = "redis" version = "7.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "async-timeout", marker = "python_full_version < '3.11.3'" }, + { name = "async-timeout", marker = "python_full_version < '3.11.3' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/43/c8/983d5c6579a411d8a99bc5823cc5712768859b5ce2c8afe1a65b37832c81/redis-7.1.0.tar.gz", hash = "sha256:b1cc3cfa5a2cb9c2ab3ba700864fb0ad75617b41f01352ce5779dabf6d5f9c3c", size = 4796669, upload-time = "2025-11-19T15:54:39.961Z" } wheels = [ @@ -7599,7 +7685,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, { name = "rpds-py" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" } wheels = [ @@ -8065,7 +8151,7 @@ name = "ruamel-yaml" version = "0.18.17" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "ruamel-yaml-clib", marker = "python_full_version < '3.15' and platform_python_implementation == 'CPython'" }, + { name = "ruamel-yaml-clib", marker = "(python_full_version < '3.15' and platform_python_implementation == 'CPython') or (python_full_version >= '3.15' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra 
== 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_python_implementation != 'CPython' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/3a/2b/7a1f1ebcd6b3f14febdc003e658778d81e76b40df2267904ee6b13f0c5c6/ruamel_yaml-0.18.17.tar.gz", hash = "sha256:9091cd6e2d93a3a4b157ddb8fabf348c3de7f1fb1381346d985b6b247dcd8d3c", size = 149602, upload-time = "2025-12-17T20:02:55.757Z" } wheels = [ @@ -8214,10 +8300,10 @@ resolution-markers = [ "python_full_version < '3.11'", ] dependencies = [ - { name = "joblib", marker = "python_full_version < '3.11'" }, - { name = "numpy", marker = "python_full_version < '3.11'" }, - { name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "threadpoolctl", marker = "python_full_version < '3.11'" }, + { name = "joblib", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "numpy", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "threadpoolctl", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/98/c2/a7855e41c9d285dfe86dc50b250978105dce513d6e459ea66a6aeb0e1e0c/scikit_learn-1.7.2.tar.gz", hash = "sha256:20e9e49ecd130598f1ca38a1d85090e1a600147b9c02fa6f15d69cb53d968fda", size 
= 7193136, upload-time = "2025-09-09T08:21:29.075Z" } wheels = [ @@ -8263,10 +8349,10 @@ resolution-markers = [ "python_full_version == '3.11.*'", ] dependencies = [ - { name = "joblib", marker = "python_full_version >= '3.11'" }, - { name = "numpy", marker = "python_full_version >= '3.11'" }, - { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "threadpoolctl", marker = "python_full_version >= '3.11'" }, + { name = "joblib", marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "numpy", marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "threadpoolctl", marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0e/d4/40988bf3b8e34feec1d0e6a051446b1f66225f8529b9309becaeef62b6c4/scikit_learn-1.8.0.tar.gz", hash = "sha256:9bccbb3b40e3de10351f8f5068e105d0f4083b1a65fa07b6634fbc401a6287fd", size = 7335585, upload-time = "2025-12-10T07:08:53.618Z" } wheels = [ @@ -8316,7 +8402,7 @@ resolution-markers = [ "python_full_version < '3.11'", ] dependencies = [ - { name = "numpy", marker = "python_full_version < '3.11'" }, + { name = "numpy", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/0f/37/6964b830433e654ec7485e45a00fc9a27cf868d622838f6b6d9c5ec0d532/scipy-1.15.3.tar.gz", hash = "sha256:eae3cf522bc7df64b42cad3925c876e1b0b6c35c1337c93e12c0f366f55b0eaf", size = 59419214, upload-time = "2025-05-08T16:13:05.955Z" } wheels = [ @@ -8377,7 +8463,7 @@ resolution-markers = [ "python_full_version == '3.11.*'", ] dependencies = [ - { name = "numpy", marker = "python_full_version >= '3.11'" }, + { name = "numpy", marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0a/ca/d8ace4f98322d01abcd52d381134344bf7b431eba7ed8b42bdea5a3c2ac9/scipy-1.16.3.tar.gz", hash = "sha256:01e87659402762f43bd2fee13370553a17ada367d42e7487800bf2916535aecb", size = 30597883, upload-time = "2025-10-28T17:38:54.068Z" } wheels = [ @@ -8541,15 +8627,17 @@ name = "sentence-transformers" version = "5.2.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "numpy" }, - { name = "scikit-learn", version = "1.7.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "scikit-learn", version = "1.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = 
"scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "scikit-learn", version = "1.7.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scikit-learn", version = "1.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "torch" }, { name = "tqdm" }, - { name = "transformers" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "5.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "typing-extensions" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/a6/bc/0bc9c0ec1cf83ab2ec6e6f38667d167349b950fff6dd2086b79bd360eeca/sentence_transformers-5.2.2.tar.gz", hash = "sha256:7033ee0a24bc04c664fd490abf2ef194d387b3a58a97adcc528783ff505159fa", size = 381607, upload-time = "2026-01-27T11:11:02.658Z" } @@ -8798,7 +8886,7 @@ name = "sqlalchemy" version = "2.0.45" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "greenlet", marker = "platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64'" }, + { name = "greenlet", marker = "platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/be/f9/5e4491e5ccf42f5d9cfc663741d261b3e6e1683ae7812114e7636409fcc6/sqlalchemy-2.0.45.tar.gz", hash = "sha256:1632a4bda8d2d25703fdad6363058d882541bdaaee0e5e3ddfa0cd3229efce88", size = 9869912, upload-time = "2025-12-09T21:05:16.737Z" } @@ -8866,7 +8954,7 @@ version = "0.50.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", 
size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } wheels = [ @@ -8945,7 +9033,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nexus-rpc" }, { name = "protobuf" }, - { name = "python-dateutil", marker = "python_full_version < '3.11'" }, + { name = "python-dateutil", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "types-protobuf" }, { name = "typing-extensions" }, ] @@ -9063,7 +9151,8 @@ name = "tokenizers" version = "0.22.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } wheels = [ @@ -9158,26 +9247,26 @@ dependencies = [ { name = "filelock" }, { name = "fsspec" }, { name = "jinja2" }, - { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "networkx", version = "3.6.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "nvidia-cublas-cu12", marker 
= "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nvshmem-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "setuptools", marker = "python_full_version >= '3.12'" }, + { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "networkx", version = "3.6.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or 
(extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cublas-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cuda-cupti-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cuda-runtime-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cudnn-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or 
(sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cufft-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cufile-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-curand-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cusolver-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cusparse-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cusparselt-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-nccl-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-nvshmem-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-nvtx-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and 
extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "setuptools", marker = "python_full_version >= '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "sympy" }, - { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "triton", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "typing-extensions" }, ] wheels = [ @@ -9294,28 +9383,59 @@ name = "tqdm" version = "4.67.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, ] +[[package]] +name = "transformers" +version = "4.57.6" +source = { registry = "https://pypi.org/simple" } 
+resolution-markers = [ + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] +dependencies = [ + { name = "filelock", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "numpy", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "packaging", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "pyyaml", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "regex", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "requests", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "safetensors", marker = "(python_full_version < '3.12' and extra 
== 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tokenizers", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tqdm", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c4/35/67252acc1b929dc88b6602e8c4a982e64f31e733b804c14bc24b47da35e6/transformers-4.57.6.tar.gz", hash = "sha256:55e44126ece9dc0a291521b7e5492b572e6ef2766338a610b9ab5afbb70689d3", size = 10134912, upload-time = "2026-01-16T10:38:39.284Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/b8/e484ef633af3887baeeb4b6ad12743363af7cce68ae51e938e00aaa0529d/transformers-4.57.6-py3-none-any.whl", hash = "sha256:4c9e9de11333ddfe5114bc872c9f370509198acf0b87a832a0ab9458e2bd0550", size = 11993498, upload-time = "2026-01-16T10:38:31.289Z" }, +] + [[package]] name = "transformers" version = "5.0.0" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13'", + "python_full_version == '3.12.*'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] dependencies = [ - { name = "filelock" }, - { name = "huggingface-hub" }, - { name = "numpy" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "regex" }, - { name = "safetensors" }, - { name = "tokenizers" }, - { name = "tqdm" }, - { name = "typer-slim" }, + { name = "filelock", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "1.3.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "numpy", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "packaging", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "pyyaml", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "regex", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "safetensors", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tokenizers", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tqdm", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typer-slim", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/bc/79/845941711811789c85fb7e2599cea425a14a07eda40f50896b9d3fda7492/transformers-5.0.0.tar.gz", hash = "sha256:5f5634efed6cf76ad068cc5834c7adbc32db78bbd6211fb70df2325a9c37dec8", size = 
8424830, upload-time = "2026-01-26T10:46:46.813Z" } wheels = [ @@ -9327,12 +9447,19 @@ name = "triton" version = "3.5.1" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/2e/f95e673222afa2c7f0c687d8913e98fcf2589ef0b1405de76894e37fe18f/triton-3.5.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f63e34dcb32d7bd3a1d0195f60f30d2aee8b08a69a0424189b71017e23dfc3d2", size = 159821655, upload-time = "2025-11-11T17:51:44.09Z" }, { url = "https://files.pythonhosted.org/packages/fd/6e/676ab5019b4dde8b9b7bab71245102fc02778ef3df48218b298686b9ffd6/triton-3.5.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5fc53d849f879911ea13f4a877243afc513187bc7ee92d1f2c0f1ba3169e3c94", size = 170320692, upload-time = "2025-11-11T17:40:46.074Z" }, + { url = "https://files.pythonhosted.org/packages/dc/dc/6ce44d055f2fc2403c4ec6b3cfd3a9b25f57b7d95efadccdea91497f8e81/triton-3.5.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da47169e30a779bade679ce78df4810fca6d78a955843d2ddb11f226adc517dc", size = 159928005, upload-time = "2025-11-11T17:51:50.008Z" }, { url = "https://files.pythonhosted.org/packages/b0/72/ec90c3519eaf168f22cb1757ad412f3a2add4782ad3a92861c9ad135d886/triton-3.5.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:61413522a48add32302353fdbaaf92daaaab06f6b5e3229940d21b5207f47579", size = 170425802, upload-time = "2025-11-11T17:40:53.209Z" }, + { url = "https://files.pythonhosted.org/packages/db/53/2bcc46879910991f09c063eea07627baef2bc62fe725302ba8f46a2c1ae5/triton-3.5.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:275a045b6ed670dd1bd005c3e6c2d61846c74c66f4512d6f33cc027b11de8fd4", size = 159940689, upload-time = "2025-11-11T17:51:55.938Z" }, { url = 
"https://files.pythonhosted.org/packages/f2/50/9a8358d3ef58162c0a415d173cfb45b67de60176e1024f71fbc4d24c0b6d/triton-3.5.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d2c6b915a03888ab931a9fd3e55ba36785e1fe70cbea0b40c6ef93b20fc85232", size = 170470207, upload-time = "2025-11-11T17:41:00.253Z" }, + { url = "https://files.pythonhosted.org/packages/f1/ba/805684a992ee32d486b7948d36aed2f5e3c643fc63883bf8bdca1c3f3980/triton-3.5.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56765ffe12c554cd560698398b8a268db1f616c120007bfd8829d27139abd24a", size = 159955460, upload-time = "2025-11-11T17:52:01.861Z" }, { url = "https://files.pythonhosted.org/packages/27/46/8c3bbb5b0a19313f50edcaa363b599e5a1a5ac9683ead82b9b80fe497c8d/triton-3.5.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f3f4346b6ebbd4fad18773f5ba839114f4826037c9f2f34e0148894cd5dd3dba", size = 170470410, upload-time = "2025-11-11T17:41:06.319Z" }, + { url = "https://files.pythonhosted.org/packages/84/1e/7df59baef41931e21159371c481c31a517ff4c2517343b62503d0cd2be99/triton-3.5.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:02c770856f5e407d24d28ddc66e33cf026e6f4d360dcb8b2fabe6ea1fc758621", size = 160072799, upload-time = "2025-11-11T17:52:07.293Z" }, { url = "https://files.pythonhosted.org/packages/37/92/e97fcc6b2c27cdb87ce5ee063d77f8f26f19f06916aa680464c8104ef0f6/triton-3.5.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0b4d2c70127fca6a23e247f9348b8adde979d2e7a20391bfbabaac6aebc7e6a8", size = 170579924, upload-time = "2025-11-11T17:41:12.455Z" }, + { url = "https://files.pythonhosted.org/packages/14/f9/0430e879c1e63a1016cb843261528fd3187c872c3a9539132efc39514753/triton-3.5.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f617aa7925f9ea9968ec2e1adaf93e87864ff51549c8f04ce658f29bbdb71e2d", size = 159956163, upload-time = 
"2025-11-11T17:52:12.999Z" }, { url = "https://files.pythonhosted.org/packages/a4/e6/c595c35e5c50c4bc56a7bac96493dad321e9e29b953b526bbbe20f9911d0/triton-3.5.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0637b1efb1db599a8e9dc960d53ab6e4637db7d4ab6630a0974705d77b14b60", size = 170480488, upload-time = "2025-11-11T17:41:18.222Z" }, + { url = "https://files.pythonhosted.org/packages/41/1e/63d367c576c75919e268e4fbc33c1cb33b6dc12bb85e8bfe531c2a8bd5d3/triton-3.5.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8932391d7f93698dfe5bc9bead77c47a24f97329e9f20c10786bb230a9083f56", size = 160073620, upload-time = "2025-11-11T17:52:18.403Z" }, { url = "https://files.pythonhosted.org/packages/16/b5/b0d3d8b901b6a04ca38df5e24c27e53afb15b93624d7fd7d658c7cd9352a/triton-3.5.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bac7f7d959ad0f48c0e97d6643a1cc0fd5786fe61cb1f83b537c6b2d54776478", size = 170582192, upload-time = "2025-11-11T17:41:23.963Z" }, ] @@ -9456,7 +9583,7 @@ name = "tzlocal" version = "5.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "tzdata", marker = "sys_platform == 'win32'" }, + { name = "tzdata", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761, upload-time = "2025-03-05T21:17:41.549Z" } wheels = [ @@ -9508,7 +9635,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "h11" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 
'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" } wheels = [ @@ -9598,7 +9725,7 @@ dependencies = [ { name = "depyf", marker = "python_full_version < '3.12'" }, { name = "diskcache", marker = "python_full_version < '3.12'" }, { name = "einops", marker = "python_full_version < '3.12'" }, - { name = "fastapi", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "fastapi", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "filelock", marker = "python_full_version < '3.12'" }, { name = "flashinfer-python", marker = "python_full_version < '3.12'" }, { name = "gguf", marker = "python_full_version < '3.12'" }, @@ -9609,7 +9736,7 @@ dependencies = [ { name = "llguidance", marker = "(python_full_version < '3.12' and platform_machine == 'aarch64') or (python_full_version < '3.12' and platform_machine == 'arm64') or (python_full_version < '3.12' and platform_machine == 'ppc64le') or (python_full_version < '3.12' and platform_machine == 's390x') or (python_full_version < '3.12' and platform_machine == 'x86_64')" }, { name = "lm-format-enforcer", marker = "python_full_version < '3.12'" }, { name = "mcp", marker = "python_full_version < '3.12'" }, - { name = "mistral-common", extra = ["image"], marker = "python_full_version < '3.12'" }, + { name = "mistral-common", extra = ["image"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 
'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "model-hosting-container-standards", marker = "python_full_version < '3.12'" }, { name = "msgspec", marker = "python_full_version < '3.12'" }, { name = "ninja", marker = "python_full_version < '3.12'" }, @@ -9631,7 +9758,7 @@ dependencies = [ { name = "python-json-logger", marker = "python_full_version < '3.12'" }, { name = "pyyaml", marker = "python_full_version < '3.12'" }, { name = "pyzmq", marker = "python_full_version < '3.12'" }, - { name = "ray", extra = ["cgraph"], marker = "python_full_version < '3.12'" }, + { name = "ray", extra = ["cgraph"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "regex", marker = "python_full_version < '3.12'" }, { name = "requests", marker = "python_full_version < '3.12'" }, { name = "sentencepiece", marker = "python_full_version < '3.12'" }, @@ -9642,7 +9769,7 @@ dependencies = [ { name = "torchaudio", marker = "python_full_version < '3.12'" }, { name = "torchvision", marker = "python_full_version < '3.12'" }, { name = "tqdm", marker = "python_full_version < '3.12'" }, - { name = "transformers", marker = "python_full_version < '3.12'" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, { name = "typing-extensions", marker = "python_full_version < '3.12'" }, { name = "watchfiles", marker = "python_full_version < '3.12'" }, { name = "xgrammar", marker = "(python_full_version < '3.12' and platform_machine == 'aarch64') or (python_full_version < '3.12' and platform_machine == 'arm64') or (python_full_version < '3.12' and platform_machine == 'ppc64le') or (python_full_version < '3.12' and platform_machine == 's390x') or (python_full_version < 
'3.12' and platform_machine == 'x86_64')" }, @@ -9661,7 +9788,7 @@ dependencies = [ { name = "aiolimiter" }, { name = "ffmpeg-python" }, { name = "langchain-text-splitters" }, - { name = "numpy", marker = "python_full_version < '3.14'" }, + { name = "numpy", marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "pillow" }, { name = "pydantic" }, { name = "requests" }, @@ -9890,7 +10017,7 @@ name = "whenever" version = "0.8.10" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "tzdata", marker = "python_full_version >= '3.13' and sys_platform == 'win32'" }, + { name = "tzdata", marker = "(python_full_version >= '3.13' and sys_platform == 'win32') or (python_full_version < '3.13' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'win32' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/4d/67/cfc23dfe54ced1e4388826b29db9b9ab2c70a342b33b7e92cf15866f35a6/whenever-0.8.10.tar.gz", hash = "sha256:5e2a3da71527e299f98eec5bb38c4e79d9527a127107387456125005884fb235", size = 240223, upload-time = "2025-10-16T20:31:23.538Z" } wheels = [ @@ -10072,20 +10199,23 @@ dependencies = [ { name = "numpy", marker = "python_full_version < '3.12'" }, { name = "pydantic", marker = "python_full_version < '3.12'" }, { name = "torch", marker = "python_full_version < '3.12'" }, - { name = "transformers", marker = "python_full_version < '3.12'" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, { name = "triton", marker = "python_full_version < '3.12' and platform_machine == 'x86_64' and sys_platform == 'linux'" }, { name = "typing-extensions", marker = 
"python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/02/a3/70dbe3ffd331a1e7e1ad5a95690a4086e6c7cdb8089f5c7eda712219ccec/xgrammar-0.1.29.tar.gz", hash = "sha256:cf195afa81b489eebf35d4c6f37f27136d05420739ab4a6f7f065c938d7e4baa", size = 2321317, upload-time = "2025-12-19T08:23:54.53Z" } wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/6d/6384619408da47411c71b2baed3d4bc509a4a9aa0a63d738709b516869b5/xgrammar-0.1.29-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:fdc66e834b915cf956168ac086bd577f138261644b944e73d73f07085682a4d8", size = 16008147, upload-time = "2025-12-19T08:22:59.54Z" }, { url = "https://files.pythonhosted.org/packages/a8/2d/6ead6206bda4582620b176f02840254183c61682e20041a2d950d6f1ee7a/xgrammar-0.1.29-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:48c5a5c60c5ca5ab09ff5ef9f6b382384a04b153bae5908006cd4f7d80d71e07", size = 17914539, upload-time = "2025-12-19T08:23:02.011Z" }, { url = "https://files.pythonhosted.org/packages/04/75/5305fe75823489c160dec8ee2a95a631e44a690eacec765469e513aca738/xgrammar-0.1.29-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cea3e65d60f8e55568dbb1457e6c4da6d381262a9b1211fe023630630b733d8", size = 34702454, upload-time = "2025-12-19T08:23:05.143Z" }, { url = "https://files.pythonhosted.org/packages/af/3c/7426aadf64a4ecfc1a1966babc57e4694235bf50392e96c506f930a4cdbe/xgrammar-0.1.29-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:866882b58ac654a1d1cd5e0c1ac67824b730aff8a40f9f19f0e8938a107dcd8a", size = 34903300, upload-time = "2025-12-19T08:23:08.098Z" }, { url = "https://files.pythonhosted.org/packages/05/f5/17ebcb575bd105cbcb5fee3c69906cee2423dbfdd73a18a60e205a619244/xgrammar-0.1.29-cp310-cp310-win_amd64.whl", hash = "sha256:8551dae4d38bd20c36a12c90a2954c3832bb6397211fc3aeba0b0d7920a1ea4b", size = 5928622, upload-time = "2025-12-19T08:23:10.485Z" }, + { url = 
"https://files.pythonhosted.org/packages/c6/de/88832fac40962fd0d4703bd4ba84598b06b8408bdc4a6722744f363f68a6/xgrammar-0.1.29-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:d2a7eef1b75b8d31b868d5c79855622aad203275ff267fc0e0ef77dd91906cfe", size = 16008004, upload-time = "2025-12-19T08:23:11.998Z" }, { url = "https://files.pythonhosted.org/packages/76/f6/4d22eec5305657430955442077306bc6ed85becc564116165d4b3a7049ad/xgrammar-0.1.29-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4af7f6ce2b2c6295b936b7cbda09f78e33f2c492a139cd64560f5d8d0fe967ed", size = 17914326, upload-time = "2025-12-19T08:23:14.43Z" }, { url = "https://files.pythonhosted.org/packages/87/0b/b5e5c99ce13a9d378a940cda07c5a08b50cc7efb66936c6ac8fa8232a0d5/xgrammar-0.1.29-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51bcfd63bd48a0b26209ffd2143a42067518559355ec9e4e574cef2ae74fac7c", size = 34699408, upload-time = "2025-12-19T08:23:16.906Z" }, { url = "https://files.pythonhosted.org/packages/a3/a0/4ebc1b3f5af79a3f73d0566034758f3fbcd9c64174646314a9a6f7cc1d27/xgrammar-0.1.29-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e27b50cf8c565845295a8263a4a0790c00a7c1fd783e76222fc0f575654d6f56", size = 34903461, upload-time = "2025-12-19T08:23:19.556Z" }, { url = "https://files.pythonhosted.org/packages/77/21/f6b3978dc9761bbfbbb153d33441206ce2253efa271d8e2d8b6b210d2bd7/xgrammar-0.1.29-cp311-cp311-win_amd64.whl", hash = "sha256:c9f8ea76bcf41b48168974b509b1546d2bee289ff1b20c68bc97434c1ea6e49a", size = 5928633, upload-time = "2025-12-19T08:23:21.67Z" }, + { url = "https://files.pythonhosted.org/packages/c1/d8/fb282fc78be6e9bbefb5cb389f66b22e4efd6ae14f06234f599651620da5/xgrammar-0.1.29-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:d992a3cee7594bbdaa64ae59f90da5ce21c5fe654719df3816014289ada6f04d", size = 16007376, upload-time = "2025-12-19T08:23:23.634Z" }, { url = 
"https://files.pythonhosted.org/packages/82/a7/2c9767620ee50f2f40f1eb95e55a3a29e1a0670f087ee6dc1bc1c887b906/xgrammar-0.1.29-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1bbdf02e45cfa8614218ba01ca7952d375f8bc1c13884e3d04daa4b54180cbc2", size = 17913535, upload-time = "2025-12-19T08:23:26.02Z" }, { url = "https://files.pythonhosted.org/packages/57/94/18793c64bf0368075a34c06e196bf002f1e6ab0aee332268f44e8d356d5a/xgrammar-0.1.29-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eb370a16b27a683e5f2b9e429ab41440c69977d4a504849ed61831b94cc704c", size = 34705239, upload-time = "2025-12-19T08:23:28.369Z" }, { url = "https://files.pythonhosted.org/packages/3e/da/4c14e3e00be698009b52700f15326a23272b4b00475939b6acc86b151188/xgrammar-0.1.29-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79e6e4f5cd33be77418cf91efc482f2b3d773d309891224383bc8a4948ad7b07", size = 34906135, upload-time = "2025-12-19T08:23:30.838Z" }, From ad81fa6daacc30d7eebd8b2f66c9dc9ff9013524 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 3 Mar 2026 09:55:58 -0500 Subject: [PATCH 13/33] fix backwards comp --- .../pydantic_ai/ui/_event_stream.py | 12 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 64 ++++--- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 8 +- .../test_thinking_roundtrip_anthropic.yaml | 72 +++++++ tests/test_ag_ui.py | 181 ++++++++++++++++-- 5 files changed, 275 insertions(+), 62 deletions(-) create mode 100644 tests/cassettes/test_ag_ui/test_thinking_roundtrip_anthropic.yaml diff --git a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py index 391cf06f2f..101b6feaa1 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py @@ -295,7 +295,7 @@ async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[EventT async for e in self.handle_text_start(part, 
follows_text=previous_part_kind == 'text'): yield e case ThinkingPart(): - async for e in self.handle_thinking_start(part, follows_thinking=previous_part_kind == 'thinking'): + async for e in self.handle_thinking_start(part): yield e case ToolCallPart(): async for e in self.handle_tool_call_start(part): @@ -360,7 +360,7 @@ async def handle_part_end(self, event: PartEndEvent) -> AsyncIterator[EventT]: async for e in self.handle_text_end(part, followed_by_text=next_part_kind == 'text'): yield e case ThinkingPart(): - async for e in self.handle_thinking_end(part, followed_by_thinking=next_part_kind == 'thinking'): + async for e in self.handle_thinking_end(part): yield e case ToolCallPart(): async for e in self.handle_tool_call_end(part): @@ -460,12 +460,11 @@ async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) return # pragma: no cover yield # Make this an async generator - async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[EventT]: + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: """Handle the start of a `ThinkingPart`. Args: part: The thinking part. - follows_thinking: Whether the part is directly preceded by another thinking part. In this case, you may want to yield a "thinking-delta" event instead of a "thinking-start" event. """ return # pragma: no cover yield # Make this an async generator @@ -479,14 +478,11 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator return # pragma: no cover yield # Make this an async generator - async def handle_thinking_end( - self, part: ThinkingPart, followed_by_thinking: bool = False - ) -> AsyncIterator[EventT]: + async def handle_thinking_end(self, part: ThinkingPart) -> AsyncIterator[EventT]: """Handle the end of a `ThinkingPart`. Args: part: The thinking part. - followed_by_thinking: Whether the part is directly followed by another thinking part. 
In this case, you may not want to yield a "thinking-end" event yet. """ return # pragma: no cover yield # Make this an async generator diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index cd94621f6f..9fc53840b0 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -75,6 +75,7 @@ class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, Output """UI event stream transformer for the Agent-User Interaction (AG-UI) protocol.""" _reasoning_message_id: str | None = None + _reasoning_started: bool = False _reasoning_text: bool = False _builtin_tool_call_ids: dict[str, str] = field(default_factory=dict[str, str]) _error: bool = False @@ -145,52 +146,61 @@ async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) if not followed_by_text: yield TextMessageEndEvent(message_id=self.message_id) - async def handle_thinking_start( - self, part: ThinkingPart, follows_thinking: bool = False - ) -> AsyncIterator[BaseEvent]: - if not follows_thinking: - self._reasoning_message_id = str(uuid4()) - yield ReasoningStartEvent(message_id=self._reasoning_message_id) + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: + self._reasoning_message_id = str(uuid4()) + self._reasoning_started = False if part.content: - yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id or '', role='assistant') - yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id or '', delta=part.content) + yield ReasoningStartEvent(message_id=self._reasoning_message_id) + self._reasoning_started = True + yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id, role='assistant') + yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id, delta=part.content) self._reasoning_text = True async def handle_thinking_delta(self, delta: 
ThinkingPartDelta) -> AsyncIterator[BaseEvent]: if not delta.content_delta: return # pragma: no cover + message_id = self._reasoning_message_id or '' + + if not self._reasoning_started: + yield ReasoningStartEvent(message_id=message_id) + self._reasoning_started = True + if not self._reasoning_text: - yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id or '', role='assistant') + yield ReasoningMessageStartEvent(message_id=message_id, role='assistant') self._reasoning_text = True - yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id or '', delta=delta.content_delta) + yield ReasoningMessageContentEvent(message_id=message_id, delta=delta.content_delta) - async def handle_thinking_end( - self, part: ThinkingPart, followed_by_thinking: bool = False - ) -> AsyncIterator[BaseEvent]: + async def handle_thinking_end(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: message_id = self._reasoning_message_id or '' + encrypted: dict[str, Any] = {} + for attr in ('id', 'signature', 'provider_name', 'provider_details'): + if (value := getattr(part, attr)) is not None: + encrypted[attr] = value + + if not self._reasoning_started and not encrypted: + self._reasoning_message_id = None + return + + if not self._reasoning_started: + yield ReasoningStartEvent(message_id=message_id) + if self._reasoning_text: yield ReasoningMessageEndEvent(message_id=message_id) self._reasoning_text = False - if not followed_by_thinking: - encrypted: dict[str, Any] = {} - for attr in ('id', 'signature', 'provider_name', 'provider_details'): - if (value := getattr(part, attr)) is not None: - encrypted[attr] = value - - if encrypted: - yield ReasoningEncryptedValueEvent( - subtype='message', - entity_id=message_id, - encrypted_value=json.dumps(encrypted), - ) + if encrypted: + yield ReasoningEncryptedValueEvent( + subtype='message', + entity_id=message_id, + encrypted_value=json.dumps(encrypted), + ) - yield ReasoningEndEvent(message_id=message_id) - 
self._reasoning_message_id = None + yield ReasoningEndEvent(message_id=message_id) + self._reasoning_message_id = None def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: return self._handle_tool_call_start(part) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 86187bedfe..44b08399b5 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -168,9 +168,7 @@ async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) ) yield TextEndChunk(id=self.message_id, provider_metadata=provider_metadata) - async def handle_thinking_start( - self, part: ThinkingPart, follows_thinking: bool = False - ) -> AsyncIterator[BaseChunk]: + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseChunk]: message_id = self.new_message_id() provider_metadata = dump_provider_metadata( id=part.id, @@ -193,9 +191,7 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator id=self.message_id, delta=delta.content_delta, provider_metadata=provider_metadata ) - async def handle_thinking_end( - self, part: ThinkingPart, followed_by_thinking: bool = False - ) -> AsyncIterator[BaseChunk]: + async def handle_thinking_end(self, part: ThinkingPart) -> AsyncIterator[BaseChunk]: provider_metadata = dump_provider_metadata( id=part.id, signature=part.signature, diff --git a/tests/cassettes/test_ag_ui/test_thinking_roundtrip_anthropic.yaml b/tests/cassettes/test_ag_ui/test_thinking_roundtrip_anthropic.yaml new file mode 100644 index 0000000000..aec2835ded --- /dev/null +++ b/tests/cassettes/test_ag_ui/test_thinking_roundtrip_anthropic.yaml @@ -0,0 +1,72 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, br, zstd + connection: + - keep-alive + content-length: + - 
'229' + content-type: + - application/json + host: + - api.anthropic.com + method: POST + parsed_body: + max_tokens: 4096 + messages: + - content: + - text: What is 1+1? Reply in one word. + type: text + role: user + model: claude-sonnet-4-5 + stream: false + thinking: + budget_tokens: 1024 + type: enabled + uri: https://api.anthropic.com/v1/messages?beta=true + response: + headers: + connection: + - keep-alive + content-length: + - '992' + content-security-policy: + - default-src 'none'; frame-ancestors 'none' + content-type: + - application/json + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + parsed_body: + content: + - signature: EooCCkYICxgCKkDYW6Ka+Mo73ZE34HVijmFbdV6QH/iRdv+3WuisH3pR8D5aSFASMBsF1F1bZRQFQXuM0+G4H83czthKvHqdqWriEgwB0eJaWoXZWU18NKoaDMH4nN8ZwJ6W9DnYLyIwrdTWmfc5QTqDr8gye3/yrPpV2YPeZnUBoHBLOGl8MUaC6SuGmxcm8rGqf2s+P+ZtKnJPJJzQiTrvPcEkF3ij22w3bXC9yoyZCyJVPcibR2ZZpLYF/UOoZ+BRBs0FCdm/QFXUUe8W1tcQ/ZQgBaW44LTcdzwOSP5hJb25UrPiGWuTytGMxIr7QyG7INpVbmm8JRBIIEzj3gs2zlxdbl17yZ/yZXcYAQ== + thinking: The user is asking what 1+1 equals and wants a one-word reply. The answer is 2, which is one word. 
+ type: thinking + - text: Two + type: text + id: msg_01VWoy28sUMKXEpbwuUkrpmP + model: claude-sonnet-4-5-20250929 + role: assistant + stop_reason: end_turn + stop_sequence: null + type: message + usage: + cache_creation: + ephemeral_1h_input_tokens: 0 + ephemeral_5m_input_tokens: 0 + cache_creation_input_tokens: 0 + cache_read_input_tokens: 0 + inference_geo: not_available + input_tokens: 48 + output_tokens: 42 + service_tier: standard + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 4aa51cde7b..eac6e8d803 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -98,6 +98,10 @@ ) from pydantic_ai.ui.ag_ui import AGUIEventStream +with try_import() as anthropic_imports_successful: + from pydantic_ai.models.anthropic import AnthropicModel, AnthropicModelSettings + from pydantic_ai.providers.anthropic import AnthropicProvider + pytestmark = [ pytest.mark.anyio, @@ -1063,8 +1067,7 @@ async def stream_function( 'threadId': (thread_id := IsSameStr()), 'runId': (run_id := IsSameStr()), }, - {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': IsStr()}, - {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': IsStr()}, + # Part 0: empty thinking — skipped (no content, no metadata) { 'type': 'TEXT_MESSAGE_START', 'timestamp': IsInt(), @@ -1084,53 +1087,56 @@ async def stream_function( 'delta': ' and some more', }, {'type': 'TEXT_MESSAGE_END', 'timestamp': IsInt(), 'messageId': message_id}, - {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (reasoning_id := IsSameStr())}, + # Part 1: "Thinking about the weather" + {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (r1 := IsSameStr())}, { 'type': 'REASONING_MESSAGE_START', 'timestamp': IsInt(), - 'messageId': reasoning_id, + 'messageId': r1, 'role': 'assistant', }, + {'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': IsInt(), 'messageId': r1, 'delta': 'Thinking '}, { 'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': 
IsInt(), - 'messageId': reasoning_id, - 'delta': 'Thinking ', - }, - { - 'type': 'REASONING_MESSAGE_CONTENT', - 'timestamp': IsInt(), - 'messageId': reasoning_id, + 'messageId': r1, 'delta': 'about the weather', }, - {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': r1}, + {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': r1}, + # Part 2: empty thinking — skipped (no content, no metadata) + # Part 3: "Thinking about the meaning of life" + {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (r3 := IsSameStr())}, { 'type': 'REASONING_MESSAGE_START', 'timestamp': IsInt(), - 'messageId': reasoning_id, + 'messageId': r3, 'role': 'assistant', }, { 'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': IsInt(), - 'messageId': reasoning_id, + 'messageId': r3, 'delta': 'Thinking about the meaning of life', }, - {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': r3}, + {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': r3}, + # Part 4: "Thinking about the universe" + {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (r4 := IsSameStr())}, { 'type': 'REASONING_MESSAGE_START', 'timestamp': IsInt(), - 'messageId': reasoning_id, + 'messageId': r4, 'role': 'assistant', }, { 'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': IsInt(), - 'messageId': reasoning_id, + 'messageId': r4, 'delta': 'Thinking about the universe', }, - {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, - {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': reasoning_id}, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': r4}, + {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': r4}, { 'type': 'RUN_FINISHED', 'timestamp': IsInt(), @@ -1206,6 +1212,106 @@ async def stream_function( ) +async def 
test_thinking_consecutive_signatures() -> None: + """Test that consecutive ThinkingParts each preserve their own metadata via separate REASONING blocks.""" + + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaThinkingCalls | str]: + yield {0: DeltaThinkingPart(content='First thought', signature='sig_aaa')} + yield {1: DeltaThinkingPart(content='Second thought', signature='sig_bbb')} + yield {2: DeltaThinkingPart(content='Third thought', signature='sig_ccc')} + yield 'Final answer' + + agent = Agent(model=FunctionModel(stream_function=stream_function)) + + run_input = create_input( + UserMessage(id='msg_1', content='Think deeply'), + ) + + events = await run_and_collect_events(agent, run_input) + + assert events == snapshot( + [ + { + 'type': 'RUN_STARTED', + 'timestamp': IsInt(), + 'threadId': (thread_id := IsSameStr()), + 'runId': (run_id := IsSameStr()), + }, + # Part 0: signature=sig_aaa + {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (r0 := IsSameStr())}, + { + 'type': 'REASONING_MESSAGE_START', + 'timestamp': IsInt(), + 'messageId': r0, + 'role': 'assistant', + }, + {'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': IsInt(), 'messageId': r0, 'delta': 'First thought'}, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': r0}, + { + 'type': 'REASONING_ENCRYPTED_VALUE', + 'timestamp': IsInt(), + 'subtype': 'message', + 'entityId': r0, + 'encryptedValue': IsStr(), + }, + {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': r0}, + # Part 1: signature=sig_bbb + {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (r1 := IsSameStr())}, + { + 'type': 'REASONING_MESSAGE_START', + 'timestamp': IsInt(), + 'messageId': r1, + 'role': 'assistant', + }, + {'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': IsInt(), 'messageId': r1, 'delta': 'Second thought'}, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': r1}, + { + 'type': 
'REASONING_ENCRYPTED_VALUE', + 'timestamp': IsInt(), + 'subtype': 'message', + 'entityId': r1, + 'encryptedValue': IsStr(), + }, + {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': r1}, + # Part 2: signature=sig_ccc + {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (r2 := IsSameStr())}, + { + 'type': 'REASONING_MESSAGE_START', + 'timestamp': IsInt(), + 'messageId': r2, + 'role': 'assistant', + }, + {'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': IsInt(), 'messageId': r2, 'delta': 'Third thought'}, + {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': r2}, + { + 'type': 'REASONING_ENCRYPTED_VALUE', + 'timestamp': IsInt(), + 'subtype': 'message', + 'entityId': r2, + 'encryptedValue': IsStr(), + }, + {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': r2}, + # Text response + { + 'type': 'TEXT_MESSAGE_START', + 'timestamp': IsInt(), + 'messageId': (message_id := IsSameStr()), + 'role': 'assistant', + }, + { + 'type': 'TEXT_MESSAGE_CONTENT', + 'timestamp': IsInt(), + 'messageId': message_id, + 'delta': 'Final answer', + }, + {'type': 'TEXT_MESSAGE_END', 'timestamp': IsInt(), 'messageId': message_id}, + {'type': 'RUN_FINISHED', 'timestamp': IsInt(), 'threadId': thread_id, 'runId': run_id}, + ] + ) + + def test_reasoning_message_thinking_roundtrip() -> None: """Test that ReasoningMessage converts to ThinkingPart with metadata from encrypted_value.""" messages = AGUIAdapter.load_messages( @@ -1259,9 +1365,9 @@ async def test_reasoning_events_with_all_metadata() -> None: ) events: list[BaseEvent] = [] - async for e in event_stream.handle_thinking_start(part, follows_thinking=False): + async for e in event_stream.handle_thinking_start(part): events.append(e) - async for e in event_stream.handle_thinking_end(part, followed_by_thinking=False): + async for e in event_stream.handle_thinking_end(part): events.append(e) assert [e.model_dump(exclude_none=True) for e in events] == snapshot( @@ -1586,6 +1692,39 @@ def 
test_dump_load_roundtrip_file_part_only() -> None: assert reloaded == original +@pytest.mark.vcr() +@pytest.mark.skipif(not anthropic_imports_successful(), reason='anthropic not installed') +async def test_thinking_roundtrip_anthropic(allow_model_requests: None, anthropic_api_key: str) -> None: + """Test that pydantic -> AG-UI -> pydantic round-trip preserves thinking metadata with real Anthropic responses.""" + m = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(api_key=anthropic_api_key)) + settings: AnthropicModelSettings = {'anthropic_thinking': {'type': 'enabled', 'budget_tokens': 1024}} + agent: Agent[None, str] = Agent(m, model_settings=settings) + + result = await agent.run('What is 1+1? Reply in one word.') + original = result.all_messages() + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + assert reloaded == snapshot( + [ + ModelRequest(parts=[UserPromptPart(content='What is 1+1? Reply in one word.', timestamp=IsDatetime())]), + ModelResponse( + parts=[ + ThinkingPart( + content='The user is asking what 1+1 equals and wants a one-word reply. 
The answer is 2, which is one word.', + signature='EooCCkYICxgCKkDYW6Ka+Mo73ZE34HVijmFbdV6QH/iRdv+3WuisH3pR8D5aSFASMBsF1F1bZRQFQXuM0+G4H83czthKvHqdqWriEgwB0eJaWoXZWU18NKoaDMH4nN8ZwJ6W9DnYLyIwrdTWmfc5QTqDr8gye3/yrPpV2YPeZnUBoHBLOGl8MUaC6SuGmxcm8rGqf2s+P+ZtKnJPJJzQiTrvPcEkF3ij22w3bXC9yoyZCyJVPcibR2ZZpLYF/UOoZ+BRBs0FCdm/QFXUUe8W1tcQ/ZQgBaW44LTcdzwOSP5hJb25UrPiGWuTytGMxIr7QyG7INpVbmm8JRBIIEzj3gs2zlxdbl17yZ/yZXcYAQ==', + provider_name='anthropic', + ), + TextPart(content='Two'), + ], + timestamp=IsDatetime(), + ), + ] + ) + + async def test_tool_local_then_ag_ui() -> None: """Test mixed local and AG-UI tool calls.""" From 3d6160564656932fee0d986d307be296e7af3399 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 3 Mar 2026 11:42:55 -0500 Subject: [PATCH 14/33] fix: align DummyUIEventStream with base class after follows_thinking removal The base UIEventStream.handle_thinking_start/end() no longer accepts follows_thinking/followed_by_thinking params, but the test dummy and snapshot were not updated accordingly. 
Co-Authored-By: Claude Opus 4.6 --- tests/test_ui.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/test_ui.py b/tests/test_ui.py index 7cd2a2dca0..50cdd1db1e 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -148,14 +148,14 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[str]: async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[str]: yield f'' - async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[str]: - yield f'{part.content}' + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[str]: + yield f'{part.content}' async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[str]: yield str(delta.content_delta) - async def handle_thinking_end(self, part: ThinkingPart, followed_by_thinking: bool = False) -> AsyncIterator[str]: - yield f'' + async def handle_thinking_end(self, part: ThinkingPart) -> AsyncIterator[str]: + yield '' async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[str]: yield f'{part.args}' @@ -234,19 +234,19 @@ async def stream_function( [ '', '', - 'Half of ', + 'Half of ', 'a thought', - '', - 'Another thought', - '', - 'And one more', - '', + '', + 'Another thought', + '', + 'And one more', + '', 'Half of ', '', 'some text', '', - 'More thinking', - '', + 'More thinking', + '', '', 'Half of some text', '', From 2cffd380fe0092b028d0289e7a703b4a581a4590 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 4 Mar 2026 11:34:49 -0500 Subject: [PATCH 15/33] address --- .../pydantic_ai/ui/ag_ui/_adapter.py | 69 ++++++++------ .../pydantic_ai/ui/ag_ui/_event_stream.py | 15 ++- tests/test_ag_ui.py | 93 +++++++++++++++++++ 3 files changed, 146 insertions(+), 31 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py 
b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 88401d2615..7e30896f2d 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -61,7 +61,7 @@ ) from .. import MessagesBuilder, UIAdapter, UIEventStream - from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream + from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream, thinking_encrypted_metadata except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' @@ -355,20 +355,43 @@ def _dump_request_parts(msg: ModelRequest) -> tuple[list[Message], dict[str, str @staticmethod def _dump_response_parts(msg: ModelResponse) -> list[Message]: - """Convert a `ModelResponse` into AG-UI messages.""" + """Convert a `ModelResponse` into AG-UI messages. + + Uses a flush pattern to preserve part ordering: text that appears after tool calls + gets its own AssistantMessage, and ThinkingPart/FilePart boundaries trigger a flush + so content on either side doesn't get merged. 
+ """ result: list[Message] = [] text_content: list[str] = [] tool_calls_list: list[ToolCall] = [] - builtin_tool_returns: list[BuiltinToolReturnPart] = [] + tool_messages: list[ToolMessage] = [] + + builtin_returns = {part.tool_call_id: part for part in msg.parts if isinstance(part, BuiltinToolReturnPart)} + + def flush() -> None: + nonlocal text_content, tool_calls_list, tool_messages + if not text_content and not tool_calls_list: + return + result.append( + AssistantMessage( + id=_new_message_id(), + content='\n'.join(text_content) if text_content else None, + tool_calls=tool_calls_list if tool_calls_list else None, + ) + ) + result.extend(tool_messages) + text_content = [] + tool_calls_list = [] + tool_messages = [] for part in msg.parts: if isinstance(part, TextPart): + if tool_calls_list: + flush() text_content.append(part.content) elif isinstance(part, ThinkingPart): - encrypted: dict[str, Any] = {} - for attr in ('id', 'signature', 'provider_name', 'provider_details'): - if (value := getattr(part, attr)) is not None: - encrypted[attr] = value + flush() + encrypted = thinking_encrypted_metadata(part) result.append( ReasoningMessage( id=_new_message_id(), @@ -391,9 +414,19 @@ def _dump_response_parts(msg: ModelResponse) -> list[Message]: function=FunctionCall(name=part.tool_name, arguments=part.args_as_json_str()), ) ) + if builtin_return := builtin_returns.get(part.tool_call_id): + tool_messages.append( + ToolMessage( + id=_new_message_id(), + content=builtin_return.model_response_str(), + tool_call_id=prefixed_id, + ) + ) elif isinstance(part, BuiltinToolReturnPart): - builtin_tool_returns.append(part) + # Emitted when matching BuiltinToolCallPart is processed above. 
+ pass elif isinstance(part, FilePart): + flush() file_content: dict[str, Any] = { 'url': part.content.data_uri, 'media_type': part.content.media_type, @@ -411,25 +444,7 @@ def _dump_response_parts(msg: ModelResponse) -> list[Message]: else: assert_never(part) - if text_content or tool_calls_list: - result.append( - AssistantMessage( - id=_new_message_id(), - content='\n'.join(text_content) if text_content else None, - tool_calls=tool_calls_list if tool_calls_list else None, - ) - ) - - for part in builtin_tool_returns: - prefixed_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', part.tool_call_id]) - result.append( - ToolMessage( - id=_new_message_id(), - content=part.model_response_str(), - tool_call_id=prefixed_id, - ) - ) - + flush() return result @classmethod diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 9fc53840b0..ba986a28c2 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -69,6 +69,16 @@ BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' +_THINKING_METADATA_ATTRS: Final[tuple[str, ...]] = ('id', 'signature', 'provider_name', 'provider_details') + + +def thinking_encrypted_metadata(part: ThinkingPart) -> dict[str, Any]: + """Collect non-None metadata fields from a ThinkingPart for AG-UI encrypted_value.""" + encrypted: dict[str, Any] = { + attr: value for attr in _THINKING_METADATA_ATTRS if (value := getattr(part, attr)) is not None + } + return encrypted + @dataclass class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): @@ -176,10 +186,7 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator async def handle_thinking_end(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: message_id = self._reasoning_message_id or '' - encrypted: dict[str, Any] = {} - for attr in ('id', 'signature', 'provider_name', 
'provider_details'): - if (value := getattr(part, attr)) is not None: - encrypted[attr] = value + encrypted = thinking_encrypted_metadata(part) if not self._reasoning_started and not encrypted: self._reasoning_message_id = None diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index b1d8d663cd..67ae237d09 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1693,6 +1693,99 @@ def test_dump_load_roundtrip_file_part_only() -> None: assert reloaded == original +def test_dump_load_roundtrip_interleaved_text_and_tools() -> None: + """Test round-trip for response with text interleaved around tool calls. + + When text appears after tool calls, the flush pattern splits them into + separate AssistantMessages to preserve ordering on round-trip. + """ + original: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Do things')]), + ModelResponse( + parts=[ + TextPart(content='Before tools'), + ToolCallPart(tool_name='search', args='{"q": "test"}', tool_call_id='call_1'), + TextPart(content='After tools'), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + + # Text before tools shares an AssistantMessage with the tool call; + # text after tools gets its own AssistantMessage. 
+ assert [m.model_dump(exclude={'id'}, exclude_none=True) for m in ag_ui_msgs] == snapshot( + [ + {'role': 'user', 'content': 'Do things'}, + { + 'role': 'assistant', + 'content': 'Before tools', + 'tool_calls': [ + { + 'id': 'call_1', + 'type': 'function', + 'function': {'name': 'search', 'arguments': '{"q": "test"}'}, + }, + ], + }, + {'role': 'assistant', 'content': 'After tools'}, + ] + ) + + reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(original, reloaded) + + # Round-trip splits into two ModelResponses due to the two AssistantMessages + assert reloaded == snapshot( + [ + ModelRequest(parts=[UserPromptPart(content='Do things', timestamp=IsDatetime())]), + ModelResponse( + parts=[ + TextPart(content='Before tools'), + ToolCallPart(tool_name='search', args='{"q": "test"}', tool_call_id='call_1'), + TextPart(content='After tools'), + ], + timestamp=IsDatetime(), + ), + ] + ) + + +async def test_reasoning_events_empty_content_with_metadata() -> None: + """Test REASONING_* events for ThinkingPart with no content but with metadata. + + This exercises the path in handle_thinking_end where _reasoning_started is False + (no content was streamed) but encrypted metadata is present — e.g. redacted thinking. 
+ """ + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE) + + part = ThinkingPart( + content='', + id='think_redacted', + signature='sig_redacted', + ) + + events: list[BaseEvent] = [] + async for e in event_stream.handle_thinking_start(part): # pragma: no branch + events.append(e) + async for e in event_stream.handle_thinking_end(part): + events.append(e) + + assert [e.model_dump(exclude_none=True) for e in events] == snapshot( + [ + {'type': 'REASONING_START', 'message_id': IsStr()}, + { + 'type': 'REASONING_ENCRYPTED_VALUE', + 'subtype': 'message', + 'entity_id': IsStr(), + 'encrypted_value': '{"id": "think_redacted", "signature": "sig_redacted"}', + }, + {'type': 'REASONING_END', 'message_id': IsStr()}, + ] + ) + + @pytest.mark.vcr() @pytest.mark.skipif(not anthropic_imports_successful(), reason='anthropic not installed') async def test_thinking_roundtrip_anthropic(allow_model_requests: None, anthropic_api_key: str) -> None: From 17e3517296eaecaeb1a79f69fde0b88105319827 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 4 Mar 2026 13:39:32 -0500 Subject: [PATCH 16/33] fix: handle UploadedFile in AG-UI adapter, fix coverage gaps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Handle `UploadedFile` in `_user_content_to_input` (skip — opaque provider file_id can't map to AG-UI's BinaryInputContent) - Add test for BuiltinToolCallPart without matching return - Convert async for loop to list comprehension to fix dead loop body coverage Co-Authored-By: Claude Opus 4.6 --- .../pydantic_ai/ui/ag_ui/_adapter.py | 7 ++++- tests/test_ag_ui.py | 30 +++++++++++++++++-- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 7e30896f2d..a72945f7aa 100644 --- 
a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -34,6 +34,7 @@ ThinkingPart, ToolCallPart, ToolReturnPart, + UploadedFile, UserPromptPart, VideoUrl, ) @@ -109,7 +110,7 @@ def _new_message_id() -> str: def _user_content_to_input( - item: str | ImageUrl | VideoUrl | AudioUrl | DocumentUrl | BinaryContent | CachePoint, + item: str | ImageUrl | VideoUrl | AudioUrl | DocumentUrl | BinaryContent | UploadedFile | CachePoint, ) -> TextInputContent | BinaryInputContent | None: """Convert a user content item to AG-UI input content.""" if isinstance(item, str): @@ -118,6 +119,10 @@ def _user_content_to_input( return BinaryInputContent(type='binary', url=item.url, mime_type=item.media_type or '') elif isinstance(item, BinaryContent): return BinaryInputContent(type='binary', data=item.base64, mime_type=item.media_type) + elif isinstance(item, UploadedFile): + # UploadedFile holds an opaque provider file_id (e.g. 'file-abc123'), not a URL or + # binary data, so it can't be mapped to AG-UI's BinaryInputContent. Skipped like CachePoint. 
+ return None elif isinstance(item, CachePoint): return None else: diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 67ae237d09..8589ef0eaa 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1584,6 +1584,32 @@ def test_dump_load_roundtrip_builtin_tool_return() -> None: assert reloaded == original +def test_dump_builtin_tool_call_without_return() -> None: + """Test that BuiltinToolCallPart without a matching BuiltinToolReturnPart still dumps correctly.""" + messages: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Search for info')]), + ModelResponse( + parts=[ + BuiltinToolCallPart( + tool_name='web_search', + tool_call_id='call_orphan', + args='{"query": "test"}', + provider_name='anthropic', + ), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(messages) + + assert len(ag_ui_msgs) == 2 + assistant_msg = ag_ui_msgs[1] + assert isinstance(assistant_msg, AssistantMessage) + assert assistant_msg.tool_calls is not None + assert len(assistant_msg.tool_calls) == 1 + assert assistant_msg.tool_calls[0].id == 'pyd_ai_builtin|anthropic|call_orphan' + + def test_dump_load_roundtrip_cache_point() -> None: """Test that CachePoint is filtered out during round-trip (it's metadata only).""" original: list[ModelMessage] = [ @@ -1766,9 +1792,7 @@ async def test_reasoning_events_empty_content_with_metadata() -> None: signature='sig_redacted', ) - events: list[BaseEvent] = [] - async for e in event_stream.handle_thinking_start(part): # pragma: no branch - events.append(e) + events: list[BaseEvent] = [e async for e in event_stream.handle_thinking_start(part)] async for e in event_stream.handle_thinking_end(part): events.append(e) From fff957d06b36eeedef587b273e4879df63f59f65 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 4 Mar 2026 17:15:25 -0500 Subject: [PATCH 17/33] best of both worlds --- .../pydantic_ai/ui/_event_stream.py | 12 ++++++---- 
.../pydantic_ai/ui/ag_ui/_event_stream.py | 8 +++++-- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 8 +++++-- tests/test_ui.py | 24 +++++++++---------- 4 files changed, 32 insertions(+), 20 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py index 101b6feaa1..391cf06f2f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py @@ -295,7 +295,7 @@ async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[EventT async for e in self.handle_text_start(part, follows_text=previous_part_kind == 'text'): yield e case ThinkingPart(): - async for e in self.handle_thinking_start(part): + async for e in self.handle_thinking_start(part, follows_thinking=previous_part_kind == 'thinking'): yield e case ToolCallPart(): async for e in self.handle_tool_call_start(part): @@ -360,7 +360,7 @@ async def handle_part_end(self, event: PartEndEvent) -> AsyncIterator[EventT]: async for e in self.handle_text_end(part, followed_by_text=next_part_kind == 'text'): yield e case ThinkingPart(): - async for e in self.handle_thinking_end(part): + async for e in self.handle_thinking_end(part, followed_by_thinking=next_part_kind == 'thinking'): yield e case ToolCallPart(): async for e in self.handle_tool_call_end(part): @@ -460,11 +460,12 @@ async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) return # pragma: no cover yield # Make this an async generator - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: + async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[EventT]: """Handle the start of a `ThinkingPart`. Args: part: The thinking part. + follows_thinking: Whether the part is directly preceded by another thinking part. In this case, you may want to yield a "thinking-delta" event instead of a "thinking-start" event. 
""" return # pragma: no cover yield # Make this an async generator @@ -478,11 +479,14 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator return # pragma: no cover yield # Make this an async generator - async def handle_thinking_end(self, part: ThinkingPart) -> AsyncIterator[EventT]: + async def handle_thinking_end( + self, part: ThinkingPart, followed_by_thinking: bool = False + ) -> AsyncIterator[EventT]: """Handle the end of a `ThinkingPart`. Args: part: The thinking part. + followed_by_thinking: Whether the part is directly followed by another thinking part. In this case, you may not want to yield a "thinking-end" event yet. """ return # pragma: no cover yield # Make this an async generator diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index ba986a28c2..b97bf368ed 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -156,7 +156,9 @@ async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) if not followed_by_text: yield TextMessageEndEvent(message_id=self.message_id) - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: + async def handle_thinking_start( + self, part: ThinkingPart, follows_thinking: bool = False + ) -> AsyncIterator[BaseEvent]: self._reasoning_message_id = str(uuid4()) self._reasoning_started = False @@ -183,7 +185,9 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator yield ReasoningMessageContentEvent(message_id=message_id, delta=delta.content_delta) - async def handle_thinking_end(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: + async def handle_thinking_end( + self, part: ThinkingPart, followed_by_thinking: bool = False + ) -> AsyncIterator[BaseEvent]: message_id = self._reasoning_message_id or '' encrypted = thinking_encrypted_metadata(part) diff --git 
a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 5433bbbaad..d6e960b5dc 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -167,7 +167,9 @@ async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) ) yield TextEndChunk(id=self.message_id, provider_metadata=provider_metadata) - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseChunk]: + async def handle_thinking_start( + self, part: ThinkingPart, follows_thinking: bool = False + ) -> AsyncIterator[BaseChunk]: message_id = self.new_message_id() provider_metadata = dump_provider_metadata( id=part.id, @@ -190,7 +192,9 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator id=self.message_id, delta=delta.content_delta, provider_metadata=provider_metadata ) - async def handle_thinking_end(self, part: ThinkingPart) -> AsyncIterator[BaseChunk]: + async def handle_thinking_end( + self, part: ThinkingPart, followed_by_thinking: bool = False + ) -> AsyncIterator[BaseChunk]: provider_metadata = dump_provider_metadata( id=part.id, signature=part.signature, diff --git a/tests/test_ui.py b/tests/test_ui.py index 50cdd1db1e..7cd2a2dca0 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -148,14 +148,14 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[str]: async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[str]: yield f'' - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[str]: - yield f'{part.content}' + async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[str]: + yield f'{part.content}' async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[str]: yield str(delta.content_delta) - async def 
handle_thinking_end(self, part: ThinkingPart) -> AsyncIterator[str]: - yield '' + async def handle_thinking_end(self, part: ThinkingPart, followed_by_thinking: bool = False) -> AsyncIterator[str]: + yield f'' async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[str]: yield f'{part.args}' @@ -234,19 +234,19 @@ async def stream_function( [ '', '', - 'Half of ', + 'Half of ', 'a thought', - '', - 'Another thought', - '', - 'And one more', - '', + '', + 'Another thought', + '', + 'And one more', + '', 'Half of ', '', 'some text', '', - 'More thinking', - '', + 'More thinking', + '', '', 'Half of some text', '', From d713b4b967237d372ac987c03c192d66656f65b0 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 4 Mar 2026 22:42:13 -0500 Subject: [PATCH 18/33] test: cover UploadedFile branch in AG-UI adapter for 100% coverage Co-Authored-By: Claude Opus 4.6 --- tests/test_ag_ui.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 8589ef0eaa..ab41a10ff5 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -42,6 +42,7 @@ ToolCallPartDelta, ToolReturn, ToolReturnPart, + UploadedFile, UserPromptPart, VideoUrl, ) @@ -1632,6 +1633,30 @@ def test_dump_load_roundtrip_cache_point() -> None: assert reloaded == expected +def test_dump_load_roundtrip_uploaded_file() -> None: + """Test that UploadedFile is filtered out during round-trip (opaque provider file_id).""" + original: list[ModelMessage] = [ + ModelRequest( + parts=[ + UserPromptPart( + content=['Hello', UploadedFile(file_id='file-abc123', provider_name='anthropic'), 'world'] + ), + ] + ), + ModelResponse(parts=[TextPart(content='Hi!')]), + ] + expected: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content=['Hello', 'world'])]), + ModelResponse(parts=[TextPart(content='Hi!')]), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original) + reloaded = 
AGUIAdapter.load_messages(ag_ui_msgs) + _sync_timestamps(expected, reloaded) + + assert reloaded == expected + + def test_dump_load_roundtrip_retry_prompt_with_tool() -> None: """Test round-trip for RetryPromptPart with tool_name (converted to ToolMessage with error).""" original: list[ModelMessage] = [ From c7d6de04314b323ed92e91e45ddc7639d8247648 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Sun, 8 Mar 2026 14:19:36 -0500 Subject: [PATCH 19/33] feat: add include_file_parts opt-in flag to AGUIAdapter for FilePart round-trip FilePart via ActivityMessage(pydantic_ai_file) is now behind an opt-in flag to avoid interfering with users' own activity types. When False (default), FilePart is silently dropped from dump_messages and ActivityMessage(pydantic_ai_file) is ignored by load_messages. Co-Authored-By: Claude Opus 4.6 --- .../pydantic_ai/ui/ag_ui/_adapter.py | 120 ++++++++++++------ .../pydantic_ai/ui/ag_ui/_event_stream.py | 14 +- tests/test_ag_ui.py | 52 ++++++-- 3 files changed, 132 insertions(+), 54 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index a72945f7aa..b6eb98efb2 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -6,6 +6,7 @@ import uuid from base64 import b64decode from collections.abc import Mapping, Sequence +from dataclasses import KW_ONLY, dataclass from functools import cached_property from typing import ( TYPE_CHECKING, @@ -70,7 +71,9 @@ ) from e if TYPE_CHECKING: - pass + from starlette.requests import Request + + from ...agent import AbstractAgent __all__ = ['AGUIAdapter'] @@ -129,9 +132,19 @@ def _user_content_to_input( assert_never(item) +@dataclass class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, OutputDataT]): """UI adapter for the Agent-User Interaction (AG-UI) protocol.""" + _: KW_ONLY + include_file_parts: bool 
= False + """Whether to include ``FilePart`` data in message conversion. + + When ``True``, ``FilePart`` round-trips as ``ActivityMessage(activity_type='pydantic_ai_file')``. + When ``False`` (default), ``FilePart`` is silently dropped from ``dump_messages`` output + and ``ActivityMessage`` with ``activity_type='pydantic_ai_file'`` is ignored by ``load_messages``. + """ + @classmethod def build_run_input(cls, body: bytes) -> RunAgentInput: """Build an AG-UI run input object from the request body.""" @@ -141,10 +154,22 @@ def build_event_stream(self) -> UIEventStream[RunAgentInput, BaseEvent, AgentDep """Build an AG-UI event stream transformer.""" return AGUIEventStream(self.run_input, accept=self.accept) + @classmethod + async def from_request( + cls, + request: Request, + *, + agent: AbstractAgent[AgentDepsT, OutputDataT], + include_file_parts: bool = False, + **kwargs: Any, + ) -> AGUIAdapter[AgentDepsT, OutputDataT]: + """Extends [`from_request`][pydantic_ai.ui.UIAdapter.from_request] with the `include_file_parts` parameter.""" + return await super().from_request(request, agent=agent, include_file_parts=include_file_parts, **kwargs) + @cached_property def messages(self) -> list[ModelMessage]: """Pydantic AI messages from the AG-UI run input.""" - return self.load_messages(self.run_input.messages) + return self.load_messages(self.run_input.messages, include_file_parts=self.include_file_parts) @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: @@ -166,7 +191,7 @@ def state(self) -> dict[str, Any] | None: return cast('dict[str, Any]', state) @classmethod - def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # noqa: C901 + def load_messages(cls, messages: Sequence[Message], *, include_file_parts: bool = False) -> list[ModelMessage]: # noqa: C901 """Transform AG-UI messages into Pydantic AI messages.""" builder = MessagesBuilder() tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. 
@@ -268,9 +293,12 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no ) case ReasoningMessage() as reasoning_msg: - metadata: dict[str, Any] = ( - json.loads(reasoning_msg.encrypted_value) if reasoning_msg.encrypted_value else {} - ) + try: + metadata: dict[str, Any] = ( + json.loads(reasoning_msg.encrypted_value) if reasoning_msg.encrypted_value else {} + ) + except json.JSONDecodeError: + metadata = {} builder.add( ThinkingPart( content=reasoning_msg.content, @@ -282,14 +310,19 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no ) case ActivityMessage() as activity_msg: - content = activity_msg.content - if activity_msg.activity_type == 'pydantic_ai_file': + if activity_msg.activity_type == 'pydantic_ai_file' and include_file_parts: + activity_content = activity_msg.content + url = activity_content.get('url', '') + if not url: + raise ValueError( + 'ActivityMessage with activity_type=pydantic_ai_file must have a non-empty url.' + ) builder.add( FilePart( - content=BinaryContent.from_data_uri(content.get('url', '')), - id=content.get('id'), - provider_name=content.get('provider_name'), - provider_details=content.get('provider_details'), + content=BinaryContent.from_data_uri(url), + id=activity_content.get('id'), + provider_name=activity_content.get('provider_name'), + provider_details=activity_content.get('provider_details'), ) ) @@ -299,14 +332,9 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: # no return builder.messages @staticmethod - def _dump_request_parts(msg: ModelRequest) -> tuple[list[Message], dict[str, str]]: - """Convert a `ModelRequest` into AG-UI messages. - - Returns: - A tuple of (messages, tool_call_id_to_name mapping). 
- """ + def _dump_request_parts(msg: ModelRequest) -> list[Message]: + """Convert a `ModelRequest` into AG-UI messages.""" result: list[Message] = [] - tool_call_names: dict[str, str] = {} system_content: list[str] = [] user_content: list[TextInputContent | BinaryInputContent] = [] @@ -322,7 +350,6 @@ def _dump_request_parts(msg: ModelRequest) -> tuple[list[Message], dict[str, str if converted is not None: user_content.append(converted) elif isinstance(part, ToolReturnPart): - tool_call_names[part.tool_call_id] = part.tool_name result.append( ToolMessage( id=_new_message_id(), @@ -332,7 +359,6 @@ def _dump_request_parts(msg: ModelRequest) -> tuple[list[Message], dict[str, str ) elif isinstance(part, RetryPromptPart): if part.tool_name: - tool_call_names[part.tool_call_id] = part.tool_name result.append( ToolMessage( id=_new_message_id(), @@ -356,10 +382,10 @@ def _dump_request_parts(msg: ModelRequest) -> tuple[list[Message], dict[str, str else: messages.append(UserMessage(id=_new_message_id(), content=user_content)) messages.extend(result) - return messages, tool_call_names + return messages @staticmethod - def _dump_response_parts(msg: ModelResponse) -> list[Message]: + def _dump_response_parts(msg: ModelResponse, *, include_file_parts: bool = False) -> list[Message]: # noqa: C901 """Convert a `ModelResponse` into AG-UI messages. Uses a flush pattern to preserve part ordering: text that appears after tool calls @@ -431,21 +457,25 @@ def flush() -> None: # Emitted when matching BuiltinToolCallPart is processed above. 
pass elif isinstance(part, FilePart): - flush() - file_content: dict[str, Any] = { - 'url': part.content.data_uri, - 'media_type': part.content.media_type, - } - for attr in ['id', 'provider_name', 'provider_details']: - if (value := getattr(part, attr)) is not None: - file_content[attr] = value - result.append( - ActivityMessage( - id=_new_message_id(), - activity_type='pydantic_ai_file', - content=file_content, + if include_file_parts: + flush() + file_content: dict[str, Any] = { + 'url': part.content.data_uri, + 'media_type': part.content.media_type, + } + if part.id is not None: + file_content['id'] = part.id + if part.provider_name is not None: + file_content['provider_name'] = part.provider_name + if part.provider_details is not None: + file_content['provider_details'] = part.provider_details + result.append( + ActivityMessage( + id=_new_message_id(), + activity_type='pydantic_ai_file', + content=file_content, + ) ) - ) else: assert_never(part) @@ -453,11 +483,21 @@ def flush() -> None: return result @classmethod - def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[Message]: + def dump_messages(cls, messages: Sequence[ModelMessage], *, include_file_parts: bool = False) -> list[Message]: """Transform Pydantic AI messages into AG-UI messages. + Note: The round-trip ``dump_messages`` -> ``load_messages`` is not fully lossless: + + - ``TextPart.id``, ``.provider_name``, ``.provider_details`` are lost. + - ``ToolCallPart.id``, ``.provider_name``, ``.provider_details`` are lost. + - ``RetryPromptPart`` becomes ``ToolReturnPart`` (or ``UserPromptPart``) on reload. + - ``CachePoint`` and ``UploadedFile`` content items are dropped. + - ``FilePart`` is silently dropped unless ``include_file_parts=True``. + - Part ordering within a ``ModelResponse`` may change when text follows tool calls. + Args: messages: A sequence of ModelMessage objects to convert. + include_file_parts: Whether to include ``FilePart`` as ``ActivityMessage``. 
Returns: A list of AG-UI Message objects. @@ -466,10 +506,10 @@ def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[Message]: for msg in messages: if isinstance(msg, ModelRequest): - request_messages, _ = cls._dump_request_parts(msg) + request_messages = cls._dump_request_parts(msg) result.extend(request_messages) elif isinstance(msg, ModelResponse): - result.extend(cls._dump_response_parts(msg)) + result.extend(cls._dump_response_parts(msg, include_file_parts=include_file_parts)) else: assert_never(msg) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index b97bf368ed..6941feef07 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -69,14 +69,18 @@ BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' -_THINKING_METADATA_ATTRS: Final[tuple[str, ...]] = ('id', 'signature', 'provider_name', 'provider_details') - def thinking_encrypted_metadata(part: ThinkingPart) -> dict[str, Any]: """Collect non-None metadata fields from a ThinkingPart for AG-UI encrypted_value.""" - encrypted: dict[str, Any] = { - attr: value for attr in _THINKING_METADATA_ATTRS if (value := getattr(part, attr)) is not None - } + encrypted: dict[str, Any] = {} + if part.id is not None: + encrypted['id'] = part.id + if part.signature is not None: + encrypted['signature'] = part.signature + if part.provider_name is not None: + encrypted['provider_name'] = part.provider_name + if part.provider_details is not None: + encrypted['provider_details'] = part.provider_details return encrypted diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index ab41a10ff5..7cbabd57d7 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -28,7 +28,9 @@ ImageUrl, ModelMessage, ModelRequest, + ModelRequestPart, ModelResponse, + ModelResponsePart, PartDeltaEvent, PartEndEvent, PartStartEvent, @@ -1405,9 +1407,15 @@ def 
test_activity_message_other_types_ignored() -> None: assert messages == snapshot([ModelResponse(parts=[TextPart(content='Response')], timestamp=IsDatetime())]) -def _sync_part_timestamps(original_part: Any, new_part: Any) -> None: - """Sync timestamp attribute if both parts have it.""" - if hasattr(new_part, 'timestamp') and hasattr(original_part, 'timestamp'): +_TIMESTAMPED_PARTS = (UserPromptPart, RetryPromptPart, ToolReturnPart, BuiltinToolReturnPart, SystemPromptPart) + + +def _sync_part_timestamps( + original_part: ModelRequestPart | ModelResponsePart, + new_part: ModelRequestPart | ModelResponsePart, +) -> None: + """Sync timestamp attribute if both parts are request parts (which carry timestamps).""" + if isinstance(new_part, _TIMESTAMPED_PARTS) and isinstance(original_part, _TIMESTAMPED_PARTS): object.__setattr__(new_part, 'timestamp', original_part.timestamp) @@ -1543,8 +1551,8 @@ def test_dump_load_roundtrip_file_part() -> None: ), ] - ag_ui_msgs = AGUIAdapter.dump_messages(original) - reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + ag_ui_msgs = AGUIAdapter.dump_messages(original, include_file_parts=True) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs, include_file_parts=True) _sync_timestamps(original, reloaded) assert reloaded == original @@ -1722,8 +1730,8 @@ def test_dump_load_roundtrip_file_part_minimal() -> None: ), ] - ag_ui_msgs = AGUIAdapter.dump_messages(original) - reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + ag_ui_msgs = AGUIAdapter.dump_messages(original, include_file_parts=True) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs, include_file_parts=True) _sync_timestamps(original, reloaded) assert reloaded == original @@ -1737,13 +1745,39 @@ def test_dump_load_roundtrip_file_part_only() -> None: ModelResponse(parts=[FilePart(content=BinaryImage(data=file_data, media_type='image/png'))]), ] - ag_ui_msgs = AGUIAdapter.dump_messages(original) - reloaded = AGUIAdapter.load_messages(ag_ui_msgs) + ag_ui_msgs = 
AGUIAdapter.dump_messages(original, include_file_parts=True) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs, include_file_parts=True) _sync_timestamps(original, reloaded) assert reloaded == original +def test_file_part_dropped_by_default() -> None: + """Test that FilePart is silently dropped when include_file_parts=False (default). + + dump_messages drops FilePart from output, and load_messages ignores + ActivityMessage(pydantic_ai_file) — both without raising errors. + """ + messages_with_file: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Generate an image')]), + ModelResponse( + parts=[ + FilePart(content=BinaryImage(data=b'image data', media_type='image/png')), + TextPart(content='Here is your image.'), + ] + ), + ] + + # dump_messages drops FilePart by default + ag_ui_msgs = AGUIAdapter.dump_messages(messages_with_file) + assert not any(isinstance(m, ActivityMessage) and m.activity_type == 'pydantic_ai_file' for m in ag_ui_msgs) + + # load_messages ignores ActivityMessage(pydantic_ai_file) by default + ag_ui_msgs_with_activity = AGUIAdapter.dump_messages(messages_with_file, include_file_parts=True) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs_with_activity) + assert not any(isinstance(part, FilePart) for msg in reloaded for part in msg.parts) + + def test_dump_load_roundtrip_interleaved_text_and_tools() -> None: """Test round-trip for response with text interleaved around tool calls. From 762775e519265d863c7979dd928b5a7ef98558f8 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Mon, 16 Mar 2026 21:56:30 -0500 Subject: [PATCH 20/33] fix: validate encrypted_value JSON is a dict, add coverage tests Add isinstance(metadata, dict) guard after json.loads for ReasoningMessage.encrypted_value to handle valid JSON that isn't an object (e.g. strings, arrays, numbers). Add parametrized tests for malformed encrypted_value and for the empty-url ActivityMessage path. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- .../pydantic_ai/ui/ag_ui/_adapter.py | 2 + tests/test_ag_ui.py | 43 +++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 4d4a24fdd6..0d56af7f01 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -303,6 +303,8 @@ def load_messages(cls, messages: Sequence[Message], *, include_file_parts: bool metadata: dict[str, Any] = ( json.loads(reasoning_msg.encrypted_value) if reasoning_msg.encrypted_value else {} ) + if not isinstance(metadata, dict): + metadata = {} except json.JSONDecodeError: metadata = {} builder.add( diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 4c5456eafd..8a8c91f9cf 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1407,6 +1407,49 @@ def test_activity_message_other_types_ignored() -> None: assert messages == snapshot([ModelResponse(parts=[TextPart(content='Response')], timestamp=IsDatetime())]) +@pytest.mark.parametrize( + 'encrypted_value', + [ + pytest.param('not valid json{{{', id='invalid-json'), + pytest.param('"just a string"', id='non-dict-string'), + pytest.param('[1, 2, 3]', id='non-dict-list'), + pytest.param('42', id='non-dict-number'), + ], +) +def test_reasoning_message_malformed_encrypted_value(encrypted_value: str) -> None: + """Test that malformed or non-dict encrypted_value is handled gracefully.""" + messages = AGUIAdapter.load_messages( + [ + ReasoningMessage(id='r-1', content='Thinking...', encrypted_value=encrypted_value), + AssistantMessage(id='msg-1', content='Done'), + ] + ) + + assert messages == snapshot( + [ + ModelResponse( + parts=[ThinkingPart(content='Thinking...'), TextPart(content='Done')], + timestamp=IsDatetime(), + ) + ] + ) + + +def test_activity_message_file_part_missing_url() -> None: + """Test that ActivityMessage(pydantic_ai_file) with empty url 
raises ValueError.""" + with pytest.raises(ValueError, match='must have a non-empty url'): + AGUIAdapter.load_messages( + [ + ActivityMessage( + id='activity-1', + activity_type='pydantic_ai_file', + content={'url': '', 'media_type': 'image/png'}, + ), + ], + include_file_parts=True, + ) + + _TIMESTAMPED_PARTS = (UserPromptPart, RetryPromptPart, ToolReturnPart, BuiltinToolReturnPart, SystemPromptPart) From c2017dcbe33f293e9bdad7dcd0a86ec0c2185369 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 17 Mar 2026 00:49:37 -0500 Subject: [PATCH 21/33] chore: add .agents/* to .gitignore Co-Authored-By: Claude Opus 4.6 (1M context) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index dbdfd83ee0..8327ad4abe 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ node_modules/ .coverage* /test_tmp/ .mcp.json +.agents/* .claude/* !.claude/skills/ .claude/skills/* From 8ca25f7cfcc3b1e8f52995cbe774b0f405351959 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 17 Mar 2026 10:56:19 -0500 Subject: [PATCH 22/33] fix: remove redundant ValueError catch from JSONDecodeError handler --- pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 0d56af7f01..cb3ca3f225 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -279,7 +279,7 @@ def load_messages(cls, messages: Sequence[Message], *, include_file_parts: bool if isinstance(content, str): try: content = json.loads(content) - except (json.JSONDecodeError, ValueError): + except json.JSONDecodeError: pass builder.add( BuiltinToolReturnPart( From 2783d83061bb2142f3565a2f4bce7bc25ab06715 Mon Sep 17 00:00:00 2001 From: David Sanchez 
<64162682+dsfaccini@users.noreply.github.com> Date: Wed, 25 Mar 2026 14:11:04 -0500 Subject: [PATCH 23/33] =?UTF-8?q?fix:=20address=20PR=20review=20=E2=80=94?= =?UTF-8?q?=20ag=5Fui=5Fversion,=20preserve=5Ffile=5Fdata,=20stray=20delta?= =?UTF-8?q?=20fix?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add `ag_ui_version: Literal['0.1.10', '0.1.13']` parameter (default '0.1.10') for backward-compatible thinking event emission. Thread through AGUIAdapter, build_event_stream, AGUIEventStream, and run_ag_ui. - Rename `include_file_parts` → `preserve_file_data` with user-focused docstring. - Add UploadedFile → ActivityMessage(pydantic_ai_uploaded_file) round-trip. - Fix double backticks → single backticks in all docstrings. - Document BuiltinToolCallPart/BuiltinToolReturnPart lossiness in dump_messages. - Assert _reasoning_message_id non-None instead of or '' fallback. - Fix stray TOOL_CALL_ARGS after TOOL_CALL_END (#4733) via _ended_tool_call_ids. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- pydantic_ai_slim/pydantic_ai/ag_ui.py | 6 +- .../pydantic_ai/ui/ag_ui/__init__.py | 3 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 151 +++++++++---- .../pydantic_ai/ui/ag_ui/_event_stream.py | 122 +++++++--- tests/test_ag_ui.py | 213 +++++++++++++----- 5 files changed, 362 insertions(+), 133 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index eaa096bc41..55a9a7d688 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -29,7 +29,7 @@ from starlette.responses import Response from .ui import SSE_CONTENT_TYPE, OnCompleteFunc, StateDeps, StateHandler - from .ui.ag_ui import AGUIAdapter + from .ui.ag_ui import AGUIAdapter, AGUIVersion from .ui.ag_ui.app import AGUIApp except ImportError as e: # pragma: no cover raise ImportError( @@ -114,6 +114,7 @@ def run_ag_ui( run_input: RunAgentInput, accept: str = SSE_CONTENT_TYPE, *, + ag_ui_version: AGUIVersion = '0.1.10', output_type: OutputSpec[Any] | None = None, message_history: Sequence[ModelMessage] | None = None, deferred_tool_results: DeferredToolResults | None = None, @@ -133,6 +134,7 @@ def run_ag_ui( agent: The agent to run. run_input: The AG-UI run input containing thread_id, run_id, messages, etc. accept: The accept header value for the run. + ag_ui_version: AG-UI protocol version controlling thinking/reasoning event format. output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. @@ -153,7 +155,7 @@ def run_ag_ui( Yields: Streaming event chunks encoded as strings according to the accept header value. 
""" - adapter = AGUIAdapter(agent=agent, run_input=run_input, accept=accept) + adapter = AGUIAdapter(agent=agent, run_input=run_input, accept=accept, ag_ui_version=ag_ui_version) return adapter.encode_stream( adapter.run_stream( output_type=output_type, diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py index 6228771869..0d572b3493 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -1,9 +1,10 @@ """AG-UI protocol integration for Pydantic AI agents.""" from ._adapter import AGUIAdapter -from ._event_stream import AGUIEventStream +from ._event_stream import AGUIEventStream, AGUIVersion __all__ = [ 'AGUIAdapter', 'AGUIEventStream', + 'AGUIVersion', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index cb3ca3f225..b5ac5841e2 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -63,7 +63,7 @@ ) from .. import MessagesBuilder, UIAdapter, UIEventStream - from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream, thinking_encrypted_metadata + from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream, AGUIVersion, thinking_encrypted_metadata except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' @@ -137,12 +137,29 @@ class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, Outpu """UI adapter for the Agent-User Interaction (AG-UI) protocol.""" _: KW_ONLY - include_file_parts: bool = False - """Whether to include ``FilePart`` data in message conversion. + ag_ui_version: AGUIVersion = '0.1.10' + """AG-UI protocol version controlling thinking/reasoning event format. - When ``True``, ``FilePart`` round-trips as ``ActivityMessage(activity_type='pydantic_ai_file')``. 
- When ``False`` (default), ``FilePart`` is silently dropped from ``dump_messages`` output - and ``ActivityMessage`` with ``activity_type='pydantic_ai_file'`` is ignored by ``load_messages``. + - `'0.1.10'` (default): emits `THINKING_*` events during streaming, drops `ThinkingPart` + from `dump_messages` output. Compatible with AG-UI frontends that don't support reasoning events. + - `'0.1.13'`: emits `REASONING_*` events with encrypted metadata during streaming, and + includes `ThinkingPart` as `ReasoningMessage` in `dump_messages` output for full round-trip + fidelity of thinking signatures and provider metadata. + + `load_messages` always accepts `ReasoningMessage` regardless of this setting. + """ + + preserve_file_data: bool = False + """Whether to preserve file and uploaded-file data in AG-UI message conversion. + + When `True`, `FilePart` round-trips as `ActivityMessage(activity_type='pydantic_ai_file')` + and `UploadedFile` round-trips as `ActivityMessage(activity_type='pydantic_ai_uploaded_file')`. + When `False` (default), these are silently dropped from `dump_messages` output + and their corresponding `ActivityMessage` types are ignored by `load_messages`. + + If your AG-UI frontend uses [activities](https://docs.ag-ui.com/concepts/activities), + be aware that `pydantic_ai_*` activity types are reserved for internal round-trip use + and should be ignored by frontend activity handlers. 
""" @classmethod @@ -152,7 +169,7 @@ def build_run_input(cls, body: bytes) -> RunAgentInput: def build_event_stream(self) -> UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: """Build an AG-UI event stream transformer.""" - return AGUIEventStream(self.run_input, accept=self.accept) + return AGUIEventStream(self.run_input, accept=self.accept, ag_ui_version=self.ag_ui_version) @classmethod async def from_request( @@ -160,16 +177,19 @@ async def from_request( request: Request, *, agent: AbstractAgent[AgentDepsT, OutputDataT], - include_file_parts: bool = False, + ag_ui_version: AGUIVersion = '0.1.10', + preserve_file_data: bool = False, **kwargs: Any, ) -> AGUIAdapter[AgentDepsT, OutputDataT]: - """Extends [`from_request`][pydantic_ai.ui.UIAdapter.from_request] with the `include_file_parts` parameter.""" - return await super().from_request(request, agent=agent, include_file_parts=include_file_parts, **kwargs) + """Extends [`from_request`][pydantic_ai.ui.UIAdapter.from_request] with AG-UI-specific parameters.""" + return await super().from_request( + request, agent=agent, ag_ui_version=ag_ui_version, preserve_file_data=preserve_file_data, **kwargs + ) @cached_property def messages(self) -> list[ModelMessage]: """Pydantic AI messages from the AG-UI run input.""" - return self.load_messages(self.run_input.messages, include_file_parts=self.include_file_parts) + return self.load_messages(self.run_input.messages, preserve_file_data=self.preserve_file_data) @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: @@ -191,7 +211,7 @@ def state(self) -> dict[str, Any] | None: return cast('dict[str, Any]', state) @classmethod - def load_messages(cls, messages: Sequence[Message], *, include_file_parts: bool = False) -> list[ModelMessage]: # noqa: C901 + def load_messages(cls, messages: Sequence[Message], *, preserve_file_data: bool = False) -> list[ModelMessage]: # noqa: C901 """Transform AG-UI messages into Pydantic AI messages.""" builder = 
MessagesBuilder() tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. @@ -318,7 +338,7 @@ def load_messages(cls, messages: Sequence[Message], *, include_file_parts: bool ) case ActivityMessage() as activity_msg: - if activity_msg.activity_type == 'pydantic_ai_file' and include_file_parts: + if activity_msg.activity_type == 'pydantic_ai_file' and preserve_file_data: activity_content = activity_msg.content url = activity_content.get('url', '') if not url: @@ -333,6 +353,28 @@ def load_messages(cls, messages: Sequence[Message], *, include_file_parts: bool provider_details=activity_content.get('provider_details'), ) ) + elif activity_msg.activity_type == 'pydantic_ai_uploaded_file' and preserve_file_data: + activity_content = activity_msg.content + file_id = activity_content.get('file_id', '') + provider_name = activity_content.get('provider_name', '') + if not file_id or not provider_name: + raise ValueError( + 'ActivityMessage with activity_type=pydantic_ai_uploaded_file ' + 'must have non-empty file_id and provider_name.' 
+ ) + builder.add( + UserPromptPart( + content=[ + UploadedFile( + file_id=file_id, + provider_name=provider_name, + vendor_metadata=activity_content.get('vendor_metadata'), + media_type=activity_content.get('media_type'), + identifier=activity_content.get('identifier'), + ) + ] + ) + ) case _: assert_never(msg) @@ -340,7 +382,7 @@ def load_messages(cls, messages: Sequence[Message], *, include_file_parts: bool return builder.messages @staticmethod - def _dump_request_parts(msg: ModelRequest) -> list[Message]: + def _dump_request_parts(msg: ModelRequest, *, preserve_file_data: bool = False) -> list[Message]: """Convert a `ModelRequest` into AG-UI messages.""" result: list[Message] = [] system_content: list[str] = [] @@ -354,9 +396,26 @@ def _dump_request_parts(msg: ModelRequest) -> list[Message]: user_content.append(TextInputContent(type='text', text=part.content)) else: for item in part.content: - converted = _user_content_to_input(item) - if converted is not None: - user_content.append(converted) + if isinstance(item, UploadedFile) and preserve_file_data: + uploaded_content: dict[str, Any] = { + 'file_id': item.file_id, + 'provider_name': item.provider_name, + 'media_type': item.media_type, + 'identifier': item.identifier, + } + if item.vendor_metadata is not None: + uploaded_content['vendor_metadata'] = item.vendor_metadata + result.append( + ActivityMessage( + id=_new_message_id(), + activity_type='pydantic_ai_uploaded_file', + content=uploaded_content, + ) + ) + else: + converted = _user_content_to_input(item) + if converted is not None: + user_content.append(converted) elif isinstance(part, ToolReturnPart): result.append( ToolMessage( @@ -393,7 +452,9 @@ def _dump_request_parts(msg: ModelRequest) -> list[Message]: return messages @staticmethod - def _dump_response_parts(msg: ModelResponse, *, include_file_parts: bool = False) -> list[Message]: # noqa: C901 + def _dump_response_parts( # noqa: C901 + msg: ModelResponse, *, ag_ui_version: AGUIVersion = '0.1.10', 
preserve_file_data: bool = False + ) -> list[Message]: """Convert a `ModelResponse` into AG-UI messages. Uses a flush pattern to preserve part ordering: text that appears after tool calls @@ -429,15 +490,16 @@ def flush() -> None: flush() text_content.append(part.content) elif isinstance(part, ThinkingPart): - flush() - encrypted = thinking_encrypted_metadata(part) - result.append( - ReasoningMessage( - id=_new_message_id(), - content=part.content, - encrypted_value=json.dumps(encrypted) if encrypted else None, + if ag_ui_version == '0.1.13': + flush() + encrypted = thinking_encrypted_metadata(part) + result.append( + ReasoningMessage( + id=_new_message_id(), + content=part.content, + encrypted_value=json.dumps(encrypted) if encrypted else None, + ) ) - ) elif isinstance(part, ToolCallPart): tool_calls_list.append( ToolCall( @@ -465,7 +527,7 @@ def flush() -> None: # Emitted when matching BuiltinToolCallPart is processed above. pass elif isinstance(part, FilePart): - if include_file_parts: + if preserve_file_data: flush() file_content: dict[str, Any] = { 'url': part.content.data_uri, @@ -491,21 +553,32 @@ def flush() -> None: return result @classmethod - def dump_messages(cls, messages: Sequence[ModelMessage], *, include_file_parts: bool = False) -> list[Message]: + def dump_messages( + cls, + messages: Sequence[ModelMessage], + *, + ag_ui_version: AGUIVersion = '0.1.10', + preserve_file_data: bool = False, + ) -> list[Message]: """Transform Pydantic AI messages into AG-UI messages. - Note: The round-trip ``dump_messages`` -> ``load_messages`` is not fully lossless: + Note: The round-trip `dump_messages` -> `load_messages` is not fully lossless: - - ``TextPart.id``, ``.provider_name``, ``.provider_details`` are lost. - - ``ToolCallPart.id``, ``.provider_name``, ``.provider_details`` are lost. - - ``RetryPromptPart`` becomes ``ToolReturnPart`` (or ``UserPromptPart``) on reload. - - ``CachePoint`` and ``UploadedFile`` content items are dropped. 
- - ``FilePart`` is silently dropped unless ``include_file_parts=True``. - - Part ordering within a ``ModelResponse`` may change when text follows tool calls. + - `TextPart.id`, `.provider_name`, `.provider_details` are lost. + - `ToolCallPart.id`, `.provider_name`, `.provider_details` are lost. + - `BuiltinToolCallPart.id`, `.provider_details` are lost (only `.provider_name` survives + via the prefixed tool call ID). + - `BuiltinToolReturnPart.provider_details` is lost. + - `RetryPromptPart` becomes `ToolReturnPart` (or `UserPromptPart`) on reload. + - `CachePoint` and `UploadedFile` content items are dropped (unless `preserve_file_data=True`). + - `ThinkingPart` is dropped when `ag_ui_version='0.1.10'` (default). + - `FilePart` is silently dropped unless `preserve_file_data=True`. + - Part ordering within a `ModelResponse` may change when text follows tool calls. Args: messages: A sequence of ModelMessage objects to convert. - include_file_parts: Whether to include ``FilePart`` as ``ActivityMessage``. + ag_ui_version: AG-UI protocol version controlling `ThinkingPart` emission. + preserve_file_data: Whether to include `FilePart` and `UploadedFile` as `ActivityMessage`. Returns: A list of AG-UI Message objects. 
@@ -514,10 +587,12 @@ def dump_messages(cls, messages: Sequence[ModelMessage], *, include_file_parts: for msg in messages: if isinstance(msg, ModelRequest): - request_messages = cls._dump_request_parts(msg) + request_messages = cls._dump_request_parts(msg, preserve_file_data=preserve_file_data) result.extend(request_messages) elif isinstance(msg, ModelResponse): - result.extend(cls._dump_response_parts(msg, include_file_parts=include_file_parts)) + result.extend( + cls._dump_response_parts(msg, ag_ui_version=ag_ui_version, preserve_file_data=preserve_file_data) + ) else: assert_never(msg) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 6941feef07..fcc54ad902 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -9,7 +9,7 @@ import json from collections.abc import AsyncIterator, Iterable from dataclasses import dataclass, field -from typing import Any, Final +from typing import Any, Final, Literal from uuid import uuid4 from ..._utils import now_utc @@ -47,6 +47,11 @@ TextMessageContentEvent, TextMessageEndEvent, TextMessageStartEvent, + ThinkingEndEvent, + ThinkingStartEvent, + ThinkingTextMessageContentEvent, + ThinkingTextMessageEndEvent, + ThinkingTextMessageStartEvent, ToolCallArgsEvent, ToolCallEndEvent, ToolCallResultEvent, @@ -60,8 +65,12 @@ 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' ) from e +AGUIVersion = Literal['0.1.10', '0.1.13'] +"""Supported AG-UI protocol versions for thinking/reasoning event emission.""" + __all__ = [ 'AGUIEventStream', + 'AGUIVersion', 'RunAgentInput', 'RunStartedEvent', 'RunFinishedEvent', @@ -88,10 +97,13 @@ def thinking_encrypted_metadata(part: ThinkingPart) -> dict[str, Any]: class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): """UI event stream transformer for the Agent-User Interaction 
(AG-UI) protocol.""" + ag_ui_version: AGUIVersion = '0.1.10' + _reasoning_message_id: str | None = None _reasoning_started: bool = False _reasoning_text: bool = False _builtin_tool_call_ids: dict[str, str] = field(default_factory=dict[str, str]) + _ended_tool_call_ids: set[str] = field(default_factory=set[str]) _error: bool = False @property @@ -166,56 +178,95 @@ async def handle_thinking_start( self._reasoning_message_id = str(uuid4()) self._reasoning_started = False - if part.content: - yield ReasoningStartEvent(message_id=self._reasoning_message_id) - self._reasoning_started = True - yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id, role='assistant') - yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id, delta=part.content) - self._reasoning_text = True + if self.ag_ui_version == '0.1.10': + if part.content: + yield ThinkingStartEvent() + self._reasoning_started = True + yield ThinkingTextMessageStartEvent() + yield ThinkingTextMessageContentEvent(delta=part.content) + self._reasoning_text = True + else: + if part.content: + yield ReasoningStartEvent(message_id=self._reasoning_message_id) + self._reasoning_started = True + yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id, role='assistant') + yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id, delta=part.content) + self._reasoning_text = True async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: if not delta.content_delta: return # pragma: no cover - message_id = self._reasoning_message_id or '' + assert self._reasoning_message_id is not None, ( + 'handle_thinking_start must be called before handle_thinking_delta' + ) + + if self.ag_ui_version == '0.1.10': + if not self._reasoning_started: + yield ThinkingStartEvent() + self._reasoning_started = True + + if not self._reasoning_text: + yield ThinkingTextMessageStartEvent() + self._reasoning_text = True + + yield 
ThinkingTextMessageContentEvent(delta=delta.content_delta) + else: + message_id = self._reasoning_message_id - if not self._reasoning_started: - yield ReasoningStartEvent(message_id=message_id) - self._reasoning_started = True + if not self._reasoning_started: + yield ReasoningStartEvent(message_id=message_id) + self._reasoning_started = True - if not self._reasoning_text: - yield ReasoningMessageStartEvent(message_id=message_id, role='assistant') - self._reasoning_text = True + if not self._reasoning_text: + yield ReasoningMessageStartEvent(message_id=message_id, role='assistant') + self._reasoning_text = True - yield ReasoningMessageContentEvent(message_id=message_id, delta=delta.content_delta) + yield ReasoningMessageContentEvent(message_id=message_id, delta=delta.content_delta) async def handle_thinking_end( self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[BaseEvent]: - message_id = self._reasoning_message_id or '' + assert self._reasoning_message_id is not None, 'handle_thinking_start must be called before handle_thinking_end' - encrypted = thinking_encrypted_metadata(part) + if self.ag_ui_version == '0.1.10': + if not self._reasoning_started and not part.content: + self._reasoning_message_id = None + return - if not self._reasoning_started and not encrypted: + if not self._reasoning_started: + yield ThinkingStartEvent() + + if self._reasoning_text: + yield ThinkingTextMessageEndEvent() + self._reasoning_text = False + + yield ThinkingEndEvent() self._reasoning_message_id = None - return + else: + message_id = self._reasoning_message_id + encrypted = thinking_encrypted_metadata(part) - if not self._reasoning_started: - yield ReasoningStartEvent(message_id=message_id) + if not self._reasoning_started and not encrypted: + self._reasoning_message_id = None + return - if self._reasoning_text: - yield ReasoningMessageEndEvent(message_id=message_id) - self._reasoning_text = False + if not self._reasoning_started: + yield 
ReasoningStartEvent(message_id=message_id) - if encrypted: - yield ReasoningEncryptedValueEvent( - subtype='message', - entity_id=message_id, - encrypted_value=json.dumps(encrypted), - ) + if self._reasoning_text: + yield ReasoningMessageEndEvent(message_id=message_id) + self._reasoning_text = False + + if encrypted: + yield ReasoningEncryptedValueEvent( + subtype='message', + entity_id=message_id, + encrypted_value=json.dumps(encrypted), + ) - yield ReasoningEndEvent(message_id=message_id) - self._reasoning_message_id = None + yield ReasoningEndEvent(message_id=message_id) + self._reasoning_message_id = None def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: return self._handle_tool_call_start(part) @@ -245,16 +296,21 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' if tool_call_id in self._builtin_tool_call_ids: tool_call_id = self._builtin_tool_call_ids[tool_call_id] + if tool_call_id in self._ended_tool_call_ids: + return yield ToolCallArgsEvent( tool_call_id=tool_call_id, delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), ) async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseEvent]: + self._ended_tool_call_ids.add(part.tool_call_id) yield ToolCallEndEvent(tool_call_id=part.tool_call_id) async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - yield ToolCallEndEvent(tool_call_id=self._builtin_tool_call_ids[part.tool_call_id]) + builtin_id = self._builtin_tool_call_ids[part.tool_call_id] + self._ended_tool_call_ids.add(builtin_id) + yield ToolCallEndEvent(tool_call_id=builtin_id) async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: tool_call_id = self._builtin_tool_call_ids[part.tool_call_id] diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py 
index 8a8c91f9cf..208eb28817 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -7,7 +7,7 @@ from collections.abc import AsyncIterator, MutableMapping from dataclasses import dataclass from http import HTTPStatus -from typing import Any +from typing import Any, Literal import httpx import pytest @@ -157,10 +157,11 @@ async def run_and_collect_events( *run_inputs: RunAgentInput, deps: AgentDepsT = None, on_complete: OnCompleteFunc[BaseEvent] | None = None, + ag_ui_version: Literal['0.1.10', '0.1.13'] = '0.1.10', ) -> list[dict[str, Any]]: events = list[dict[str, Any]]() for run_input in run_inputs: - async for event in run_ag_ui(agent, run_input, deps=deps, on_complete=on_complete): + async for event in run_ag_ui(agent, run_input, ag_ui_version=ag_ui_version, deps=deps, on_complete=on_complete): events.append(json.loads(event.removeprefix('data: '))) return events @@ -1092,55 +1093,29 @@ async def stream_function( }, {'type': 'TEXT_MESSAGE_END', 'timestamp': IsInt(), 'messageId': message_id}, # Part 1: "Thinking about the weather" - {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (r1 := IsSameStr())}, - { - 'type': 'REASONING_MESSAGE_START', - 'timestamp': IsInt(), - 'messageId': r1, - 'role': 'assistant', - }, - {'type': 'REASONING_MESSAGE_CONTENT', 'timestamp': IsInt(), 'messageId': r1, 'delta': 'Thinking '}, - { - 'type': 'REASONING_MESSAGE_CONTENT', - 'timestamp': IsInt(), - 'messageId': r1, - 'delta': 'about the weather', - }, - {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': r1}, - {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': r1}, + {'type': 'THINKING_START', 'timestamp': IsInt()}, + {'type': 'THINKING_TEXT_MESSAGE_START', 'timestamp': IsInt()}, + {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'timestamp': IsInt(), 'delta': 'Thinking '}, + {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'timestamp': IsInt(), 'delta': 'about the weather'}, + {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, + 
{'type': 'THINKING_END', 'timestamp': IsInt()}, # Part 2: empty thinking — skipped (no content, no metadata) # Part 3: "Thinking about the meaning of life" - {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (r3 := IsSameStr())}, + {'type': 'THINKING_START', 'timestamp': IsInt()}, + {'type': 'THINKING_TEXT_MESSAGE_START', 'timestamp': IsInt()}, { - 'type': 'REASONING_MESSAGE_START', + 'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'timestamp': IsInt(), - 'messageId': r3, - 'role': 'assistant', - }, - { - 'type': 'REASONING_MESSAGE_CONTENT', - 'timestamp': IsInt(), - 'messageId': r3, 'delta': 'Thinking about the meaning of life', }, - {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': r3}, - {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': r3}, + {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, + {'type': 'THINKING_END', 'timestamp': IsInt()}, # Part 4: "Thinking about the universe" - {'type': 'REASONING_START', 'timestamp': IsInt(), 'messageId': (r4 := IsSameStr())}, - { - 'type': 'REASONING_MESSAGE_START', - 'timestamp': IsInt(), - 'messageId': r4, - 'role': 'assistant', - }, - { - 'type': 'REASONING_MESSAGE_CONTENT', - 'timestamp': IsInt(), - 'messageId': r4, - 'delta': 'Thinking about the universe', - }, - {'type': 'REASONING_MESSAGE_END', 'timestamp': IsInt(), 'messageId': r4}, - {'type': 'REASONING_END', 'timestamp': IsInt(), 'messageId': r4}, + {'type': 'THINKING_START', 'timestamp': IsInt()}, + {'type': 'THINKING_TEXT_MESSAGE_START', 'timestamp': IsInt()}, + {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'timestamp': IsInt(), 'delta': 'Thinking about the universe'}, + {'type': 'THINKING_TEXT_MESSAGE_END', 'timestamp': IsInt()}, + {'type': 'THINKING_END', 'timestamp': IsInt()}, { 'type': 'RUN_FINISHED', 'timestamp': IsInt(), @@ -1166,7 +1141,7 @@ async def stream_function( UserMessage(id='msg_1', content='Think about something'), ) - events = await run_and_collect_events(agent, run_input) + events = await 
run_and_collect_events(agent, run_input, ag_ui_version='0.1.13') assert events == snapshot( [ @@ -1233,7 +1208,7 @@ async def stream_function( UserMessage(id='msg_1', content='Think deeply'), ) - events = await run_and_collect_events(agent, run_input) + events = await run_and_collect_events(agent, run_input, ag_ui_version='0.1.13') assert events == snapshot( [ @@ -1358,7 +1333,7 @@ def test_reasoning_message_thinking_roundtrip() -> None: async def test_reasoning_events_with_all_metadata() -> None: """Test that REASONING_* events emit encryptedValue with all metadata fields.""" run_input = create_input(UserMessage(id='msg_1', content='test')) - event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.13') part = ThinkingPart( content='Thinking content', @@ -1446,7 +1421,7 @@ def test_activity_message_file_part_missing_url() -> None: content={'url': '', 'media_type': 'image/png'}, ), ], - include_file_parts=True, + preserve_file_data=True, ) @@ -1506,7 +1481,7 @@ def test_dump_load_roundtrip_thinking() -> None: ), ] - ag_ui_msgs = AGUIAdapter.dump_messages(original) + ag_ui_msgs = AGUIAdapter.dump_messages(original, ag_ui_version='0.1.13') reloaded = AGUIAdapter.load_messages(ag_ui_msgs) _sync_timestamps(original, reloaded) @@ -1542,7 +1517,7 @@ def test_dump_load_roundtrip_multiple_thinking_parts() -> None: ), ] - ag_ui_msgs = AGUIAdapter.dump_messages(original) + ag_ui_msgs = AGUIAdapter.dump_messages(original, ag_ui_version='0.1.13') reloaded = AGUIAdapter.load_messages(ag_ui_msgs) _sync_timestamps(original, reloaded) @@ -1594,8 +1569,8 @@ def test_dump_load_roundtrip_file_part() -> None: ), ] - ag_ui_msgs = AGUIAdapter.dump_messages(original, include_file_parts=True) - reloaded = AGUIAdapter.load_messages(ag_ui_msgs, include_file_parts=True) + ag_ui_msgs = AGUIAdapter.dump_messages(original, preserve_file_data=True) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs, 
preserve_file_data=True) _sync_timestamps(original, reloaded) assert reloaded == original @@ -1773,8 +1748,8 @@ def test_dump_load_roundtrip_file_part_minimal() -> None: ), ] - ag_ui_msgs = AGUIAdapter.dump_messages(original, include_file_parts=True) - reloaded = AGUIAdapter.load_messages(ag_ui_msgs, include_file_parts=True) + ag_ui_msgs = AGUIAdapter.dump_messages(original, preserve_file_data=True) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs, preserve_file_data=True) _sync_timestamps(original, reloaded) assert reloaded == original @@ -1788,8 +1763,8 @@ def test_dump_load_roundtrip_file_part_only() -> None: ModelResponse(parts=[FilePart(content=BinaryImage(data=file_data, media_type='image/png'))]), ] - ag_ui_msgs = AGUIAdapter.dump_messages(original, include_file_parts=True) - reloaded = AGUIAdapter.load_messages(ag_ui_msgs, include_file_parts=True) + ag_ui_msgs = AGUIAdapter.dump_messages(original, preserve_file_data=True) + reloaded = AGUIAdapter.load_messages(ag_ui_msgs, preserve_file_data=True) _sync_timestamps(original, reloaded) assert reloaded == original @@ -1816,7 +1791,7 @@ def test_file_part_dropped_by_default() -> None: assert not any(isinstance(m, ActivityMessage) and m.activity_type == 'pydantic_ai_file' for m in ag_ui_msgs) # load_messages ignores ActivityMessage(pydantic_ai_file) by default - ag_ui_msgs_with_activity = AGUIAdapter.dump_messages(messages_with_file, include_file_parts=True) + ag_ui_msgs_with_activity = AGUIAdapter.dump_messages(messages_with_file, preserve_file_data=True) reloaded = AGUIAdapter.load_messages(ag_ui_msgs_with_activity) assert not any(isinstance(part, FilePart) for msg in reloaded for part in msg.parts) @@ -1886,7 +1861,7 @@ async def test_reasoning_events_empty_content_with_metadata() -> None: (no content was streamed) but encrypted metadata is present — e.g. redacted thinking. 
""" run_input = create_input(UserMessage(id='msg_1', content='test')) - event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.13') part = ThinkingPart( content='', @@ -1923,7 +1898,7 @@ async def test_thinking_roundtrip_anthropic(allow_model_requests: None, anthropi result = await agent.run('What is 1+1? Reply in one word.') original = result.all_messages() - ag_ui_msgs = AGUIAdapter.dump_messages(original) + ag_ui_msgs = AGUIAdapter.dump_messages(original, ag_ui_version='0.1.13') reloaded = AGUIAdapter.load_messages(ag_ui_msgs) _sync_timestamps(original, reloaded) @@ -3409,3 +3384,123 @@ async def send(data: MutableMapping[str, Any]) -> None: {'type': 'http.response.body', 'body': b'', 'more_body': False}, ] ) + + +async def test_stray_tool_call_delta_after_end() -> None: + """Test that TOOL_CALL_ARGS events are suppressed after TOOL_CALL_END for the same tool call.""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input=run_input) + + part = BuiltinToolCallPart( + tool_name='web_search', + tool_call_id='call_123', + args='{"query": "test"}', + provider_name='anthropic', + ) + + events: list[BaseEvent] = [] + async for e in event_stream.handle_builtin_tool_call_start(part): + events.append(e) + async for e in event_stream.handle_builtin_tool_call_end(part): + events.append(e) + + stray_delta = ToolCallPartDelta(tool_call_id='call_123', args_delta='{"extra": true}') + async for e in event_stream.handle_tool_call_delta(stray_delta): + events.append(e) + + event_types = [e.type.value for e in events] + assert 'TOOL_CALL_START' in event_types + assert 'TOOL_CALL_END' in event_types + # No TOOL_CALL_ARGS after TOOL_CALL_END + end_idx = event_types.index('TOOL_CALL_END') + assert 'TOOL_CALL_ARGS' not in event_types[end_idx + 1 :] + + +def test_dump_load_roundtrip_uploaded_file_preserved() -> None: + """Test 
UploadedFile round-trips via ActivityMessage when preserve_file_data=True.""" + original: list[ModelMessage] = [ + ModelRequest( + parts=[ + UserPromptPart( + content=[ + 'Describe this file', + UploadedFile( + file_id='file-abc123', + provider_name='anthropic', + media_type='application/pdf', + vendor_metadata={'source': 'upload'}, + identifier='my-doc.pdf', + ), + ] + ), + ] + ), + ModelResponse(parts=[TextPart(content='I see a PDF.')]), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(original, preserve_file_data=True) + + # Verify ActivityMessage was emitted + activity_msgs = [m for m in ag_ui_msgs if isinstance(m, ActivityMessage)] + assert len(activity_msgs) == 1 + assert activity_msgs[0].activity_type == 'pydantic_ai_uploaded_file' + assert activity_msgs[0].content['file_id'] == 'file-abc123' + + reloaded = AGUIAdapter.load_messages(ag_ui_msgs, preserve_file_data=True) + + # The text and UploadedFile come back as separate UserPromptParts + request_parts = [p for msg in reloaded if isinstance(msg, ModelRequest) for p in msg.parts] + user_parts = [p for p in request_parts if isinstance(p, UserPromptPart)] + assert len(user_parts) == 2 + + # First UserPromptPart has the text + assert user_parts[0].content == 'Describe this file' + + # Second UserPromptPart has the UploadedFile + assert isinstance(user_parts[1].content, list) + uploaded = user_parts[1].content[0] + assert isinstance(uploaded, UploadedFile) + assert uploaded.file_id == 'file-abc123' + assert uploaded.provider_name == 'anthropic' + assert uploaded.media_type == 'application/pdf' + assert uploaded.vendor_metadata == {'source': 'upload'} + assert uploaded.identifier == 'my-doc.pdf' + + +def test_dump_messages_v010_drops_thinking() -> None: + """Test that dump_messages with ag_ui_version='0.1.10' drops ThinkingPart.""" + messages: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Think about this')]), + ModelResponse( + parts=[ + ThinkingPart(content='Deep thoughts...', 
signature='sig_xyz'), + TextPart(content='Conclusion'), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(messages) # default 0.1.10 + # No ReasoningMessage in output + assert not any(isinstance(m, ReasoningMessage) for m in ag_ui_msgs) + # Text still present + assert any(isinstance(m, AssistantMessage) and m.content == 'Conclusion' for m in ag_ui_msgs) + + +def test_dump_messages_v013_includes_reasoning() -> None: + """Test that dump_messages with ag_ui_version='0.1.13' includes ThinkingPart as ReasoningMessage.""" + messages: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content='Think about this')]), + ModelResponse( + parts=[ + ThinkingPart(content='Deep thoughts...', signature='sig_xyz'), + TextPart(content='Conclusion'), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(messages, ag_ui_version='0.1.13') + reasoning_msgs = [m for m in ag_ui_msgs if isinstance(m, ReasoningMessage)] + assert len(reasoning_msgs) == 1 + assert reasoning_msgs[0].content == 'Deep thoughts...' 
+ assert reasoning_msgs[0].encrypted_value is not None + assert 'sig_xyz' in reasoning_msgs[0].encrypted_value From 9f9fd41db26fc1a0782b843436afdfd849a799dc Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 25 Mar 2026 21:52:07 -0500 Subject: [PATCH 24/33] fix: expose ag_ui_version in handle_ag_ui_request Co-Authored-By: Claude Opus 4.6 (1M context) --- pydantic_ai_slim/pydantic_ai/ag_ui.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index 55a9a7d688..b2efe8e68d 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -53,6 +53,7 @@ async def handle_ag_ui_request( agent: AbstractAgent[AgentDepsT, Any], request: Request, *, + ag_ui_version: AGUIVersion = '0.1.10', output_type: OutputSpec[Any] | None = None, message_history: Sequence[ModelMessage] | None = None, deferred_tool_results: DeferredToolResults | None = None, @@ -71,6 +72,7 @@ async def handle_ag_ui_request( Args: agent: The agent to run. request: The Starlette request (e.g. from FastAPI) containing the AG-UI run input. + ag_ui_version: AG-UI protocol version controlling thinking/reasoning event format. output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. 
@@ -94,6 +96,7 @@ async def handle_ag_ui_request( return await AGUIAdapter[AgentDepsT].dispatch_request( request, agent=agent, + ag_ui_version=ag_ui_version, deps=deps, output_type=output_type, message_history=message_history, From ae863234746b43937ec97bb375c6c1193085b690 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Thu, 26 Mar 2026 11:48:51 -0500 Subject: [PATCH 25/33] coverage --- .../pydantic_ai/ui/ag_ui/_event_stream.py | 15 +- tests/test_ag_ui.py | 244 ++++++++++++++++-- 2 files changed, 230 insertions(+), 29 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index fec9b07a6e..60957325ec 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -12,6 +12,8 @@ from typing import Any, Final, Literal from uuid import uuid4 +from typing_extensions import assert_never + from ..._utils import now_utc from ...messages import ( BuiltinToolCallPart, @@ -186,13 +188,16 @@ async def handle_thinking_start( yield ThinkingTextMessageStartEvent() yield ThinkingTextMessageContentEvent(delta=part.content) self._reasoning_text = True - else: + elif self.ag_ui_version == '0.1.13': if part.content: yield ReasoningStartEvent(message_id=self._reasoning_message_id) self._reasoning_started = True yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id, role='assistant') yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id, delta=part.content) self._reasoning_text = True + else: + # exhaustive branching protects against future additions of AG-UI versions + assert_never(self.ag_ui_version) async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: if not delta.content_delta: @@ -212,7 +217,7 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator self._reasoning_text = True yield 
ThinkingTextMessageContentEvent(delta=delta.content_delta) - else: + elif self.ag_ui_version == '0.1.13': message_id = self._reasoning_message_id if not self._reasoning_started: @@ -224,6 +229,8 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator self._reasoning_text = True yield ReasoningMessageContentEvent(message_id=message_id, delta=delta.content_delta) + else: + assert_never(self.ag_ui_version) async def handle_thinking_end( self, part: ThinkingPart, followed_by_thinking: bool = False @@ -244,7 +251,7 @@ async def handle_thinking_end( yield ThinkingEndEvent() self._reasoning_message_id = None - else: + elif self.ag_ui_version == '0.1.13': message_id = self._reasoning_message_id encrypted = thinking_encrypted_metadata(part) @@ -268,6 +275,8 @@ async def handle_thinking_end( yield ReasoningEndEvent(message_id=message_id) self._reasoning_message_id = None + else: + assert_never(self.ag_ui_version) def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: return self._handle_tool_call_start(part) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 32f03f314a..e27a17a782 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -40,6 +40,7 @@ TextPart, TextPartDelta, ThinkingPart, + ThinkingPartDelta, ToolCallPart, ToolCallPartDelta, ToolReturn, @@ -65,7 +66,7 @@ from pydantic_ai.tools import AgentDepsT, ToolDefinition from ._inline_snapshot import snapshot -from .conftest import IsDatetime, IsInt, IsSameStr, IsStr, try_import +from .conftest import IsBytes, IsDatetime, IsInt, IsSameStr, IsStr, try_import with try_import() as imports_successful: from ag_ui.core import ( @@ -2566,45 +2567,30 @@ async def test_messages(image_content: BinaryContent, document_content: BinaryCo timestamp=IsDatetime(), ), UserPromptPart( - content=['this is an image:', image_content], + content=[ + 'this is an image:', + BinaryImage(data=IsBytes(), media_type='image/jpeg', _identifier='241a70'), + 
], timestamp=IsDatetime(), ), UserPromptPart( - content=[ - ImageUrl( - url='http://example.com/image.png', _media_type='image/png', media_type='image/png' - ) - ], + content=[ImageUrl(url='http://example.com/image.png', _media_type='image/png')], timestamp=IsDatetime(), ), UserPromptPart( - content=[ - VideoUrl( - url='http://example.com/video.mp4', _media_type='video/mp4', media_type='video/mp4' - ) - ], + content=[VideoUrl(url='http://example.com/video.mp4', _media_type='video/mp4')], timestamp=IsDatetime(), ), UserPromptPart( - content=[ - AudioUrl( - url='http://example.com/audio.mp3', _media_type='audio/mpeg', media_type='audio/mpeg' - ) - ], + content=[AudioUrl(url='http://example.com/audio.mp3', _media_type='audio/mpeg')], timestamp=IsDatetime(), ), UserPromptPart( - content=[ - DocumentUrl( - url='http://example.com/doc.pdf', - _media_type='application/pdf', - media_type='application/pdf', - ) - ], + content=[DocumentUrl(url='http://example.com/doc.pdf', _media_type='application/pdf')], timestamp=IsDatetime(), ), UserPromptPart( - content=[document_content], + content=[BinaryContent(data=IsBytes(), media_type='application/pdf')], timestamp=IsDatetime(), ), ] @@ -3406,7 +3392,7 @@ async def test_stray_tool_call_delta_after_end() -> None: stray_delta = ToolCallPartDelta(tool_call_id='call_123', args_delta='{"extra": true}') async for e in event_stream.handle_tool_call_delta(stray_delta): - events.append(e) + events.append(e) # pragma: no cover event_types = [e.type.value for e in events] assert 'TOOL_CALL_START' in event_types @@ -3555,3 +3541,209 @@ async def event_generator(): }, ] ) + + +# region: Coverage — event_stream thinking version branches + + +async def test_thinking_events_v010_with_content() -> None: + """Test v0.1.10 THINKING_* events for ThinkingPart with content.""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.10') + + part = 
ThinkingPart(content='Some thoughts', signature='sig_abc') + + events: list[BaseEvent] = [] + async for e in event_stream.handle_thinking_start(part): + events.append(e) + async for e in event_stream.handle_thinking_end(part): + events.append(e) + + assert [e.model_dump(exclude_none=True) for e in events] == snapshot( + [ + {'type': 'THINKING_START'}, + {'type': 'THINKING_TEXT_MESSAGE_START'}, + {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'delta': 'Some thoughts'}, + {'type': 'THINKING_TEXT_MESSAGE_END'}, + {'type': 'THINKING_END'}, + ] + ) + + +async def test_thinking_events_v010_empty_content() -> None: + """Test v0.1.10 early return when ThinkingPart has no content.""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.10') + + part = ThinkingPart(content='', signature='sig_abc') + + events: list[BaseEvent] = [] + async for e in event_stream.handle_thinking_start(part): + events.append(e) + async for e in event_stream.handle_thinking_end(part): + events.append(e) + + assert events == [] + + +async def test_thinking_delta_v013() -> None: + """Test v0.1.13 REASONING_* events emitted via handle_thinking_delta.""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.13') + + start_part = ThinkingPart(content='') + events: list[BaseEvent] = [e async for e in event_stream.handle_thinking_start(start_part)] + + delta = ThinkingPartDelta(content_delta='chunk1') + async for e in event_stream.handle_thinking_delta(delta): + events.append(e) + + assert [e.model_dump(exclude_none=True) for e in events] == snapshot( + [ + {'type': 'REASONING_START', 'message_id': IsStr()}, + {'type': 'REASONING_MESSAGE_START', 'message_id': IsStr(), 'role': 'assistant'}, + {'type': 'REASONING_MESSAGE_CONTENT', 'message_id': IsStr(), 'delta': 'chunk1'}, + ] + ) + + +async def 
test_thinking_end_v013_no_content_no_metadata() -> None: + """Test v0.1.13 early return when ThinkingPart has no content and no encrypted metadata.""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.13') + + part = ThinkingPart(content='') + + events: list[BaseEvent] = [e async for e in event_stream.handle_thinking_start(part)] + async for e in event_stream.handle_thinking_end(part): + events.append(e) + + assert events == [] + + +# endregion + +# region: Coverage — encrypted_metadata branch gap + + +async def test_thinking_encrypted_metadata_partial_fields() -> None: + """Test thinking_encrypted_metadata with signature but no provider_name.""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.13') + + part = ThinkingPart(content='Thoughts', signature='sig_only') + + events: list[BaseEvent] = [] + async for e in event_stream.handle_thinking_start(part): + events.append(e) + async for e in event_stream.handle_thinking_end(part): + events.append(e) + + assert [e.model_dump(exclude_none=True) for e in events] == snapshot( + [ + {'type': 'REASONING_START', 'message_id': IsStr()}, + {'type': 'REASONING_MESSAGE_START', 'message_id': IsStr(), 'role': 'assistant'}, + {'type': 'REASONING_MESSAGE_CONTENT', 'message_id': IsStr(), 'delta': 'Thoughts'}, + {'type': 'REASONING_MESSAGE_END', 'message_id': IsStr()}, + { + 'type': 'REASONING_ENCRYPTED_VALUE', + 'subtype': 'message', + 'entity_id': IsStr(), + 'encrypted_value': '{"signature": "sig_only"}', + }, + {'type': 'REASONING_END', 'message_id': IsStr()}, + ] + ) + + +# endregion + +# region: Coverage — adapter uploaded file edge cases + + +def test_load_messages_uploaded_file_missing_fields() -> None: + """Test load_messages raises ValueError for malformed pydantic_ai_uploaded_file ActivityMessage.""" + with 
pytest.raises(ValueError, match='must have non-empty file_id and provider_name'): + AGUIAdapter.load_messages( + [ActivityMessage(id='msg_1', activity_type='pydantic_ai_uploaded_file', content={})], + preserve_file_data=True, + ) + + +def test_dump_messages_uploaded_file_with_vendor_metadata() -> None: + """Test dump_messages includes vendor_metadata in ActivityMessage when present on UploadedFile.""" + messages: list[ModelMessage] = [ + ModelRequest( + parts=[ + UserPromptPart( + content=[ + UploadedFile( + file_id='file-xyz', + provider_name='openai', + media_type='text/plain', + vendor_metadata={'custom': 'data'}, + ), + ] + ), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(messages, preserve_file_data=True) + activity_msgs = [m for m in ag_ui_msgs if isinstance(m, ActivityMessage)] + assert [m.model_dump() for m in activity_msgs] == snapshot( + [ + { + 'id': IsStr(), + 'role': 'activity', + 'activity_type': 'pydantic_ai_uploaded_file', + 'content': { + 'file_id': 'file-xyz', + 'provider_name': 'openai', + 'media_type': 'text/plain', + 'identifier': '6f0bbc', + 'vendor_metadata': {'custom': 'data'}, + }, + } + ] + ) + + +def test_dump_messages_uploaded_file_without_vendor_metadata() -> None: + """Test dump_messages omits vendor_metadata from ActivityMessage when None on UploadedFile.""" + messages: list[ModelMessage] = [ + ModelRequest( + parts=[ + UserPromptPart( + content=[ + UploadedFile( + file_id='file-xyz', + provider_name='openai', + media_type='text/plain', + ), + ] + ), + ] + ), + ] + + ag_ui_msgs = AGUIAdapter.dump_messages(messages, preserve_file_data=True) + activity_msgs = [m for m in ag_ui_msgs if isinstance(m, ActivityMessage)] + assert [m.model_dump() for m in activity_msgs] == snapshot( + [ + { + 'id': IsStr(), + 'role': 'activity', + 'activity_type': 'pydantic_ai_uploaded_file', + 'content': { + 'file_id': 'file-xyz', + 'provider_name': 'openai', + 'media_type': 'text/plain', + 'identifier': '6f0bbc', + }, + } + ] + ) + + +# endregion 
From 2df3abcc8c1c2d81b2e99a2369f80fc49ddea1ec Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Thu, 26 Mar 2026 17:02:29 -0500 Subject: [PATCH 26/33] fix: use fixtures instead of IsBytes() in test_messages snapshot Pydantic validates constructor args, rejecting dirty_equals matchers as invalid bytes. Use fixture references directly like main does. Co-Authored-By: Claude Opus 4.6 (1M context) --- tests/test_ag_ui.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index e27a17a782..5338b67dc7 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -66,7 +66,7 @@ from pydantic_ai.tools import AgentDepsT, ToolDefinition from ._inline_snapshot import snapshot -from .conftest import IsBytes, IsDatetime, IsInt, IsSameStr, IsStr, try_import +from .conftest import IsDatetime, IsInt, IsSameStr, IsStr, try_import with try_import() as imports_successful: from ag_ui.core import ( @@ -2567,10 +2567,7 @@ async def test_messages(image_content: BinaryContent, document_content: BinaryCo timestamp=IsDatetime(), ), UserPromptPart( - content=[ - 'this is an image:', - BinaryImage(data=IsBytes(), media_type='image/jpeg', _identifier='241a70'), - ], + content=['this is an image:', image_content], timestamp=IsDatetime(), ), UserPromptPart( @@ -2590,7 +2587,7 @@ async def test_messages(image_content: BinaryContent, document_content: BinaryCo timestamp=IsDatetime(), ), UserPromptPart( - content=[BinaryContent(data=IsBytes(), media_type='application/pdf')], + content=[document_content], timestamp=IsDatetime(), ), ] From b49be72119108a9b8cdcb18a79ace8a530e52466 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Thu, 26 Mar 2026 18:24:15 -0500 Subject: [PATCH 27/33] refactor: use semver >= comparisons for ag_ui_version instead of Literal matching Replace AGUIVersion Literal type with str + tuple-based semver comparison, making 
version checks forward-compatible (e.g. 0.1.15 auto-gets REASONING_* events). Also fix coverage gaps with 3 new tests and 2 test refactors. --- pydantic_ai_slim/pydantic_ai/ag_ui.py | 6 +- .../pydantic_ai/ui/ag_ui/__init__.py | 3 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 18 ++-- .../pydantic_ai/ui/ag_ui/_event_stream.py | 88 +++++++++--------- tests/test_ag_ui.py | 91 +++++++++++++++++-- 5 files changed, 141 insertions(+), 65 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index b2efe8e68d..a8475f47d8 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -29,7 +29,7 @@ from starlette.responses import Response from .ui import SSE_CONTENT_TYPE, OnCompleteFunc, StateDeps, StateHandler - from .ui.ag_ui import AGUIAdapter, AGUIVersion + from .ui.ag_ui import AGUIAdapter from .ui.ag_ui.app import AGUIApp except ImportError as e: # pragma: no cover raise ImportError( @@ -53,7 +53,7 @@ async def handle_ag_ui_request( agent: AbstractAgent[AgentDepsT, Any], request: Request, *, - ag_ui_version: AGUIVersion = '0.1.10', + ag_ui_version: str = '0.1.10', output_type: OutputSpec[Any] | None = None, message_history: Sequence[ModelMessage] | None = None, deferred_tool_results: DeferredToolResults | None = None, @@ -117,7 +117,7 @@ def run_ag_ui( run_input: RunAgentInput, accept: str = SSE_CONTENT_TYPE, *, - ag_ui_version: AGUIVersion = '0.1.10', + ag_ui_version: str = '0.1.10', output_type: OutputSpec[Any] | None = None, message_history: Sequence[ModelMessage] | None = None, deferred_tool_results: DeferredToolResults | None = None, diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py index 0d572b3493..6228771869 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -1,10 +1,9 @@ """AG-UI protocol integration for Pydantic AI agents.""" from ._adapter import 
AGUIAdapter -from ._event_stream import AGUIEventStream, AGUIVersion +from ._event_stream import AGUIEventStream __all__ = [ 'AGUIAdapter', 'AGUIEventStream', - 'AGUIVersion', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index b5ac5841e2..c9b309b6f9 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -63,7 +63,13 @@ ) from .. import MessagesBuilder, UIAdapter, UIEventStream - from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream, AGUIVersion, thinking_encrypted_metadata + from ._event_stream import ( + BUILTIN_TOOL_CALL_ID_PREFIX, + REASONING_VERSION, + AGUIEventStream, + parse_ag_ui_version, + thinking_encrypted_metadata, + ) except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' @@ -137,7 +143,7 @@ class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, Outpu """UI adapter for the Agent-User Interaction (AG-UI) protocol.""" _: KW_ONLY - ag_ui_version: AGUIVersion = '0.1.10' + ag_ui_version: str = '0.1.10' """AG-UI protocol version controlling thinking/reasoning event format. 
- `'0.1.10'` (default): emits `THINKING_*` events during streaming, drops `ThinkingPart` @@ -177,7 +183,7 @@ async def from_request( request: Request, *, agent: AbstractAgent[AgentDepsT, OutputDataT], - ag_ui_version: AGUIVersion = '0.1.10', + ag_ui_version: str = '0.1.10', preserve_file_data: bool = False, **kwargs: Any, ) -> AGUIAdapter[AgentDepsT, OutputDataT]: @@ -453,7 +459,7 @@ def _dump_request_parts(msg: ModelRequest, *, preserve_file_data: bool = False) @staticmethod def _dump_response_parts( # noqa: C901 - msg: ModelResponse, *, ag_ui_version: AGUIVersion = '0.1.10', preserve_file_data: bool = False + msg: ModelResponse, *, ag_ui_version: str = '0.1.10', preserve_file_data: bool = False ) -> list[Message]: """Convert a `ModelResponse` into AG-UI messages. @@ -490,7 +496,7 @@ def flush() -> None: flush() text_content.append(part.content) elif isinstance(part, ThinkingPart): - if ag_ui_version == '0.1.13': + if parse_ag_ui_version(ag_ui_version) >= REASONING_VERSION: flush() encrypted = thinking_encrypted_metadata(part) result.append( @@ -557,7 +563,7 @@ def dump_messages( cls, messages: Sequence[ModelMessage], *, - ag_ui_version: AGUIVersion = '0.1.10', + ag_ui_version: str = '0.1.10', preserve_file_data: bool = False, ) -> list[Message]: """Transform Pydantic AI messages into AG-UI messages. 
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 60957325ec..a06f0088f7 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -9,11 +9,9 @@ import json from collections.abc import AsyncIterator, Iterable from dataclasses import dataclass, field -from typing import Any, Final, Literal +from typing import Any, Final from uuid import uuid4 -from typing_extensions import assert_never - from ..._utils import now_utc from ...messages import ( BuiltinToolCallPart, @@ -68,17 +66,22 @@ 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' ) from e -AGUIVersion = Literal['0.1.10', '0.1.13'] -"""Supported AG-UI protocol versions for thinking/reasoning event emission.""" - __all__ = [ 'AGUIEventStream', - 'AGUIVersion', 'RunAgentInput', 'RunStartedEvent', 'RunFinishedEvent', ] + +def parse_ag_ui_version(version: str) -> tuple[int, ...]: + """Parse an AG-UI version string into a comparable tuple.""" + return tuple(int(x) for x in version.split('.')) + + +REASONING_VERSION = (0, 1, 13) +"""AG-UI version that introduced REASONING_* events (replacing THINKING_*).""" + BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' @@ -100,7 +103,7 @@ def thinking_encrypted_metadata(part: ThinkingPart) -> dict[str, Any]: class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): """UI event stream transformer for the Agent-User Interaction (AG-UI) protocol.""" - ag_ui_version: AGUIVersion = '0.1.10' + ag_ui_version: str = '0.1.10' _reasoning_message_id: str | None = None _reasoning_started: bool = False @@ -181,14 +184,7 @@ async def handle_thinking_start( self._reasoning_message_id = str(uuid4()) self._reasoning_started = False - if self.ag_ui_version == '0.1.10': - if part.content: - yield ThinkingStartEvent() - self._reasoning_started = True - yield 
ThinkingTextMessageStartEvent() - yield ThinkingTextMessageContentEvent(delta=part.content) - self._reasoning_text = True - elif self.ag_ui_version == '0.1.13': + if parse_ag_ui_version(self.ag_ui_version) >= REASONING_VERSION: if part.content: yield ReasoningStartEvent(message_id=self._reasoning_message_id) self._reasoning_started = True @@ -196,8 +192,12 @@ async def handle_thinking_start( yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id, delta=part.content) self._reasoning_text = True else: - # exhaustive branching protects against future additions of AG-UI versions - assert_never(self.ag_ui_version) + if part.content: + yield ThinkingStartEvent() + self._reasoning_started = True + yield ThinkingTextMessageStartEvent() + yield ThinkingTextMessageContentEvent(delta=part.content) + self._reasoning_text = True async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: if not delta.content_delta: @@ -207,17 +207,7 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator 'handle_thinking_start must be called before handle_thinking_delta' ) - if self.ag_ui_version == '0.1.10': - if not self._reasoning_started: - yield ThinkingStartEvent() - self._reasoning_started = True - - if not self._reasoning_text: - yield ThinkingTextMessageStartEvent() - self._reasoning_text = True - - yield ThinkingTextMessageContentEvent(delta=delta.content_delta) - elif self.ag_ui_version == '0.1.13': + if parse_ag_ui_version(self.ag_ui_version) >= REASONING_VERSION: message_id = self._reasoning_message_id if not self._reasoning_started: @@ -230,28 +220,22 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator yield ReasoningMessageContentEvent(message_id=message_id, delta=delta.content_delta) else: - assert_never(self.ag_ui_version) + if not self._reasoning_started: + yield ThinkingStartEvent() + self._reasoning_started = True + + if not self._reasoning_text: + yield 
ThinkingTextMessageStartEvent() + self._reasoning_text = True + + yield ThinkingTextMessageContentEvent(delta=delta.content_delta) async def handle_thinking_end( self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[BaseEvent]: assert self._reasoning_message_id is not None, 'handle_thinking_start must be called before handle_thinking_end' - if self.ag_ui_version == '0.1.10': - if not self._reasoning_started and not part.content: - self._reasoning_message_id = None - return - - if not self._reasoning_started: - yield ThinkingStartEvent() - - if self._reasoning_text: - yield ThinkingTextMessageEndEvent() - self._reasoning_text = False - - yield ThinkingEndEvent() - self._reasoning_message_id = None - elif self.ag_ui_version == '0.1.13': + if parse_ag_ui_version(self.ag_ui_version) >= REASONING_VERSION: message_id = self._reasoning_message_id encrypted = thinking_encrypted_metadata(part) @@ -276,7 +260,19 @@ async def handle_thinking_end( yield ReasoningEndEvent(message_id=message_id) self._reasoning_message_id = None else: - assert_never(self.ag_ui_version) + if not self._reasoning_started and not part.content: + self._reasoning_message_id = None + return + + if not self._reasoning_started: + yield ThinkingStartEvent() + + if self._reasoning_text: + yield ThinkingTextMessageEndEvent() + self._reasoning_text = False + + yield ThinkingEndEvent() + self._reasoning_message_id = None def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: return self._handle_tool_call_start(part) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 5338b67dc7..849c44544f 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -3574,11 +3574,8 @@ async def test_thinking_events_v010_empty_content() -> None: part = ThinkingPart(content='', signature='sig_abc') - events: list[BaseEvent] = [] - async for e in event_stream.handle_thinking_start(part): - events.append(e) - async for e in 
event_stream.handle_thinking_end(part): - events.append(e) + events = [e async for e in event_stream.handle_thinking_start(part)] + events.extend([e async for e in event_stream.handle_thinking_end(part)]) assert events == [] @@ -3611,13 +3608,91 @@ async def test_thinking_end_v013_no_content_no_metadata() -> None: part = ThinkingPart(content='') - events: list[BaseEvent] = [e async for e in event_stream.handle_thinking_start(part)] - async for e in event_stream.handle_thinking_end(part): - events.append(e) + events = [e async for e in event_stream.handle_thinking_start(part)] + events.extend([e async for e in event_stream.handle_thinking_end(part)]) assert events == [] +async def test_thinking_delta_v013_after_content_start() -> None: + """Test v0.1.13 delta skips START/MESSAGE_START when reasoning already started.""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.13') + + start_part = ThinkingPart(content='initial') + events = [e async for e in event_stream.handle_thinking_start(start_part)] + + delta = ThinkingPartDelta(content_delta='more') + events.extend([e async for e in event_stream.handle_thinking_delta(delta)]) + + assert [e.model_dump(exclude_none=True) for e in events] == snapshot( + [ + {'type': 'REASONING_START', 'message_id': IsStr()}, + {'type': 'REASONING_MESSAGE_START', 'message_id': IsStr(), 'role': 'assistant'}, + {'type': 'REASONING_MESSAGE_CONTENT', 'message_id': IsStr(), 'delta': 'initial'}, + {'type': 'REASONING_MESSAGE_CONTENT', 'message_id': IsStr(), 'delta': 'more'}, + ] + ) + + +async def test_thinking_end_v010_with_content() -> None: + """Test v0.1.10 end emits TextMessageEnd when content was streamed, and ThinkingStart when not started.""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + + # Case 1: start with content → _reasoning_started=True, _reasoning_text=True + # end should emit TextMessageEnd + 
ThinkingEnd + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.10') + part = ThinkingPart(content='text') + events = [e async for e in event_stream.handle_thinking_start(part)] + events.extend([e async for e in event_stream.handle_thinking_end(part)]) + + assert [e.model_dump(exclude_none=True) for e in events] == snapshot( + [ + {'type': 'THINKING_START'}, + {'type': 'THINKING_TEXT_MESSAGE_START'}, + {'type': 'THINKING_TEXT_MESSAGE_CONTENT', 'delta': 'text'}, + {'type': 'THINKING_TEXT_MESSAGE_END'}, + {'type': 'THINKING_END'}, + ] + ) + + # Case 2: start with empty content → _reasoning_started=False + # end with content → hits ThinkingStartEvent at line 246 + event_stream2 = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.10') + empty_part = ThinkingPart(content='') + events2 = [e async for e in event_stream2.handle_thinking_start(empty_part)] + + full_part = ThinkingPart(content='non-empty') + events2.extend([e async for e in event_stream2.handle_thinking_end(full_part)]) + + assert [e.model_dump(exclude_none=True) for e in events2] == snapshot( + [ + {'type': 'THINKING_START'}, + {'type': 'THINKING_END'}, + ] + ) + + +async def test_thinking_end_v013_no_encrypted_metadata() -> None: + """Test v0.1.13 end skips encrypted_value event when part has no signature or metadata.""" + run_input = create_input(UserMessage(id='msg_1', content='test')) + event_stream = AGUIEventStream(run_input, accept=SSE_CONTENT_TYPE, ag_ui_version='0.1.13') + + part = ThinkingPart(content='text') + events = [e async for e in event_stream.handle_thinking_start(part)] + events.extend([e async for e in event_stream.handle_thinking_end(part)]) + + assert [e.model_dump(exclude_none=True) for e in events] == snapshot( + [ + {'type': 'REASONING_START', 'message_id': IsStr()}, + {'type': 'REASONING_MESSAGE_START', 'message_id': IsStr(), 'role': 'assistant'}, + {'type': 'REASONING_MESSAGE_CONTENT', 'message_id': IsStr(), 'delta': 
'text'}, + {'type': 'REASONING_MESSAGE_END', 'message_id': IsStr()}, + {'type': 'REASONING_END', 'message_id': IsStr()}, + ] + ) + + # endregion # region: Coverage — encrypted_metadata branch gap From d244c07d885398d819cd22bfa39183d0bfc4fba6 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Mon, 30 Mar 2026 17:52:11 -0500 Subject: [PATCH 28/33] address review: AGUIVersion type, auto-detect default, cache version check, thread preserve_file_data - Add AGUIVersion = Literal['0.1.10', '0.1.13'] with auto-detected DEFAULT_AG_UI_VERSION - Cache version check via _use_reasoning in __post_init__ (removes 3x duplication) - Add UserError validation to parse_ag_ui_version for malformed input - Thread preserve_file_data through handle_ag_ui_request, run_ag_ui, AGUIApp - Reword preserve_file_data docstring to user-focused language - Add UploadedFile split note to dump_messages lossiness docs --- pydantic_ai_slim/pydantic_ai/ag_ui.py | 20 +++++- pydantic_ai_slim/pydantic_ai/ui/AGENTS.md | 6 ++ pydantic_ai_slim/pydantic_ai/ui/CLAUDE.md | 1 + .../pydantic_ai/ui/ag_ui/__init__.py | 4 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 33 +++++----- .../pydantic_ai/ui/ag_ui/_event_stream.py | 63 ++++++++++++++----- pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py | 8 +++ tests/test_ag_ui.py | 2 +- 8 files changed, 104 insertions(+), 33 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ui/AGENTS.md create mode 120000 pydantic_ai_slim/pydantic_ai/ui/CLAUDE.md diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index a8475f47d8..671c172354 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -30,6 +30,7 @@ from .ui import SSE_CONTENT_TYPE, OnCompleteFunc, StateDeps, StateHandler from .ui.ag_ui import AGUIAdapter + from .ui.ag_ui._event_stream import DEFAULT_AG_UI_VERSION, AGUIVersion from .ui.ag_ui.app import AGUIApp except ImportError as e: # pragma: no 
cover raise ImportError( @@ -53,7 +54,8 @@ async def handle_ag_ui_request( agent: AbstractAgent[AgentDepsT, Any], request: Request, *, - ag_ui_version: str = '0.1.10', + ag_ui_version: AGUIVersion = DEFAULT_AG_UI_VERSION, + preserve_file_data: bool = False, output_type: OutputSpec[Any] | None = None, message_history: Sequence[ModelMessage] | None = None, deferred_tool_results: DeferredToolResults | None = None, @@ -73,6 +75,8 @@ async def handle_ag_ui_request( agent: The agent to run. request: The Starlette request (e.g. from FastAPI) containing the AG-UI run input. ag_ui_version: AG-UI protocol version controlling thinking/reasoning event format. + preserve_file_data: Whether to preserve agent-generated files and uploaded files + in AG-UI message conversion. See [`AGUIAdapter.preserve_file_data`][pydantic_ai.ui.ag_ui.AGUIAdapter.preserve_file_data]. output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. @@ -97,6 +101,7 @@ async def handle_ag_ui_request( request, agent=agent, ag_ui_version=ag_ui_version, + preserve_file_data=preserve_file_data, deps=deps, output_type=output_type, message_history=message_history, @@ -117,7 +122,8 @@ def run_ag_ui( run_input: RunAgentInput, accept: str = SSE_CONTENT_TYPE, *, - ag_ui_version: str = '0.1.10', + ag_ui_version: AGUIVersion = DEFAULT_AG_UI_VERSION, + preserve_file_data: bool = False, output_type: OutputSpec[Any] | None = None, message_history: Sequence[ModelMessage] | None = None, deferred_tool_results: DeferredToolResults | None = None, @@ -138,6 +144,8 @@ def run_ag_ui( run_input: The AG-UI run input containing thread_id, run_id, messages, etc. accept: The accept header value for the run. ag_ui_version: AG-UI protocol version controlling thinking/reasoning event format. 
+ preserve_file_data: Whether to preserve agent-generated files and uploaded files + in AG-UI message conversion. See [`AGUIAdapter.preserve_file_data`][pydantic_ai.ui.ag_ui.AGUIAdapter.preserve_file_data]. output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. @@ -158,7 +166,13 @@ Yields: Streaming event chunks encoded as strings according to the accept header value. """ - adapter = AGUIAdapter(agent=agent, run_input=run_input, accept=accept, ag_ui_version=ag_ui_version) + adapter = AGUIAdapter( + agent=agent, + run_input=run_input, + accept=accept, + ag_ui_version=ag_ui_version, + preserve_file_data=preserve_file_data, + ) return adapter.encode_stream( adapter.run_stream( output_type=output_type, diff --git a/pydantic_ai_slim/pydantic_ai/ui/AGENTS.md b/pydantic_ai_slim/pydantic_ai/ui/AGENTS.md new file mode 100644 index 0000000000..5dcffe6b9c --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/AGENTS.md @@ -0,0 +1,6 @@ +## Backwards compatibility in UI adapters (especially AG-UI) + +Since [3971](https://github.com/pydantic/pydantic-ai/pull/3971#discussion_r3011028336) we decided to introduce the policy of sticking to the lower (existing) version requirement.
In short, this means: +- version requirement bumps are disallowed +- new functionality should be gated behind version checks (including imports) +- older versions don't error out when they encounter new functionality, but instead skip it diff --git a/pydantic_ai_slim/pydantic_ai/ui/CLAUDE.md b/pydantic_ai_slim/pydantic_ai/ui/CLAUDE.md new file mode 120000 index 0000000000..47dc3e3d86 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/CLAUDE.md @@ -0,0 +1 @@ +AGENTS.md \ No newline at end of file diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py index 6228771869..ec358d1f92 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -1,9 +1,11 @@ """AG-UI protocol integration for Pydantic AI agents.""" from ._adapter import AGUIAdapter -from ._event_stream import AGUIEventStream +from ._event_stream import DEFAULT_AG_UI_VERSION, AGUIEventStream, AGUIVersion __all__ = [ 'AGUIAdapter', 'AGUIEventStream', + 'AGUIVersion', + 'DEFAULT_AG_UI_VERSION', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index c9b309b6f9..b105068a40 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -65,8 +65,10 @@ from .. import MessagesBuilder, UIAdapter, UIEventStream from ._event_stream import ( BUILTIN_TOOL_CALL_ID_PREFIX, + DEFAULT_AG_UI_VERSION, REASONING_VERSION, AGUIEventStream, + AGUIVersion, parse_ag_ui_version, thinking_encrypted_metadata, ) @@ -143,10 +145,12 @@ class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, Outpu """UI adapter for the Agent-User Interaction (AG-UI) protocol.""" _: KW_ONLY - ag_ui_version: str = '0.1.10' + ag_ui_version: AGUIVersion = DEFAULT_AG_UI_VERSION """AG-UI protocol version controlling thinking/reasoning event format. 
- - `'0.1.10'` (default): emits `THINKING_*` events during streaming, drops `ThinkingPart` + Defaults to the version detected from the installed `ag-ui-protocol` package. + + - `'0.1.10'`: emits `THINKING_*` events during streaming, drops `ThinkingPart` from `dump_messages` output. Compatible with AG-UI frontends that don't support reasoning events. - `'0.1.13'`: emits `REASONING_*` events with encrypted metadata during streaming, and includes `ThinkingPart` as `ReasoningMessage` in `dump_messages` output for full round-trip @@ -156,16 +160,15 @@ class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, Outpu """ preserve_file_data: bool = False - """Whether to preserve file and uploaded-file data in AG-UI message conversion. + """Whether to preserve agent-generated files and uploaded files in AG-UI message conversion. - When `True`, `FilePart` round-trips as `ActivityMessage(activity_type='pydantic_ai_file')` - and `UploadedFile` round-trips as `ActivityMessage(activity_type='pydantic_ai_uploaded_file')`. - When `False` (default), these are silently dropped from `dump_messages` output - and their corresponding `ActivityMessage` types are ignored by `load_messages`. + When `True`, agent-generated files and uploaded files are stored as + [activity messages](https://docs.ag-ui.com/concepts/activities) during `dump_messages` + and restored during `load_messages`, enabling full round-trip fidelity. + When `False` (default), they are silently dropped. - If your AG-UI frontend uses [activities](https://docs.ag-ui.com/concepts/activities), - be aware that `pydantic_ai_*` activity types are reserved for internal round-trip use - and should be ignored by frontend activity handlers. + If your AG-UI frontend uses activities, be aware that `pydantic_ai_*` activity types + are reserved for internal round-trip use and should be ignored by frontend activity handlers. 
""" @classmethod @@ -183,7 +186,7 @@ async def from_request( request: Request, *, agent: AbstractAgent[AgentDepsT, OutputDataT], - ag_ui_version: str = '0.1.10', + ag_ui_version: AGUIVersion = DEFAULT_AG_UI_VERSION, preserve_file_data: bool = False, **kwargs: Any, ) -> AGUIAdapter[AgentDepsT, OutputDataT]: @@ -459,7 +462,7 @@ def _dump_request_parts(msg: ModelRequest, *, preserve_file_data: bool = False) @staticmethod def _dump_response_parts( # noqa: C901 - msg: ModelResponse, *, ag_ui_version: str = '0.1.10', preserve_file_data: bool = False + msg: ModelResponse, *, ag_ui_version: AGUIVersion = DEFAULT_AG_UI_VERSION, preserve_file_data: bool = False ) -> list[Message]: """Convert a `ModelResponse` into AG-UI messages. @@ -563,7 +566,7 @@ def dump_messages( cls, messages: Sequence[ModelMessage], *, - ag_ui_version: str = '0.1.10', + ag_ui_version: AGUIVersion = DEFAULT_AG_UI_VERSION, preserve_file_data: bool = False, ) -> list[Message]: """Transform Pydantic AI messages into AG-UI messages. @@ -577,8 +580,10 @@ def dump_messages( - `BuiltinToolReturnPart.provider_details` is lost. - `RetryPromptPart` becomes `ToolReturnPart` (or `UserPromptPart`) on reload. - `CachePoint` and `UploadedFile` content items are dropped (unless `preserve_file_data=True`). - - `ThinkingPart` is dropped when `ag_ui_version='0.1.10'` (default). + - `ThinkingPart` is dropped when `ag_ui_version='0.1.10'`. - `FilePart` is silently dropped unless `preserve_file_data=True`. + - `UploadedFile` in a multi-item `UserPromptPart` is split into a separate activity message + when `preserve_file_data=True`, which reloads as a separate `UserPromptPart`. - Part ordering within a `ModelResponse` may change when text follows tool calls. 
Args: diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index a06f0088f7..b6ccd64079 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -6,10 +6,11 @@ from __future__ import annotations +import importlib.metadata import json from collections.abc import AsyncIterator, Iterable from dataclasses import dataclass, field -from typing import Any, Final +from typing import Any, Final, Literal from uuid import uuid4 from ..._utils import now_utc @@ -66,22 +67,52 @@ 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' ) from e +AGUIVersion = Literal['0.1.10', '0.1.13'] +"""Supported AG-UI protocol versions. + +- `'0.1.10'`: emits `THINKING_*` events, drops `ThinkingPart` from `dump_messages`. +- `'0.1.13'`: emits `REASONING_*` events with encrypted metadata, preserves `ThinkingPart` + as `ReasoningMessage` in `dump_messages` for full round-trip fidelity. +""" + +REASONING_VERSION = (0, 1, 13) +"""AG-UI version that introduced REASONING_* events (replacing THINKING_*).""" + + +def parse_ag_ui_version(version: str) -> tuple[int, ...]: + """Parse an AG-UI version string (e.g. 
`'0.1.13'`) into a comparable tuple.""" + from ...exceptions import UserError + + try: + return tuple(int(x) for x in version.split('.')) + except ValueError: + raise UserError(f"Invalid AG-UI version {version!r}: expected a dotted numeric version like '0.1.13'") from None + + +def _detect_ag_ui_version() -> AGUIVersion: + """Detect installed ag-ui-protocol version and map to the nearest supported `AGUIVersion`.""" + try: + installed = importlib.metadata.version('ag-ui-protocol') + if parse_ag_ui_version(installed) >= REASONING_VERSION: + return '0.1.13' + except importlib.metadata.PackageNotFoundError: + pass + return '0.1.10' + + +DEFAULT_AG_UI_VERSION: AGUIVersion = _detect_ag_ui_version() +"""The default AG-UI version, auto-detected from the installed `ag-ui-protocol` package.""" + + __all__ = [ 'AGUIEventStream', + 'AGUIVersion', + 'DEFAULT_AG_UI_VERSION', 'RunAgentInput', 'RunStartedEvent', 'RunFinishedEvent', ] - -def parse_ag_ui_version(version: str) -> tuple[int, ...]: - """Parse an AG-UI version string into a comparable tuple.""" - return tuple(int(x) for x in version.split('.')) - - -REASONING_VERSION = (0, 1, 13) -"""AG-UI version that introduced REASONING_* events (replacing THINKING_*).""" - BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' @@ -103,8 +134,9 @@ def thinking_encrypted_metadata(part: ThinkingPart) -> dict[str, Any]: class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): """UI event stream transformer for the Agent-User Interaction (AG-UI) protocol.""" - ag_ui_version: str = '0.1.10' + ag_ui_version: AGUIVersion = DEFAULT_AG_UI_VERSION + _use_reasoning: bool = field(default=False, init=False) _reasoning_message_id: str | None = None _reasoning_started: bool = False _reasoning_text: bool = False @@ -112,6 +144,9 @@ class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, Output _ended_tool_call_ids: set[str] = field(default_factory=set[str]) _error: bool = False + def 
__post_init__(self) -> None: + self._use_reasoning = parse_ag_ui_version(self.ag_ui_version) >= REASONING_VERSION + @property def _event_encoder(self) -> EventEncoder: return EventEncoder(accept=self.accept or SSE_CONTENT_TYPE) @@ -184,7 +219,7 @@ async def handle_thinking_start( self._reasoning_message_id = str(uuid4()) self._reasoning_started = False - if parse_ag_ui_version(self.ag_ui_version) >= REASONING_VERSION: + if self._use_reasoning: if part.content: yield ReasoningStartEvent(message_id=self._reasoning_message_id) self._reasoning_started = True @@ -207,7 +242,7 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator 'handle_thinking_start must be called before handle_thinking_delta' ) - if parse_ag_ui_version(self.ag_ui_version) >= REASONING_VERSION: + if self._use_reasoning: message_id = self._reasoning_message_id if not self._reasoning_started: @@ -235,7 +270,7 @@ async def handle_thinking_end( ) -> AsyncIterator[BaseEvent]: assert self._reasoning_message_id is not None, 'handle_thinking_start must be called before handle_thinking_end' - if parse_ag_ui_version(self.ag_ui_version) >= REASONING_VERSION: + if self._use_reasoning: message_id = self._reasoning_message_id encrypted = thinking_encrypted_metadata(part) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py index 1f0fbe5262..4dfaf155c6 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py @@ -21,6 +21,7 @@ from .. 
import OnCompleteFunc, StateHandler from ._adapter import AGUIAdapter +from ._event_stream import DEFAULT_AG_UI_VERSION, AGUIVersion try: from starlette.applications import Starlette @@ -44,6 +45,8 @@ def __init__( agent: AbstractAgent[AgentDepsT, OutputDataT], *, # AGUIAdapter.dispatch_request parameters + ag_ui_version: AGUIVersion = DEFAULT_AG_UI_VERSION, + preserve_file_data: bool = False, output_type: OutputSpec[Any] | None = None, message_history: Sequence[ModelMessage] | None = None, deferred_tool_results: DeferredToolResults | None = None, @@ -75,6 +78,9 @@ def __init__( Args: agent: The agent to run. + ag_ui_version: AG-UI protocol version controlling thinking/reasoning event format. + preserve_file_data: Whether to preserve agent-generated files and uploaded files + in AG-UI message conversion. See [`AGUIAdapter.preserve_file_data`][pydantic_ai.ui.ag_ui.AGUIAdapter.preserve_file_data]. output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's @@ -131,6 +137,8 @@ async def run_agent(request: Request) -> Response: return await AGUIAdapter[AgentDepsT, OutputDataT].dispatch_request( request, agent=agent, + ag_ui_version=ag_ui_version, + preserve_file_data=preserve_file_data, output_type=output_type, message_history=message_history, deferred_tool_results=deferred_tool_results, diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 849c44544f..d91e87aab2 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -3462,7 +3462,7 @@ def test_dump_messages_v010_drops_thinking() -> None: ), ] - ag_ui_msgs = AGUIAdapter.dump_messages(messages) # default 0.1.10 + ag_ui_msgs = AGUIAdapter.dump_messages(messages, ag_ui_version='0.1.10') # No ReasoningMessage in output assert not any(isinstance(m, ReasoningMessage) for m in ag_ui_msgs) # Text still present From f64bd5ff19957fdeffe30c0096c30de596c43b03 Mon Sep 17 00:00:00 2001 
From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 31 Mar 2026 09:43:23 -0500 Subject: [PATCH 29/33] fix: handle TextContent in _user_content_to_input after merge with main --- pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index b105068a40..370fbe2ae8 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -31,6 +31,7 @@ ModelResponse, RetryPromptPart, SystemPromptPart, + TextContent, TextPart, ThinkingPart, ToolCallPart, @@ -121,11 +122,13 @@ def _new_message_id() -> str: def _user_content_to_input( - item: str | ImageUrl | VideoUrl | AudioUrl | DocumentUrl | BinaryContent | UploadedFile | CachePoint, + item: str | TextContent | ImageUrl | VideoUrl | AudioUrl | DocumentUrl | BinaryContent | UploadedFile | CachePoint, ) -> TextInputContent | BinaryInputContent | None: """Convert a user content item to AG-UI input content.""" if isinstance(item, str): return TextInputContent(type='text', text=item) + elif isinstance(item, TextContent): + return TextInputContent(type='text', text=item.content) elif isinstance(item, (ImageUrl, VideoUrl, AudioUrl, DocumentUrl)): return BinaryInputContent(type='binary', url=item.url, mime_type=item.media_type or '') elif isinstance(item, BinaryContent): From e54f27a5294f7196ac9da8b025fe990ec1257434 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 31 Mar 2026 10:53:55 -0500 Subject: [PATCH 30/33] coverage: handle TextContent, test version validation/detect fallback; optimize check_cassettes --- scripts/check_cassettes.py | 107 +++++++++++++++++++++++-------------- tests/test_ag_ui.py | 44 +++++++++++++++ 2 files changed, 111 insertions(+), 40 deletions(-) diff --git a/scripts/check_cassettes.py b/scripts/check_cassettes.py 
index 39ad943723..c56793991f 100644 --- a/scripts/check_cassettes.py +++ b/scripts/check_cassettes.py @@ -11,52 +11,73 @@ from __future__ import annotations +import ast import sys from collections import defaultdict from pathlib import Path -import pytest +_FORBIDDEN_CHARS = r"""<>?%*:|"'/\\""" -class _CollectVcrTests: - """Pytest plugin that collects cassette names referenced by VCR-marked tests. +def _sanitize_cassette_name(name: str) -> str: + """Replicate pytest-recording's cassette name sanitization.""" + for ch in _FORBIDDEN_CHARS: + name = name.replace(ch, '-') + return name - This is a class (not functions) because pytest's plugin system requires objects - with hook methods, and we need to accumulate state across all test items. - """ - def __init__(self) -> None: - self.tests: dict[str, set[str]] = defaultdict(set) +def _has_vcr_marker(decorator_list: list[ast.expr]) -> bool: + """Check if a decorator list contains pytest.mark.vcr (with or without parens).""" + for dec in decorator_list: + # @pytest.mark.vcr or @pytest.mark.vcr() + if isinstance(dec, ast.Attribute) and dec.attr == 'vcr': + return True + if isinstance(dec, ast.Call) and isinstance(dec.func, ast.Attribute) and dec.func.attr == 'vcr': + return True + return False - @staticmethod - def _remove_yaml_ext(s: str) -> str: - if s.endswith('.yaml'): - return s[:-5] - return s - def pytest_collection_modifyitems( - self, session: pytest.Session, config: pytest.Config, items: list[pytest.Item] - ) -> None: - # prevents pytest.PytestAssertRewriteWarning: Module already imported so cannot be rewritten; pytest_recording - from pytest_recording.plugin import get_default_cassette_name - - for item in items: - if not any(item.iter_markers('vcr')): +def _has_module_vcr_marker(tree: ast.Module) -> bool: + """Check if the module has pytestmark = [..., pytest.mark.vcr, ...].""" + for node in ast.iter_child_nodes(tree): + if not isinstance(node, ast.Assign): + continue + for target in node.targets: + if not 
(isinstance(target, ast.Name) and target.id == 'pytestmark'): continue + return 'vcr' in ast.dump(node.value) + return False - test_file_stem = Path(item.location[0]).stem - m = item.get_closest_marker('default_cassette') - if m and m.args: - self.tests[test_file_stem].add(self._remove_yaml_ext(m.args[0])) - else: - self.tests[test_file_stem].add( - self._remove_yaml_ext(get_default_cassette_name(getattr(item, 'cls', None), item.name)) - ) +def _collect_vcr_tests_from_file(path: Path) -> set[str]: + """Parse a Python test file and return cassette names for VCR-marked tests.""" + try: + tree = ast.parse(path.read_text()) + except SyntaxError: + return set() - for vm in item.iter_markers('vcr'): - for arg in vm.args: - self.tests[test_file_stem].add(self._remove_yaml_ext(arg)) + module_has_vcr = _has_module_vcr_marker(tree) + cassette_names: set[str] = set() + + for node in ast.iter_child_nodes(tree): + if isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef): + if not node.name.startswith('test_'): + continue + if module_has_vcr or _has_vcr_marker(node.decorator_list): + # Parametrized tests get [] suffixes but cassettes use the base name + cassette_names.add(_sanitize_cassette_name(node.name)) + + elif isinstance(node, ast.ClassDef): + class_has_vcr = _has_vcr_marker(node.decorator_list) + for method in ast.iter_child_nodes(node): + if not isinstance(method, ast.FunctionDef | ast.AsyncFunctionDef): + continue + if not method.name.startswith('test_'): + continue + if module_has_vcr or class_has_vcr or _has_vcr_marker(method.decorator_list): + cassette_names.add(_sanitize_cassette_name(f'{node.name}.{method.name}')) + + return cassette_names def get_all_cassettes() -> dict[str, set[str]]: @@ -77,12 +98,15 @@ def get_all_cassettes() -> dict[str, set[str]]: def get_all_tests() -> dict[str, set[str]]: - """Use pytest collection to get all VCR-marked tests and their cassette names.""" - collector = _CollectVcrTests() - rc = pytest.main(['--collect-only', '-q', 
'tests/'], plugins=[collector]) - if rc not in (pytest.ExitCode.OK, pytest.ExitCode.NO_TESTS_COLLECTED): - raise SystemExit(rc) - return dict(collector.tests) + """Use AST parsing to find all VCR-marked tests and their cassette names.""" + tests: dict[str, set[str]] = defaultdict(set) + + for test_file in Path('tests').rglob('test_*.py'): + cassette_names = _collect_vcr_tests_from_file(test_file) + if cassette_names: + tests[test_file.stem].update(cassette_names) + + return dict(tests) def main() -> int: @@ -93,7 +117,7 @@ def main() -> int: total_cassettes = sum(len(c) for c in cassettes.values()) print(f'Found {total_cassettes} cassettes in {len(cassettes)} test modules') - print('Collecting VCR-marked tests (this may take a moment)...') + print('Collecting VCR-marked tests...') tests = get_all_tests() total_tests = sum(len(t) for t in tests.values()) print(f'Found {total_tests} tests in {len(tests)} test modules') @@ -108,7 +132,10 @@ def main() -> int: print(f'Warning: No tests found for module {test_file}') for cassette in sorted(cassette_names): - if cassette in expected_cassettes: + # Parametrized tests produce cassettes like test_foo[param].yaml + # Strip the [param] suffix to match the base test name + base_name = cassette.split('[')[0] + if cassette in expected_cassettes or base_name in expected_cassettes: matched += 1 if verbose: print(f' OK: {test_file}/{cassette}.yaml') diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index d91e87aab2..f986207c2e 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -2,6 +2,7 @@ from __future__ import annotations +import importlib.metadata import json import uuid from collections.abc import AsyncIterator, MutableMapping @@ -37,6 +38,7 @@ RequestUsage, RetryPromptPart, SystemPromptPart, + TextContent, TextPart, TextPartDelta, ThinkingPart, @@ -52,6 +54,7 @@ from pydantic_ai._run_context import RunContext from pydantic_ai.agent import Agent, AgentRunResult from pydantic_ai.builtin_tools import WebSearchTool 
+from pydantic_ai.exceptions import UserError from pydantic_ai.models.function import ( AgentInfo, BuiltinToolCallsReturns, @@ -102,6 +105,10 @@ run_ag_ui, ) from pydantic_ai.ui.ag_ui import AGUIEventStream + from pydantic_ai.ui.ag_ui._event_stream import ( + _detect_ag_ui_version, # pyright: ignore[reportPrivateUsage] + parse_ag_ui_version, + ) with try_import() as anthropic_imports_successful: from pydantic_ai.models.anthropic import AnthropicModel, AnthropicModelSettings @@ -3819,3 +3826,40 @@ def test_dump_messages_uploaded_file_without_vendor_metadata() -> None: # endregion + + +# region: Coverage — parse_ag_ui_version validation + TextContent + detect fallback + + +def test_parse_ag_ui_version_invalid() -> None: + """Test that parse_ag_ui_version raises UserError for malformed input.""" + with pytest.raises(UserError, match="Invalid AG-UI version 'latest'"): + parse_ag_ui_version('latest') + + with pytest.raises(UserError, match="Invalid AG-UI version '0.1.x'"): + parse_ag_ui_version('0.1.x') + + +def test_detect_ag_ui_version_fallback(monkeypatch: pytest.MonkeyPatch) -> None: + """Test that _detect_ag_ui_version returns '0.1.10' when package is not found.""" + + def _raise_not_found(_name: str) -> str: + raise importlib.metadata.PackageNotFoundError() + + monkeypatch.setattr('pydantic_ai.ui.ag_ui._event_stream.importlib.metadata.version', _raise_not_found) + assert _detect_ag_ui_version() == snapshot('0.1.10') + + +def test_dump_messages_text_content() -> None: + """Test that TextContent in UserPromptPart is converted to TextInputContent.""" + messages: list[ModelMessage] = [ + ModelRequest(parts=[UserPromptPart(content=[TextContent(content='hello')])]), + ] + + result = AGUIAdapter.dump_messages(messages) + assert [m.model_dump(exclude={'id'}, exclude_none=True) for m in result] == snapshot( + [{'role': 'user', 'content': 'hello'}] + ) + + +# endregion From ab9c5ae2bad7e77c4d6b03b07987c5f8709eb38a Mon Sep 17 00:00:00 2001 From: David Sanchez 
<64162682+dsfaccini@users.noreply.github.com> Date: Tue, 31 Mar 2026 14:42:02 -0500 Subject: [PATCH 31/33] fix: gate reasoning imports for ag-ui-protocol backward compat Revert ag-ui-protocol minimum from >=0.1.13 to >=0.1.10. Extract version-specific thinking/reasoning event handlers into _thinking_0_10.py and _thinking_0_13.py (lazy-imported). Gate ReasoningMessage in _adapter.py via TYPE_CHECKING + runtime stub. Fix pre-release version parsing in parse_ag_ui_version with regex-based numeric prefix extraction. --- .../pydantic_ai/ui/ag_ui/_adapter.py | 12 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 108 ++++-------------- .../pydantic_ai/ui/ag_ui/_thinking_0_10.py | 72 ++++++++++++ .../pydantic_ai/ui/ag_ui/_thinking_0_13.py | 89 +++++++++++++++ pydantic_ai_slim/pyproject.toml | 2 +- tests/test_ag_ui.py | 13 ++- uv.lock | 2 +- 7 files changed, 208 insertions(+), 90 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/_thinking_0_10.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/_thinking_0_13.py diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 370fbe2ae8..a5e0ac8513 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -53,7 +53,6 @@ DeveloperMessage, FunctionCall, Message, - ReasoningMessage, RunAgentInput, SystemMessage, TextInputContent, @@ -80,9 +79,18 @@ ) from e if TYPE_CHECKING: + from ag_ui.core import ReasoningMessage from starlette.requests import Request from ...agent import AbstractAgent +else: + try: + from ag_ui.core import ReasoningMessage + except ImportError: + + class ReasoningMessage: + """Stub for ag-ui-protocol < 0.1.13 — no instances exist, so pattern matching is a no-op.""" + __all__ = ['AGUIAdapter'] @@ -503,6 +511,8 @@ def flush() -> None: text_content.append(part.content) elif isinstance(part, ThinkingPart): if parse_ag_ui_version(ag_ui_version) >= REASONING_VERSION: + 
from ag_ui.core import ReasoningMessage + flush() encrypted = thinking_encrypted_metadata(part) result.append( diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index b6ccd64079..35a2c9a8cb 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -36,12 +36,6 @@ from ag_ui.core import ( BaseEvent, EventType, - ReasoningEncryptedValueEvent, - ReasoningEndEvent, - ReasoningMessageContentEvent, - ReasoningMessageEndEvent, - ReasoningMessageStartEvent, - ReasoningStartEvent, RunAgentInput, RunErrorEvent, RunFinishedEvent, @@ -49,11 +43,6 @@ TextMessageContentEvent, TextMessageEndEvent, TextMessageStartEvent, - ThinkingEndEvent, - ThinkingStartEvent, - ThinkingTextMessageContentEvent, - ThinkingTextMessageEndEvent, - ThinkingTextMessageStartEvent, ToolCallArgsEvent, ToolCallEndEvent, ToolCallResultEvent, @@ -80,13 +69,18 @@ def parse_ag_ui_version(version: str) -> tuple[int, ...]: - """Parse an AG-UI version string (e.g. `'0.1.13'`) into a comparable tuple.""" + """Parse an AG-UI version string (e.g. `'0.1.13'`) into a comparable tuple. + + Pre-release suffixes like `a1`, `b2`, `rc1`, `.dev0` are stripped before parsing. 
+ """ + import re + from ...exceptions import UserError - try: - return tuple(int(x) for x in version.split('.')) - except ValueError: - raise UserError(f"Invalid AG-UI version {version!r}: expected a dotted numeric version like '0.1.13'") from None + match = re.match(r'(\d+(?:\.\d+)*)', version) + if not match: + raise UserError(f"Invalid AG-UI version {version!r}: expected a dotted numeric version like '0.1.13'") + return tuple(int(x) for x in match.group(1).split('.')) def _detect_ag_ui_version() -> AGUIVersion: @@ -95,7 +89,7 @@ def _detect_ag_ui_version() -> AGUIVersion: installed = importlib.metadata.version('ag-ui-protocol') if parse_ag_ui_version(installed) >= REASONING_VERSION: return '0.1.13' - except importlib.metadata.PackageNotFoundError: + except Exception: pass return '0.1.10' @@ -220,19 +214,11 @@ async def handle_thinking_start( self._reasoning_started = False if self._use_reasoning: - if part.content: - yield ReasoningStartEvent(message_id=self._reasoning_message_id) - self._reasoning_started = True - yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id, role='assistant') - yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id, delta=part.content) - self._reasoning_text = True + from ._thinking_0_13 import handle_thinking_start as _impl else: - if part.content: - yield ThinkingStartEvent() - self._reasoning_started = True - yield ThinkingTextMessageStartEvent() - yield ThinkingTextMessageContentEvent(delta=part.content) - self._reasoning_text = True + from ._thinking_0_10 import handle_thinking_start as _impl + async for event in _impl(self, part): + yield event async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: if not delta.content_delta: @@ -243,27 +229,11 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator ) if self._use_reasoning: - message_id = self._reasoning_message_id - - if not self._reasoning_started: - yield 
ReasoningStartEvent(message_id=message_id) - self._reasoning_started = True - - if not self._reasoning_text: - yield ReasoningMessageStartEvent(message_id=message_id, role='assistant') - self._reasoning_text = True - - yield ReasoningMessageContentEvent(message_id=message_id, delta=delta.content_delta) + from ._thinking_0_13 import handle_thinking_delta as _impl else: - if not self._reasoning_started: - yield ThinkingStartEvent() - self._reasoning_started = True - - if not self._reasoning_text: - yield ThinkingTextMessageStartEvent() - self._reasoning_text = True - - yield ThinkingTextMessageContentEvent(delta=delta.content_delta) + from ._thinking_0_10 import handle_thinking_delta as _impl + async for event in _impl(self, delta): + yield event async def handle_thinking_end( self, part: ThinkingPart, followed_by_thinking: bool = False @@ -271,43 +241,11 @@ async def handle_thinking_end( assert self._reasoning_message_id is not None, 'handle_thinking_start must be called before handle_thinking_end' if self._use_reasoning: - message_id = self._reasoning_message_id - encrypted = thinking_encrypted_metadata(part) - - if not self._reasoning_started and not encrypted: - self._reasoning_message_id = None - return - - if not self._reasoning_started: - yield ReasoningStartEvent(message_id=message_id) - - if self._reasoning_text: - yield ReasoningMessageEndEvent(message_id=message_id) - self._reasoning_text = False - - if encrypted: - yield ReasoningEncryptedValueEvent( - subtype='message', - entity_id=message_id, - encrypted_value=json.dumps(encrypted), - ) - - yield ReasoningEndEvent(message_id=message_id) - self._reasoning_message_id = None + from ._thinking_0_13 import handle_thinking_end as _impl else: - if not self._reasoning_started and not part.content: - self._reasoning_message_id = None - return - - if not self._reasoning_started: - yield ThinkingStartEvent() - - if self._reasoning_text: - yield ThinkingTextMessageEndEvent() - self._reasoning_text = False - - yield 
ThinkingEndEvent() - self._reasoning_message_id = None + from ._thinking_0_10 import handle_thinking_end as _impl + async for event in _impl(self, part): + yield event def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: return self._handle_tool_call_start(part) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_thinking_0_10.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_thinking_0_10.py new file mode 100644 index 0000000000..914cfe7044 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_thinking_0_10.py @@ -0,0 +1,72 @@ +# pyright: reportPrivateUsage=false +"""Thinking event handlers for AG-UI protocol < 0.1.13 (THINKING_* events). + +These are extracted class methods of `AGUIEventStream` — the `self` parameter +is the event stream instance, and access to its private fields is intentional. +""" + +from __future__ import annotations + +from collections.abc import AsyncIterator +from typing import TYPE_CHECKING + +from ag_ui.core import ( + BaseEvent, + ThinkingEndEvent, + ThinkingStartEvent, + ThinkingTextMessageContentEvent, + ThinkingTextMessageEndEvent, + ThinkingTextMessageStartEvent, +) + +from ...messages import ThinkingPart, ThinkingPartDelta + +if TYPE_CHECKING: + from ...output import OutputDataT + from ...tools import AgentDepsT + from ._event_stream import AGUIEventStream + + +async def handle_thinking_start( + self: AGUIEventStream[AgentDepsT, OutputDataT], part: ThinkingPart +) -> AsyncIterator[BaseEvent]: + if part.content: + yield ThinkingStartEvent() + self._reasoning_started = True + yield ThinkingTextMessageStartEvent() + yield ThinkingTextMessageContentEvent(delta=part.content) + self._reasoning_text = True + + +async def handle_thinking_delta( + self: AGUIEventStream[AgentDepsT, OutputDataT], delta: ThinkingPartDelta +) -> AsyncIterator[BaseEvent]: + assert delta.content_delta is not None + + if not self._reasoning_started: + yield ThinkingStartEvent() + self._reasoning_started = True + + 
if not self._reasoning_text: + yield ThinkingTextMessageStartEvent() + self._reasoning_text = True + + yield ThinkingTextMessageContentEvent(delta=delta.content_delta) + + +async def handle_thinking_end( + self: AGUIEventStream[AgentDepsT, OutputDataT], part: ThinkingPart +) -> AsyncIterator[BaseEvent]: + if not self._reasoning_started and not part.content: + self._reasoning_message_id = None + return + + if not self._reasoning_started: + yield ThinkingStartEvent() + + if self._reasoning_text: + yield ThinkingTextMessageEndEvent() + self._reasoning_text = False + + yield ThinkingEndEvent() + self._reasoning_message_id = None diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_thinking_0_13.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_thinking_0_13.py new file mode 100644 index 0000000000..166254510c --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_thinking_0_13.py @@ -0,0 +1,89 @@ +# pyright: reportPrivateUsage=false +"""Reasoning event handlers for AG-UI protocol >= 0.1.13 (REASONING_* events). + +These are extracted class methods of `AGUIEventStream` — the `self` parameter +is the event stream instance, and access to its private fields is intentional. 
+""" + +from __future__ import annotations + +import json +from collections.abc import AsyncIterator +from typing import TYPE_CHECKING + +from ag_ui.core import ( + BaseEvent, + ReasoningEncryptedValueEvent, + ReasoningEndEvent, + ReasoningMessageContentEvent, + ReasoningMessageEndEvent, + ReasoningMessageStartEvent, + ReasoningStartEvent, +) + +from ...messages import ThinkingPart, ThinkingPartDelta +from ._event_stream import thinking_encrypted_metadata + +if TYPE_CHECKING: + from ...output import OutputDataT + from ...tools import AgentDepsT + from ._event_stream import AGUIEventStream + + +async def handle_thinking_start( + self: AGUIEventStream[AgentDepsT, OutputDataT], part: ThinkingPart +) -> AsyncIterator[BaseEvent]: + assert self._reasoning_message_id is not None + if part.content: + yield ReasoningStartEvent(message_id=self._reasoning_message_id) + self._reasoning_started = True + yield ReasoningMessageStartEvent(message_id=self._reasoning_message_id, role='assistant') + yield ReasoningMessageContentEvent(message_id=self._reasoning_message_id, delta=part.content) + self._reasoning_text = True + + +async def handle_thinking_delta( + self: AGUIEventStream[AgentDepsT, OutputDataT], delta: ThinkingPartDelta +) -> AsyncIterator[BaseEvent]: + assert self._reasoning_message_id is not None + assert delta.content_delta is not None + message_id = self._reasoning_message_id + + if not self._reasoning_started: + yield ReasoningStartEvent(message_id=message_id) + self._reasoning_started = True + + if not self._reasoning_text: + yield ReasoningMessageStartEvent(message_id=message_id, role='assistant') + self._reasoning_text = True + + yield ReasoningMessageContentEvent(message_id=message_id, delta=delta.content_delta) + + +async def handle_thinking_end( + self: AGUIEventStream[AgentDepsT, OutputDataT], part: ThinkingPart +) -> AsyncIterator[BaseEvent]: + assert self._reasoning_message_id is not None + message_id = self._reasoning_message_id + encrypted = 
thinking_encrypted_metadata(part) + + if not self._reasoning_started and not encrypted: + self._reasoning_message_id = None + return + + if not self._reasoning_started: + yield ReasoningStartEvent(message_id=message_id) + + if self._reasoning_text: + yield ReasoningMessageEndEvent(message_id=message_id) + self._reasoning_text = False + + if encrypted: + yield ReasoningEncryptedValueEvent( + subtype='message', + entity_id=message_id, + encrypted_value=json.dumps(encrypted), + ) + + yield ReasoningEndEvent(message_id=message_id) + self._reasoning_message_id = None diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml index cad78c8538..ff0a429149 100644 --- a/pydantic_ai_slim/pyproject.toml +++ b/pydantic_ai_slim/pyproject.toml @@ -119,7 +119,7 @@ ui = ["starlette>=0.45.3"] # A2A a2a = ["fasta2a>=0.4.1"] # AG-UI -ag-ui = ["ag-ui-protocol>=0.1.13", "starlette>=0.45.3"] +ag-ui = ["ag-ui-protocol>=0.1.10", "starlette>=0.45.3"] # Web web = ["starlette>=0.45.3", "httpx>=0.27.0", "uvicorn>=0.38.0"] # Retries diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index f986207c2e..15bdcf6b37 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -3836,8 +3836,17 @@ def test_parse_ag_ui_version_invalid() -> None: with pytest.raises(UserError, match="Invalid AG-UI version 'latest'"): parse_ag_ui_version('latest') - with pytest.raises(UserError, match="Invalid AG-UI version '0.1.x'"): - parse_ag_ui_version('0.1.x') + with pytest.raises(UserError, match="Invalid AG-UI version ''"): + parse_ag_ui_version('') + + +def test_parse_ag_ui_version_prerelease() -> None: + """Test that parse_ag_ui_version strips pre-release suffixes.""" + assert parse_ag_ui_version('0.1.13a1') == snapshot((0, 1, 13)) + assert parse_ag_ui_version('0.1.13b2') == snapshot((0, 1, 13)) + assert parse_ag_ui_version('0.1.13rc1') == snapshot((0, 1, 13)) + assert parse_ag_ui_version('0.1.13.dev0') == snapshot((0, 1, 13)) + assert parse_ag_ui_version('0.1.x') == snapshot((0, 1)) def 
test_detect_ag_ui_version_fallback(monkeypatch: pytest.MonkeyPatch) -> None: diff --git a/uv.lock b/uv.lock index e997aa8f1e..dc679ba373 100644 --- a/uv.lock +++ b/uv.lock @@ -6946,7 +6946,7 @@ xai = [ [package.metadata] requires-dist = [ - { name = "ag-ui-protocol", marker = "extra == 'ag-ui'", specifier = ">=0.1.13" }, + { name = "ag-ui-protocol", marker = "extra == 'ag-ui'", specifier = ">=0.1.10" }, { name = "anthropic", marker = "extra == 'anthropic'", specifier = ">=0.80.0" }, { name = "argcomplete", marker = "extra == 'cli'", specifier = ">=3.5.0" }, { name = "boto3", marker = "extra == 'bedrock'", specifier = ">=1.42.14" }, From d3a22db6335a4a28e7b23845c484158ac8a983a0 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 31 Mar 2026 17:38:59 -0500 Subject: [PATCH 32/33] fix coverage: pragma on ReasoningMessage stub, test old version detection --- pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py | 2 +- tests/test_ag_ui.py | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index a5e0ac8513..59240e4a19 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -86,7 +86,7 @@ else: try: from ag_ui.core import ReasoningMessage - except ImportError: + except ImportError: # pragma: no cover class ReasoningMessage: """Stub for ag-ui-protocol < 0.1.13 — no instances exist, so pattern matching is a no-op.""" diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 15bdcf6b37..377b7714f7 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -3859,6 +3859,16 @@ def _raise_not_found(_name: str) -> str: assert _detect_ag_ui_version() == snapshot('0.1.10') +def test_detect_ag_ui_version_old(monkeypatch: pytest.MonkeyPatch) -> None: + """Test that _detect_ag_ui_version returns '0.1.10' when installed version is below 
REASONING_VERSION.""" + + def _return_old_version(_name: str) -> str: + return '0.1.10' + + monkeypatch.setattr('pydantic_ai.ui.ag_ui._event_stream.importlib.metadata.version', _return_old_version) + assert _detect_ag_ui_version() == snapshot('0.1.10') + + def test_dump_messages_text_content() -> None: """Test that TextContent in UserPromptPart is converted to TextInputContent.""" messages: list[ModelMessage] = [ From 41bd1a02592dea1d939b6fa71bfadb226560aa85 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 31 Mar 2026 17:47:32 -0500 Subject: [PATCH 33/33] add comment --- pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 59240e4a19..4b2fc3f696 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -397,6 +397,8 @@ def load_messages(cls, messages: Sequence[Message], *, preserve_file_data: bool ) case _: + # this might crash if a user is using the latest AG-UI protocol with new message types + # in that case we can easily push a patch handling the new message type as a placeholder while we plan the actual implementation assert_never(msg) return builder.messages