Skip to content
72 changes: 59 additions & 13 deletions pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,8 @@
SourceUrlUIPart,
StepStartUIPart,
TextUIPart,
ToolApprovalRequested,
ToolApprovalRequestedPart,
ToolApprovalResponded,
ToolInputAvailablePart,
ToolOutputAvailablePart,
Expand Down Expand Up @@ -469,7 +471,10 @@ def _dump_request_message(msg: ModelRequest) -> tuple[list[UIMessagePart], list[

@classmethod
def _dump_response_message(
cls, msg: ModelResponse, tool_results: dict[str, ToolReturnPart | RetryPromptPart]
cls,
msg: ModelResponse,
tool_results: dict[str, ToolReturnPart | RetryPromptPart],
deferred_tool_call_ids: frozenset[str] = frozenset(),
) -> list[UIMessagePart]:
"""Convert a ModelResponse into a UIMessage."""
ui_parts: list[UIMessagePart] = []
Expand Down Expand Up @@ -580,25 +585,39 @@ def _dump_response_message(
call_provider_metadata = dump_provider_metadata(
id=part.id, provider_name=part.provider_name, provider_details=part.provider_details
)
ui_parts.append(
ToolInputAvailablePart(
type=tool_name,
tool_call_id=part.tool_call_id,
input=part.args_as_dict(),
provider_executed=True,
call_provider_metadata=call_provider_metadata,
if part.tool_call_id in deferred_tool_call_ids:
ui_parts.append(
ToolApprovalRequestedPart(
type=tool_name,
tool_call_id=part.tool_call_id,
input=part.args_as_dict(),
provider_executed=True,
call_provider_metadata=call_provider_metadata,
approval=ToolApprovalRequested(id=part.tool_call_id),
Copy link
Copy Markdown
Contributor

@devin-ai-integration devin-ai-integration bot Mar 25, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

📝 Info: approval_id and tool_call_id are intentionally identical

In _event_stream.py:130-131, both approval_id and tool_call_id are set to tool_call.tool_call_id. Similarly in the dump path, ToolApprovalRequested(id=part.tool_call_id) reuses the tool call ID. I verified that iter_tool_approval_responses in _utils.py:147-151 uses part.tool_call_id (not part.approval.id) for matching, confirming that approval.id is not used as a matching key. This makes the output fully deterministic, which is desirable for snapshot testing and idempotent renders. The only potential concern would be if the Vercel AI SDK frontend uses approval_id for deduplication across multiple approval requests for the same tool call, but the comments explicitly note this is not the case.

Open in Devin Review

Was this helpful? React with 👍 or 👎 to provide feedback.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The inconsistency is harmless — approval.id is never used for matching anywhere. All approval pairing goes through tool_call_id (see iter_tool_approval_responses in _utils.py). Using tool_call_id in the dump path is intentional for deterministic output. Added a clarifying comment.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@tijmenhammer If we use tool_call_id in the dump case, let's do it in the streaming case as well.

)
)
else:
ui_parts.append(
ToolInputAvailablePart(
type=tool_name,
tool_call_id=part.tool_call_id,
input=part.args_as_dict(),
provider_executed=True,
call_provider_metadata=call_provider_metadata,
)
)
)
elif isinstance(part, ToolCallPart):
ui_parts.extend(cls._dump_tool_call_part(part, tool_results))
ui_parts.extend(cls._dump_tool_call_part(part, tool_results, deferred_tool_call_ids))
else:
assert_never(part)

return ui_parts

@staticmethod
def _dump_tool_call_part(
part: ToolCallPart, tool_results: dict[str, ToolReturnPart | RetryPromptPart]
part: ToolCallPart,
tool_results: dict[str, ToolReturnPart | RetryPromptPart],
deferred_tool_call_ids: frozenset[str] = frozenset(),
) -> list[UIMessagePart]:
"""Convert a ToolCallPart (with optional result) into UIMessageParts."""
tool_result = tool_results.get(part.tool_call_id)
Expand Down Expand Up @@ -649,14 +668,35 @@ def _dump_tool_call_part(
# Check for Vercel AI chunks returned by tool calls via metadata.
ui_parts.extend(_extract_metadata_ui_parts(tool_result))
elif isinstance(tool_result, RetryPromptPart):
# Use the raw content string to avoid model_response() appending
# "Fix the errors and try again." — that suffix is intended for the
# model, not for UI display. For structured validation errors (list
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This means that these RetryPromptParts are now lossy in a round-trip right? Because on the next turn (after dump and load), the LLM would not see the "Fix the errors..." bit anymore, breaking the cache, and potentially being less clear to it than the message with the prompt.

Either way I think this is a more controversial change than the one about approval state so would like to see this in a separate PR, if you insist we need it :)

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fair point about the lossy round-trip. Reverted — will open a separate PR if I find the time.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Opened #4869 for this

# of ErrorDetails), fall back to the formatted model_response().
if isinstance(tool_result.content, str):
error_text = tool_result.content
else:
error_text = tool_result.model_response()
ui_parts.append(
ToolOutputErrorPart(
type=tool_type,
tool_call_id=part.tool_call_id,
input=part.args_as_dict(),
error_text=tool_result.model_response(),
error_text=error_text,
provider_executed=False,
call_provider_metadata=call_provider_metadata,
)
)
elif part.tool_call_id in deferred_tool_call_ids:
# Deferred tool awaiting approval — emit approval-requested state
# so the frontend renders approve/reject buttons on reload.
ui_parts.append(
ToolApprovalRequestedPart(
type=tool_type,
tool_call_id=part.tool_call_id,
input=part.args_as_dict(),
provider_executed=False,
call_provider_metadata=call_provider_metadata,
approval=ToolApprovalRequested(id=part.tool_call_id),
)
)
else:
Expand All @@ -679,6 +719,7 @@ def dump_messages(
*,
generate_message_id: Callable[[ModelRequest | ModelResponse, Literal['system', 'user', 'assistant'], int], str]
| None = None,
deferred_tool_call_ids: frozenset[str] | None = None,
) -> list[UIMessage]:
"""Transform Pydantic AI messages into Vercel AI messages.

Expand All @@ -689,10 +730,15 @@ def dump_messages(
message index (incremented per UIMessage appended), and should return a unique
string ID. If not provided, uses `provider_response_id` for responses,
run_id-based IDs for messages with run_id, or a deterministic UUID5 fallback.
deferred_tool_call_ids: Optional set of tool call IDs that are deferred (awaiting
user approval). Tool calls with these IDs that have no result will be emitted
with ``state='approval-requested'`` and an ``approval`` field, so the frontend
can render approve/reject buttons on reload.

Returns:
A list of UIMessage objects in Vercel AI format
"""
_deferred = deferred_tool_call_ids or frozenset()
tool_results: dict[str, ToolReturnPart | RetryPromptPart] = {}

for msg in messages:
Expand Down Expand Up @@ -725,7 +771,7 @@ def dump_messages(
elif isinstance( # pragma: no branch
msg, ModelResponse
):
ui_parts: list[UIMessagePart] = cls._dump_response_message(msg, tool_results)
ui_parts: list[UIMessagePart] = cls._dump_response_message(msg, tool_results, _deferred)
if ui_parts: # pragma: no branch
result.append(
UIMessage(id=id_generator(msg, 'assistant', message_index), role='assistant', parts=ui_parts)
Expand Down
148 changes: 136 additions & 12 deletions tests/test_vercel_ai.py
Original file line number Diff line number Diff line change
Expand Up @@ -4136,11 +4136,7 @@ async def test_adapter_dump_messages_with_retry():
'raw_input': None,
'input': {'arg': 'value'},
'provider_executed': False,
'error_text': """\
Tool failed with error

Fix the errors and try again.\
""",
'error_text': 'Tool failed with error',
'call_provider_metadata': None,
'approval': None,
}
Expand All @@ -4157,7 +4153,7 @@ async def test_adapter_dump_messages_with_retry():
assert tool_error_part == snapshot(
ToolReturnPart(
tool_name='my_tool',
content='Tool failed with error\n\nFix the errors and try again.',
content='Tool failed with error',
tool_call_id='tool_789',
timestamp=IsDatetime(),
outcome='failed',
Expand Down Expand Up @@ -4610,6 +4606,138 @@ async def test_adapter_dump_messages_tool_call_without_return():
)


async def test_adapter_dump_messages_deferred_tool_approval():
    """dump_messages should mark deferred tool calls with approval-requested state."""
    history: list[ModelMessage] = [
        ModelRequest(parts=[UserPromptPart(content='Do something')]),
        ModelResponse(
            parts=[
                ToolCallPart(
                    tool_name='dangerous_action',
                    args={'target': 'production'},
                    tool_call_id='deferred_tc1',
                ),
            ]
        ),
    ]

    # Default path (no deferred_tool_call_ids): the call dumps as input-available.
    plain_dicts = [msg.model_dump() for msg in VercelAIAdapter.dump_messages(history)]
    plain_part = plain_dicts[1]['parts'][0]
    assert plain_part['state'] == 'input-available'
    assert plain_part['approval'] is None

    # Deferred path: the same call dumps as approval-requested with an approval field.
    deferred_msgs = VercelAIAdapter.dump_messages(history, deferred_tool_call_ids=frozenset({'deferred_tc1'}))
    deferred_part = [msg.model_dump() for msg in deferred_msgs][1]['parts'][0]
    assert deferred_part == snapshot(
        {
            'type': 'tool-dangerous_action',
            'tool_call_id': 'deferred_tc1',
            'state': 'approval-requested',
            'input': {'target': 'production'},
            'provider_executed': False,
            'call_provider_metadata': None,
            'approval': {'id': 'deferred_tc1'},
        }
    )

    # Round trip: load_messages should rebuild the ToolCallPart with no result attached.
    restored = VercelAIAdapter.load_messages(deferred_msgs)
    assert len(restored) == 2
    restored_call = restored[1].parts[0]
    assert isinstance(restored_call, ToolCallPart)
    assert restored_call.tool_name == 'dangerous_action'
    assert restored_call.tool_call_id == 'deferred_tc1'


async def test_adapter_dump_messages_deferred_tool_with_resolved_result():
    """A tool call that already has a result ignores deferred_tool_call_ids."""
    history: list[ModelMessage] = [
        ModelRequest(parts=[UserPromptPart(content='Do something')]),
        ModelResponse(
            parts=[
                ToolCallPart(
                    tool_name='dangerous_action',
                    args={'target': 'production'},
                    tool_call_id='resolved_tc1',
                ),
            ]
        ),
        ModelRequest(
            parts=[
                ToolReturnPart(
                    tool_name='dangerous_action',
                    content='Action completed',
                    tool_call_id='resolved_tc1',
                ),
            ]
        ),
    ]

    # The recorded result wins even when this ID is listed as deferred.
    dumped = VercelAIAdapter.dump_messages(history, deferred_tool_call_ids=frozenset({'resolved_tc1'}))
    resolved_part = [msg.model_dump() for msg in dumped][1]['parts'][0]
    assert resolved_part['state'] == 'output-available'
    assert resolved_part['output'] == 'Action completed'


async def test_adapter_dump_messages_deferred_builtin_tool():
    """Builtin tool calls are also dumped as approval-requested when deferred."""
    history: list[ModelMessage] = [
        ModelResponse(
            parts=[
                BuiltinToolCallPart(
                    tool_name='web_search',
                    args={'query': 'test'},
                    tool_call_id='builtin_deferred_tc1',
                ),
            ]
        ),
    ]

    dumped = VercelAIAdapter.dump_messages(history, deferred_tool_call_ids=frozenset({'builtin_deferred_tc1'}))
    builtin_part = [msg.model_dump() for msg in dumped][0]['parts'][0]
    assert builtin_part['state'] == 'approval-requested'
    assert builtin_part['approval'] == {'id': 'builtin_deferred_tc1'}


async def test_adapter_dump_messages_retry_preserves_raw_error_text():
    """A string-content RetryPromptPart keeps its raw error text on dump.

    model_response() would append 'Fix the errors and try again.' — wording
    meant for the model rather than for display in the UI.
    """
    history: list[ModelMessage] = [
        ModelRequest(parts=[UserPromptPart(content='Do something')]),
        ModelResponse(
            parts=[
                ToolCallPart(tool_name='my_tool', args={}, tool_call_id='tc_retry'),
            ]
        ),
        ModelRequest(
            parts=[
                RetryPromptPart(
                    content='Cancelled',
                    tool_name='my_tool',
                    tool_call_id='tc_retry',
                )
            ]
        ),
    ]

    dumped = [msg.model_dump() for msg in VercelAIAdapter.dump_messages(history)]
    error_part = dumped[1]['parts'][0]
    # Raw content only — no appended "Fix the errors..." suffix.
    assert error_part['error_text'] == 'Cancelled'
    assert error_part['state'] == 'output-error'


async def test_adapter_dump_messages_assistant_starts_with_tool():
"""Test an assistant message that starts with a tool call instead of text."""
messages = [
Expand Down Expand Up @@ -5691,11 +5819,7 @@ async def test_adapter_dump_messages_tool_error_with_provider_metadata():
'raw_input': None,
'input': {'x': 1},
'provider_executed': False,
'error_text': """\
Tool execution failed

Fix the errors and try again.\
""",
'error_text': 'Tool execution failed',
'call_provider_metadata': {
'pydantic_ai': {
'id': 'call_fail_id',
Expand All @@ -5715,7 +5839,7 @@ async def test_adapter_dump_messages_tool_error_with_provider_metadata():
tool_error_part = reloaded_messages[2].parts[0]
assert isinstance(tool_error_part, ToolReturnPart)
assert tool_error_part.outcome == 'failed'
assert tool_error_part.content == 'Tool execution failed\n\nFix the errors and try again.'
assert tool_error_part.content == 'Tool execution failed'


async def test_event_stream_text_with_provider_metadata():
Expand Down
Loading