Skip to content

Commit d4381f6

Browse files
author
Lucas Messenger
committed
feat(bedrock): add native structured output support via outputConfig.textFormat
Add opt-in native structured output mode for BedrockModel that uses Bedrock's outputConfig.textFormat API for schema-constrained responses, replacing the tool-based workaround when enabled. Model-level: - Add `structured_output_mode` config ("tool" | "native", defaults to "tool") - Add `convert_pydantic_to_json_schema()` utility with recursive `additionalProperties: false` injection - Thread `output_config` through stream() -> _stream() -> _format_request() - Native mode parses JSON text response instead of extracting tool use args Agent-level: - StructuredOutputContext gains native_mode: skips tool registration, stores output_config, and extracts results from text responses - event_loop_cycle handles end_turn as success in native mode - stream_messages passes output_config through to model.stream() Closes #1652
1 parent 287c5b6 commit d4381f6

File tree

11 files changed

+555
-44
lines changed

11 files changed

+555
-44
lines changed
Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
"""Example: Bedrock native structured output using outputConfig.textFormat.
2+
3+
Demonstrates three ways to use native structured output:
4+
1. Agent-level: agent(prompt, structured_output_model=Model)
5+
2. Model-level via agent: agent.structured_output(Model, prompt)
6+
3. Model-level direct: model.structured_output(Model, prompt)
7+
8+
Requires a Bedrock model that supports structured output (e.g., Claude 4.5+).
9+
"""
10+
11+
import asyncio
12+
import logging
13+
from pprint import pprint
14+
15+
from pydantic import BaseModel, Field
16+
17+
from strands import Agent
18+
from strands.models import BedrockModel
19+
20+
# Enable debug logging to confirm native mode (no toolConfig, has outputConfig)
21+
logging.basicConfig(level=logging.WARNING, format="%(name)s %(levelname)s %(message)s")
22+
logging.getLogger("strands.models.bedrock").setLevel(logging.DEBUG)
23+
logging.getLogger("strands.event_loop.event_loop").setLevel(logging.DEBUG)
24+
25+
26+
# --- Models ---
27+
28+
29+
class WeatherReport(BaseModel):
    """A weather report for a given location."""

    # Field descriptions are emitted into the pydantic JSON schema, which is
    # what constrains/guides the model's structured response.
    location: str = Field(description="The city and state")
    temperature_f: int = Field(description="Temperature in Fahrenheit")
    conditions: str = Field(description="Current weather conditions")
    wind_mph: int = Field(description="Wind speed in miles per hour")
36+
37+
38+
class MovieReview(BaseModel):
    """A movie review with rating."""

    # Mixes scalar and list-typed fields to exercise nested/array types in
    # the generated JSON schema.
    title: str = Field(description="The movie title")
    year: int = Field(description="Release year")
    rating: float = Field(description="Rating out of 10")
    summary: str = Field(description="One sentence summary")
    pros: list[str] = Field(description="List of positive aspects")
    cons: list[str] = Field(description="List of negative aspects")
47+
48+
49+
class Recipe(BaseModel):
    """A cooking recipe."""

    name: str = Field(description="Recipe name")
    servings: int = Field(description="Number of servings")
    prep_time_minutes: int = Field(description="Preparation time in minutes")
    ingredients: list[str] = Field(description="List of ingredients")
    # Ordered list: the model is asked for steps in cooking order.
    steps: list[str] = Field(description="Ordered cooking steps")
57+
58+
59+
# --- Setup ---

model = BedrockModel(
    # NOTE(review): assumes this inference profile supports
    # outputConfig.textFormat — confirm against the Bedrock model matrix.
    model_id="us.anthropic.claude-sonnet-4-5-20250929-v1:0",
    # Opt in to Bedrock's native structured output (outputConfig.textFormat)
    # instead of the default tool-based workaround ("tool" mode).
    structured_output_mode="native",
    # Extended thinking enabled alongside native structured output.
    additional_request_fields={"thinking": {"type": "enabled", "budget_tokens": 1024}},
)
agent = Agent(model=model)
67+
68+
69+
# --- Example 1: Agent-level structured output ---

print("=" * 60)
print("Example 1: Agent-level (recommended)")
print("=" * 60)

# Passing structured_output_model makes the agent run its normal loop and
# parse the final response into the given pydantic model.
result = agent(
    "What's the weather like in Seattle, WA right now? Make up plausible data.",
    structured_output_model=WeatherReport,
)
# The parsed model instance is exposed on the result object.
weather = result.structured_output
pprint(weather)
81+
82+
83+
# --- Example 2: Model-level via agent.structured_output ---

print("=" * 60)
print("Example 2: Model-level via agent.structured_output()")
print("=" * 60)

# agent.structured_output() returns the parsed pydantic instance directly.
review = agent.structured_output(MovieReview, "Write a review of The Matrix (1999)")
pprint(review)
91+
92+
93+
# --- Example 3: Direct model.structured_output ---

print("=" * 60)
print("Example 3: Direct model.structured_output()")
print("=" * 60)
98+
99+
100+
async def direct_model_example():
    """Drive model.structured_output() directly and return the parsed Recipe.

    Consumes the async event stream to completion; the value of the last
    event carrying an "output" key is returned (or None if none arrived).
    """
    prompt = [{"role": "user", "content": [{"text": "Give me a simple pancake recipe"}]}]
    parsed = None
    async for chunk in model.structured_output(Recipe, prompt):
        if "output" in chunk:
            parsed = chunk["output"]
    return parsed
108+
109+
110+
# Run the async example to completion on a fresh event loop.
recipe = asyncio.run(direct_model_example())
pprint(recipe)

src/strands/agent/agent.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -886,9 +886,14 @@ async def _run_loop(
886886

887887
await self._append_messages(*current_messages)
888888

889+
# Check if the model supports native structured output
890+
model_config = self.model.get_config()
891+
native_mode = isinstance(model_config, dict) and model_config.get("structured_output_mode") == "native"
892+
889893
structured_output_context = StructuredOutputContext(
890894
structured_output_model or self._default_structured_output_model,
891895
structured_output_prompt=structured_output_prompt or self._structured_output_prompt,
896+
native_mode=native_mode,
892897
)
893898

894899
# Execute the event loop cycle with retry logic for context limits
@@ -950,7 +955,7 @@ async def _execute_event_loop_cycle(
950955
# Add `Agent` to invocation_state to keep backwards-compatibility
951956
invocation_state["agent"] = self
952957

953-
if structured_output_context:
958+
if structured_output_context and not structured_output_context.native_mode:
954959
structured_output_context.register_tool(self.tool_registry)
955960

956961
try:

src/strands/event_loop/event_loop.py

Lines changed: 46 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -201,25 +201,46 @@ async def event_loop_cycle(
201201
# End the cycle and return results
202202
agent.event_loop_metrics.end_cycle(cycle_start_time, cycle_trace, attributes)
203203

204-
# Force structured output tool call if LLM didn't use it automatically
204+
# Handle structured output when model returns end_turn
205205
if structured_output_context.is_enabled and stop_reason == "end_turn":
206-
if structured_output_context.force_attempted:
206+
if structured_output_context.native_mode:
207+
# Native mode: parse text response as structured output
208+
native_result = structured_output_context.extract_native_result(message)
209+
if native_result is not None:
210+
yield StructuredOutputEvent(structured_output=native_result)
211+
tracer.end_event_loop_cycle_span(cycle_span, message)
212+
yield EventLoopStopEvent(
213+
stop_reason,
214+
message,
215+
agent.event_loop_metrics,
216+
invocation_state["request_state"],
217+
structured_output=native_result,
218+
)
219+
return
207220
raise StructuredOutputException(
208-
"The model failed to invoke the structured output tool even after it was forced."
221+
"Native structured output mode: no text content found in model response."
222+
)
223+
else:
224+
# Tool mode: force the model to call the structured output tool
225+
if structured_output_context.force_attempted:
226+
raise StructuredOutputException(
227+
"The model failed to invoke the structured output tool even after it was forced."
228+
)
229+
structured_output_context.set_forced_mode()
230+
logger.debug("Forcing structured output tool")
231+
await agent._append_messages(
232+
{"role": "user", "content": [{"text": structured_output_context.structured_output_prompt}]}
209233
)
210-
structured_output_context.set_forced_mode()
211-
logger.debug("Forcing structured output tool")
212-
await agent._append_messages(
213-
{"role": "user", "content": [{"text": structured_output_context.structured_output_prompt}]}
214-
)
215234

216-
tracer.end_event_loop_cycle_span(cycle_span, message)
217-
events = recurse_event_loop(
218-
agent=agent, invocation_state=invocation_state, structured_output_context=structured_output_context
219-
)
220-
async for typed_event in events:
221-
yield typed_event
222-
return
235+
tracer.end_event_loop_cycle_span(cycle_span, message)
236+
events = recurse_event_loop(
237+
agent=agent,
238+
invocation_state=invocation_state,
239+
structured_output_context=structured_output_context,
240+
)
241+
async for typed_event in events:
242+
yield typed_event
243+
return
223244

224245
tracer.end_event_loop_cycle_span(cycle_span, message)
225246
yield EventLoopStopEvent(stop_reason, message, agent.event_loop_metrics, invocation_state["request_state"])
@@ -338,6 +359,15 @@ async def _handle_model_execution(
338359
else:
339360
tool_specs = agent.tool_registry.get_all_tool_specs()
340361

362+
if structured_output_context.is_enabled:
363+
so_mode = "native" if structured_output_context.native_mode else "tool"
364+
logger.debug(
365+
"structured_output_mode=<%s> | tool_count=<%d> | output_config=<%s>",
366+
so_mode,
367+
len(tool_specs),
368+
bool(structured_output_context.output_config),
369+
)
370+
341371
async for event in stream_messages(
342372
agent.model,
343373
agent.system_prompt,
@@ -348,6 +378,7 @@ async def _handle_model_execution(
348378
invocation_state=invocation_state,
349379
model_state=agent._model_state,
350380
cancel_signal=agent._cancel_signal,
381+
output_config=structured_output_context.output_config,
351382
):
352383
yield event
353384

src/strands/event_loop/streaming.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -490,6 +490,11 @@ async def stream_messages(
490490
messages = _normalize_messages(messages)
491491
start_time = time.time()
492492

493+
# Build optional kwargs for model.stream() - only pass output_config if supported
494+
stream_kwargs: dict[str, Any] = {}
495+
if kwargs.get("output_config") is not None:
496+
stream_kwargs["output_config"] = kwargs["output_config"]
497+
493498
chunks = model.stream(
494499
messages,
495500
tool_specs if tool_specs else None,
@@ -498,6 +503,7 @@ async def stream_messages(
498503
system_prompt_content=system_prompt_content,
499504
invocation_state=invocation_state,
500505
model_state=model_state,
506+
**stream_kwargs,
501507
)
502508

503509
async for event in process_stream(chunks, start_time, cancel_signal):

0 commit comments

Comments
 (0)