
Commit 753084f

seanzhougoogle authored and copybara-github committed
refactor: Extract helper function for llm request building and response processing
Co-authored-by: Xiang (Sean) Zhou <seanzhougoogle@google.com>
PiperOrigin-RevId: 860212868
1 parent 2380afd commit 753084f

4 files changed: +195 −121 lines changed

src/google/adk/flows/llm_flows/agent_transfer.py

Lines changed: 17 additions & 4 deletions
@@ -54,7 +54,7 @@ async def run_async(
     )
 
     llm_request.append_instructions([
-        _build_target_agents_instructions(
+        _build_transfer_instructions(
             transfer_to_agent_tool.name,
             invocation_context.agent,
             transfer_targets,
@@ -83,11 +83,24 @@ def _build_target_agents_info(target_agent: BaseAgent) -> str:
   line_break = '\n'
 
 
-def _build_target_agents_instructions(
+def _build_transfer_instructions(
     tool_name: str,
-    agent: LlmAgent,
-    target_agents: list[BaseAgent],
+    agent: 'LlmAgent',
+    target_agents: list['BaseAgent'],
 ) -> str:
+  """Build instructions for agent transfer.
+
+  This function generates the instruction text that guides the LLM on how to
+  use the transfer tool to delegate to other agents.
+
+  Args:
+    tool_name: The name of the transfer tool (e.g., 'transfer_to_agent').
+    agent: The current agent that may initiate transfers.
+    target_agents: List of agents that can be transferred to.
+
+  Returns:
+    Instruction text for the LLM about agent transfers.
+  """
   # Build list of available agent names for the NOTE
   # target_agents already includes parent agent if applicable,
   # so no need to add it again
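One detail worth noting in this hunk: the parameter annotations become string literals ('LlmAgent', list['BaseAgent']), which lets the agent classes be imported only for type checking. A minimal sketch of that pattern, assuming a hypothetical my_agents module (illustrative, not ADK code):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
  # Resolved only by type checkers; nothing is imported at runtime, which is
  # how circular imports between agent and flow modules are avoided.
  # ('my_agents' is a hypothetical module used for illustration.)
  from my_agents import LlmAgent


def build_transfer_note(tool_name: str, agent: 'LlmAgent') -> str:
  # The quoted annotation is just a string at runtime, so LlmAgent does not
  # need to be importable when this module loads.
  return f'Use {tool_name} to hand off requests that {agent.name} cannot handle.'


class _StandInAgent:
  name = 'root_agent'


print(build_transfer_note('transfer_to_agent', _StandInAgent()))  # type: ignore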

src/google/adk/flows/llm_flows/base_llm_flow.py

Lines changed: 39 additions & 16 deletions
@@ -69,6 +69,42 @@
 DEFAULT_ENABLE_CACHE_STATISTICS = False
 
 
+def _finalize_model_response_event(
+    llm_request: LlmRequest,
+    llm_response: LlmResponse,
+    model_response_event: Event,
+) -> Event:
+  """Finalize and build the model response event from LLM response.
+
+  Merges the LLM response data into the model response event and
+  populates function call IDs and long-running tool information.
+
+  Args:
+    llm_request: The original LLM request.
+    llm_response: The LLM response from the model.
+    model_response_event: The base event to populate.
+
+  Returns:
+    The finalized Event with LLM response data merged in.
+  """
+  finalized_event = Event.model_validate({
+      **model_response_event.model_dump(exclude_none=True),
+      **llm_response.model_dump(exclude_none=True),
+  })
+
+  if finalized_event.content:
+    function_calls = finalized_event.get_function_calls()
+    if function_calls:
+      functions.populate_client_function_call_id(finalized_event)
+      finalized_event.long_running_tool_ids = (
+          functions.get_long_running_function_calls(
+              function_calls, llm_request.tools_dict
+          )
+      )
+
+  return finalized_event
+
+
 class BaseLlmFlow(ABC):
   """A basic flow that calls the LLM in a loop until a final response is generated.
 
@@ -941,22 +977,9 @@ def _finalize_model_response_event(
       llm_response: LlmResponse,
       model_response_event: Event,
   ) -> Event:
-    model_response_event = Event.model_validate({
-        **model_response_event.model_dump(exclude_none=True),
-        **llm_response.model_dump(exclude_none=True),
-    })
-
-    if model_response_event.content:
-      function_calls = model_response_event.get_function_calls()
-      if function_calls:
-        functions.populate_client_function_call_id(model_response_event)
-        model_response_event.long_running_tool_ids = (
-            functions.get_long_running_function_calls(
-                function_calls, llm_request.tools_dict
-            )
-        )
-
-    return model_response_event
+    return _finalize_model_response_event(
+        llm_request, llm_response, model_response_event
+    )
 
   async def _handle_control_event_flush(
       self, invocation_context: InvocationContext, llm_response: LlmResponse
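The helper's core move is the Pydantic v2 model_dump/model_validate merge: later dict entries win, and exclude_none=True keeps unset fields on either side from overwriting real values. A minimal standalone sketch of the pattern (the Msg model is illustrative, not an ADK type):

from typing import Optional

from pydantic import BaseModel


class Msg(BaseModel):
  author: Optional[str] = None
  text: Optional[str] = None
  turn: Optional[int] = None


base = Msg(author='model', turn=3)  # like the in-progress event
update = Msg(text='hello')          # like the LLM response

# Later dict entries win; exclude_none=True drops unset fields, so `update`
# only overrides what it actually carries.
merged = Msg.model_validate({
    **base.model_dump(exclude_none=True),
    **update.model_dump(exclude_none=True),
})

assert merged == Msg(author='model', text='hello', turn=3)

Note that the old method survives as a thin wrapper over the module-level function, so callers and subclass overrides of BaseLlmFlow keep working while the logic itself moves out of the class.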

src/google/adk/flows/llm_flows/basic.py

Lines changed: 59 additions & 43 deletions
@@ -29,55 +29,71 @@
 from ._base_llm_processor import BaseLlmRequestProcessor
 
 
+def _build_basic_request(
+    invocation_context: InvocationContext,
+    llm_request: LlmRequest,
+) -> None:
+  """Populate basic LlmRequest fields from agent configuration.
+
+  Sets up model, config, output_schema, and live connect configuration
+  based on the agent and run configuration.
+
+  Args:
+    invocation_context: The invocation context containing agent and run config.
+    llm_request: The LlmRequest to populate.
+  """
+  agent = invocation_context.agent
+  model = agent.canonical_model
+  llm_request.model = model if isinstance(model, str) else model.model
+  llm_request.config = (
+      agent.generate_content_config.model_copy(deep=True)
+      if agent.generate_content_config
+      else types.GenerateContentConfig()
+  )
+  # Only set output_schema if no tools are specified. as of now, model don't
+  # support output_schema and tools together. we have a workaround to support
+  # both output_schema and tools at the same time. see
+  # _output_schema_processor.py for details
+  if agent.output_schema:
+    if not agent.tools or can_use_output_schema_with_tools(model):
+      llm_request.set_output_schema(agent.output_schema)
+
+  llm_request.live_connect_config.response_modalities = (
+      invocation_context.run_config.response_modalities
+  )
+  llm_request.live_connect_config.speech_config = (
+      invocation_context.run_config.speech_config
+  )
+  llm_request.live_connect_config.output_audio_transcription = (
+      invocation_context.run_config.output_audio_transcription
+  )
+  llm_request.live_connect_config.input_audio_transcription = (
+      invocation_context.run_config.input_audio_transcription
+  )
+  llm_request.live_connect_config.realtime_input_config = (
+      invocation_context.run_config.realtime_input_config
+  )
+  llm_request.live_connect_config.enable_affective_dialog = (
+      invocation_context.run_config.enable_affective_dialog
+  )
+  llm_request.live_connect_config.proactivity = (
+      invocation_context.run_config.proactivity
+  )
+  llm_request.live_connect_config.session_resumption = (
+      invocation_context.run_config.session_resumption
+  )
+  llm_request.live_connect_config.context_window_compression = (
+      invocation_context.run_config.context_window_compression
+  )
+
+
 class _BasicLlmRequestProcessor(BaseLlmRequestProcessor):
 
   @override
   async def run_async(
       self, invocation_context: InvocationContext, llm_request: LlmRequest
   ) -> AsyncGenerator[Event, None]:
-    agent = invocation_context.agent
-    model = agent.canonical_model
-    llm_request.model = model if isinstance(model, str) else model.model
-    llm_request.config = (
-        agent.generate_content_config.model_copy(deep=True)
-        if agent.generate_content_config
-        else types.GenerateContentConfig()
-    )
-    # Only set output_schema if no tools are specified. as of now, model don't
-    # support output_schema and tools together. we have a workaround to support
-    # both output_schema and tools at the same time. see
-    # _output_schema_processor.py for details
-    if agent.output_schema:
-      if not agent.tools or can_use_output_schema_with_tools(model):
-        llm_request.set_output_schema(agent.output_schema)
-
-    llm_request.live_connect_config.response_modalities = (
-        invocation_context.run_config.response_modalities
-    )
-    llm_request.live_connect_config.speech_config = (
-        invocation_context.run_config.speech_config
-    )
-    llm_request.live_connect_config.output_audio_transcription = (
-        invocation_context.run_config.output_audio_transcription
-    )
-    llm_request.live_connect_config.input_audio_transcription = (
-        invocation_context.run_config.input_audio_transcription
-    )
-    llm_request.live_connect_config.realtime_input_config = (
-        invocation_context.run_config.realtime_input_config
-    )
-    llm_request.live_connect_config.enable_affective_dialog = (
-        invocation_context.run_config.enable_affective_dialog
-    )
-    llm_request.live_connect_config.proactivity = (
-        invocation_context.run_config.proactivity
-    )
-    llm_request.live_connect_config.session_resumption = (
-        invocation_context.run_config.session_resumption
-    )
-    llm_request.live_connect_config.context_window_compression = (
-        invocation_context.run_config.context_window_compression
-    )
+    _build_basic_request(invocation_context, llm_request)
 
     # TODO: handle tool append here, instead of in BaseTool.process_llm_request.
 
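One behavior the extracted helper preserves is the defensive model_copy(deep=True) on generate_content_config, so per-request mutations never leak back into the agent's stored config. A small sketch of why the deep copy matters, using an illustrative Pydantic model (not ADK's types):

from pydantic import BaseModel


class GenConfig(BaseModel):
  temperature: float = 0.7
  stop_sequences: list[str] = []


agent_config = GenConfig(stop_sequences=['END'])

# deep=True copies nested containers too, so mutating the per-request copy
# cannot leak back into the configuration stored on the agent.
request_config = agent_config.model_copy(deep=True)
request_config.stop_sequences.append('STOP')
request_config.temperature = 0.0

assert agent_config.stop_sequences == ['END']
assert agent_config.temperature == 0.7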

src/google/adk/flows/llm_flows/instructions.py

Lines changed: 80 additions & 58 deletions
@@ -28,81 +28,103 @@
 
 if TYPE_CHECKING:
   from ...agents.invocation_context import InvocationContext
+  from ...agents.llm_agent import LlmAgent
   from ...models.llm_request import LlmRequest
 
 
-class _InstructionsLlmRequestProcessor(BaseLlmRequestProcessor):
-  """Handles instructions and global instructions for LLM flow."""
+async def _process_agent_instruction(
+    agent: 'LlmAgent',
+    invocation_context: 'InvocationContext',
+) -> str:
+  """Process agent instruction with state injection.
+
+  Resolves the agent's instruction and injects session state variables
+  unless bypass_state_injection is set.
+
+  Args:
+    agent: The agent with instruction to process.
+    invocation_context: The invocation context.
+
+  Returns:
+    The processed instruction text with state variables injected.
+  """
+  raw_si, bypass_state_injection = await agent.canonical_instruction(
+      ReadonlyContext(invocation_context)
+  )
+  si = raw_si
+  if not bypass_state_injection:
+    si = await instructions_utils.inject_session_state(
+        raw_si, ReadonlyContext(invocation_context)
+    )
+  return si
+
+
+async def _build_instructions(
+    invocation_context: 'InvocationContext',
+    llm_request: 'LlmRequest',
+) -> None:
+  """Build and append instructions to the LLM request.
+
+  Handles global instructions (deprecated), static_instruction, and
+  dynamic instruction based on agent configuration.
 
-  async def _process_agent_instruction(
-      self, agent, invocation_context: InvocationContext
-  ) -> str:
-    """Process agent instruction with state injection.
+  Args:
+    invocation_context: The invocation context.
+    llm_request: The LlmRequest to populate with instructions.
+  """
+  from ...agents.base_agent import BaseAgent
+  from ...agents.llm_agent import LlmAgent
 
-    Args:
-      agent: The agent with instruction to process
-      invocation_context: The invocation context
+  agent = invocation_context.agent
 
-    Returns:
-      The processed instruction text
-    """
-    raw_si, bypass_state_injection = await agent.canonical_instruction(
-        ReadonlyContext(invocation_context)
+  root_agent: BaseAgent = agent.root_agent
+
+  # Handle global instructions (DEPRECATED - use GlobalInstructionPlugin instead)
+  # TODO: Remove this code block when global_instruction field is removed
+  if isinstance(root_agent, LlmAgent) and root_agent.global_instruction:
+    raw_si, bypass_state_injection = (
+        await root_agent.canonical_global_instruction(
+            ReadonlyContext(invocation_context)
+        )
     )
     si = raw_si
     if not bypass_state_injection:
       si = await instructions_utils.inject_session_state(
           raw_si, ReadonlyContext(invocation_context)
       )
-    return si
+    llm_request.append_instructions([si])
+
+  # Handle static_instruction - add via append_instructions
+  if agent.static_instruction:
+    from google.genai import _transformers
+
+    # Convert ContentUnion to Content using genai transformer
+    static_content = _transformers.t_content(agent.static_instruction)
+    llm_request.append_instructions(static_content)
+
+  # Handle instruction based on whether static_instruction exists
+  if agent.instruction and not agent.static_instruction:
+    # Only add to system instructions if no static instruction exists
+    si = await _process_agent_instruction(agent, invocation_context)
+    llm_request.append_instructions([si])
+  elif agent.instruction and agent.static_instruction:
+    # Static instruction exists, so add dynamic instruction to content
+    from google.genai import types
+
+    si = await _process_agent_instruction(agent, invocation_context)
+    # Create user content for dynamic instruction
+    dynamic_content = types.Content(role='user', parts=[types.Part(text=si)])
+    llm_request.contents.append(dynamic_content)
+
+
+class _InstructionsLlmRequestProcessor(BaseLlmRequestProcessor):
+  """Handles instructions and global instructions for LLM flow."""
 
   @override
   async def run_async(
       self, invocation_context: InvocationContext, llm_request: LlmRequest
   ) -> AsyncGenerator[Event, None]:
-    from ...agents.base_agent import BaseAgent
-    from ...agents.llm_agent import LlmAgent
-
-    agent = invocation_context.agent
-
-    root_agent: BaseAgent = agent.root_agent
-
-    # Handle global instructions (DEPRECATED - use GlobalInstructionPlugin instead)
-    # TODO: Remove this code block when global_instruction field is removed
-    if isinstance(root_agent, LlmAgent) and root_agent.global_instruction:
-      raw_si, bypass_state_injection = (
-          await root_agent.canonical_global_instruction(
-              ReadonlyContext(invocation_context)
-          )
-      )
-      si = raw_si
-      if not bypass_state_injection:
-        si = await instructions_utils.inject_session_state(
-            raw_si, ReadonlyContext(invocation_context)
-        )
-      llm_request.append_instructions([si])
-
-    # Handle static_instruction - add via append_instructions
-    if agent.static_instruction:
-      from google.genai import _transformers
-
-      # Convert ContentUnion to Content using genai transformer
-      static_content = _transformers.t_content(agent.static_instruction)
-      llm_request.append_instructions(static_content)
-
-    # Handle instruction based on whether static_instruction exists
-    if agent.instruction and not agent.static_instruction:
-      # Only add to system instructions if no static instruction exists
-      si = await self._process_agent_instruction(agent, invocation_context)
-      llm_request.append_instructions([si])
-    elif agent.instruction and agent.static_instruction:
-      # Static instruction exists, so add dynamic instruction to content
-      from google.genai import types
-
-      si = await self._process_agent_instruction(agent, invocation_context)
-      # Create user content for dynamic instruction
-      dynamic_content = types.Content(role='user', parts=[types.Part(text=si)])
-      llm_request.contents.append(dynamic_content)
+    await _build_instructions(invocation_context, llm_request)
 
     # Maintain async generator behavior
     return
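run_async stays an async generator even though all the work now happens in _build_instructions. A minimal sketch of that delegation shape, assuming the common "return followed by an unreachable yield" idiom (illustrative names, not ADK's API):

import asyncio
from typing import AsyncGenerator


async def _build(request: list[str]) -> None:
  # Module-level helper: mutates the request in place and yields no events.
  request.append('system instruction')


class Processor:

  async def run_async(self, request: list[str]) -> AsyncGenerator[str, None]:
    await _build(request)
    # Returning before the yield keeps this an async generator that simply
    # produces nothing; the unreachable yield is what makes Python treat the
    # function as a generator at all.
    return
    yield


async def main() -> None:
  request: list[str] = []
  async for _ in Processor().run_async(request):
    pass  # No events are produced; the request was mutated in place.
  assert request == ['system instruction']


asyncio.run(main())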
