|
"""Integration tests for ADK streaming support.

Verifies that the streaming model activity publishes raw ``LlmResponse``
chunks via the WorkflowStream broker. Non-streaming behavior is covered
by ``test_google_adk_agents.py``.
"""
| 7 | + |
import asyncio
import logging
import uuid
from collections.abc import AsyncGenerator
from datetime import timedelta

import pytest
from google.adk import Agent
from google.adk.agents.run_config import RunConfig, StreamingMode
from google.adk.models import BaseLlm, LLMRegistry
from google.adk.models.llm_request import LlmRequest
from google.adk.models.llm_response import LlmResponse
from google.adk.runners import InMemoryRunner
from google.genai.types import Content, Part

from temporalio import workflow
from temporalio.client import Client, WorkflowFailureError
from temporalio.contrib.google_adk_agents import GoogleAdkPlugin, TemporalModel
from temporalio.contrib.workflow_streams import WorkflowStream, WorkflowStreamClient
from temporalio.worker import Worker
| 28 | + |
# Module-level logger, namespaced to this test module per stdlib convention.
logger = logging.getLogger(__name__)
| 30 | + |
| 31 | + |
class StreamingTestModel(BaseLlm):
    """Fake LLM that emits two partial text chunks to simulate streaming."""

    @classmethod
    def supported_models(cls) -> list[str]:
        """Return the model identifiers this fake implementation serves."""
        return ["streaming_test_model"]

    async def generate_content_async(
        self, llm_request: LlmRequest, stream: bool = False
    ) -> AsyncGenerator[LlmResponse, None]:
        """Yield two partial responses, insisting that streaming was requested.

        The streaming activity is expected to invoke us with ``stream=True``;
        failing loudly here catches any regression that drops the flag.
        """
        if not stream:
            raise AssertionError(
                "StreamingTestModel.generate_content_async requires stream=True"
            )
        for chunk in ("Hello ", "world!"):
            yield LlmResponse(
                content=Content(role="model", parts=[Part(text=chunk)])
            )
| 50 | + |
| 51 | + |
@workflow.defn
class StreamingAdkWorkflow:
    """Test workflow that opts into streaming via RunConfig.streaming_mode."""

    @workflow.init
    def __init__(self, prompt: str) -> None:
        # Broker through which the streaming model activity publishes chunks.
        self.stream = WorkflowStream()

    @workflow.run
    async def run(self, prompt: str) -> str:
        """Run a single agent turn and return the last text part observed."""
        agent = Agent(
            name="test_agent",
            model=TemporalModel(
                "streaming_test_model", streaming_event_topic="events"
            ),
            instruction="You are a test agent.",
        )

        runner = InMemoryRunner(agent=agent, app_name="test-app")
        session = await runner.session_service.create_session(
            app_name="test-app", user_id="test"
        )

        last_text = ""
        events = runner.run_async(
            user_id="test",
            session_id=session.id,
            new_message=Content(role="user", parts=[Part(text=prompt)]),
            run_config=RunConfig(streaming_mode=StreamingMode.SSE),
        )
        async for event in events:
            # Each streamed part overwrites the previous one; the final
            # value is whatever text arrived last.
            parts = event.content.parts if event.content else None
            for part in parts or []:
                if part.text:
                    last_text = part.text

        return last_text
| 87 | + |
| 88 | + |
@pytest.mark.asyncio
async def test_streaming_publishes_events(client: Client):
    """Streaming activity publishes raw LlmResponse chunks to the topic."""
    LLMRegistry.register(StreamingTestModel)

    config = client.config()
    config["plugins"] = [GoogleAdkPlugin()]
    client = Client(**config)

    workflow_id = f"adk-streaming-test-{uuid.uuid4()}"
    task_queue = "adk-streaming-test"

    async with Worker(
        client,
        task_queue=task_queue,
        workflows=[StreamingAdkWorkflow],
        max_cached_workflows=0,
    ):
        handle = await client.start_workflow(
            StreamingAdkWorkflow.run,
            "Hello",
            id=workflow_id,
            task_queue=task_queue,
            execution_timeout=timedelta(seconds=30),
        )

        stream = WorkflowStreamClient.create(client, workflow_id)
        responses: list[LlmResponse] = []

        async def drain() -> None:
            # Subscribing from offset 0 replays anything published before
            # we attached, so starting after the workflow is not a race.
            subscription = stream.subscribe(
                ["events"],
                from_offset=0,
                result_type=LlmResponse,
                poll_cooldown=timedelta(milliseconds=50),
            )
            async for item in subscription:
                responses.append(item.data)
                if len(responses) >= 2:
                    return

        reader = asyncio.create_task(drain())
        result = await handle.result()
        await asyncio.wait_for(reader, timeout=10.0)

        # Workflow assembles streamed parts; the last part it observes is "world!".
        assert result == "world!"

        texts = [
            part.text
            for r in responses
            if r.content and r.content.parts
            for part in r.content.parts
            if part.text
        ]
        assert texts == ["Hello ", "world!"], f"Unexpected text deltas: {texts}"
| 142 | + |
| 143 | + |
@workflow.defn
class StreamingAdkRequiresTopicWorkflow:
    """Calls ``generate_content_async(stream=True)`` without configuring
    ``streaming_event_topic``; the call must raise before any activity
    is scheduled."""

    @workflow.run
    async def run(self, prompt: str) -> str:
        # Deliberately omit streaming_event_topic so streaming must fail.
        agent = Agent(
            name="test_agent",
            model=TemporalModel("streaming_test_model"),
            instruction="You are a test agent.",
        )
        runner = InMemoryRunner(agent=agent, app_name="test-app")
        session = await runner.session_service.create_session(
            app_name="test-app", user_id="test"
        )
        events = runner.run_async(
            user_id="test",
            session_id=session.id,
            new_message=Content(role="user", parts=[Part(text=prompt)]),
            run_config=RunConfig(streaming_mode=StreamingMode.SSE),
        )
        async for _ in events:
            pass
        return "should not reach"
| 170 | + |
| 171 | + |
@pytest.mark.asyncio
async def test_streaming_requires_topic(client: Client):
    """``stream=True`` fails fast when no streaming topic was configured
    on ``TemporalModel``. The error is raised in the workflow before any
    streaming activity is scheduled."""
    LLMRegistry.register(StreamingTestModel)

    config = client.config()
    config["plugins"] = [GoogleAdkPlugin()]
    client = Client(**config)

    task_queue = "adk-streaming-requires-topic"
    async with Worker(
        client,
        task_queue=task_queue,
        workflows=[StreamingAdkRequiresTopicWorkflow],
        max_cached_workflows=0,
    ):
        with pytest.raises(WorkflowFailureError) as exc_info:
            await client.execute_workflow(
                StreamingAdkRequiresTopicWorkflow.run,
                "Hi",
                id=f"adk-streaming-requires-topic-{uuid.uuid4()}",
                task_queue=task_queue,
                execution_timeout=timedelta(seconds=30),
            )

        # The failure cause should point the user at the missing config knob.
        assert "streaming_event_topic" in str(exc_info.value.cause)