Skip to content

Commit 96c5db5

Browse files
sasha-git and copybara-github
authored and committed
fix: use mode='json' in model_dump to serialize bytes correctly when using telemetry
fixes #4043 Co-authored-by: Sasha Sobran <asobran@google.com> PiperOrigin-RevId: 853256045
1 parent 10bdc07 commit 96c5db5

File tree

2 files changed

+101
-23
lines changed

2 files changed

+101
-23
lines changed

src/google/adk/telemetry/tracing.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -340,7 +340,7 @@ def trace_send_data(
340340
'gcp.vertex.agent.data',
341341
_safe_json_serialize([
342342
types.Content(role=content.role, parts=content.parts).model_dump(
343-
exclude_none=True
343+
exclude_none=True, mode='json'
344344
)
345345
for content in data
346346
]),
@@ -366,7 +366,7 @@ def _build_llm_request_for_trace(llm_request: LlmRequest) -> dict[str, Any]:
366366
result = {
367367
'model': llm_request.model,
368368
'config': llm_request.config.model_dump(
369-
exclude_none=True, exclude='response_schema'
369+
exclude_none=True, exclude='response_schema', mode='json'
370370
),
371371
'contents': [],
372372
}
@@ -375,7 +375,7 @@ def _build_llm_request_for_trace(llm_request: LlmRequest) -> dict[str, Any]:
375375
parts = [part for part in content.parts if not part.inline_data]
376376
result['contents'].append(
377377
types.Content(role=content.role, parts=parts).model_dump(
378-
exclude_none=True
378+
exclude_none=True, mode='json'
379379
)
380380
)
381381
return result

tests/unittests/telemetry/test_spans.py

Lines changed: 98 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
# limitations under the License.
1414

1515
import json
16-
import os
1716
from typing import Any
1817
from typing import Dict
1918
from typing import Optional
@@ -207,18 +206,87 @@ async def test_trace_call_llm_with_binary_content(
207206
assert mock_span_fixture.set_attribute.call_count == 7
208207
mock_span_fixture.set_attribute.assert_has_calls(expected_calls)
209208

210-
# Verify binary content is replaced with '<not serializable>' in JSON
209+
# Verify binary values are properly serialized as base64
211210
llm_request_json_str = None
212211
for call_obj in mock_span_fixture.set_attribute.call_args_list:
213-
if call_obj.args[0] == 'gcp.vertex.agent.llm_request':
214-
llm_request_json_str = call_obj.args[1]
212+
arg_name, arg_value = call_obj.args
213+
if arg_name == 'gcp.vertex.agent.llm_request':
214+
llm_request_json_str = arg_value
215+
break
216+
217+
assert llm_request_json_str is not None
218+
219+
# Verify bytes are base64 encoded (b'test_data' -> 'dGVzdF9kYXRh')
220+
assert 'dGVzdF9kYXRh' in llm_request_json_str
221+
222+
# Verify no serialization failures
223+
assert '<not serializable>' not in llm_request_json_str
224+
225+
226+
@pytest.mark.asyncio
227+
async def test_trace_call_llm_with_thought_signature(
228+
monkeypatch, mock_span_fixture
229+
):
230+
"""Test trace_call_llm handles thought_signature bytes correctly.
231+
232+
This test verifies that thought_signature bytes from Gemini 3.0 models
233+
are properly serialized as base64 in telemetry traces.
234+
"""
235+
monkeypatch.setattr(
236+
'opentelemetry.trace.get_current_span', lambda: mock_span_fixture
237+
)
238+
239+
agent = LlmAgent(name='test_agent')
240+
invocation_context = await _create_invocation_context(agent)
241+
242+
# multi-turn conversation where the model's response contains
243+
# thought_signature bytes
244+
thought_signature_bytes = b'thought_signature'
245+
llm_request = LlmRequest(
246+
model='gemini-3-pro-preview',
247+
contents=[
248+
types.Content(
249+
role='user',
250+
parts=[types.Part(text='Hello')],
251+
),
252+
types.Content(
253+
role='model',
254+
parts=[
255+
types.Part(
256+
thought=True,
257+
thought_signature=thought_signature_bytes,
258+
)
259+
],
260+
),
261+
types.Content(
262+
role='user',
263+
parts=[types.Part(text='Follow up question')],
264+
),
265+
],
266+
config=types.GenerateContentConfig(),
267+
)
268+
llm_response = LlmResponse(turn_complete=True)
269+
270+
# should not raise TypeError for bytes serialization
271+
trace_call_llm(invocation_context, 'test_event_id', llm_request, llm_response)
272+
273+
llm_request_json_str = None
274+
for call_obj in mock_span_fixture.set_attribute.call_args_list:
275+
arg_name, arg_value = call_obj.args
276+
if arg_name == 'gcp.vertex.agent.llm_request':
277+
llm_request_json_str = arg_value
215278
break
216279

217280
assert (
218281
llm_request_json_str is not None
219282
), "Attribute 'gcp.vertex.agent.llm_request' was not set on the span."
220283

221-
assert llm_request_json_str.count('<not serializable>') == 2
284+
# no serialization failures
285+
assert '<not serializable>' not in llm_request_json_str
286+
# llm request is valid JSON
287+
parsed = json.loads(llm_request_json_str)
288+
assert parsed['model'] == 'gemini-3-pro-preview'
289+
assert len(parsed['contents']) == 3
222290

223291

224292
def test_trace_tool_call_with_scalar_response(
@@ -407,15 +475,19 @@ async def test_call_llm_disabling_request_response_content(
407475

408476
# Assert
409477
assert not any(
410-
call_obj.args[0] == 'gcp.vertex.agent.llm_request'
411-
and call_obj.args[1] != {}
412-
for call_obj in mock_span_fixture.set_attribute.call_args_list
478+
arg_name == 'gcp.vertex.agent.llm_request' and arg_value != {}
479+
for arg_name, arg_value in (
480+
call_obj.args
481+
for call_obj in mock_span_fixture.set_attribute.call_args_list
482+
)
413483
), "Attribute 'gcp.vertex.agent.llm_request' was incorrectly set on the span."
414484

415485
assert not any(
416-
call_obj.args[0] == 'gcp.vertex.agent.llm_response'
417-
and call_obj.args[1] != {}
418-
for call_obj in mock_span_fixture.set_attribute.call_args_list
486+
arg_name == 'gcp.vertex.agent.llm_response' and arg_value != {}
487+
for arg_name, arg_value in (
488+
call_obj.args
489+
for call_obj in mock_span_fixture.set_attribute.call_args_list
490+
)
419491
), (
420492
"Attribute 'gcp.vertex.agent.llm_response' was incorrectly set on the"
421493
' span.'
@@ -466,18 +538,22 @@ def test_trace_tool_call_disabling_request_response_content(
466538

467539
# Assert
468540
assert not any(
469-
call_obj.args[0] == 'gcp.vertex.agent.tool_call_args'
470-
and call_obj.args[1] != {}
471-
for call_obj in mock_span_fixture.set_attribute.call_args_list
541+
arg_name == 'gcp.vertex.agent.tool_call_args' and arg_value != {}
542+
for arg_name, arg_value in (
543+
call_obj.args
544+
for call_obj in mock_span_fixture.set_attribute.call_args_list
545+
)
472546
), (
473547
"Attribute 'gcp.vertex.agent.tool_call_args' was incorrectly set on the"
474548
' span.'
475549
)
476550

477551
assert not any(
478-
call_obj.args[0] == 'gcp.vertex.agent.tool_response'
479-
and call_obj.args[1] != {}
480-
for call_obj in mock_span_fixture.set_attribute.call_args_list
552+
arg_name == 'gcp.vertex.agent.tool_response' and arg_value != {}
553+
for arg_name, arg_value in (
554+
call_obj.args
555+
for call_obj in mock_span_fixture.set_attribute.call_args_list
556+
)
481557
), (
482558
"Attribute 'gcp.vertex.agent.tool_response' was incorrectly set on the"
483559
' span.'
@@ -510,9 +586,11 @@ def test_trace_merged_tool_disabling_request_response_content(
510586

511587
# Assert
512588
assert not any(
513-
call_obj.args[0] == 'gcp.vertex.agent.tool_response'
514-
and call_obj.args[1] != {}
515-
for call_obj in mock_span_fixture.set_attribute.call_args_list
589+
arg_name == 'gcp.vertex.agent.tool_response' and arg_value != {}
590+
for arg_name, arg_value in (
591+
call_obj.args
592+
for call_obj in mock_span_fixture.set_attribute.call_args_list
593+
)
516594
), (
517595
"Attribute 'gcp.vertex.agent.tool_response' was incorrectly set on the"
518596
' span.'

0 commit comments

Comments (0)