Skip to content

Commit 4ea2966

Browse files
committed
.
1 parent 265bfbd commit 4ea2966

6 files changed

Lines changed: 322 additions & 0 deletions

File tree

deepeval/__init__.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ def _expose_public_api() -> None:
1717
# Do not do this at module level or ruff will complain with E402
1818
global __version__, evaluate, assert_test, compare
1919
global on_test_run_end, log_hyperparameters, login, telemetry
20+
global instrument
2021

2122
from ._version import __version__ as _version
2223
from deepeval.evaluate import (
@@ -40,6 +41,25 @@ def _expose_public_api() -> None:
4041
login = _login
4142
telemetry = _telemetry
4243

44+
def instrument(*args, **kwargs):
    """Set up Confident AI's OTel backend.

    Configures a TracerProvider, attaches deepeval's OpenInference span
    interceptor, and ships spans to the Confident OTel endpoint. Pair with
    any OpenInference instrumentor (e.g. ``GoogleADKInstrumentor``,
    ``OpenAIInstrumentor``) to capture framework-specific telemetry.

    Accepts the same arguments as
    ``deepeval.integrations.openinference.instrument_openinference``.
    """
    # Imported lazily so that importing deepeval does not pull in the
    # OTel/OpenInference stack unless instrumentation is actually used.
    from deepeval.integrations.openinference import instrument_openinference

    return instrument_openinference(*args, **kwargs)

globals()["instrument"] = instrument
62+
4363

4464
_expose_public_api()
4565

@@ -60,6 +80,7 @@ def _expose_public_api() -> None:
6080
"assert_test",
6181
"on_test_run_end",
6282
"compare",
83+
"instrument",
6384
]
6485

6586

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
# Public surface of the Google ADK integration package.
from .otel import instrument_google_adk

__all__ = ["instrument_google_adk"]
Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
from __future__ import annotations
2+
3+
import logging
4+
from typing import List, Optional
5+
6+
from deepeval.confident.api import get_confident_api_key
7+
from deepeval.metrics.base_metric import BaseMetric
8+
from deepeval.prompt import Prompt
9+
from deepeval.telemetry import capture_tracing_integration
10+
11+
logger = logging.getLogger(__name__)
12+
13+
14+
def _require_google_adk_instrumentor():
15+
try:
16+
from openinference.instrumentation.google_adk import (
17+
GoogleADKInstrumentor,
18+
)
19+
20+
return GoogleADKInstrumentor
21+
except ImportError as exc:
22+
raise ImportError(
23+
"openinference-instrumentation-google-adk is not installed. "
24+
"Install it with: "
25+
"`pip install google-adk openinference-instrumentation-google-adk`."
26+
) from exc
27+
28+
29+
def instrument_google_adk(
    api_key: Optional[str] = None,
    name: Optional[str] = None,
    thread_id: Optional[str] = None,
    user_id: Optional[str] = None,
    metadata: Optional[dict] = None,
    tags: Optional[List[str]] = None,
    environment: Optional[str] = None,
    metric_collection: Optional[str] = None,
    trace_metric_collection: Optional[str] = None,
    llm_metric_collection: Optional[str] = None,
    agent_metric_collection: Optional[str] = None,
    tool_metric_collection_map: Optional[dict] = None,
    confident_prompt: Optional[Prompt] = None,
    test_case_id: Optional[str] = None,
    turn_id: Optional[str] = None,
    is_test_mode: bool = False,
    agent_metrics: Optional[List[BaseMetric]] = None,
) -> None:
    """Instrument Google ADK agents and ship traces to Confident AI.

    Wraps the community-maintained ``openinference-instrumentation-google-adk``
    package: every ADK agent, model call, and tool invocation emits an OTel
    span tagged with OpenInference semantic conventions, which deepeval's
    OpenInference span interceptor translates into ``confident.span.*``
    attributes before exporting via OTLP.

    Pair with ``@observe`` / ``with trace(...)`` to mix native deepeval spans
    with ADK-emitted OTel spans on the same trace.

    Raises:
        ValueError: if no API key is supplied and none is configured.
        ImportError: if the optional ADK instrumentor package is missing.
    """
    with capture_tracing_integration("google_adk"):
        # Fall back to the configured Confident API key when none is passed.
        resolved_key = api_key or get_confident_api_key()
        if not resolved_key:
            raise ValueError(
                "CONFIDENT_API_KEY is not set. "
                "Pass it directly or set the environment variable."
            )

        # Attach the OpenInference ADK instrumentor first so its spans are
        # emitted through the provider configured below.
        instrumentor_cls = _require_google_adk_instrumentor()
        instrumentor_cls().instrument()

        # Lazy import: avoid pulling the OTel stack in at module import time.
        from deepeval.integrations.openinference import instrument_openinference

        instrument_openinference(
            api_key=resolved_key,
            name=name,
            thread_id=thread_id,
            user_id=user_id,
            metadata=metadata,
            tags=tags,
            environment=environment,
            metric_collection=metric_collection,
            trace_metric_collection=trace_metric_collection,
            llm_metric_collection=llm_metric_collection,
            agent_metric_collection=agent_metric_collection,
            tool_metric_collection_map=tool_metric_collection_map,
            confident_prompt=confident_prompt,
            test_case_id=test_case_id,
            turn_id=turn_id,
            is_test_mode=is_test_mode,
            agent_metrics=agent_metrics,
        )

        logger.info(
            "Confident AI Google ADK telemetry attached (env=%s, test_mode=%s).",
            environment,
            is_test_mode,
        )
Lines changed: 195 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,195 @@
1+
---
2+
id: google-adk
3+
title: Google ADK
4+
sidebar_label: Google ADK
5+
---
6+
[Google's Agent Development Kit (ADK)](https://adk.dev/) is an open-source framework for building, orchestrating, and tracing generative-AI agents.
7+
8+
:::tip
9+
We recommend logging in to [Confident AI](https://app.confident-ai.com) to view your Google ADK evaluations.
10+
11+
```bash
12+
deepeval login
13+
```
14+
15+
For users in the EU region, set the Confident OTel endpoint via the following environment variable:
16+
17+
```bash
18+
export CONFIDENT_OTEL_URL="https://eu.otel.confident-ai.com"
19+
20+
```
21+
22+
If you're in the AU region, set the Confident OTel endpoint via the following environment variable:
23+
24+
```bash
25+
export CONFIDENT_OTEL_URL="https://au.otel.confident-ai.com"
26+
27+
```
28+
29+
:::
30+
31+
`deepeval` instruments Google ADK through the community-maintained [`openinference-instrumentation-google-adk`](https://github.com/Arize-ai/openinference/tree/main/python/instrumentation/openinference-instrumentation-google-adk) package. Every ADK agent, model call, and tool invocation emits an OTel span tagged with OpenInference semantic conventions, which `deepeval` then translates into Confident AI traces.
32+
33+
```bash
34+
pip install google-adk openinference-instrumentation-google-adk
35+
```
36+
37+
## End-to-End Evals
38+
39+
`deepeval` allows you to evaluate Google ADK agents in **under a minute**.
40+
41+
<Steps>
42+
43+
<Step>
44+
### Configure Google ADK
45+
46+
47+
Pass `agent_metrics` to the `instrument_google_adk` method.
48+
49+
```python title="main.py" showLineNumbers
50+
from google.adk.agents import Agent
51+
from google.adk.runners import Runner
52+
from google.adk.sessions import InMemorySessionService
53+
from google.genai import types
54+
55+
from deepeval.integrations.google_adk import instrument_google_adk
56+
from deepeval.metrics import AnswerRelevancyMetric
57+
58+
instrument_google_adk(
59+
name="Google ADK Tracing",
60+
environment="development",
61+
agent_metrics=[AnswerRelevancyMetric()],
62+
)
63+
64+
def get_weather(city: str) -> dict:
65+
return {"city": city, "forecast": "sunny, 24C"}
66+
67+
weather_agent = Agent(
68+
name="weather_agent",
69+
model="gemini-2.0-flash",
70+
instruction="Use the get_weather tool to answer questions about the weather.",
71+
tools=[get_weather],
72+
)
73+
74+
APP_NAME = "weather_app"
75+
USER_ID = "demo-user"
76+
SESSION_ID = "demo-session"
77+
78+
session_service = InMemorySessionService()
79+
session_service.create_session(
80+
app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID
81+
)
82+
runner = Runner(
83+
agent=weather_agent, app_name=APP_NAME, session_service=session_service
84+
)
85+
86+
def invoke(prompt: str) -> str:
87+
user_msg = types.Content(role="user", parts=[types.Part(text=prompt)])
88+
final = ""
89+
for event in runner.run(
90+
user_id=USER_ID, session_id=SESSION_ID, new_message=user_msg
91+
):
92+
if event.is_final_response():
93+
final = event.content.parts[0].text
94+
return final
95+
96+
invoke("What's the weather in Paris?")
97+
```
98+
99+
:::info
100+
Evaluations are supported for the Google ADK `Agent`. Only metrics that use the `input`, `output`, and `tools_called` parameters are eligible for evaluation.
101+
:::
102+
103+
</Step>
104+
<Step>
105+
### Run evaluations
106+
107+
108+
Create an `EvaluationDataset` and invoke your Google ADK agent for each golden within the `evals_iterator()` loop to run end-to-end evaluations.
109+
110+
<Tabs items={["Synchronous"]}>
111+
<Tab value="Synchronous">
112+
113+
```python title="main.py" showLineNumbers
114+
from deepeval.dataset import EvaluationDataset, Golden
115+
from deepeval.evaluate.configs import AsyncConfig
116+
117+
dataset = EvaluationDataset(
118+
goldens=[
119+
Golden(input="What's the weather in Paris?"),
120+
Golden(input="What's the weather in London?"),
121+
]
122+
)
123+
124+
for golden in dataset.evals_iterator(async_config=AsyncConfig(run_async=False)):
125+
invoke(golden.input)
126+
```
127+
128+
</Tab>
129+
</Tabs>
130+
131+
✅ Done. The `evals_iterator` will automatically generate a test run with individual evaluation traces for each golden.
132+
133+
</Step>
134+
135+
</Steps>
136+
137+
## Evals in Production
138+
139+
To run online evaluations in production, replace `agent_metrics` with a [metric collection](https://www.confident-ai.com/docs/metrics/metric-collections) string from Confident AI, and run your Google ADK agent as usual:
140+
141+
```python filename="main.py" showLineNumbers
142+
from google.adk.agents import Agent
143+
from deepeval.integrations.google_adk import instrument_google_adk
144+
145+
instrument_google_adk(
146+
name="Google ADK Tracing",
147+
environment="production",
148+
trace_metric_collection="my-trace-collection",
149+
agent_metric_collection="my-agent-collection",
150+
llm_metric_collection="my-llm-collection",
151+
tool_metric_collection_map={
152+
"get_weather": "my-tool-collection",
153+
},
154+
)
155+
156+
weather_agent = Agent(
157+
name="weather_agent",
158+
model="gemini-2.0-flash",
159+
instruction="Use the get_weather tool to answer questions about the weather.",
160+
tools=[get_weather],
161+
)
162+
```
163+
164+
`deepeval` allows you to run component evals at different levels, such as Trace, Agent, LLM, and Tool spans. You can pass a metric collection for any of these spans using the `instrument_google_adk` method.
165+
166+
## Combining with `@observe`
167+
168+
`instrument_google_adk` is OTel-based, but it composes cleanly with deepeval's native `@observe` decorator. When an ADK call runs inside an `@observe`'d function, both the deepeval-native span (your Python function) and the OTel spans (ADK's agent, LLM, and tool invocations) land on the same trace.
169+
170+
```python title="main.py" showLineNumbers
171+
from deepeval.tracing import observe
172+
from deepeval.metrics import AnswerRelevancyMetric
173+
from deepeval.integrations.google_adk import instrument_google_adk
174+
175+
instrument_google_adk(name="Google ADK Tracing", environment="development")
176+
177+
@observe(type="agent", metrics=[AnswerRelevancyMetric()])
178+
def my_pipeline(query: str) -> str:
179+
# ADK runs here -> OTel spans, attached to the SAME trace
180+
return invoke(query)
181+
```
182+
183+
## Using `deepeval.instrument(...)` directly
184+
185+
If you'd rather wire up `GoogleADKInstrumentor` yourself (for example, alongside other OpenInference instrumentors), use `deepeval.instrument(...)` to set up the OTel backend once and call any OpenInference instrumentor on top of it:
186+
187+
```python title="main.py" showLineNumbers
188+
import deepeval
189+
from openinference.instrumentation.google_adk import GoogleADKInstrumentor
190+
191+
deepeval.instrument(name="my-adk-app", environment="development")
192+
GoogleADKInstrumentor().instrument()
193+
```
194+
195+
This is the same pattern you'd use for any other OpenInference-instrumented framework.

docs/content/integrations/frameworks/meta.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
"openai",
55
"anthropic",
66
"agentcore",
7+
"google-adk",
78
"langchain",
89
"langgraph",
910
"llamaindex",

pyproject.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,8 @@ crewai = { version = "*", python = ">=3.10,<3.14" }
9797
pydantic-ai = { version = "*", python = ">=3.10,<3.14" }
9898
llama-index = "^0.14.4"
9999
openai-agents = "^0.3.3"
100+
google-adk = { version = "*", python = ">=3.10,<4.0" }
101+
openinference-instrumentation-google-adk = { version = "*", python = ">=3.10,<4.0" }
100102

101103
[tool.poetry.group.langchain]
102104
optional = true

0 commit comments

Comments
 (0)