Skip to content

Commit 9a94fb6

Browse files
committed
feat: add integration tests for telemetry
1 parent 672c463 commit 9a94fb6

5 files changed

Lines changed: 256 additions & 0 deletions

File tree

.github/workflows/integration_tests.yml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,11 @@ jobs:
6262
BASE_URL: ${{ matrix.environment == 'alpha' && secrets.ALPHA_BASE_URL || matrix.environment == 'staging' && secrets.STAGING_BASE_URL || matrix.environment == 'cloud' && secrets.CLOUD_BASE_URL }}
6363

6464
USE_AZURE_CHAT: ${{ matrix.use_azure_chat }}
65+
66+
# App Insights for telemetry testing
67+
APPLICATIONINSIGHTS_CONNECTION_STRING: ${{ secrets.APPLICATIONINSIGHTS_CONNECTION_STRING }}
68+
APP_INSIGHTS_APP_ID: ${{ secrets.APP_INSIGHTS_APP_ID }}
69+
APP_INSIGHTS_API_KEY: ${{ secrets.APP_INSIGHTS_API_KEY }}
6570
working-directory: testcases/${{ matrix.testcase }}
6671
run: |
6772
# If any errors occur execution will stop with exit code
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
[project]
2+
name = "eval-telemetry-testcase"
3+
version = "0.1.0"
4+
description = "E2E test for verifying eval telemetry events in Application Insights"
5+
requires-python = ">=3.11"
6+
dependencies = [
7+
"uipath",
8+
"httpx",
9+
]
10+
11+
[tool.uv.sources]
12+
uipath = { path = "../../", editable = true }
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
#!/bin/bash
# E2E test driver: runs an eval with telemetry enabled, then waits for
# Application Insights ingestion so the Python assertions can query it.
set -e

echo "=== E2E Test: Eval Telemetry Integration ==="

# Print a warning when the named environment variable is empty or unset.
warn_if_unset() {
  local name="$1" hint="$2"
  if [ -z "${!name}" ]; then
    echo "Warning: $name not set, $hint"
  fi
}

warn_if_unset APPLICATIONINSIGHTS_CONNECTION_STRING "telemetry won't be sent"
warn_if_unset APP_INSIGHTS_APP_ID "skipping telemetry verification"
warn_if_unset APP_INSIGHTS_API_KEY "skipping telemetry verification"

echo "Syncing dependencies..."
uv sync

echo "Authenticating with UiPath..."
uv run uipath auth --client-id="$CLIENT_ID" --client-secret="$CLIENT_SECRET" --base-url="$BASE_URL"

# Unique per-run ID so this run's telemetry events can be found later.
export EVAL_TEST_RUN_ID="e2e-test-$(date +%s)-$$"
echo "Test Run ID: $EVAL_TEST_RUN_ID"

echo "Running evaluations with telemetry enabled..."
# Telemetry is explicitly enabled; the App Insights connection string comes
# from the environment.
UIPATH_TELEMETRY_ENABLED=true uv run uipath eval main ../../samples/calculator/evaluations/eval-sets/default.json \
  --no-report \
  --output-file __uipath/output.json \
  --eval-set-run-id "$EVAL_TEST_RUN_ID"

# Give App Insights time to ingest the events before they are queried.
if [ -n "$APP_INSIGHTS_APP_ID" ] && [ -n "$APP_INSIGHTS_API_KEY" ]; then
  echo "Waiting for telemetry to be ingested (30 seconds)..."
  sleep 30
fi

echo "Test completed successfully!"
Lines changed: 194 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,194 @@
1+
"""E2E assertions for eval telemetry testcase.
2+
3+
This script validates that telemetry events are sent to Application Insights by:
4+
1. Verifying eval completed successfully
5+
2. Querying App Insights API to check for expected telemetry events
6+
3. Validating event properties match expected values
7+
"""
8+
9+
import json
10+
import os
11+
import sys
12+
import time
13+
from typing import Any
14+
15+
import httpx
16+
17+
# Telemetry event names the eval run is expected to emit: a Start/End pair
# for the eval set as a whole and for the individual eval runs.
EXPECTED_EVENTS = [
    f"{scope}.{phase}.URT"
    for scope in ("EvalSetRun", "EvalRun")
    for phase in ("Start", "End")
]
24+
25+
26+
def load_output(output_file: str) -> dict[str, Any]:
    """Parse the JSON file at *output_file* and return its contents."""
    with open(output_file, encoding="utf-8") as handle:
        return json.load(handle)
30+
31+
32+
def query_app_insights(
    app_id: str, api_key: str, query: str, max_retries: int = 3
) -> dict[str, Any]:
    """Query Application Insights using the REST API.

    Args:
        app_id: Application Insights App ID
        api_key: Application Insights API Key
        query: Kusto query to execute
        max_retries: Number of attempts before giving up (must be >= 1)

    Returns:
        Query results as dictionary

    Raises:
        ValueError: If max_retries is less than 1.
        Exception: The last error encountered once all attempts are exhausted.
    """
    if max_retries < 1:
        # Fix: previously the loop body never ran for max_retries <= 0 and
        # the function silently returned None, violating the annotated
        # return type. Fail loudly instead.
        raise ValueError("max_retries must be >= 1")

    url = f"https://api.applicationinsights.io/v1/apps/{app_id}/query"
    headers = {"x-api-key": api_key, "Content-Type": "application/json"}
    payload = {"query": query}

    for attempt in range(max_retries):
        try:
            response = httpx.post(url, headers=headers, json=payload, timeout=30)
            response.raise_for_status()
            return response.json()
        # Broad on purpose: this is a best-effort retry wrapper, and any
        # transient failure (network, 5xx, bad JSON) should trigger a retry.
        except Exception as e:
            if attempt < max_retries - 1:
                print(f" Retry {attempt + 1}/{max_retries} after error: {e}")
                time.sleep(5)
            else:
                raise
    raise AssertionError("unreachable: loop always returns or raises")
61+
62+
63+
def verify_telemetry_events(app_id: str, api_key: str, eval_set_run_id: str) -> bool:
    """Check that the expected telemetry events reached Application Insights.

    Args:
        app_id: Application Insights App ID
        api_key: Application Insights API Key
        eval_set_run_id: The eval set run ID to search for

    Returns:
        True if all expected events were found
    """
    print("\n--- Querying App Insights for events ---")
    print(f" EvalSetRunId: {eval_set_run_id}")

    # Restrict to recent customEvents tagged with this run's EvalSetRunId
    # (both Kusto property-access spellings, to be safe).
    query = f"""
    customEvents
    | where timestamp > ago(10m)
    | where customDimensions.EvalSetRunId == "{eval_set_run_id}"
        or customDimensions["EvalSetRunId"] == "{eval_set_run_id}"
    | project name, timestamp, customDimensions
    | order by timestamp asc
    """

    try:
        result = query_app_insights(app_id, api_key, query)
    except Exception as e:
        print(f" Error querying App Insights: {e}")
        return False

    tables = result.get("tables", [])
    if not tables:
        print(" No tables returned from query")
        return False

    first_table = tables[0]
    rows = first_table.get("rows", [])
    columns = [col["name"] for col in first_table.get("columns", [])]

    print(f" Found {len(rows)} events")

    # Pull each event's name out of its row; fall back to column 0 if the
    # "name" column is unexpectedly absent.
    name_idx = columns.index("name") if "name" in columns else 0
    found_events: list[str] = [row[name_idx] for row in rows]
    for event_name in found_events:
        print(f" - {event_name}")

    print("\n--- Verifying expected events ---")
    missing = [e for e in EXPECTED_EVENTS if e not in found_events]
    for expected in EXPECTED_EVENTS:
        marker = "[OK]" if expected in found_events else "[MISSING]"
        print(f" {marker} {expected}")

    return not missing
124+
125+
126+
def verify_output(output_file: str) -> bool:
    """Validate that the eval run wrote a successful output file.

    Returns True when the file exists and reports status "successful".
    """
    print("\n--- Verifying eval output ---")

    if not os.path.isfile(output_file):
        print(f" Output file '{output_file}' not found")
        return False

    data = load_output(output_file)
    status = data.get("status")

    if status != "successful":
        print(f" Eval failed with status: {status}")
        return False

    print(f" Status: {status}")

    # Surface the result count for the CI log; absence is treated as empty.
    results = data.get("output", {}).get("evaluationSetResults", [])
    print(f" Evaluation results: {len(results)}")

    return True
148+
149+
150+
def main() -> None:
    """Run all assertions: eval output first, then App Insights telemetry."""
    output_file = "__uipath/output.json"

    app_id = os.environ.get("APP_INSIGHTS_APP_ID")
    api_key = os.environ.get("APP_INSIGHTS_API_KEY")
    eval_set_run_id = os.environ.get("EVAL_TEST_RUN_ID")

    # The eval itself must have succeeded before telemetry is worth checking.
    if not verify_output(output_file):
        print("\nEval output verification failed")
        sys.exit(1)

    # Without App Insights credentials we can only confirm the eval ran.
    if not (app_id and api_key):
        print("\n--- Skipping App Insights verification ---")
        print(" APP_INSIGHTS_APP_ID or APP_INSIGHTS_API_KEY not set")
        print(" Telemetry verification skipped (eval completed successfully)")
        print("\nAll assertions passed! (telemetry verification skipped)")
        return

    if not eval_set_run_id:
        print("\n--- Skipping App Insights verification ---")
        print(" EVAL_TEST_RUN_ID not set")
        print("\nAll assertions passed! (telemetry verification skipped)")
        return

    banner = "=" * 60
    if not verify_telemetry_events(app_id, api_key, eval_set_run_id):
        print("\n" + banner)
        print("Telemetry verification FAILED")
        print("Expected events not found in App Insights")
        print(banner)
        sys.exit(1)

    print("\n" + banner)
    print("All assertions passed!")
    print(" - Eval completed successfully")
    print(" - Telemetry events verified in App Insights")
    print(banner)


if __name__ == "__main__":
    main()
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
{
2+
"functions": {
3+
"main": "../../samples/calculator/main.py:main"
4+
}
5+
}

0 commit comments

Comments
 (0)