Skip to content

Commit 7b8a72f

Browse files
committed
compute span annotation summaries
1 parent 3a7c446 commit 7b8a72f

File tree

2 files changed

+37
-25
lines changed

2 files changed

+37
-25
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
from collections import defaultdict
2+
from dataclasses import asdict, dataclass
3+
from typing import Optional
4+
5+
import pandas as pd
6+
7+
from phoenix.db import models
8+
from phoenix.server.api.types.Span import AnnotationSummary
9+
10+
11+
def compute_span_annotation_summaries(
    annotations: list[models.SpanAnnotation],
) -> list[AnnotationSummary]:
    """Aggregate span annotations into one summary per annotation name.

    Annotations are bucketed first by ``name`` and then by ``label`` (which
    may be ``None``).  Each (name, label) bucket accumulates a record count,
    a count of labeled records, and the sum/count of non-null scores.  The
    buckets for a given name are then flattened into a ``pandas.DataFrame``
    (one row per label) and wrapped in an ``AnnotationSummary``.

    Args:
        annotations: The span annotations to summarize.

    Returns:
        One ``AnnotationSummary`` per distinct annotation name, carrying the
        per-label metrics frame with ``simple_avg=True``.
    """

    @dataclass
    class Metrics:
        # Running totals for a single (name, label) bucket.
        record_count: int = 0
        label_count: int = 0
        score_sum: float = 0
        score_count: int = 0

    # name -> label -> accumulated metrics; defaultdicts create buckets lazily.
    tallies: defaultdict[str, defaultdict[Optional[str], Metrics]] = defaultdict(
        lambda: defaultdict(Metrics)
    )
    for ann in annotations:
        bucket = tallies[ann.name][ann.label]
        bucket.record_count += 1
        if ann.label is not None:
            bucket.label_count += 1
        # A score of 0 is a real score and must be counted; only None is skipped.
        if ann.score is not None:
            bucket.score_sum += ann.score
            bucket.score_count += 1

    summaries: list[AnnotationSummary] = []
    for name, per_label in tallies.items():
        rows = []
        for label, bucket in per_label.items():
            rows.append({"label": label, **asdict(bucket)})
        summaries.append(
            AnnotationSummary(name=name, df=pd.DataFrame(rows), simple_avg=True)
        )
    return summaries

src/phoenix/server/api/types/Span.py

+2-25
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,11 @@
11
import json
22
from asyncio import gather
3-
from collections import defaultdict
43
from collections.abc import Mapping
5-
from dataclasses import asdict, dataclass
64
from datetime import datetime
75
from enum import Enum
86
from typing import TYPE_CHECKING, Any, Iterable, Optional, cast
97

108
import numpy as np
11-
import pandas as pd
129
import strawberry
1310
from openinference.semconv.trace import SpanAttributes
1411
from strawberry import ID, UNSET
@@ -19,6 +16,7 @@
1916
import phoenix.trace.schemas as trace_schema
2017
from phoenix.db import models
2118
from phoenix.server.api.context import Context
19+
from phoenix.server.api.helpers.annotations import compute_span_annotation_summaries
2220
from phoenix.server.api.helpers.dataset_helpers import (
2321
get_dataset_example_input,
2422
get_dataset_example_output,
@@ -558,28 +556,7 @@ async def span_annotation_summaries(
558556
annotation for annotation in annotations if satisfies_filter(annotation, filter)
559557
]
560558

561-
@dataclass
562-
class Metrics:
563-
record_count: int = 0
564-
label_count: int = 0
565-
score_sum: float = 0
566-
score_count: int = 0
567-
568-
summaries: defaultdict[str, defaultdict[Optional[str], Metrics]] = defaultdict(
569-
lambda: defaultdict(Metrics)
570-
)
571-
for annotation in annotations:
572-
metrics = summaries[annotation.name][annotation.label]
573-
metrics.record_count += 1
574-
metrics.label_count += int(annotation.label is not None)
575-
metrics.score_sum += annotation.score or 0
576-
metrics.score_count += int(annotation.score is not None)
577-
578-
result: list[AnnotationSummary] = []
579-
for name, label_metrics in summaries.items():
580-
rows = [{"label": label, **asdict(metrics)} for label, metrics in label_metrics.items()]
581-
result.append(AnnotationSummary(name=name, df=pd.DataFrame(rows), simple_avg=True))
582-
return result
559+
return compute_span_annotation_summaries(annotations)
583560

584561
@strawberry.field(
585562
description="Evaluations of the documents associated with the span, e.g. "

0 commit comments

Comments
 (0)