Skip to content

Commit d15c0cb

Browse files
authored
Users/singankit/evaluation 1dp (#40505)
* Adding sample for evaluation * Fixing spell check errors * Fixing pylint issues
1 parent 19bf7a1 commit d15c0cb

File tree

5 files changed

+114
-1
lines changed

5 files changed

+114
-1
lines changed

Diff for: .vscode/cspell.json

+7
Original file line numberDiff line numberDiff line change
@@ -1361,6 +1361,13 @@
13611361
"azureopenai"
13621362
]
13631363
},
1364+
{
1365+
"filename": "sdk/ai/azure-ai-projects-onedp/**",
1366+
"words": [
1367+
"aiservices",
1368+
"azureai",
1369+
]
1370+
},
13641371
{
13651372
"filename": "sdk/ai/azure-ai-inference/**",
13661373
"words": [

Diff for: sdk/ai/azure-ai-projects-onedp/azure/ai/projects/onedp/models/_patch.py

+4-1
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,11 @@
77
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
88
"""
99
from typing import List
10+
from ._patch_evaluations import EvaluationMetrics
1011

11-
__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
12+
__all__: List[str] = [
13+
"EvaluationMetrics",
14+
] # Add all objects you want publicly available to users at this package level
1215

1316

1417
def patch_sdk():
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
# pylint: disable=line-too-long,useless-suppression
2+
# ------------------------------------
3+
# Copyright (c) Microsoft Corporation.
4+
# Licensed under the MIT License.
5+
# ------------------------------------
6+
"""Customize generated code here.
7+
8+
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
9+
"""
10+
from enum import Enum
11+
12+
from azure.core import CaseInsensitiveEnumMeta
13+
14+
15+
class EvaluationMetrics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Well-known evaluation metric names used to reference built-in evaluators.

    Members are the metric identifiers accepted by the evaluations service
    (e.g. ``f"aiservices:{EvaluationMetrics.RELEVANCE.value}"``).  Because the
    class uses ``CaseInsensitiveEnumMeta``, member lookup by name or value is
    case-insensitive.
    """

    RELEVANCE = "relevance"
    HATE_UNFAIRNESS = "hate_unfairness"
    VIOLENCE = "violence"
    GROUNDEDNESS = "groundedness"
    GROUNDEDNESS_PRO = "groundedness_pro"
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
# pylint: disable=line-too-long,useless-suppression
2+
# ------------------------------------
3+
# Copyright (c) Microsoft Corporation.
4+
# Licensed under the MIT License.
5+
# ------------------------------------
6+
7+
"""
8+
DESCRIPTION:
9+
Given an AIProjectClient, this sample demonstrates how to use the synchronous
10+
`.evaluations` methods to create, get and list evaluations.
11+
12+
USAGE:
13+
python sample_evaluations.py
14+
15+
Before running the sample:
16+
17+
pip install azure-ai-projects azure-identity
18+
19+
Set these environment variables with your own values:
20+
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
21+
Azure AI Foundry project.
22+
2) DATASET_NAME - Required. The name of the Dataset to create and use in this sample.
23+
"""
24+
25+
import os
26+
from azure.identity import DefaultAzureCredential
27+
from azure.ai.projects.onedp import AIProjectClient
28+
from azure.ai.projects.onedp.models import Evaluation, InputDataset, EvaluatorConfiguration, EvaluationMetrics
29+
from dotenv import load_dotenv
30+
31+
load_dotenv()
32+
33+
endpoint = os.environ["PROJECT_ENDPOINT"]
34+
dataset_name = os.environ["DATASET_NAME"]
35+
36+
with AIProjectClient(
37+
endpoint=endpoint,
38+
credential=DefaultAzureCredential(exclude_interactive_browser_credential=False),
39+
) as project_client:
40+
41+
# [START evaluations_sample]
42+
print(
43+
"Upload a single file and create a new Dataset to reference the file. Here we explicitly specify the dataset version."
44+
)
45+
# dataset: DatasetVersion = project_client.datasets.upload_file_and_create(
46+
# name=dataset_name,
47+
# version="1",
48+
# file="./samples_folder/sample_data_evaluation.jsonl",
49+
# )
50+
# print(dataset)
51+
52+
print("Create an evaluation")
53+
# evaluation = Evaluation(
54+
# display_name="Sample Evaluation",
55+
# data=InputDataset(id="azureml://locations/centraluseuap/workspaces/abc/data/abc/versions/11"),
56+
# evaluators={
57+
# "relevance": EvaluatorConfiguration(
58+
# id=f"aiservices:{EvaluationMetrics.Relevance.value}",
59+
# # id="azureml://registries/azureml/models/Retrieval-Evaluator/versions/4",
60+
# # either client or service (TBD) resolves to azureml://registries/azureml/models/Retrieval-Evaluator/versions/...
61+
# init_params={
62+
# "deployment_name": "gpt-4o",
63+
# },
64+
# ),
65+
# "hate_unfairness": EvaluatorConfiguration(
66+
# # id=f"aiservices:{EvaluationMetrics.HateUnfairness.value}",
67+
# id="azureml://registries/azureml/models/Retrieval-Evaluator/versions/4",
68+
# # either client or service (TBD) resolves to azureml://registries/azureml/models/Hate-Unfairness-Evaluator/versions/...
69+
# init_params={
70+
# "azure_ai_project": endpoint,
71+
# },
72+
# ),
73+
# },
74+
# )
75+
#
76+
# evaluation_respone = project_client.evaluations.create_run(evaluation)
77+
78+
print("Get evaluation")
79+
# get_evaluation_response = project_client.evaluations.get(evaluation_respone.id)
80+
# print(get_evaluation_response)
81+
82+
# [END evaluations_sample]
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
{"query": "What is capital of France?", "context": "France is in Europe", "response": "Paris is the capital of France.", "ground_truth": "Paris is the capital of France."}

0 commit comments

Comments
 (0)