# pylint: disable=line-too-long,useless-suppression
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
DESCRIPTION:
    Given an AIProjectClient, this sample demonstrates how to use the synchronous
    `.evaluations` methods to create, get and list evaluations.

USAGE:
    python sample_evaluations.py

    Before running the sample:

    pip install azure-ai-projects azure-identity python-dotenv

    Set these environment variables with your own values:
    1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
       Azure AI Foundry project.
    2) DATASET_NAME - Required. The name of the Dataset to create and use in this sample.
"""

import os
from azure.identity import DefaultAzureCredential
from azure.ai.projects.onedp import AIProjectClient
from azure.ai.projects.onedp.models import DatasetVersion, Evaluation, InputDataset, EvaluatorConfiguration, EvaluationMetrics
from dotenv import load_dotenv

load_dotenv()

endpoint = os.environ["PROJECT_ENDPOINT"]
dataset_name = os.environ["DATASET_NAME"]

with AIProjectClient(
    endpoint=endpoint,
    credential=DefaultAzureCredential(exclude_interactive_browser_credential=False),
) as project_client:

    # [START evaluations_sample]
    print(
        "Upload a single file and create a new Dataset to reference the file. Here we explicitly specify the dataset version."
    )
    # dataset: DatasetVersion = project_client.datasets.upload_file_and_create(
    #     name=dataset_name,
    #     version="1",
    #     file="./samples_folder/sample_data_evaluation.jsonl",
    # )
    # print(dataset)

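    # NOTE: The contents of "sample_data_evaluation.jsonl" are not shown in this sample. Purely as an
    # illustration (an assumed shape, not a documented schema), each line would be a JSON object
    # carrying the fields your chosen evaluators read, e.g. a query/response pair such as:
    # {"query": "<your question>", "response": "<your model's answer>"}
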
    print("Create an evaluation")
    # evaluation = Evaluation(
    #     display_name="Sample Evaluation",
    #     data=InputDataset(id="azureml://locations/centraluseuap/workspaces/abc/data/abc/versions/11"),
    #     evaluators={
    #         "relevance": EvaluatorConfiguration(
    #             id=f"aiservices:{EvaluationMetrics.Relevance.value}",
    #             # id="azureml://registries/azureml/models/Retrieval-Evaluator/versions/4",
    #             # either client or service (TBD) resolves to azureml://registries/azureml/models/Retrieval-Evaluator/versions/...
    #             init_params={
    #                 "deployment_name": "gpt-4o",
    #             },
    #         ),
    #         "hate_unfairness": EvaluatorConfiguration(
    #             # id=f"aiservices:{EvaluationMetrics.HateUnfairness.value}",
    #             id="azureml://registries/azureml/models/Retrieval-Evaluator/versions/4",
    #             # either client or service (TBD) resolves to azureml://registries/azureml/models/Hate-Unfairness-Evaluator/versions/...
    #             init_params={
    #                 "azure_ai_project": endpoint,
    #             },
    #         ),
    #     },
    # )
    #
    # evaluation_response = project_client.evaluations.create_run(evaluation)

    print("Get evaluation")
    # get_evaluation_response = project_client.evaluations.get(evaluation_response.id)
    # print(get_evaluation_response)

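    print("List evaluations")
    # A minimal sketch of the "list" step mentioned in the docstring above. The exact method name
    # and return shape are assumptions for this preview package; adjust to the actual
    # `.evaluations` API surface if it differs.
    # for evaluation_run in project_client.evaluations.list():
    #     print(evaluation_run)
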
    # [END evaluations_sample]