diff --git a/.vscode/cspell.json b/.vscode/cspell.json
index 404795164d2e..dc0c2667ff7e 100644
--- a/.vscode/cspell.json
+++ b/.vscode/cspell.json
@@ -39,6 +39,8 @@
         "sdk/ai/azure-ai-projects/samples/agents/tripadvisor_openapi.json",
         "/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/data/**",
         "/sdk/ai/azure-ai-projects/samples/evaluations/data/**",
+        "sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv",
+        "sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json",
         "sdk/ai/azure-ai-resources/azure/ai/resources/_index/_langchain/vendor/**",
         "sdk/ai/azure-ai-resources/azure/ai/resources/_restclient/**",
         "sdk/cognitiveservices/azure-cognitiveservices-search-autosuggest/**",
@@ -1360,6 +1362,14 @@
             "azureopenai"
         ]
     },
+    {
+        "filename": "sdk/ai/azure-ai-assistants/**",
+        "words": [
+            "GENAI",
+            "fspath",
+            "wttr"
+        ]
+    },
     {
         "filename": "sdk/ai/azure-ai-inference/**",
         "words": [
diff --git a/eng/.docsettings.yml b/eng/.docsettings.yml
index 679c12c2d4a8..b783fb5e37ed 100644
--- a/eng/.docsettings.yml
+++ b/eng/.docsettings.yml
@@ -16,6 +16,7 @@ omitted_paths:
   - sdk/vision/azure-ai-vision-imageanalysis/tests/*
   - sdk/ai/azure-ai-inference/tests/*
   - sdk/ai/azure-ai-projects/tests/*
+  - sdk/ai/azure-ai-assistants/tests/*
   - sdk/storage/azure-storage-extensions/*
 
 language: python
diff --git a/pylintrc b/pylintrc
index e58b01fd5c1b..2621c1bd1041 100644
--- a/pylintrc
+++ b/pylintrc
@@ -8,6 +8,7 @@ ignore-paths=
     azure\\mixedreality\\remoterendering\\_api_version.py,
    azure/mixedreality/remoterendering/_api_version.py,
     (?:.*[/\\]|^)projects/(models/_models.py|_model_base.py|operations/_operations.py|aio/operations/_operations.py)$,
+    (?:.*[/\\]|^)assistants/(_models.py|_model_base.py|_operations/_operations.py|aio/_operations/_operations.py)$,
     # Exclude any path that contains the following directory names
     (?:.*[/\\]|^)(?:_vendor|_generated|_restclient|samples|examples|test|tests|doc|\.tox)(?:[/\\]|$)
diff --git a/sdk/ai/azure-ai-assistants/CHANGELOG.md b/sdk/ai/azure-ai-assistants/CHANGELOG.md
new file mode 100644
index 000000000000..40bc6bf7aa1c
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/CHANGELOG.md
@@ -0,0 +1,7 @@
+# Release History
+
+## 1.0.0b1 (Unreleased)
+
+### Features Added
+
+- Initial version
diff --git a/sdk/ai/azure-ai-assistants/LICENSE b/sdk/ai/azure-ai-assistants/LICENSE
new file mode 100644
index 000000000000..63447fd8bbbf
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) Microsoft Corporation.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-assistants/MANIFEST.in b/sdk/ai/azure-ai-assistants/MANIFEST.in
new file mode 100644
index 000000000000..c50d503e6ce9
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/MANIFEST.in
@@ -0,0 +1,7 @@
+include *.md
+include LICENSE
+include azure/ai/assistants/py.typed
+recursive-include tests *.py
+recursive-include samples *.py *.md
+include azure/__init__.py
+include azure/ai/__init__.py
diff --git a/sdk/ai/azure-ai-assistants/README.md b/sdk/ai/azure-ai-assistants/README.md
new file mode 100644
index 000000000000..69c1c39f58c4
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/README.md
@@ -0,0 +1,1211 @@
+
+# Azure AI Assistants client library for Python
+
+Use the AI Assistants client library (in preview) to:
+
+* **Enumerate connections** in your Azure AI Foundry project and get connection properties. For example, get the inference endpoint URL and credentials associated with your Azure OpenAI connection.
+* **Develop Assistants using the Azure AI Assistants Service**, leveraging an extensive ecosystem of models, tools, and capabilities from OpenAI, Microsoft, and other LLM providers. The Azure AI Assistants Service enables the building of Assistants for a wide range of generative AI use cases. The package is currently in preview.
+* **Enable OpenTelemetry tracing**.
+
+[Product documentation](https://aka.ms/azsdk/azure-ai-projects/product-doc)
+| [Samples][samples]
+| [API reference documentation](https://aka.ms/azsdk/azure-ai-projects/python/reference)
+| [Package (PyPI)](https://aka.ms/azsdk/azure-ai-projects/python/package)
+| [SDK source code](https://aka.ms/azsdk/azure-ai-projects/python/code)
+| [AI Starter Template](https://aka.ms/azsdk/azure-ai-projects/python/ai-starter-template)
+
+## Reporting issues
+
+To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-assistants" in the title or content.
+
+## Table of contents
+
+- [Getting started](#getting-started)
+  - [Prerequisite](#prerequisite)
+  - [Install the package](#install-the-package)
+- [Key concepts](#key-concepts)
+  - [Create and authenticate the client](#create-and-authenticate-the-client)
+- [Examples](#examples)
+  - [Create an Assistant](#create-assistant) with:
+    - [File Search](#create-assistant-with-file-search)
+    - [Enterprise File Search](#create-assistant-with-enterprise-file-search)
+    - [Code interpreter](#create-assistant-with-code-interpreter)
+    - [Bing grounding](#create-assistant-with-bing-grounding)
+    - [Azure AI Search](#create-assistant-with-azure-ai-search)
+    - [Function call](#create-assistant-with-function-call)
+    - [Azure Function Call](#create-assistant-with-azure-function-call)
+    - [OpenAPI](#create-assistant-with-openapi)
+    - [Fabric data](#create-an-assistant-with-fabric)
+  - [Create thread](#create-thread) with
+    - [Tool resource](#create-thread-with-tool-resource)
+  - [Create message](#create-message) with:
+    - [File search attachment](#create-message-with-file-search-attachment)
+    - [Code interpreter attachment](#create-message-with-code-interpreter-attachment)
+  - [Execute Run, Run_and_Process, or Stream](#create-run-run_and_process-or-stream)
+  - [Retrieve message](#retrieve-message)
+  - [Retrieve file](#retrieve-file)
+  - [Tear down by deleting resource](#teardown)
+  - [Tracing](#tracing)
+    - [Installation](#installation)
+    - [How to enable tracing](#how-to-enable-tracing)
+    - [How to trace your own functions](#how-to-trace-your-own-functions)
+- [Troubleshooting](#troubleshooting)
+  - [Logging](#logging)
+  - [Reporting issues](#reporting-issues)
+- [Next steps](#next-steps)
+- [Contributing](#contributing)
+
+## Getting started
+
+### Prerequisite
+
+- Python 3.9 or later.
+- An [Azure subscription][azure_sub].
+- A [project in Azure AI Foundry](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects).
+- The project endpoint. It can be found on your Azure AI Foundry project overview page, under "Project details". Below we will assume the environment variable `PROJECT_ENDPOINT` was defined to hold this value.
+- Entra ID is needed to authenticate the client. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need:
+  * An appropriate role assignment. See [Role-based access control in Azure AI Foundry portal](https://learn.microsoft.com/azure/ai-foundry/concepts/rbac-ai-foundry). Role assignment can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal.
+  * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed.
+  * You are logged into your Azure account by running `az login`.
+  * Note that if you have multiple Azure subscriptions, the subscription that contains your Azure AI Project resource must be your default subscription. Run `az account list --output table` to list all your subscriptions and see which one is the default. Run `az account set --subscription "Your Subscription ID or Name"` to change your default subscription.
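+
+As a quick reference, here is a hedged sketch of constructing the client with `DefaultAzureCredential` as described above. It assumes the `PROJECT_ENDPOINT` environment variable is set and that the package from the next section is installed; the same construction is used by the Logic Apps example later in this document:
+
+```python
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+
+# Entra ID authentication; DefaultAzureCredential picks up your `az login` session.
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+```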
+
+### Install the package
+
+```bash
+pip install azure-ai-assistants
+```
+
+## Key concepts
+
+### Create and authenticate the client
+
+To construct a synchronous client:
+
+```python
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.core.credentials import AzureKeyCredential
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=AzureKeyCredential(os.environ["API_KEY"]),
+)
+```
+
+To construct an asynchronous client, install the additional package [aiohttp](https://pypi.org/project/aiohttp/):
+
+```bash
+pip install aiohttp
+```
+
+and update the code above to import `asyncio`, and import `AssistantsClient` from the `azure.ai.assistants.aio` namespace:
+
+```python
+import os
+import asyncio
+from azure.ai.assistants.aio import AssistantsClient
+from azure.core.credentials import AzureKeyCredential
+
+assistant_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=AzureKeyCredential(os.environ["API_KEY"]),
+)
+```
+
+## Examples
+
+### Create Assistant
+
+Before creating an Assistant, you need to set up Azure resources to deploy your model. The [Create a New Assistant Quickstart](https://learn.microsoft.com/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure) details selecting and deploying a model for your Assistant.
+
+Here is an example of how to create an Assistant:
+
+```python
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="You are a helpful assistant",
+)
+```
+
+To allow Assistants to access your resources or custom functions, you need tools. You can pass tools to `create_assistant` via either the `toolset` parameter or a combination of `tools` and `tool_resources`.
+
+Here is an example of `toolset`:
+
+```python
+functions = FunctionTool(user_functions)
+code_interpreter = CodeInterpreterTool()
+
+toolset = ToolSet()
+toolset.add(functions)
+toolset.add(code_interpreter)
+
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="You are a helpful assistant",
+    toolset=toolset,
+)
+```
+
+Also note that if you use the asynchronous client, use `AsyncToolSet` instead. Additional information related to `AsyncFunctionTool` is discussed in later sections.
+
+Here is an example using `tools` and `tool_resources`:
+
+```python
+file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+# Note that FileSearchTool must be provided as both `tools` and `tool_resources`, or the assistant will be unable to search the file
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="You are a helpful assistant",
+    tools=file_search_tool.definitions,
+    tool_resources=file_search_tool.resources,
+)
+```
+
+In the following sections, we show sample code using either `toolset` or a combination of `tools` and `tool_resources`.
+
+### Create Assistant with File Search
+
+To perform file search with an Assistant, we first need to upload a file, create a vector store, and associate the file with the vector store.
+Here is an example:
+
+```python
+file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants")
+print(f"Uploaded file, file ID: {file.id}")
+
+vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore")
+print(f"Created vector store, vector store ID: {vector_store.id}")
+
+# Create file search tool with resources followed by creating assistant
+file_search = FileSearchTool(vector_store_ids=[vector_store.id])
+
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="Hello, you are a helpful assistant and can search information from uploaded files",
+    tools=file_search.definitions,
+    tool_resources=file_search.resources,
+)
+```
+
+### Create Assistant with Enterprise File Search
+
+We can upload a file to Azure as shown in the example above, or use an existing Azure blob storage. In the code below, we demonstrate how this can be achieved. First we upload a file to Azure and create a `VectorStoreDataSource`, which is then used to create a vector store. This vector store is then given to the `FileSearchTool` constructor.
+
+```python
+# We will upload the local file to Azure and will use it for vector store creation.
+asset_uri = os.environ["AZURE_BLOB_URI"]
+
+# Create a vector store from the data source and wait for it to be processed
+ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+vector_store = assistants_client.create_vector_store_and_poll(data_sources=[ds], name="sample_vector_store")
+print(f"Created vector store, vector store ID: {vector_store.id}")
+
+# Create a file search tool
+file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+# Note that FileSearchTool must be provided as both `tools` and `tool_resources`, or the assistant will be unable to search the file
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="You are a helpful assistant",
+    tools=file_search_tool.definitions,
+    tool_resources=file_search_tool.resources,
+)
+```
+
+We can also attach files to an existing vector store. In the code snippet below, we first create an empty vector store and then add a file to it.
+
+```python
+# Create a vector store with no file and wait for it to be processed
+vector_store = assistants_client.create_vector_store_and_poll(data_sources=[], name="sample_vector_store")
+print(f"Created vector store, vector store ID: {vector_store.id}")
+
+ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+# Add the file to the vector store, or you can supply data sources in the vector store creation
+vector_store_file_batch = assistants_client.create_vector_store_file_batch_and_poll(
+    vector_store_id=vector_store.id, data_sources=[ds]
+)
+print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
+
+# Create a file search tool
+file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+```
+
+### Create Assistant with Code Interpreter
+
+Here is an example of uploading a file and using it with the code interpreter tool:
+
+```python
+file = assistants_client.upload_file_and_poll(
+    file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS
+)
+print(f"Uploaded file, file ID: {file.id}")
+
+code_interpreter = CodeInterpreterTool(file_ids=[file.id])
+
+# Create assistant with code interpreter tool and tool_resources
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="You are a helpful assistant",
+    tools=code_interpreter.definitions,
+    tool_resources=code_interpreter.resources,
+)
+```
+
+### Create Assistant with Bing Grounding
+
+To enable your Assistant to perform searches through the Bing search API, use `BingGroundingTool` along with a connection.
+
+Here is an example:
+
+```python
+conn_id = os.environ["AZURE_BING_CONNECTION_ID"]
+
+print(conn_id)
+
+# Initialize assistant bing tool and add the connection id
+bing = BingGroundingTool(connection_id=conn_id)
+
+# Create assistant with the bing tool and process assistant run
+with assistants_client:
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=bing.definitions,
+        headers={"x-ms-enable-preview": "true"},
+    )
+```
+
+### Create Assistant with Azure AI Search
+
+Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. It is ideal for knowledge base insights, information discovery, and automation. Creating an Assistant with Azure AI Search requires an existing Azure AI Search index. For more information and setup guides, see the [Azure AI Search Tool Guide](https://learn.microsoft.com/azure/ai-services/agents/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search).
+
+Here is an example of integrating Azure AI Search:
+
+```python
+conn_id = os.environ["AI_AZURE_AI_CONNECTION_ID"]
+
+print(conn_id)
+
+# Initialize assistant AI search tool and add the search index connection id
+ai_search = AzureAISearchTool(
+    index_connection_id=conn_id, index_name="sample_index", query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter=""
+)
+
+# Create assistant with AI search tool and process assistant run
+with assistants_client:
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=ai_search.definitions,
+        tool_resources=ai_search.resources,
+    )
+```
+
+If the assistant finds relevant information in the index, the reference and annotation will be provided in the message response. In the example below, we replace the reference placeholder with the actual reference and URL. Note that to get sensible results, the index needs to have "embedding", "token", "category" and "title" fields.
+
+```python
+# Fetch and log all messages
+messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING)
+for message in messages.data:
+    if message.role == MessageRole.ASSISTANT and message.url_citation_annotations:
+        placeholder_annotations = {
+            annotation.text: f" [see {annotation.url_citation.title}] ({annotation.url_citation.url})"
+            for annotation in message.url_citation_annotations
+        }
+        for message_text in message.text_messages:
+            message_str = message_text.text.value
+            for k, v in placeholder_annotations.items():
+                message_str = message_str.replace(k, v)
+            print(f"{message.role}: {message_str}")
+    else:
+        for message_text in message.text_messages:
+            print(f"{message.role}: {message_text.text.value}")
+```
+
+### Create Assistant with Function Call
+
+You can enhance your Assistants by defining callback functions as function tools. These can be provided to `create_assistant` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions:
+
+- `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within `create_and_process_run` or streaming. These functions will be invoked based on their definitions.
+- `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_assistant`, without the implementations. The `Run` or the stream's event handler will surface a `requires_action` status based on the function definitions. Your code must handle this status and call the appropriate functions.
+
+For more details about calling functions from code, refer to [`sample_assistants_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py).
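+
+To make the `tools`/`tool_resources` path concrete, here is a minimal sketch of the manual tool-call loop. It assumes `thread` and `assistant` already exist, that `functions` is a `FunctionTool` like the one in the next example and the assistant was created with `tools=functions.definitions`, and that the model names `SubmitToolOutputsAction`, `RequiredFunctionToolCall`, and `ToolOutput` and the `submit_tool_outputs_to_run` method follow the linked samples; treat it as a sketch rather than a definitive implementation:
+
+```python
+import time
+from azure.ai.assistants.models import (  # assumed import path, mirroring the linked samples
+    RequiredFunctionToolCall,
+    SubmitToolOutputsAction,
+    ToolOutput,
+)
+
+run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+while run.status in ["queued", "in_progress", "requires_action"]:
+    time.sleep(1)
+    run = assistants_client.get_run(thread_id=thread.id, run_id=run.id)
+
+    if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+        tool_outputs = []
+        for tool_call in run.required_action.submit_tool_outputs.tool_calls:
+            if isinstance(tool_call, RequiredFunctionToolCall):
+                # Execute the matching user function and capture its output
+                output = functions.execute(tool_call)
+                tool_outputs.append(ToolOutput(tool_call_id=tool_call.id, output=output))
+        if not tool_outputs:
+            break  # nothing to submit; avoid looping forever
+        # Hand the outputs back so the run can continue
+        run = assistants_client.submit_tool_outputs_to_run(
+            thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+        )
+```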
+
+For more details about requirements and specification of functions, refer to [Function Tool Specifications](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/FunctionTool.md)
+
+Here is an example of using [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/user_functions.py) in `toolset`:
+
+```python
+functions = FunctionTool(user_functions)
+toolset = ToolSet()
+toolset.add(functions)
+
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="You are a helpful assistant",
+    toolset=toolset,
+)
+```
+
+For asynchronous functions, you must import `AssistantsClient` from `azure.ai.assistants.aio` and use `AsyncFunctionTool`. Here is an example using [asynchronous user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py):
+
+```python
+from azure.ai.assistants.aio import AssistantsClient
+```
+
+```python
+functions = AsyncFunctionTool(user_async_functions)
+
+toolset = AsyncToolSet()
+toolset.add(functions)
+
+assistant = await assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="You are a helpful assistant",
+    toolset=toolset,
+)
+```
+
+### Create Assistant With Azure Function Call
+
+The AI assistant leverages Azure Functions triggered asynchronously via Azure Storage Queues. To enable the assistant to perform Azure Function calls, you must set up the corresponding `AzureFunctionTool`, specifying input and output queues as well as parameter definitions.
+
+Here is an example Python snippet illustrating how you create an assistant utilizing the Azure Function Tool:
+
+```python
+azure_function_tool = AzureFunctionTool(
+    name="foo",
+    description="Get answers from the foo bot.",
+    parameters={
+        "type": "object",
+        "properties": {
+            "query": {"type": "string", "description": "The question to ask."},
+            "outputqueueuri": {"type": "string", "description": "The full output queue uri."},
+        },
+    },
+    input_queue=AzureFunctionStorageQueue(
+        queue_name="azure-function-foo-input",
+        storage_service_endpoint=storage_service_endpoint,
+    ),
+    output_queue=AzureFunctionStorageQueue(
+        queue_name="azure-function-tool-output",
+        storage_service_endpoint=storage_service_endpoint,
+    ),
+)
+
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="azure-function-assistant-foo",
+    instructions=f"You are a helpful support assistant. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. Always respond with \"Foo says\" and then the response from the tool.",
+    tools=azure_function_tool.definitions,
+)
+print(f"Created assistant, assistant ID: {assistant.id}")
+```
+
+---
+
+**Limitations**
+
+Currently, the Azure Function integration for the AI Assistant has the following limitations:
+
+- Azure Functions integration is available **only for non-streaming scenarios**.
+- The supported trigger for Azure Functions is currently limited to **Queue triggers** only. HTTP or other trigger types and streaming responses are not supported at this time.
+
+---
+
+**Create and Deploy Azure Function**
+
+Before you can use the assistant with `AzureFunctionTool`, you need to create and deploy an Azure Function.
+
+Below is an example Python Azure Function responding to queue-triggered messages and placing responses on the output queue:
+
+```python
+import azure.functions as func
+import logging
+import json
+
+app = func.FunctionApp()
+
+@app.queue_trigger(arg_name="inputQueue",
+                   queue_name="input",
+                   connection="AzureWebJobsStorage")
+@app.queue_output(arg_name="outputQueue",
+                  queue_name="output",
+                  connection="AzureWebJobsStorage")
+def get_weather(inputQueue: func.QueueMessage, outputQueue: func.Out[str]):
+    try:
+        messagepayload = json.loads(inputQueue.get_body().decode("utf-8"))
+        location = messagepayload["location"]
+        weather_result = f"Weather is 82 degrees and sunny in {location}."
+
+        response_message = {
+            "Value": weather_result,
+            "CorrelationId": messagepayload["CorrelationId"]
+        }
+
+        outputQueue.set(json.dumps(response_message))
+
+        logging.info(f"Sent message to output queue with message {response_message}")
+    except Exception as e:
+        logging.error(f"Error processing message: {e}")
+        return
+```
+
+> **Important:** Both input and output payloads must contain the `CorrelationId`, which must match in request and response.
+
+---
+
+**Azure Function Project Creation and Deployment**
+
+To deploy your function to Azure properly, follow Microsoft's official documentation step by step:
+
+[Azure Functions Python Developer Guide](https://learn.microsoft.com/azure/azure-functions/create-first-function-cli-python?tabs=windows%2Cbash%2Cazure-cli%2Cbrowser)
+
+**Summary of required steps:**
+
+- Use the Azure CLI or Azure Portal to create an Azure Function App.
+- Enable System Managed Identity for your Azure Function App.
+- Assign appropriate permissions to your Azure Function App identity, as outlined in the Role Assignments section below.
+- Create input and output queues in Azure Storage.
+- Deploy your Function code.
+
+---
+
+**Verification and Testing Azure Function**
+
+To ensure that your Azure Function deployment functions correctly:
+
+1. Place a message like the following manually into the input queue (`input`):
+
+```json
+{
+  "location": "Seattle",
+  "CorrelationId": "42"
+}
+```
+
+2. Check the output queue (`output`) and validate the structured message response:
+
+```json
+{
+  "Value": "Weather is 82 degrees and sunny in Seattle.",
+  "CorrelationId": "42"
+}
+```
+
+---
+
+**Required Role Assignments (IAM Configuration)**
+
+Assign the following Azure IAM roles to ensure correct permissions:
+
+1. **Azure Function App's identity:**
+   - Enable system managed identity through Azure Function App > Settings > Identity.
+   - Add permission to the storage account:
+     - Go to **Storage Account > Access control (IAM)** and add a role assignment:
+       - `Storage Queue Data Contributor` assigned to the Azure Function managed identity
+
+2. **Azure AI Project Identity:**
+
+Ensure your Azure AI Project identity has the following storage account permissions:
+- `Storage Account Contributor`
+- `Storage Blob Data Contributor`
+- `Storage File Data Privileged Contributor`
+- `Storage Queue Data Contributor`
+- `Storage Table Data Contributor`
+
+---
+
+**Additional Important Configuration Notes**
+
+- The Azure Function configured above uses the `AzureWebJobsStorage` connection string for queue connectivity. You may alternatively use managed identity-based connections as described in the official Azure Functions Managed Identity documentation.
+- Storage queues you specify (`input` & `output`) should already exist in the storage account before the Function deployment or invocation, created manually via the Azure portal or CLI.
+- When using Azure storage account connection strings, make sure the account has storage account key access enabled (`Storage Account > Settings > Configuration`).
+
+---
+
+With the above steps complete, your Azure Function integration with your AI Assistant is ready for use.
+
+### Create Assistant With Logic Apps
+
+Logic Apps allow HTTP requests to trigger actions. For more information, refer to the guide [Logic App Workflows for Function Calling](https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling).
+
+Your Logic App must be in the same resource group as your Azure AI Project, as shown in the Azure portal. The Assistants SDK accesses Logic Apps through Workflow URLs, which are fetched and called as requests in functions.
+
+Below is an example of how to create an Azure Logic App utility tool and register a function with it.
+
+```python
+
+# Create the assistants client
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+# Extract subscription and resource group from the project scope
+subscription_id = os.environ["SUBSCRIPTION_ID"]
+resource_group = os.environ["resource_group_name"]
+
+# Logic App details
+logic_app_name = ""
+trigger_name = ""
+
+# Create and initialize AzureLogicAppTool utility
+logic_app_tool = AzureLogicAppTool(subscription_id, resource_group)
+logic_app_tool.register_logic_app(logic_app_name, trigger_name)
+print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.")
+
+# Create the specialized "send_email_via_logic_app" function for your assistant tools
+send_email_func = create_send_email_function(logic_app_tool, logic_app_name)
+
+# Prepare the function tools for the assistant
+functions_to_use: Set = {
+    fetch_current_datetime,
+    send_email_func,  # This references the AzureLogicAppTool instance via closure
+}
+```
+
+After this, the functions can be incorporated into code as usual using `FunctionTool`.
+
+### Create Assistant With OpenAPI
+
+OpenAPI specifications describe REST operations against a specific endpoint. The Assistants SDK can read an OpenAPI spec, create a function from it, and call that function against the REST endpoint without additional client-side execution.
+
+Here is an example creating an OpenAPI tool (using anonymous authentication):
+
+```python
+
+with open("./weather_openapi.json", "r") as f:
+    openapi_weather = jsonref.loads(f.read())
+
+with open("./countries.json", "r") as f:
+    openapi_countries = jsonref.loads(f.read())
+
+# Create Auth object for the OpenApiTool (note that connection or managed identity auth requires additional setup in Azure)
+auth = OpenApiAnonymousAuthDetails()
+
+# Initialize assistant OpenApi tool using the OpenAPI specs read above
+openapi_tool = OpenApiTool(
+    name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth
+)
+openapi_tool.add_definition(
+    name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth
+)
+
+# Create assistant with OpenApi tool and process assistant run
+with assistants_client:
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=openapi_tool.definitions,
+    )
+```
+
+### Create an Assistant with Fabric
+
+To enable your Assistant to answer queries using Fabric data, use `FabricTool` along with a connection to the Fabric resource.
+
+Here is an example:
+
+```python
+conn_id = os.environ["FABRIC_CONNECTION_ID"]
+
+print(conn_id)
+
+# Initialize an Assistant Fabric tool and add the connection id
+fabric = FabricTool(connection_id=conn_id)
+
+# Create an Assistant with the Fabric tool and process an Assistant run
+with assistants_client:
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=fabric.definitions,
+        headers={"x-ms-enable-preview": "true"},
+    )
+```
+
+### Create Thread
+
+For each session or conversation, a thread is required. Here is an example:
+
+```python
+thread = assistants_client.create_thread()
+```
+
+### Create Thread with Tool Resource
+
+In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you upload a file and create a vector store, enable an Assistant for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument.
+
+```python
+file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants")
+print(f"Uploaded file, file ID: {file.id}")
+
+vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore")
+print(f"Created vector store, vector store ID: {vector_store.id}")
+
+# Create file search tool with resources followed by creating assistant
+file_search = FileSearchTool(vector_store_ids=[vector_store.id])
+
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="Hello, you are a helpful assistant and can search information from uploaded files",
+    tools=file_search.definitions,
+)
+
+print(f"Created assistant, ID: {assistant.id}")
+
+# Create thread with file resources.
+# If the assistant has multiple threads, only this thread can search this file.
+thread = assistants_client.create_thread(tool_resources=file_search.resources)
+```
+
+### Create Message
+
+To create a message for the assistant to process, pass `user` as `role` and a question as `content`:
+
+```python
+message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+```
+
+### Create Message with File Search Attachment
+
+To attach a file to a message for content searching, use `MessageAttachment` and `FileSearchTool`:
+
+```python
+attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
+message = assistants_client.create_message(
+    thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]
+)
+```
+
+### Create Message with Code Interpreter Attachment
+
+To attach a file to a message for data analysis, use the `MessageAttachment` and `CodeInterpreterTool` classes. You must pass `CodeInterpreterTool` as `tools` or `toolset` in the `create_assistant` call, or the file attachment cannot be opened for the code interpreter.
+
+Here is an example of passing `CodeInterpreterTool` as a tool:
+
+```python
+# Notice that CodeInterpreter must be enabled in the assistant creation,
+# otherwise the assistant will not be able to see the file attachment for code interpretation
+assistant = assistants_client.create_assistant(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-assistant",
+    instructions="You are a helpful assistant",
+    tools=CodeInterpreterTool().definitions,
+)
+print(f"Created assistant, assistant ID: {assistant.id}")
+
+thread = assistants_client.create_thread()
+print(f"Created thread, thread ID: {thread.id}")
+
+# Create an attachment
+attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions)
+
+# Create a message
+message = assistants_client.create_message(
+    thread_id=thread.id,
+    role="user",
+    content="Could you please create a bar chart in the TRANSPORTATION sector for the operating profit from the uploaded csv file and provide the file to me?",
+    attachments=[attachment],
+)
+```
+
+Azure blob storage can be used as a message attachment. In this case, use `VectorStoreDataSource` as a data source:
+
+```python
+# We will upload the local file to Azure and will use it for vector store creation.
+asset_uri = os.environ["AZURE_BLOB_URI"]
+ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+
+# Create a message with the attachment
+attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions)
+message = assistants_client.create_message(
+    thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
+)
+```
+
+### Create Run, Run_and_Process, or Stream
+
+To process your message, you can use `create_run`, `create_and_process_run`, or `create_stream`.
+
+`create_run` requests the Assistant to process the message without polling for the result. If you are using function tools, regardless of whether they are provided as a `toolset` or not, your code is responsible for polling for the result and checking the status of the `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py).
+
+Here is an example of `create_run` with polling until the run completes:
+
+```python
+run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+
+# Poll the run as long as run status is queued or in progress
+while run.status in ["queued", "in_progress", "requires_action"]:
+    # Wait for a second
+    time.sleep(1)
+    run = assistants_client.get_run(thread_id=thread.id, run_id=run.id)
+```
+
+To have the SDK poll on your behalf and call function tools, use the `create_and_process_run` method. Note that function tools will only be invoked if they are provided as `toolset` during the `create_assistant` call.
+
+Here is an example:
+
+```python
+run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+```
+
+With streaming, polling need not be considered. If function tools are provided as `toolset` during the `create_assistant` call, they will be invoked by the SDK.
+
+Here is an example of streaming:
+
+```python
+with assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream:
+
+    for event_type, event_data, _ in stream:
+
+        if isinstance(event_data, MessageDeltaChunk):
+            print(f"Text delta received: {event_data.text}")
+
+        elif isinstance(event_data, ThreadMessage):
+            print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
+
+        elif isinstance(event_data, ThreadRun):
+            print(f"ThreadRun status: {event_data.status}")
+
+        elif isinstance(event_data, RunStep):
+            print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
+
+        elif event_type == AssistantStreamEvent.ERROR:
+            print(f"An error occurred. Data: {event_data}")
+
+        elif event_type == AssistantStreamEvent.DONE:
+            print("Stream completed.")
+            break
+
+        else:
+            print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+```
+
+In the code above, because an `event_handler` object is not passed to the `create_stream` function, the SDK will instantiate `AssistantEventHandler` or `AsyncAssistantEventHandler` as the default event handler and produce an iterable object with `event_type` and `event_data`. `AssistantEventHandler` and `AsyncAssistantEventHandler` are overridable. Here is an example:
+
+```python
+# With AssistantEventHandler[str], the return type of each event function is an optional string.
+class MyEventHandler(AssistantEventHandler[str]):
+
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]:
+        return f"Text delta received: {delta.text}"
+
+    def on_thread_message(self, message: "ThreadMessage") -> Optional[str]:
+        return f"ThreadMessage created. ID: {message.id}, Status: {message.status}"
+
+    def on_thread_run(self, run: "ThreadRun") -> Optional[str]:
+        return f"ThreadRun status: {run.status}"
+
+    def on_run_step(self, step: "RunStep") -> Optional[str]:
+        return f"RunStep type: {step.type}, Status: {step.status}"
+
+    def on_error(self, data: str) -> Optional[str]:
+        return f"An error occurred. Data: {data}"
+
+    def on_done(self) -> Optional[str]:
+        return "Stream completed."
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]:
+        return f"Unhandled Event Type: {event_type}, Data: {event_data}"
+```
+
+```python
+with assistants_client.create_stream(
+    thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler()
+) as stream:
+    for event_type, event_data, func_return in stream:
+        print("Received data.")
+        print(f"Streaming receive Event Type: {event_type}")
+        print(f"Event Data: {str(event_data)[:100]}...")
+        print(f"Event Function return: {func_return}\n")
+```
+
+As you can see, this SDK parses the events and produces various event types similar to OpenAI assistants. In your use case, you might not be interested in handling all these types and may decide to parse the events on your own. To achieve this, please refer to [override base event handler](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_with_base_override_eventhandler.py).
+
+> **Note:** Multiple streaming processes may be chained behind the scenes.
+>
+> When the SDK receives a `ThreadRun` event with the status `requires_action`, the next event will be `Done`, followed by termination. The SDK will submit the tool calls using the same event handler. The event handler will then chain the main stream with the tool stream.
+>
+> Consequently, when you iterate over the streaming using a for loop similar to the example above, the for loop will receive events from the main stream followed by events from the tool stream.
+
+### Retrieve Message
+
+To retrieve messages from assistants, use the following example:
+
+```python
+messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING)
+
+# The messages are listed in ascending order here;
+# we will iterate over them and output only the text contents.
+for data_point in messages.data:
+    last_message_content = data_point.content[-1]
+    if isinstance(last_message_content, MessageTextContent):
+        print(f"{data_point.role}: {last_message_content.text.value}")
+```
+
+In addition, `messages` and `messages.data[]` offer helper properties such as `text_messages`, `image_contents`, `file_citation_annotations`, and `file_path_annotations` to quickly retrieve content from one message or all messages.
+
+### Retrieve File
+
+Files uploaded by Assistants cannot be retrieved back. If your use case needs to access the file content uploaded by the Assistants, you are advised to keep an additional copy accessible to your application. However, files generated by Assistants are retrievable via `save_file` or `get_file_content`.
+
+Here is an example of retrieving file IDs from messages and saving the files to the local drive:
+
+```python
+messages = assistants_client.list_messages(thread_id=thread.id)
+print(f"Messages: {messages}")
+
+for image_content in messages.image_contents:
+    file_id = image_content.image_file.file_id
+    print(f"Image File ID: {file_id}")
+    file_name = f"{file_id}_image_file.png"
+    assistants_client.save_file(file_id=file_id, file_name=file_name)
+    print(f"Saved image file to: {Path.cwd() / file_name}")
+
+for file_path_annotation in messages.file_path_annotations:
+    print("File Paths:")
+    print(f"Type: {file_path_annotation.type}")
+    print(f"Text: {file_path_annotation.text}")
+    print(f"File ID: {file_path_annotation.file_path.file_id}")
+    print(f"Start Index: {file_path_annotation.start_index}")
+    print(f"End Index: {file_path_annotation.end_index}")
+```
+
+Here is an example of using `get_file_content`:
+
+```python
+from pathlib import Path
+from typing import Optional, Union
+
+async def save_file_content(client, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None):
+    # Determine the target directory
+    path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd()
+    path.mkdir(parents=True, exist_ok=True)
+
+    # Retrieve the file content
+    file_content_stream = await client.get_file_content(file_id)
+    if not file_content_stream:
+        raise RuntimeError(f"No content retrievable for file ID '{file_id}'.")
+
+    # Collect all chunks asynchronously
+    chunks = []
+    async for chunk in file_content_stream:
+        if isinstance(chunk, (bytes, bytearray)):
+            chunks.append(chunk)
+        else:
+            raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}")
+
+    target_file_path = path / file_name
+
+    # Write the collected content to the file synchronously
+    with open(target_file_path, "wb") as file:
+        for chunk in chunks:
+            file.write(chunk)
+```
+
+### Teardown
+
+To remove resources after completing tasks, use the following functions:
+
+```python
+# Delete the vector store when done
+assistants_client.delete_vector_store(vector_store.id)
+print("Deleted vector store")
+
+# Delete the file when done
+assistants_client.delete_file(file_id=file.id)
+print("Deleted file")
+
+# Delete the assistant when done
+assistants_client.delete_assistant(assistant.id)
+print("Deleted assistant")
+```
+
+## Tracing
+
+You can add an Application Insights Azure resource to your Azure AI Foundry project. See the Tracing tab in your AI Foundry project. If one is enabled, you can get the Application Insights connection string, configure your Assistants, and observe the full execution path through Azure Monitor. Typically, you might want to start tracing before you create an Assistant.
+
+### Installation
+
+Make sure to install OpenTelemetry and the Azure SDK tracing plugin via
+
+```bash
+pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry
+```
+
+You will also need an exporter to send telemetry to your observability backend. You can print traces to the console or use a local viewer such as [Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash).
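+
+As a quick way to print traces to the console with plain OpenTelemetry, here is a sketch using standard `opentelemetry-sdk` APIs, independent of this package (the `enable_telemetry` helper shown later offers a one-line alternative):
+
+```python
+from opentelemetry import trace
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
+
+# Route all spans to stdout; useful for quick local debugging.
+provider = TracerProvider()
+provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
+trace.set_tracer_provider(provider)
+```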
+
+To connect to Aspire Dashboard or another OpenTelemetry compatible backend, install the OTLP exporter:
+
+```bash
+pip install opentelemetry-exporter-otlp
+```
+
+### How to enable tracing
+
+Here is a code sample that shows how to enable Azure Monitor tracing:
+
+```python
+from opentelemetry import trace
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+# Enable Azure Monitor tracing
+application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"]
+configure_azure_monitor(connection_string=application_insights_connection_string)
+
+# enable additional instrumentations
+enable_telemetry()
+
+scenario = os.path.basename(__file__)
+tracer = trace.get_tracer(__name__)
+
+with tracer.start_as_current_span(scenario):
+    with assistants_client:
+        # (assistant operations go here)
+```
+
+In addition, you might find it helpful to see the tracing logs in the console. You can achieve this with the following code:
+
+```python
+from azure.ai.assistants.telemetry import enable_telemetry
+
+enable_telemetry(destination=sys.stdout)
+```
+
+### How to trace your own functions
+
+The decorator `trace_function` is provided for tracing your own function calls using OpenTelemetry. By default the function name is used as the name for the span. Alternatively, you can provide the name for the span as a parameter to the decorator.
+
+This decorator handles various data types for function parameters and return values, and records them as attributes in the trace span. The supported data types include:
+* Basic data types: str, int, float, bool
+* Collections: list, dict, tuple, set
+  * Special handling for collections:
+    - If a collection (list, dict, tuple, set) contains nested collections, the entire collection is converted to a string before being recorded as an attribute.
+    - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes.
+
+Object types are omitted, and the corresponding parameter is not traced.
+
+The parameters are recorded in attributes `code.function.parameter.<parameter_name>`, and the return value is recorded in the attribute `code.function.return.value`.
+
+## Troubleshooting
+
+### Logging
+
+The client uses the standard [Python logging library](https://docs.python.org/3/library/logging.html). The SDK logs HTTP request and response details, which may be useful in troubleshooting. To log to stdout, add the following:
+
+```python
+import sys
+import logging
+
+# Acquire the logger for this client library. Use 'azure' to affect both
+# 'azure.core' and 'azure.ai.assistants' libraries.
+logger = logging.getLogger("azure")
+
+# Set the desired logging level. logging.INFO or logging.DEBUG are good options.
+logger.setLevel(logging.DEBUG)
+
+# Direct logging output to stdout:
+handler = logging.StreamHandler(stream=sys.stdout)
+# Or direct logging output to a file:
+# handler = logging.FileHandler(filename="sample.log")
+logger.addHandler(handler)
+
+# Optional: change the default logging format. Here we add a timestamp.
+# formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s")
+# handler.setFormatter(formatter)
+```
+
+By default, logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization`, which holds the key or token), and the request and response payloads.
+To create logs without redaction, pass `logging_enable=True` to the client constructor:
+
+```python
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+    logging_enable=True,
+)
+```
+
+Note that the log level must be set to `logging.DEBUG` (see the code above). Logs will be redacted at any other log level.
+
+Be sure to protect non-redacted logs to avoid compromising security.
+
+For more information, see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging).
+
+### Reporting issues
+
+To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-assistants" in the title or content.
+
+## Next steps
+
+Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-projects/samples) folder, containing fully runnable Python code for synchronous and asynchronous clients.
+
+Explore the [AI Starter Template](https://aka.ms/azsdk/azure-ai-projects/python/ai-starter-template). This template creates an Azure AI Foundry hub, project and connected resources including Azure OpenAI Service, AI Search and more. It also deploys a simple chat application to Azure Container Apps.
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, see the Code of Conduct FAQ or contact opencode@microsoft.com with any additional questions or comments.
+ + +[samples]: https://aka.ms/azsdk/azure-ai-projects/python/samples/ +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[entra_id]: https://learn.microsoft.com/azure/ai-services/authentication?tabs=powershell#authenticate-with-microsoft-entra-id +[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials +[azure_identity_pip]: https://pypi.org/project/azure-identity/ +[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential +[pip]: https://pypi.org/project/pip/ +[azure_sub]: https://azure.microsoft.com/free/ +[evaluators]: https://learn.microsoft.com/azure/ai-studio/how-to/develop/evaluate-sdk +[azure_ai_evaluation]: https://learn.microsoft.com/python/api/overview/azure/ai-evaluation-readme +[evaluator_library]: https://learn.microsoft.com/azure/ai-studio/how-to/evaluate-generative-ai-app#view-and-manage-the-evaluators-in-the-evaluator-library \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/apiview-properties.json b/sdk/ai/azure-ai-assistants/apiview-properties.json new file mode 100644 index 000000000000..15de3b3ab8d4 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/apiview-properties.json @@ -0,0 +1,243 @@ +{ + "CrossLanguagePackageId": "Azure.AI.Assistants", + "CrossLanguageDefinitionId": { + "azure.ai.assistants.models.AISearchIndexResource": "Azure.AI.Assistants.AISearchIndexResource", + "azure.ai.assistants.models.Assistant": "Azure.AI.Assistants.Assistant", + "azure.ai.assistants.models.AssistantDeletionStatus": "Azure.AI.Assistants.AssistantDeletionStatus", + "azure.ai.assistants.models.AssistantsApiResponseFormat": "Azure.AI.Assistants.AssistantsApiResponseFormat", + "azure.ai.assistants.models.AssistantsNamedToolChoice": "Azure.AI.Assistants.AssistantsNamedToolChoice", + "azure.ai.assistants.models.AssistantThread": "Azure.AI.Assistants.AssistantThread", + "azure.ai.assistants.models.AssistantThreadCreationOptions": "Azure.AI.Assistants.AssistantThreadCreationOptions", + "azure.ai.assistants.models.AzureAISearchResource": "Azure.AI.Assistants.AzureAISearchResource", + "azure.ai.assistants.models.ToolDefinition": "Azure.AI.Assistants.ToolDefinition", + "azure.ai.assistants.models.AzureAISearchToolDefinition": "Azure.AI.Assistants.AzureAISearchToolDefinition", + "azure.ai.assistants.models.AzureFunctionBinding": "Azure.AI.Assistants.AzureFunctionBinding", + "azure.ai.assistants.models.AzureFunctionDefinition": "Azure.AI.Assistants.AzureFunctionDefinition", + "azure.ai.assistants.models.AzureFunctionStorageQueue": "Azure.AI.Assistants.AzureFunctionStorageQueue", + "azure.ai.assistants.models.AzureFunctionToolDefinition": "Azure.AI.Assistants.AzureFunctionToolDefinition", + "azure.ai.assistants.models.BingCustomSearchToolDefinition": "Azure.AI.Assistants.BingCustomSearchToolDefinition", + "azure.ai.assistants.models.BingGroundingToolDefinition": "Azure.AI.Assistants.BingGroundingToolDefinition", + "azure.ai.assistants.models.CodeInterpreterToolDefinition": "Azure.AI.Assistants.CodeInterpreterToolDefinition", + "azure.ai.assistants.models.CodeInterpreterToolResource": "Azure.AI.Assistants.CodeInterpreterToolResource", + "azure.ai.assistants.models.FileDeletionStatus": "Azure.AI.Assistants.FileDeletionStatus", + "azure.ai.assistants.models.FileListResponse": "Azure.AI.Assistants.FileListResponse", + "azure.ai.assistants.models.FileSearchRankingOptions": "Azure.AI.Assistants.FileSearchRankingOptions", + 
"azure.ai.assistants.models.FileSearchToolCallContent": "Azure.AI.Assistants.FileSearchToolCallContent", + "azure.ai.assistants.models.FileSearchToolDefinition": "Azure.AI.Assistants.FileSearchToolDefinition", + "azure.ai.assistants.models.FileSearchToolDefinitionDetails": "Azure.AI.Assistants.FileSearchToolDefinitionDetails", + "azure.ai.assistants.models.FileSearchToolResource": "Azure.AI.Assistants.FileSearchToolResource", + "azure.ai.assistants.models.FunctionDefinition": "Azure.AI.Assistants.FunctionDefinition", + "azure.ai.assistants.models.FunctionName": "Azure.AI.Assistants.FunctionName", + "azure.ai.assistants.models.FunctionToolDefinition": "Azure.AI.Assistants.FunctionToolDefinition", + "azure.ai.assistants.models.IncompleteRunDetails": "Azure.AI.Assistants.IncompleteRunDetails", + "azure.ai.assistants.models.MessageAttachment": "Azure.AI.Assistants.MessageAttachment", + "azure.ai.assistants.models.MessageContent": "Azure.AI.Assistants.MessageContent", + "azure.ai.assistants.models.MessageDelta": "Azure.AI.Assistants.MessageDelta", + "azure.ai.assistants.models.MessageDeltaChunk": "Azure.AI.Assistants.MessageDeltaChunk", + "azure.ai.assistants.models.MessageDeltaContent": "Azure.AI.Assistants.MessageDeltaContent", + "azure.ai.assistants.models.MessageDeltaImageFileContent": "Azure.AI.Assistants.MessageDeltaImageFileContent", + "azure.ai.assistants.models.MessageDeltaImageFileContentObject": "Azure.AI.Assistants.MessageDeltaImageFileContentObject", + "azure.ai.assistants.models.MessageDeltaTextAnnotation": "Azure.AI.Assistants.MessageDeltaTextAnnotation", + "azure.ai.assistants.models.MessageDeltaTextContent": "Azure.AI.Assistants.MessageDeltaTextContent", + "azure.ai.assistants.models.MessageDeltaTextContentObject": "Azure.AI.Assistants.MessageDeltaTextContentObject", + "azure.ai.assistants.models.MessageDeltaTextFileCitationAnnotation": "Azure.AI.Assistants.MessageDeltaTextFileCitationAnnotation", + "azure.ai.assistants.models.MessageDeltaTextFileCitationAnnotationObject": "Azure.AI.Assistants.MessageDeltaTextFileCitationAnnotationObject", + "azure.ai.assistants.models.MessageDeltaTextFilePathAnnotation": "Azure.AI.Assistants.MessageDeltaTextFilePathAnnotation", + "azure.ai.assistants.models.MessageDeltaTextFilePathAnnotationObject": "Azure.AI.Assistants.MessageDeltaTextFilePathAnnotationObject", + "azure.ai.assistants.models.MessageDeltaTextUrlCitationAnnotation": "Azure.AI.Assistants.MessageDeltaTextUrlCitationAnnotation", + "azure.ai.assistants.models.MessageDeltaTextUrlCitationDetails": "Azure.AI.Assistants.MessageDeltaTextUrlCitationDetails", + "azure.ai.assistants.models.MessageImageFileContent": "Azure.AI.Assistants.MessageImageFileContent", + "azure.ai.assistants.models.MessageImageFileDetails": "Azure.AI.Assistants.MessageImageFileDetails", + "azure.ai.assistants.models.MessageImageFileParam": "Azure.AI.Assistants.MessageImageFileParam", + "azure.ai.assistants.models.MessageImageUrlParam": "Azure.AI.Assistants.MessageImageUrlParam", + "azure.ai.assistants.models.MessageIncompleteDetails": "Azure.AI.Assistants.MessageIncompleteDetails", + "azure.ai.assistants.models.MessageInputContentBlock": "Azure.AI.Assistants.MessageInputContentBlock", + "azure.ai.assistants.models.MessageInputImageFileBlock": "Azure.AI.Assistants.MessageInputImageFileBlock", + "azure.ai.assistants.models.MessageInputImageUrlBlock": "Azure.AI.Assistants.MessageInputImageUrlBlock", + "azure.ai.assistants.models.MessageInputTextBlock": "Azure.AI.Assistants.MessageInputTextBlock", + 
"azure.ai.assistants.models.MessageTextAnnotation": "Azure.AI.Assistants.MessageTextAnnotation", + "azure.ai.assistants.models.MessageTextContent": "Azure.AI.Assistants.MessageTextContent", + "azure.ai.assistants.models.MessageTextDetails": "Azure.AI.Assistants.MessageTextDetails", + "azure.ai.assistants.models.MessageTextFileCitationAnnotation": "Azure.AI.Assistants.MessageTextFileCitationAnnotation", + "azure.ai.assistants.models.MessageTextFileCitationDetails": "Azure.AI.Assistants.MessageTextFileCitationDetails", + "azure.ai.assistants.models.MessageTextFilePathAnnotation": "Azure.AI.Assistants.MessageTextFilePathAnnotation", + "azure.ai.assistants.models.MessageTextFilePathDetails": "Azure.AI.Assistants.MessageTextFilePathDetails", + "azure.ai.assistants.models.MessageTextUrlCitationAnnotation": "Azure.AI.Assistants.MessageTextUrlCitationAnnotation", + "azure.ai.assistants.models.MessageTextUrlCitationDetails": "Azure.AI.Assistants.MessageTextUrlCitationDetails", + "azure.ai.assistants.models.MicrosoftFabricToolDefinition": "Azure.AI.Assistants.MicrosoftFabricToolDefinition", + "azure.ai.assistants.models.OpenAIFile": "Azure.AI.Assistants.OpenAIFile", + "azure.ai.assistants.models.OpenAIPageableListOfAssistant": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfRunStep": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfThreadMessage": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfThreadRun": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfVectorStore": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenApiAuthDetails": "Azure.AI.Assistants.OpenApiAuthDetails", + "azure.ai.assistants.models.OpenApiAnonymousAuthDetails": "Azure.AI.Assistants.OpenApiAnonymousAuthDetails", + "azure.ai.assistants.models.OpenApiConnectionAuthDetails": "Azure.AI.Assistants.OpenApiConnectionAuthDetails", + "azure.ai.assistants.models.OpenApiConnectionSecurityScheme": "Azure.AI.Assistants.OpenApiConnectionSecurityScheme", + "azure.ai.assistants.models.OpenApiFunctionDefinition": "Azure.AI.Assistants.OpenApiFunctionDefinition", + "azure.ai.assistants.models.OpenApiManagedAuthDetails": "Azure.AI.Assistants.OpenApiManagedAuthDetails", + "azure.ai.assistants.models.OpenApiManagedSecurityScheme": "Azure.AI.Assistants.OpenApiManagedSecurityScheme", + "azure.ai.assistants.models.OpenApiToolDefinition": "Azure.AI.Assistants.OpenApiToolDefinition", + "azure.ai.assistants.models.RequiredAction": "Azure.AI.Assistants.RequiredAction", + "azure.ai.assistants.models.RequiredToolCall": "Azure.AI.Assistants.RequiredToolCall", + "azure.ai.assistants.models.RequiredFunctionToolCall": "Azure.AI.Assistants.RequiredFunctionToolCall", + "azure.ai.assistants.models.RequiredFunctionToolCallDetails": "Azure.AI.Assistants.RequiredFunctionToolCallDetails", + "azure.ai.assistants.models.ResponseFormatJsonSchema": "Azure.AI.Assistants.ResponseFormatJsonSchema", + "azure.ai.assistants.models.ResponseFormatJsonSchemaType": "Azure.AI.Assistants.ResponseFormatJsonSchemaType", + "azure.ai.assistants.models.RunCompletionUsage": "Azure.AI.Assistants.RunCompletionUsage", + "azure.ai.assistants.models.RunError": "Azure.AI.Assistants.RunError", + "azure.ai.assistants.models.RunStep": "Azure.AI.Assistants.RunStep", + 
"azure.ai.assistants.models.RunStepToolCall": "Azure.AI.Assistants.RunStepToolCall", + "azure.ai.assistants.models.RunStepAzureAISearchToolCall": "Azure.AI.Assistants.RunStepAzureAISearchToolCall", + "azure.ai.assistants.models.RunStepBingGroundingToolCall": "Azure.AI.Assistants.RunStepBingGroundingToolCall", + "azure.ai.assistants.models.RunStepCodeInterpreterToolCallOutput": "Azure.AI.Assistants.RunStepCodeInterpreterToolCallOutput", + "azure.ai.assistants.models.RunStepCodeInterpreterImageOutput": "Azure.AI.Assistants.RunStepCodeInterpreterImageOutput", + "azure.ai.assistants.models.RunStepCodeInterpreterImageReference": "Azure.AI.Assistants.RunStepCodeInterpreterImageReference", + "azure.ai.assistants.models.RunStepCodeInterpreterLogOutput": "Azure.AI.Assistants.RunStepCodeInterpreterLogOutput", + "azure.ai.assistants.models.RunStepCodeInterpreterToolCall": "Azure.AI.Assistants.RunStepCodeInterpreterToolCall", + "azure.ai.assistants.models.RunStepCodeInterpreterToolCallDetails": "Azure.AI.Assistants.RunStepCodeInterpreterToolCallDetails", + "azure.ai.assistants.models.RunStepCompletionUsage": "Azure.AI.Assistants.RunStepCompletionUsage", + "azure.ai.assistants.models.RunStepCustomSearchToolCall": "Azure.AI.Assistants.RunStepCustomSearchToolCall", + "azure.ai.assistants.models.RunStepDelta": "Azure.AI.Assistants.RunStepDelta", + "azure.ai.assistants.models.RunStepDeltaChunk": "Azure.AI.Assistants.RunStepDeltaChunk", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterDetailItemObject": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterDetailItemObject", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterOutput": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterOutput", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterImageOutput": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterImageOutput", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterImageOutputObject": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterImageOutputObject", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterLogOutput": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterLogOutput", + "azure.ai.assistants.models.RunStepDeltaToolCall": "Azure.AI.Assistants.RunStepDeltaToolCall", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterToolCall": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterToolCall", + "azure.ai.assistants.models.RunStepDeltaDetail": "Azure.AI.Assistants.RunStepDeltaDetail", + "azure.ai.assistants.models.RunStepDeltaFileSearchToolCall": "Azure.AI.Assistants.RunStepDeltaFileSearchToolCall", + "azure.ai.assistants.models.RunStepDeltaFunction": "Azure.AI.Assistants.RunStepDeltaFunction", + "azure.ai.assistants.models.RunStepDeltaFunctionToolCall": "Azure.AI.Assistants.RunStepDeltaFunctionToolCall", + "azure.ai.assistants.models.RunStepDeltaMessageCreation": "Azure.AI.Assistants.RunStepDeltaMessageCreation", + "azure.ai.assistants.models.RunStepDeltaMessageCreationObject": "Azure.AI.Assistants.RunStepDeltaMessageCreationObject", + "azure.ai.assistants.models.RunStepDeltaToolCallObject": "Azure.AI.Assistants.RunStepDeltaToolCallObject", + "azure.ai.assistants.models.RunStepDetails": "Azure.AI.Assistants.RunStepDetails", + "azure.ai.assistants.models.RunStepError": "Azure.AI.Assistants.RunStepError", + "azure.ai.assistants.models.RunStepFileSearchToolCall": "Azure.AI.Assistants.RunStepFileSearchToolCall", + "azure.ai.assistants.models.RunStepFileSearchToolCallResult": "Azure.AI.Assistants.RunStepFileSearchToolCallResult", + 
"azure.ai.assistants.models.RunStepFileSearchToolCallResults": "Azure.AI.Assistants.RunStepFileSearchToolCallResults", + "azure.ai.assistants.models.RunStepFunctionToolCall": "Azure.AI.Assistants.RunStepFunctionToolCall", + "azure.ai.assistants.models.RunStepFunctionToolCallDetails": "Azure.AI.Assistants.RunStepFunctionToolCallDetails", + "azure.ai.assistants.models.RunStepMessageCreationDetails": "Azure.AI.Assistants.RunStepMessageCreationDetails", + "azure.ai.assistants.models.RunStepMessageCreationReference": "Azure.AI.Assistants.RunStepMessageCreationReference", + "azure.ai.assistants.models.RunStepMicrosoftFabricToolCall": "Azure.AI.Assistants.RunStepMicrosoftFabricToolCall", + "azure.ai.assistants.models.RunStepOpenAPIToolCall": "Azure.AI.Assistants.RunStepOpenAPIToolCall", + "azure.ai.assistants.models.RunStepSharepointToolCall": "Azure.AI.Assistants.RunStepSharepointToolCall", + "azure.ai.assistants.models.RunStepToolCallDetails": "Azure.AI.Assistants.RunStepToolCallDetails", + "azure.ai.assistants.models.SearchConfiguration": "Azure.AI.Assistants.SearchConfiguration", + "azure.ai.assistants.models.SearchConfigurationList": "Azure.AI.Assistants.SearchConfigurationList", + "azure.ai.assistants.models.SharepointToolDefinition": "Azure.AI.Assistants.SharepointToolDefinition", + "azure.ai.assistants.models.SubmitToolOutputsAction": "Azure.AI.Assistants.SubmitToolOutputsAction", + "azure.ai.assistants.models.SubmitToolOutputsDetails": "Azure.AI.Assistants.SubmitToolOutputsDetails", + "azure.ai.assistants.models.ThreadDeletionStatus": "Azure.AI.Assistants.ThreadDeletionStatus", + "azure.ai.assistants.models.ThreadMessage": "Azure.AI.Assistants.ThreadMessage", + "azure.ai.assistants.models.ThreadMessageOptions": "Azure.AI.Assistants.ThreadMessageOptions", + "azure.ai.assistants.models.ThreadRun": "Azure.AI.Assistants.ThreadRun", + "azure.ai.assistants.models.ToolConnection": "Azure.AI.Assistants.ToolConnection", + "azure.ai.assistants.models.ToolConnectionList": "Azure.AI.Assistants.ToolConnectionList", + "azure.ai.assistants.models.ToolOutput": "Azure.AI.Assistants.ToolOutput", + "azure.ai.assistants.models.ToolResources": "Azure.AI.Assistants.ToolResources", + "azure.ai.assistants.models.TruncationObject": "Azure.AI.Assistants.TruncationObject", + "azure.ai.assistants.models.UpdateCodeInterpreterToolResourceOptions": "Azure.AI.Assistants.UpdateCodeInterpreterToolResourceOptions", + "azure.ai.assistants.models.UpdateFileSearchToolResourceOptions": "Azure.AI.Assistants.UpdateFileSearchToolResourceOptions", + "azure.ai.assistants.models.UpdateToolResourcesOptions": "Azure.AI.Assistants.UpdateToolResourcesOptions", + "azure.ai.assistants.models.VectorStore": "Azure.AI.Assistants.VectorStore", + "azure.ai.assistants.models.VectorStoreChunkingStrategyRequest": "Azure.AI.Assistants.VectorStoreChunkingStrategyRequest", + "azure.ai.assistants.models.VectorStoreAutoChunkingStrategyRequest": "Azure.AI.Assistants.VectorStoreAutoChunkingStrategyRequest", + "azure.ai.assistants.models.VectorStoreChunkingStrategyResponse": "Azure.AI.Assistants.VectorStoreChunkingStrategyResponse", + "azure.ai.assistants.models.VectorStoreAutoChunkingStrategyResponse": "Azure.AI.Assistants.VectorStoreAutoChunkingStrategyResponse", + "azure.ai.assistants.models.VectorStoreConfiguration": "Azure.AI.Assistants.VectorStoreConfiguration", + "azure.ai.assistants.models.VectorStoreConfigurations": "Azure.AI.Assistants.VectorStoreConfigurations", + "azure.ai.assistants.models.VectorStoreDataSource": 
"Azure.AI.Assistants.VectorStoreDataSource", + "azure.ai.assistants.models.VectorStoreDeletionStatus": "Azure.AI.Assistants.VectorStoreDeletionStatus", + "azure.ai.assistants.models.VectorStoreExpirationPolicy": "Azure.AI.Assistants.VectorStoreExpirationPolicy", + "azure.ai.assistants.models.VectorStoreFile": "Azure.AI.Assistants.VectorStoreFile", + "azure.ai.assistants.models.VectorStoreFileBatch": "Azure.AI.Assistants.VectorStoreFileBatch", + "azure.ai.assistants.models.VectorStoreFileCount": "Azure.AI.Assistants.VectorStoreFileCount", + "azure.ai.assistants.models.VectorStoreFileDeletionStatus": "Azure.AI.Assistants.VectorStoreFileDeletionStatus", + "azure.ai.assistants.models.VectorStoreFileError": "Azure.AI.Assistants.VectorStoreFileError", + "azure.ai.assistants.models.VectorStoreStaticChunkingStrategyOptions": "Azure.AI.Assistants.VectorStoreStaticChunkingStrategyOptions", + "azure.ai.assistants.models.VectorStoreStaticChunkingStrategyRequest": "Azure.AI.Assistants.VectorStoreStaticChunkingStrategyRequest", + "azure.ai.assistants.models.VectorStoreStaticChunkingStrategyResponse": "Azure.AI.Assistants.VectorStoreStaticChunkingStrategyResponse", + "azure.ai.assistants.models.OpenApiAuthType": "Azure.AI.Assistants.OpenApiAuthType", + "azure.ai.assistants.models.VectorStoreDataSourceAssetType": "Azure.AI.Assistants.VectorStoreDataSourceAssetType", + "azure.ai.assistants.models.AzureAISearchQueryType": "Azure.AI.Assistants.AzureAISearchQueryType", + "azure.ai.assistants.models.AssistantsApiResponseFormatMode": "Azure.AI.Assistants.AssistantsApiResponseFormatMode", + "azure.ai.assistants.models.ResponseFormat": "Azure.AI.Assistants.ResponseFormat", + "azure.ai.assistants.models.ListSortOrder": "Azure.AI.Assistants.ListSortOrder", + "azure.ai.assistants.models.MessageRole": "Azure.AI.Assistants.MessageRole", + "azure.ai.assistants.models.MessageBlockType": "Azure.AI.Assistants.MessageBlockType", + "azure.ai.assistants.models.ImageDetailLevel": "Azure.AI.Assistants.ImageDetailLevel", + "azure.ai.assistants.models.MessageStatus": "Azure.AI.Assistants.MessageStatus", + "azure.ai.assistants.models.MessageIncompleteDetailsReason": "Azure.AI.Assistants.MessageIncompleteDetailsReason", + "azure.ai.assistants.models.RunStatus": "Azure.AI.Assistants.RunStatus", + "azure.ai.assistants.models.IncompleteDetailsReason": "Azure.AI.Assistants.IncompleteDetailsReason", + "azure.ai.assistants.models.TruncationStrategy": "Azure.AI.Assistants.TruncationStrategy", + "azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode": "Azure.AI.Assistants.AssistantsApiToolChoiceOptionMode", + "azure.ai.assistants.models.AssistantsNamedToolChoiceType": "Azure.AI.Assistants.AssistantsNamedToolChoiceType", + "azure.ai.assistants.models.RunAdditionalFieldList": "Azure.AI.Assistants.RunAdditionalFieldList", + "azure.ai.assistants.models.RunStepType": "Azure.AI.Assistants.RunStepType", + "azure.ai.assistants.models.RunStepStatus": "Azure.AI.Assistants.RunStepStatus", + "azure.ai.assistants.models.RunStepErrorCode": "Azure.AI.Assistants.RunStepErrorCode", + "azure.ai.assistants.models.FilePurpose": "Azure.AI.Assistants.FilePurpose", + "azure.ai.assistants.models.FileState": "Azure.AI.Assistants.FileState", + "azure.ai.assistants.models.VectorStoreStatus": "Azure.AI.Assistants.VectorStoreStatus", + "azure.ai.assistants.models.VectorStoreExpirationPolicyAnchor": "Azure.AI.Assistants.VectorStoreExpirationPolicyAnchor", + "azure.ai.assistants.models.VectorStoreChunkingStrategyRequestType": 
"Azure.AI.Assistants.VectorStoreChunkingStrategyRequestType", + "azure.ai.assistants.models.VectorStoreFileStatus": "Azure.AI.Assistants.VectorStoreFileStatus", + "azure.ai.assistants.models.VectorStoreFileErrorCode": "Azure.AI.Assistants.VectorStoreFileErrorCode", + "azure.ai.assistants.models.VectorStoreChunkingStrategyResponseType": "Azure.AI.Assistants.VectorStoreChunkingStrategyResponseType", + "azure.ai.assistants.models.VectorStoreFileStatusFilter": "Azure.AI.Assistants.VectorStoreFileStatusFilter", + "azure.ai.assistants.models.VectorStoreFileBatchStatus": "Azure.AI.Assistants.VectorStoreFileBatchStatus", + "azure.ai.assistants.models.ThreadStreamEvent": "Azure.AI.Assistants.ThreadStreamEvent", + "azure.ai.assistants.models.RunStreamEvent": "Azure.AI.Assistants.RunStreamEvent", + "azure.ai.assistants.models.RunStepStreamEvent": "Azure.AI.Assistants.RunStepStreamEvent", + "azure.ai.assistants.models.MessageStreamEvent": "Azure.AI.Assistants.MessageStreamEvent", + "azure.ai.assistants.models.ErrorEvent": "Azure.AI.Assistants.ErrorEvent", + "azure.ai.assistants.models.DoneEvent": "Azure.AI.Assistants.DoneEvent", + "azure.ai.assistants.models.AssistantStreamEvent": "Azure.AI.Assistants.AssistantStreamEvent", + "azure.ai.assistants.AssistantsClient.create_assistant": "Azure.AI.Assistants.createAssistant", + "azure.ai.assistants.AssistantsClient.list_assistants": "Azure.AI.Assistants.listAssistants", + "azure.ai.assistants.AssistantsClient.get_assistant": "Azure.AI.Assistants.getAssistant", + "azure.ai.assistants.AssistantsClient.update_assistant": "Azure.AI.Assistants.updateAssistant", + "azure.ai.assistants.AssistantsClient.delete_assistant": "Azure.AI.Assistants.deleteAssistant", + "azure.ai.assistants.AssistantsClient.create_thread": "Azure.AI.Assistants.createThread", + "azure.ai.assistants.AssistantsClient.get_thread": "Azure.AI.Assistants.getThread", + "azure.ai.assistants.AssistantsClient.update_thread": "Azure.AI.Assistants.updateThread", + "azure.ai.assistants.AssistantsClient.delete_thread": "Azure.AI.Assistants.deleteThread", + "azure.ai.assistants.AssistantsClient.create_message": "Azure.AI.Assistants.createMessage", + "azure.ai.assistants.AssistantsClient.list_messages": "Azure.AI.Assistants.listMessages", + "azure.ai.assistants.AssistantsClient.get_message": "Azure.AI.Assistants.getMessage", + "azure.ai.assistants.AssistantsClient.update_message": "Azure.AI.Assistants.updateMessage", + "azure.ai.assistants.AssistantsClient.create_run": "Azure.AI.Assistants.createRun", + "azure.ai.assistants.AssistantsClient.list_runs": "Azure.AI.Assistants.listRuns", + "azure.ai.assistants.AssistantsClient.get_run": "Azure.AI.Assistants.getRun", + "azure.ai.assistants.AssistantsClient.update_run": "Azure.AI.Assistants.updateRun", + "azure.ai.assistants.AssistantsClient.submit_tool_outputs_to_run": "Azure.AI.Assistants.submitToolOutputsToRun", + "azure.ai.assistants.AssistantsClient.cancel_run": "Azure.AI.Assistants.cancelRun", + "azure.ai.assistants.AssistantsClient.create_thread_and_run": "Azure.AI.Assistants.createThreadAndRun", + "azure.ai.assistants.AssistantsClient.get_run_step": "Azure.AI.Assistants.getRunStep", + "azure.ai.assistants.AssistantsClient.list_run_steps": "Azure.AI.Assistants.listRunSteps", + "azure.ai.assistants.AssistantsClient.list_files": "Azure.AI.Assistants.listFiles", + "azure.ai.assistants.AssistantsClient.delete_file": "Azure.AI.Assistants.deleteFile", + "azure.ai.assistants.AssistantsClient.get_file": "Azure.AI.Assistants.getFile", + 
"azure.ai.assistants.AssistantsClient.list_vector_stores": "Azure.AI.Assistants.listVectorStores", + "azure.ai.assistants.AssistantsClient.create_vector_store": "Azure.AI.Assistants.createVectorStore", + "azure.ai.assistants.AssistantsClient.get_vector_store": "Azure.AI.Assistants.getVectorStore", + "azure.ai.assistants.AssistantsClient.modify_vector_store": "Azure.AI.Assistants.modifyVectorStore", + "azure.ai.assistants.AssistantsClient.delete_vector_store": "Azure.AI.Assistants.deleteVectorStore", + "azure.ai.assistants.AssistantsClient.list_vector_store_files": "Azure.AI.Assistants.listVectorStoreFiles", + "azure.ai.assistants.AssistantsClient.create_vector_store_file": "Azure.AI.Assistants.createVectorStoreFile", + "azure.ai.assistants.AssistantsClient.get_vector_store_file": "Azure.AI.Assistants.getVectorStoreFile", + "azure.ai.assistants.AssistantsClient.delete_vector_store_file": "Azure.AI.Assistants.deleteVectorStoreFile", + "azure.ai.assistants.AssistantsClient.create_vector_store_file_batch": "Azure.AI.Assistants.createVectorStoreFileBatch", + "azure.ai.assistants.AssistantsClient.get_vector_store_file_batch": "Azure.AI.Assistants.getVectorStoreFileBatch", + "azure.ai.assistants.AssistantsClient.cancel_vector_store_file_batch": "Azure.AI.Assistants.cancelVectorStoreFileBatch", + "azure.ai.assistants.AssistantsClient.list_vector_store_file_batch_files": "Azure.AI.Assistants.listVectorStoreFileBatchFiles" + } +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/assets.json b/sdk/ai/azure-ai-assistants/assets.json new file mode 100644 index 000000000000..823831a56f44 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "python", + "TagPrefix": "python/ai/azure-ai-assistants", + "Tag": "python/ai/azure-ai-assistants_a471817af2" +} diff --git a/sdk/ai/azure-ai-assistants/azure/__init__.py b/sdk/ai/azure-ai-assistants/azure/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-assistants/azure/ai/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/__init__.py new file mode 100644 index 000000000000..2484b50c5378 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._client import AssistantsClient  # type: ignore
+from ._version import VERSION
+
+__version__ = VERSION
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AssistantsClient",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py
new file mode 100644
index 000000000000..ef1341bc0ac4
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py
@@ -0,0 +1,102 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, TYPE_CHECKING, Union
+from typing_extensions import Self
+
+from azure.core import PipelineClient
+from azure.core.credentials import AzureKeyCredential
+from azure.core.pipeline import policies
+from azure.core.rest import HttpRequest, HttpResponse
+
+from ._configuration import AssistantsClientConfiguration
+from ._operations import AssistantsClientOperationsMixin
+from ._serialization import Deserializer, Serializer
+
+if TYPE_CHECKING:
+    from azure.core.credentials import TokenCredential
+
+
+class AssistantsClient(AssistantsClientOperationsMixin):
+    """AssistantsClient.
+
+    :param endpoint: Project endpoint in the form of:
+     https://<aiservices-id>.services.ai.azure.com/api/projects/<project-name>. Required.
+    :type endpoint: str
+    :param credential: Credential used to authenticate requests to the service. Is either a key
+     credential type or a token credential type. Required.
+    :type credential: ~azure.core.credentials.AzureKeyCredential or
+     ~azure.core.credentials.TokenCredential
+    :keyword api_version: The API version to use for this operation. Default value is
+     "2025-05-15-preview". Note that overriding this default value may result in unsupported
+     behavior.
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = AssistantsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> Self: + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py new file mode 100644 index 000000000000..b3aa33c5f408 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+
+from typing import Any, TYPE_CHECKING, Union
+
+from azure.core.credentials import AzureKeyCredential
+from azure.core.pipeline import policies
+
+from ._version import VERSION
+
+if TYPE_CHECKING:
+    from azure.core.credentials import TokenCredential
+
+
+class AssistantsClientConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AssistantsClient.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param endpoint: Project endpoint in the form of:
+     https://<aiservices-id>.services.ai.azure.com/api/projects/<project-name>. Required.
+    :type endpoint: str
+    :param credential: Credential used to authenticate requests to the service. Is either a key
+     credential type or a token credential type. Required.
+    :type credential: ~azure.core.credentials.AzureKeyCredential or
+     ~azure.core.credentials.TokenCredential
+    :keyword api_version: The API version to use for this operation. Default value is
+     "2025-05-15-preview". Note that overriding this default value may result in unsupported
+     behavior.
+    :paramtype api_version: str
+    """
+
+    def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None:
+        api_version: str = kwargs.pop("api_version", "2025-05-15-preview")
+
+        if endpoint is None:
+            raise ValueError("Parameter 'endpoint' must not be None.")
+        if credential is None:
+            raise ValueError("Parameter 'credential' must not be None.")
+
+        self.endpoint = endpoint
+        self.credential = credential
+        self.api_version = api_version
+        self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"])
+        kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _infer_policy(self, **kwargs):
+        if isinstance(self.credential, AzureKeyCredential):
+            return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs)
+        if hasattr(self.credential, "get_token"):
+            return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
+        raise TypeError(f"Unsupported credential: {self.credential}")
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
+        if self.credential and not self.authentication_policy:
+            self.authentication_policy = self._infer_policy(**kwargs)
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_model_base.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_model_base.py
new file mode 100644
index 000000000000..3072ee252ed9
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_model_base.py
@@ -0,0 +1,1235 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=protected-access, broad-except
+
+import copy
+import calendar
+import decimal
+import functools
+import sys
+import logging
+import base64
+import re
+import typing
+import enum
+import email.utils
+from datetime import datetime, date, time, timedelta, timezone
+from json import JSONEncoder
+import xml.etree.ElementTree as ET
+from typing_extensions import Self
+import isodate
+from azure.core.exceptions import DeserializationError
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.pipeline import PipelineResponse
+from azure.core.serialization import _Null
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping
+
+_LOGGER = logging.getLogger(__name__)
+
+__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"]
+
+TZ_UTC = timezone.utc
+_T = typing.TypeVar("_T")
+
+
+def _timedelta_as_isostr(td: timedelta) -> str:
+    """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S'
+
+    Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython
+
+    :param timedelta td: The timedelta to convert
+    :rtype: str
+    :return: ISO8601 version of this timedelta
+    """
+
+    # Split seconds to larger units
+    seconds = td.total_seconds()
+    minutes, seconds = divmod(seconds, 60)
+    hours, minutes = divmod(minutes, 60)
+    days, hours = divmod(hours, 24)
+
+    days, hours, minutes = list(map(int, (days, hours, minutes)))
+    seconds = round(seconds, 6)
+
+    # Build date
+    date_str = ""
+    if days:
+        date_str = "%sD" % days
+
+    if hours or minutes or seconds:
+        # Build time
+        time_str = "T"
+
+        # Hours
+        bigger_exists = date_str or hours
+        if bigger_exists:
+            time_str += "{:02}H".format(hours)
+
+        # Minutes
+        bigger_exists = bigger_exists or minutes
+        if bigger_exists:
+            time_str += "{:02}M".format(minutes)
+
+        # Seconds
+        try:
+            if seconds.is_integer():
+                seconds_string = "{:02}".format(int(seconds))
+            else:
+                # 9 chars long w/ leading 0, 6 digits after decimal
+                seconds_string = "%09.6f" % seconds
+                # Remove trailing zeros
+                seconds_string = seconds_string.rstrip("0")
+        except AttributeError:  # int.is_integer() raises
+            seconds_string = "{:02}".format(seconds)
+
+        time_str += "{}S".format(seconds_string)
+    else:
+        time_str = ""
+
+    return "P" + date_str + time_str
+
+
+def _serialize_bytes(o, format: typing.Optional[str] = None) -> str:
+    encoded = base64.b64encode(o).decode()
+    if format == "base64url":
+        return encoded.strip("=").replace("+", "-").replace("/", "_")
+    return encoded
+
+
+def _serialize_datetime(o, format: typing.Optional[str] = None):
+    if hasattr(o, "year") and hasattr(o, "hour"):
+        if format == "rfc7231":
+            return email.utils.format_datetime(o, usegmt=True)
+        if format == "unix-timestamp":
+            return int(calendar.timegm(o.utctimetuple()))
+
+        # astimezone() fails for naive times in Python 2.7, so make sure o is aware (tzinfo is set)
+        if not o.tzinfo:
+            iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat()
+        else:
+            iso_formatted = o.astimezone(TZ_UTC).isoformat()
+        # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339:
https://www.ietf.org/rfc/rfc3339.txt) + return iso_formatted.replace("+00:00", "Z") + # Next try datetime.date or datetime.time + return o.isoformat() + + +def _is_readonly(p): + try: + return p._visibility == ["read"] + except AttributeError: + return False + + +class SdkJSONEncoder(JSONEncoder): + """A JSON encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. 
+ :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. + :param str attr: response string to be deserialized. + :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object + def __init__(self, data: typing.Dict[str, 
typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + self._data.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + self._data.__delitem__(key) + + def __iter__(self) -> typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + """ + :returns: a set-like object providing a view on D's keys + :rtype: ~typing.KeysView + """ + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + """ + :returns: an object providing a view on D's values + :rtype: ~typing.ValuesView + """ + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + """ + :returns: set-like object providing a view on D's items + :rtype: ~typing.ItemsView + """ + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + """ + Get the value for key if key is in the dictionary, else default. + :param str key: The key to look up. + :param any default: The value to return if key is not in the dictionary. Defaults to None + :returns: D[k] if k in D, else d. + :rtype: any + """ + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: ... + + @typing.overload + def pop(self, key: str, default: _T) -> _T: ... + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: ... + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Removes specified key and return the corresponding value. + :param str key: The key to pop. + :param any default: The value to return if key is not in the dictionary + :returns: The value corresponding to the key. + :rtype: any + :raises KeyError: If key is not found and default is not given. + """ + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> typing.Tuple[str, typing.Any]: + """ + Removes and returns some (key, value) pair + :returns: The (key, value) pair. + :rtype: tuple + :raises KeyError: if D is empty. + """ + return self._data.popitem() + + def clear(self) -> None: + """ + Remove all items from D. + """ + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + """ + Updates D from mapping/iterable E and F. + :param any args: Either a mapping object or an iterable of key-value pairs. + """ + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Same as calling D.get(k, d), and setting D[k]=d if k not found + :param str key: The key to look up. + :param any default: The value to set if key is not in the dictionary + :returns: D[k] if k in D, else d. 
+ :rtype: any + """ + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field( + attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str +) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + if isinstance(value, ET.Element): + value = _deserialize(rf._type, value) + return _serialize(value, rf._format) + + +class Model(_MyMutableMapping): + _is_model = True + # label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: typing.Set[str] = set() + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: # pylint: disable=too-many-nested-blocks + if isinstance(args[0], ET.Element): + existed_attr_keys = [] + model_meta = getattr(self, "_xml", {}) + + for rf in self._attr_to_rest_field.values(): + prop_meta = getattr(rf, "_xml", {}) + xml_name = prop_meta.get("name", rf._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + # attribute + if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) + continue + + # unwrapped element is array + if prop_meta.get("unwrapped", False): + # unwrapped array could either use prop items meta/prop meta + if prop_meta.get("itemsName"): + xml_name = prop_meta.get("itemsName") + xml_ns = 
prop_meta.get("itemNs") + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + items = args[0].findall(xml_name) # pyright: ignore + if len(items) > 0: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + continue + + # text element is primitive type + if prop_meta.get("text", False): + if args[0].text is not None: + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) + continue + + # wrapped element could be normal property or array, it should only have one element + item = args[0].find(xml_name) + if item is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) + + # rest thing is additional properties + for e in args[0]: + if e.tag not in existed_attr_keys: + dict_to_pass[e.tag] = _convert_element(e) + else: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. + raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") + dict_to_pass.update( + { + self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) + for k, v in kwargs.items() + if v is not None + } + ) + super().__init__(dict_to_pass) + + def copy(self) -> "Model": + return Model(self.__dict__) + + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: + if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: + # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', + # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' + mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order + attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") + for k, v in mro_class.__annotations__.items() + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") + + return super().__new__(cls) # pylint: disable=no-value-for-parameter + + def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: + for base in cls.__bases__: + if hasattr(base, "__mapping__"): + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore + + @classmethod + def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: + for v in cls.__dict__.values(): + if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: + return v + return None + + @classmethod + def _deserialize(cls, data, exist_discriminators): + if not hasattr(cls, "__mapping__"): + return cls(data) + discriminator = cls._get_discriminator(exist_discriminators) + if discriminator is None: + return cls(data) + exist_discriminators.append(discriminator._rest_name) + if isinstance(data, 
ET.Element): + model_meta = getattr(cls, "_xml", {}) + prop_meta = getattr(discriminator, "_xml", {}) + xml_name = prop_meta.get("name", discriminator._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + if data.get(xml_name) is not None: + discriminator_value = data.get(xml_name) + else: + discriminator_value = data.find(xml_name).text # pyright: ignore + else: + discriminator_value = data.get(discriminator._rest_name) + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore + return mapped_cls._deserialize(data, exist_discriminators) + + def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: + """Return a dict that can be turned into json using json.dump. + + :keyword bool exclude_readonly: Whether to remove the readonly properties. + :returns: A dict JSON compatible object + :rtype: dict + """ + + result = {} + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: typing.Dict[typing.Any, typing.Any], +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = {child.tag: child for child in obj} + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def 
_sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? + if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) # type: ignore + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? + try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? 
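+    # Union members are tried in order until one deserializer succeeds;
+    # _deserialize_with_union raises DeserializationError only if every member fails.
+    # _sorted_annotations sorts primitive-like members (str, float, int, bool) to the
+    # end of the list, so more specific types such as generated models are attempted first.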
+    if getattr(annotation, "__origin__", None) is typing.Union:
+        # initial ordering is we make `string` the last deserialization option, because it is often the most generic
+        deserializers = [
+            _get_deserialize_callable_from_annotation(arg, module, rf)
+            for arg in _sorted_annotations(annotation.__args__)  # pyright: ignore
+        ]
+
+        return functools.partial(_deserialize_with_union, deserializers)
+
+    try:
+        if annotation._name == "Dict":  # pyright: ignore
+            value_deserializer = _get_deserialize_callable_from_annotation(
+                annotation.__args__[1], module, rf  # pyright: ignore
+            )
+
+            return functools.partial(
+                _deserialize_dict,
+                value_deserializer,
+                module,
+            )
+    except (AttributeError, IndexError):
+        pass
+    try:
+        if annotation._name in ["List", "Set", "Tuple", "Sequence"]:  # pyright: ignore
+            if len(annotation.__args__) > 1:  # pyright: ignore
+                entry_deserializers = [
+                    _get_deserialize_callable_from_annotation(dt, module, rf)
+                    for dt in annotation.__args__  # pyright: ignore
+                ]
+                return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module)
+            deserializer = _get_deserialize_callable_from_annotation(
+                annotation.__args__[0], module, rf  # pyright: ignore
+            )
+
+            return functools.partial(_deserialize_sequence, deserializer, module)
+    except (TypeError, IndexError, AttributeError, SyntaxError):
+        pass
+
+    def _deserialize_default(
+        deserializer,
+        obj,
+    ):
+        if obj is None:
+            return obj
+        try:
+            return _deserialize_with_callable(deserializer, obj)
+        except Exception:
+            pass
+        return obj
+
+    if get_deserializer(annotation, rf):
+        return functools.partial(_deserialize_default, get_deserializer(annotation, rf))
+
+    return functools.partial(_deserialize_default, annotation)
+
+
+def _deserialize_with_callable(
+    deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]],
+    value: typing.Any,
+):  # pylint: disable=too-many-return-statements
+    try:
+        if value is None or isinstance(value, _Null):
+            return None
+        if isinstance(value, ET.Element):
+            if deserializer is str:
+                return value.text or ""
+            if deserializer is int:
+                return int(value.text) if value.text else None
+            if deserializer is float:
+                return float(value.text) if value.text else None
+            if deserializer is bool:
+                return value.text == "true" if value.text else None
+        if deserializer is None:
+            return value
+        if deserializer in [int, float, bool]:
+            return deserializer(value)
+        if isinstance(deserializer, CaseInsensitiveEnumMeta):
+            try:
+                return deserializer(value)
+            except ValueError:
+                # for unknown value, return raw value
+                return value
+        if isinstance(deserializer, type) and issubclass(deserializer, Model):
+            return deserializer._deserialize(value, [])
+        return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value)
+    except Exception as e:
+        raise DeserializationError() from e
+
+
+def _deserialize(
+    deserializer: typing.Any,
+    value: typing.Any,
+    module: typing.Optional[str] = None,
+    rf: typing.Optional["_RestField"] = None,
+    format: typing.Optional[str] = None,
+) -> typing.Any:
+    if isinstance(value, PipelineResponse):
+        value = value.http_response.json()
+    if rf is None and format:
+        rf = _RestField(format=format)
+    if not isinstance(deserializer, functools.partial):
+        deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf)
+    return _deserialize_with_callable(deserializer, value)
+
+
+def _failsafe_deserialize(
+    deserializer: typing.Any,
+    value: typing.Any,
+    module: typing.Optional[str] = None,
+    rf: typing.Optional["_RestField"]
= None, + format: typing.Optional[str] = None, +) -> typing.Any: + try: + return _deserialize(deserializer, value, module, rf, format) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +def _failsafe_deserialize_xml( + deserializer: typing.Any, + value: typing.Any, +) -> typing.Any: + try: + return _deserialize_xml(deserializer, value) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} + + @property + def _class_type(self) -> typing.Any: + return getattr(self._type, "args", [None])[0] + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value because we default + # them in __new__ of the Model class + item = obj.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + return _deserialize(self._type, _serialize(item, self._format), rf=self) + + def __set__(self, obj: Model, value) -> None: + if value is None: + # we want to wipe out entries if users set the attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + xml=xml, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any:
+ return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, typing.List[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if the prop is a model, use the prop element directly; otherwise generate a wrapper element for the model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as the xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have a rest field; use the wire name as the xml name + prop_meta = {"name": k} + + # if no ns for the prop, use the model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped can only be set on an array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text can only be set on a primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute values should be primitive types + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + "prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case: need to create an element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + str(o)) + + +def _get_wrapped_element( + v:
typing.Any, + exclude_readonly: bool, + meta: typing.Optional[typing.Dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: typing.Dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: typing.List[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py new file mode 100644 index 000000000000..ee3f17d82ddc --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py @@ -0,0 +1,25 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
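# --- Illustrative sketch (assumption; not part of the generated sources above). ---
# The _model_base helpers work together like this: rest_field maps a Python attribute
# to its wire name, and _deserialize dispatches on the type annotation, trying union
# members in _sorted_annotations order with str last, because str accepts almost
# anything. The _Pet model and payload below are hypothetical, for illustration only;
# Model, rest_field, and _deserialize are the names defined in the file above.
import typing

class _Pet(Model):  # hypothetical model, for illustration only
    name: str = rest_field()
    age: typing.Optional[int] = rest_field(name="petAge")

pet = _deserialize(_Pet, {"name": "Rex", "petAge": 3})
assert pet.age == 3  # the descriptor resolves the wire key "petAge"

# Union dispatch: int is tried before the generic str fallback.
assert _deserialize(typing.Union[int, str], 42) == 42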
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import AssistantsClientOperationsMixin # type: ignore + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AssistantsClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py new file mode 100644 index 000000000000..06cc62f53078 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py @@ -0,0 +1,5883 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, Iterator, List, Optional, TYPE_CHECKING, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import _model_base, models as _models +from .._model_base import SdkJSONEncoder, _deserialize +from .._serialization import Serializer +from .._vendor import AssistantsClientMixinABC, prepare_multipart_form_data + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore + +if TYPE_CHECKING: + from .. 
import _types +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_assistants_create_assistant_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_list_assistants_request( + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_assistant_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_update_assistant_request( # pylint: disable=name-too-long + assistant_id: str, **kwargs: Any +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_delete_assistant_request( # pylint: disable=name-too-long + assistant_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_create_thread_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") 
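# Illustrative sketch (assumption; not generated code): every build_assistants_*_request
# helper above follows the same recipe -- substitute path parameters into the URL
# template, add the api-version query parameter, and set JSON Accept/Content-Type
# headers -- yielding an azure.core.rest.HttpRequest that the client pipeline later
# sends. The assistant ID below is a placeholder.
from azure.core.rest import HttpRequest

request = build_assistants_get_assistant_request("asst_123")
assert isinstance(request, HttpRequest)
assert request.method == "GET"
assert request.url.startswith("/assistants/asst_123")
assert "api-version=2025-05-15-preview" in request.url  # default api-version is encoded into the URL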
+ + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_update_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_delete_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_create_message_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_list_messages_request( + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> 
HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if run_id is not None: + _params["runId"] = _SERIALIZER.query("run_id", run_id, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": _SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_update_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": _SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_create_run_request( + thread_id: str, *, include: Optional[List[Union[str, 
_models.RunAdditionalFieldList]]] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if include is not None: + _params["include[]"] = _SERIALIZER.query("include", include, "[str]", div=",") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_list_runs_request( + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def 
build_assistants_update_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_submit_tool_outputs_to_run_request( # pylint: disable=name-too-long + thread_id: str, run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/submit_tool_outputs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_cancel_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/cancel" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_create_thread_and_run_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", 
{}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/runs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_run_step_request( + thread_id: str, + run_id: str, + step_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps/{stepId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + "stepId": _SERIALIZER.url("step_id", step_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if include is not None: + _params["include[]"] = _SERIALIZER.query("include", include, "[str]", div=",") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_list_run_steps_request( + thread_id: str, + run_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if include is not None: + _params["include[]"] = _SERIALIZER.query("include", include, "[str]", div=",") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + 
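# Illustrative sketch (assumption): the list builders above share a cursor-style
# paging contract -- "limit" caps the page size, "order" sorts by creation time
# ("asc"/"desc"), and "after"/"before" are object-ID cursors copied from a previous
# page. The thread and run IDs below are placeholders.
page_request = build_assistants_list_runs_request(
    "thread_123",
    limit=20,
    order="desc",
    after="run_456",  # resume listing after this run from the prior page
)
assert "limit=20" in page_request.url and "order=desc" in page_request.url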
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_list_files_request( + *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if purpose is not None: + _params["purpose"] = _SERIALIZER.query("purpose", purpose, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_upload_file_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_delete_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_file_content_request( # pylint: disable=name-too-long + file_id: str, **kwargs: Any 
+) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/octet-stream") + + # Construct URL + _url = "/files/{fileId}/content" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_list_vector_stores_request( # pylint: disable=name-too-long + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_create_vector_store_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_vector_store_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": 
_SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_modify_vector_store_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_delete_vector_store_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_list_vector_store_files_request( # pylint: disable=name-too-long + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_create_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_delete_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": 
_SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_create_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_get_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_cancel_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/cancel" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", 
api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_assistants_list_vector_store_file_batch_files_request( # pylint: disable=name-too-long + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class AssistantsClientOperationsMixin(AssistantsClientMixinABC): # pylint: disable=too-many-public-methods + + @overload + def create_assistant( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new assistant. Default value is None. + :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is + None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. 
The + resources are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_assistant( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_assistant( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_assistant( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword name: The name of the new assistant. Default value is None. + :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is + None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_assistant_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Assistant, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_assistants( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfAssistant: + """Gets a list of assistants that were previously created. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfAssistant. The OpenAIPageableListOfAssistant is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfAssistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfAssistant] = kwargs.pop("cls", None) + + _request = build_assistants_list_assistants_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfAssistant, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assistant: + """Retrieves an existing assistant. + + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) + + _request = build_assistants_get_assistant_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Assistant, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_assistant( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default + value is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. 
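+ As an illustrative sketch only (``FileSearchToolResource`` is assumed here to be the
+ model backing the ``file_search`` resource), such a value might look like
+ ``ToolResources(file_search=FileSearchToolResource(vector_store_ids=["vs_1"]))``.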
+ :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_assistant( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_assistant( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_assistant( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default + value is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_update_assistant_request( + assistant_id=assistant_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Assistant, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: + """Deletes an assistant. + + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: AssistantDeletionStatus. 
The AssistantDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AssistantDeletionStatus] = kwargs.pop("cls", None) + + _request = build_assistants_delete_assistant_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AssistantDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread( + self, + *, + content_type: str = "application/json", + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_thread( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_thread_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AssistantThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AssistantThread: + """Gets information about an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) + + _request = build_assistants_get_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AssistantThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_thread( + self, + thread_id: str, + *, + content_type: str = "application/json", + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AssistantThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_thread( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AssistantThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. 
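+ An illustrative JSON shape (keys taken from this operation's keyword arguments)
+ might be ``{"metadata": {"project": "demo"}}``.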
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_thread( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AssistantThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_thread( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AssistantThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_update_thread_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AssistantThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + """Deletes an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: ThreadDeletionStatus. 
The ThreadDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) + + _request = build_assistants_delete_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_message( + self, + thread_id: str, + *, + role: Union[str, _models.MessageRole], + content: "_types.MessageInputContent", + content_type: str = "application/json", + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword role: The role of the entity that is creating the message. Allowed values include: + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the assistant (use this value to insert + messages from the assistant into the conversation). Known values are: "user" and "assistant". + Required. + :paramtype role: str or ~azure.ai.assistants.models.MessageRole + :keyword content: The content of the initial message. This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). Is either a str type or a [MessageInputContentBlock] type. Required. + :paramtype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None.
+ :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_message( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_message( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_message( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + role: Union[str, _models.MessageRole] = _Unset, + content: "_types.MessageInputContent" = _Unset, + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword role: The role of the entity that is creating the message. Allowed values include: + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the assistant (use this value to insert + messages from the assistant into the conversation). Known values are: "user" and "assistant". + Required. + :paramtype role: str or ~azure.ai.assistants.models.MessageRole + :keyword content: The content of the initial message. This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). Is either a str type or a [MessageInputContentBlock] type. Required. + :paramtype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock] + :keyword attachments: A list of files attached to the message, and the tools they should be + added to.
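+ For instance (an illustrative sketch; ``CodeInterpreterToolDefinition`` is assumed
+ to be available in this package's models), a single attachment might be
+ ``MessageAttachment(file_id="<uploaded-file-id>", tools=[CodeInterpreterToolDefinition()])``.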
Default value is None. + :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + if role is _Unset: + raise TypeError("missing required argument: role") + if content is _Unset: + raise TypeError("missing required argument: content") + body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_message_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_messages( + self, + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadMessage: + """Gets a list of messages that exist on a thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword run_id: Filter messages by the run ID that generated them. Default value is None. + :paramtype run_id: str + :keyword limit: A limit on the number of objects to be returned. 
Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible + with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) + + _request = build_assistants_list_messages_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + """Gets an existing message from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + _request = build_assistants_get_message_request( + thread_id=thread_id, + message_id=message_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_message( + self, + thread_id: str, + message_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_message( + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_message( + self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_message( + self, + thread_id: str, + message_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_update_message_request( + thread_id=thread_id, + message_id=message_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_run( + self, + thread_id: str, + *, + assistant_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. 
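+
+ As an illustration only (not part of the generated reference): a minimal sketch of
+ starting a run, assuming the package exposes an ``AssistantsClient`` (name assumed)
+ that hosts these operations, and that an assistant and a thread already exist::
+
+     from azure.ai.assistants import AssistantsClient  # assumed entry point
+     from azure.identity import DefaultAzureCredential
+
+     client = AssistantsClient(endpoint="<your-endpoint>", credential=DefaultAzureCredential())
+     run = client.create_run(thread_id="<thread-id>", assistant_id="<assistant-id>")
+     print(run.id, run.status)
+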
+ :paramtype assistant_id: str
+ :keyword include: A list of additional fields to include in the response.
+ Currently the only supported value is
+ ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+ content. Default value is None.
+ :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword model: The overridden model name that the assistant should use to run the thread.
+ Default value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions that the assistant should use to run
+ the thread. Default value is None.
+ :paramtype instructions: str
+ :keyword additional_instructions: Additional instructions to append at the end of the
+ instructions for the run. This is useful for modifying the behavior
+ on a per-run basis without overriding other instructions. Default value is None.
+ :paramtype additional_instructions: str
+ :keyword additional_messages: Adds additional messages to the thread before creating the run.
+ Default value is None.
+ :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
+ :keyword tools: The overridden list of enabled tools that the assistant should use to run the
+ thread. Default value is None.
+ :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+ :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run
+ as server-sent events,
+ terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default
+ value is None.
+ :paramtype stream_parameter: bool
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic. Default
+ value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model
+ considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
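+
+ Illustrative sketch of bounding a run (``client`` as above; the ``TruncationObject``
+ field names are an assumption based on the OpenAI truncation object, not confirmed by
+ this file)::
+
+     from azure.ai.assistants.models import TruncationObject
+
+     run = client.create_run(
+         thread_id="<thread-id>",
+         assistant_id="<assistant-id>",
+         max_prompt_tokens=2000,
+         max_completion_tokens=500,
+         truncation_strategy=TruncationObject(type="last_messages", last_messages=10),
+     )
+     # poll get_run until the run is terminal; a run that hits either token cap
+     # ends with status "incomplete"
+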
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+ AssistantsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or
+ ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+ ~azure.ai.assistants.models.AssistantsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+ AssistantsApiResponseFormat, ResponseFormatJsonSchemaType. Default value is None.
+ :paramtype response_format: str or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormat or
+ ~azure.ai.assistants.models.ResponseFormatJsonSchemaType
+ :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+ Default value is None.
+ :paramtype parallel_tool_calls: bool
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_run(
+ self,
+ thread_id: str,
+ body: JSON,
+ *,
+ include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new run for an assistant thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword include: A list of additional fields to include in the response.
+ Currently the only supported value is
+ ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+ content. Default value is None.
+ :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_run(
+ self,
+ thread_id: str,
+ body: IO[bytes],
+ *,
+ include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new run for an assistant thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword include: A list of additional fields to include in the response.
+ Currently the only supported value is
+ ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+ content. Default value is None.
+ :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword model: The overridden model name that the assistant should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. 
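+
+ For illustration only (``client`` as above): the usual guidance is to adjust
+ ``temperature`` or ``top_p``, but not both::
+
+     # more focused, deterministic output
+     run = client.create_run(thread_id="<thread-id>", assistant_id="<assistant-id>", temperature=0.2)
+     # or instead restrict nucleus sampling
+     run = client.create_run(thread_id="<thread-id>", assistant_id="<assistant-id>", top_p=0.1)
+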
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model
+ considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+ AssistantsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or
+ ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+ ~azure.ai.assistants.models.AssistantsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+ AssistantsApiResponseFormat, ResponseFormatJsonSchemaType. Default value is None.
+ :paramtype response_format: str or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormat or
+ ~azure.ai.assistants.models.ResponseFormatJsonSchemaType
+ :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+ Default value is None.
+ :paramtype parallel_tool_calls: bool
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "parallel_tool_calls": parallel_tool_calls, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_run_request( + thread_id=thread_id, + include=include, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_runs( + self, + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadRun: + """Gets a list of runs for a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. 
asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_assistants_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_assistants_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_update_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except 
(StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if tool_outputs is _Unset: + raise TypeError("missing required argument: tool_outputs") + body = {"stream": stream_parameter, "tool_outputs": tool_outputs} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_submit_tool_outputs_to_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: 
+ return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Cancels a run of an in progress thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_assistants_cancel_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread_and_run( + self, + *, + assistant_id: str, + content_type: str = "application/json", + thread: Optional[_models.AssistantThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new assistant thread and immediately starts a run using that new thread. + + :keyword assistant_id: The ID of the assistant for which the thread should be created. + Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword thread: The details used to create the new thread. 
If no thread is provided, an empty
+ one will be created. Default value is None.
+ :paramtype thread: ~azure.ai.assistants.models.AssistantThreadCreationOptions
+ :keyword model: The overridden model that the assistant should use to run the thread. Default
+ value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions the assistant should use to run the
+ thread. Default value is None.
+ :paramtype instructions: str
+ :keyword tools: The overridden list of enabled tools the assistant should use to run the
+ thread. Default value is None.
+ :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+ :keyword tool_resources: Override the tools the assistant can use for this run. This is useful
+ for modifying the behavior on a per-run basis. Default value is None.
+ :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions
+ :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run
+ as server-sent events,
+ terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default
+ value is None.
+ :paramtype stream_parameter: bool
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic. Default
+ value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model
+ considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort to use only
+ the number of completion tokens specified, across multiple turns of the run. If the run
+ exceeds the number of completion tokens
+ specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+ info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+ AssistantsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or
+ ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+ ~azure.ai.assistants.models.AssistantsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output.
Is one of the
+ following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+ AssistantsApiResponseFormat, ResponseFormatJsonSchemaType. Default value is None.
+ :paramtype response_format: str or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormat or
+ ~azure.ai.assistants.models.ResponseFormatJsonSchemaType
+ :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+ Default value is None.
+ :paramtype parallel_tool_calls: bool
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_thread_and_run(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new assistant thread and immediately starts a run using that new thread.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_thread_and_run(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new assistant thread and immediately starts a run using that new thread.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def create_thread_and_run(
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ assistant_id: str = _Unset,
+ thread: Optional[_models.AssistantThreadCreationOptions] = None,
+ model: Optional[str] = None,
+ instructions: Optional[str] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ tool_resources: Optional[_models.UpdateToolResourcesOptions] = None,
+ stream_parameter: Optional[bool] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ max_prompt_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
+ truncation_strategy: Optional[_models.TruncationObject] = None,
+ tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
+ response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+ parallel_tool_calls: Optional[bool] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new assistant thread and immediately starts a run using that new thread.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword assistant_id: The ID of the assistant for which the thread should be created.
+ Required.
+ :paramtype assistant_id: str
+ :keyword thread: The details used to create the new thread. If no thread is provided, an empty
+ one will be created. Default value is None.
+ :paramtype thread: ~azure.ai.assistants.models.AssistantThreadCreationOptions
+ :keyword model: The overridden model that the assistant should use to run the thread. Default
+ value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions the assistant should use to run the
+ thread. Default value is None.
+ :paramtype instructions: str
+ :keyword tools: The overridden list of enabled tools the assistant should use to run the
+ thread. Default value is None.
+ :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+ :keyword tool_resources: Override the tools the assistant can use for this run. This is useful
+ for modifying the behavior on a per-run basis. Default value is None.
+ :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions
+ :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run
+ as server-sent events,
+ terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default
+ value is None.
+ :paramtype stream_parameter: bool
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic. Default
+ value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model
+ considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort to use only
+ the number of completion tokens specified, across multiple turns of the run. If the run
+ exceeds the number of completion tokens
+ specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+ info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+ AssistantsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or
+ ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+ ~azure.ai.assistants.models.AssistantsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output.
Is one of the
+ following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+ AssistantsApiResponseFormat, ResponseFormatJsonSchemaType. Default value is None.
+ :paramtype response_format: str or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormat or
+ ~azure.ai.assistants.models.ResponseFormatJsonSchemaType
+ :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+ Default value is None.
+ :paramtype parallel_tool_calls: bool
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None)
+
+ if body is _Unset:
+ if assistant_id is _Unset:
+ raise TypeError("missing required argument: assistant_id")
+ body = {
+ "assistant_id": assistant_id,
+ "instructions": instructions,
+ "max_completion_tokens": max_completion_tokens,
+ "max_prompt_tokens": max_prompt_tokens,
+ "metadata": metadata,
+ "model": model,
+ "parallel_tool_calls": parallel_tool_calls,
+ "response_format": response_format,
+ "stream": stream_parameter,
+ "temperature": temperature,
+ "thread": thread,
+ "tool_choice": tool_choice,
+ "tool_resources": tool_resources,
+ "tools": tools,
+ "top_p": top_p,
+ "truncation_strategy": truncation_strategy,
+ }
+ body = {k: v for k, v in body.items() if v is not None}
+ content_type = content_type or "application/json"
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore
+
+ _request = build_assistants_create_thread_and_run_request(
+ content_type=content_type,
+ api_version=self._config.api_version,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.ThreadRun,
response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run_step( + self, + thread_id: str, + run_id: str, + step_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + **kwargs: Any + ) -> _models.RunStep: + """Gets a single run step from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param step_id: Identifier of the run step. Required. + :type step_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :return: RunStep. The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_assistants_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + include=include, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. 
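+
+ Illustrative only: paging through run steps with the cursor parameters documented
+ below, assuming the OpenAI-style page fields ``data``, ``has_more``, and ``last_id``
+ on the returned list object (an assumption, not confirmed by this file)::
+
+     page = client.list_run_steps(thread_id="<thread-id>", run_id="<run-id>", limit=20)
+     while True:
+         for step in page.data:
+             print(step.id, step.type)
+         if not page.has_more:
+             break
+         # resume after the last object of the previous page
+         page = client.list_run_steps(thread_id="<thread-id>", run_id="<run-id>", limit=20, after=page.last_id)
+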
+ :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfRunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) + + _request = build_assistants_list_run_steps_request( + thread_id=thread_id, + run_id=run_id, + include=include, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_files( + self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any + ) -> _models.FileListResponse: + """Gets a list of previously uploaded files. + + :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is + None. 
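+         For example, ``purpose="assistants"`` restricts the listing to files that were
+         uploaded for use by assistants.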
+ :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose + :return: FileListResponse. The FileListResponse is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.FileListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + + _request = build_assistants_list_files_request( + purpose=purpose, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def _upload_file(self, body: _models._models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile: ... + @overload + def _upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: ... + + @distributed_trace + def _upload_file(self, body: Union[_models._models.UploadFileRequest, JSON], **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Multipart body. Is either a UploadFileRequest type or a JSON type. Required. + :type body: ~azure.ai.assistants.models._models.UploadFileRequest or JSON + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + _body = body.as_dict() if isinstance(body, _model_base.Model) else body + _file_fields: List[str] = ["file"] + _data_fields: List[str] = ["purpose", "filename"] + _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) + + _request = build_assistants_upload_file_request( + api_version=self._config.api_version, + files=_files, + data=_data, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + """Delete a previously uploaded file. + + :param file_id: The ID of the file to delete. Required. + :type file_id: str + :return: FileDeletionStatus. 
The FileDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.FileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_assistants_delete_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: + """Returns information about a specific file. Does not retrieve file content. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + _request = build_assistants_get_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def _get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: + """Retrieves the raw content of a specific file. + + :param file_id: The ID of the file to retrieve. Required. 
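+         The file ID is the identifier that was returned when the file was originally
+         uploaded, for example by the upload file operation.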
+ :type file_id: str + :return: Iterator[bytes] + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_assistants_get_file_content_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", True) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_stores( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStore: + """Returns a list of vector stores. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStore. 
The OpenAIPageableListOfVectorStore is compatible + with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) + + _request = build_assistants_list_vector_stores_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + store_configuration: Optional[_models.VectorStoreConfiguration] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword store_configuration: The vector store configuration, used when vector store is created + from Azure asset URIs. Default value is None. + :paramtype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. 
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + store_configuration: Optional[_models.VectorStoreConfiguration] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword store_configuration: The vector store configuration, used when vector store is created + from Azure asset URIs. Default value is None. + :paramtype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. 
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "chunking_strategy": chunking_strategy, + "configuration": store_configuration, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_vector_store_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + """Returns the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStore. 
The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)
+
+        _request = build_assistants_get_vector_store_request(
+            vector_store_id=vector_store_id,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.VectorStore, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    def modify_vector_store(
+        self,
+        vector_store_id: str,
+        *,
+        content_type: str = "application/json",
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def modify_vector_store(
+        self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def modify_vector_store(
+        self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def modify_vector_store(
+        self,
+        vector_store_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore.
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"expires_after": expires_after, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_modify_vector_store_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: + """Deletes the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStoreDeletionStatus. 
The VectorStoreDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + + _request = build_assistants_delete_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_store_files( + self, + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.assistants.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_assistants_list_vector_store_files_request( + vector_store_id=vector_store_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"chunking_strategy": chunking_strategy, "data_source": data_source, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_assistants_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_assistants_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. 
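+         A minimal body might look like ``{"file_ids": ["file-123"]}``, which attaches a
+         single previously uploaded file to the vector store (the file ID shown here is
+         illustrative).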
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file_batch( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_assistants_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_assistants_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.assistants.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_assistants_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
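The batch-file listing above follows the OpenAI-style cursor contract (`limit`, `order`, `after`, `before`), so a caller can walk a large batch page by page, and a batch that is still running can be cancelled. A sketch, assuming the pageable result exposes `data`, `has_more`, and `last_id` (fields not shown in this part of the diff) and reusing the hypothetical IDs from the earlier example:

```python
# Page through the completed files of one batch, twenty at a time, oldest first.
after_cursor = None
while True:
    page = client.list_vector_store_file_batch_files(
        vector_store_id="vs_abc123",
        batch_id=batch.id,
        filter="completed",        # status values per the docstring above
        limit=20,
        order="asc",
        after=after_cursor,
    )
    for vs_file in page.data:      # `data` is an assumed field of the pageable list
        print(vs_file.id)
    if not page.has_more:          # `has_more` / `last_id` are assumed fields
        break
    after_cursor = page.last_id

# Cancel a batch that has not finished processing yet.
batch = client.get_vector_store_file_batch(vector_store_id="vs_abc123", batch_id=batch.id)
if batch.status == "in_progress":
    batch = client.cancel_vector_store_file_batch(vector_store_id="vs_abc123", batch_id=batch.id)
```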
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py new file mode 100644 index 000000000000..d75f6dd1d754 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -0,0 +1,2524 @@ +# pylint: disable=too-many-lines + # ------------------------------------ + # Copyright (c) Microsoft Corporation. + # Licensed under the MIT License. + # ------------------------------------ + """Customize generated code here. + + Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize + """ + import io + import logging + import os + import sys + import time + from pathlib import Path + from typing import ( + IO, + TYPE_CHECKING, + Any, + Dict, + Iterator, + List, + Optional, + Union, + cast, + overload, + ) + + from azure.core.credentials import TokenCredential, AzureKeyCredential + from azure.core.tracing.decorator import distributed_trace + + from . import models as _models + from ._vendor import FileType + from .models._enums import FilePurpose, RunStatus + from ._client import AssistantsClient as AssistantsClientGenerated + + if sys.version_info >= (3, 9): + from collections.abc import MutableMapping + else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + + if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from . import _types + + JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object + _Unset: Any = object() + + logger = logging.getLogger(__name__) + + + class AssistantsClient(AssistantsClientGenerated): # pylint: disable=client-accepts-api-version-keyword + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any) -> None: + # TODO: Remove this custom code when the 1DP service becomes available + if not endpoint: + raise ValueError("Connection string or 1DP endpoint is required") + parts = endpoint.split(";") + # Detect a legacy connection string and build the endpoint the old way. + if len(parts) == 4: + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + endpoint = ( + f"{endpoint}/agents/v1.0/subscriptions" + f"/{subscription_id}/resourceGroups/{resource_group_name}/providers" + f"/Microsoft.MachineLearningServices/workspaces/{project_name}" + ) + # Override the credential scope with the legacy one. + kwargs["credential_scopes"] = ["https://management.azure.com/.default"] + # End of legacy endpoint handling. + super().__init__(endpoint, credential, **kwargs) + self._toolset: Dict[str, _models.ToolSet] = {} + + # pylint: disable=arguments-differ + @overload + def create_assistant( # pylint: disable=arguments-differ + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Creates a new assistant. + + :keyword model: The ID of the model to use. Required.
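The constructor above accepts either a plain endpoint URL or, until the 1DP service is available, the legacy four-part connection string (`host;subscription_id;resource_group;project_name`), which it rewrites into the workspace-scoped URL and management credential scope. A sketch of both forms, assuming `azure-identity` is installed; endpoint values are placeholders:

```python
from azure.identity import DefaultAzureCredential  # assumption: azure-identity is installed

from azure.ai.assistants import AssistantsClient

# 1DP endpoint: passed through to the generated client unchanged.
client = AssistantsClient(
    endpoint="https://<your-resource>.services.ai.azure.com",  # placeholder
    credential=DefaultAzureCredential(),
)

# Legacy connection string: parsed into the old workspace-scoped endpoint,
# with the management credential scope applied, exactly as in __init__ above.
legacy_client = AssistantsClient(
    endpoint="<host>;<subscription_id>;<resource_group>;<project_name>",  # placeholders
    credential=DefaultAzureCredential(),
)
```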
+ :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new assistant. Default value is None. + :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + # pylint: disable=arguments-differ + @overload + def create_assistant( # pylint: disable=arguments-differ + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Creates a new assistant. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new assistant. Default value is None. 
+ :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is None. + :paramtype instructions: str + :keyword toolset: The collection of tools and resources (an alternative to `tools` and `tool_resources` + that adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.assistants.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_assistant( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_assistant( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant.
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_assistant( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.Assistant: + """ + Creates a new assistant with various configurations, delegating to the generated operations. + + :param body: JSON or IO[bytes]. Required if `model` is not provided. + :type body: Union[JSON, IO[bytes]] + :keyword model: The ID of the model to use. Required if `body` is not provided. + :paramtype model: str + :keyword name: The name of the new assistant. + :paramtype name: Optional[str] + :keyword description: A description for the new assistant. + :paramtype description: Optional[str] + :keyword instructions: System instructions for the assistant. + :paramtype instructions: Optional[str] + :keyword tools: List of tools definitions for the assistant. + :paramtype tools: Optional[List[_models.ToolDefinition]] + :keyword tool_resources: Resources used by the assistant's tools. + :paramtype tool_resources: Optional[_models.ToolResources] + :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). + :paramtype toolset: Optional[_models.ToolSet] + :keyword temperature: Sampling temperature for generating assistant responses. + :paramtype temperature: Optional[float] + :keyword top_p: Nucleus sampling parameter. + :paramtype top_p: Optional[float] + :keyword response_format: Response format for tool calls. + :paramtype response_format: Optional["_types.AssistantsApiResponseFormatOption"] + :keyword metadata: Key/value pairs for storing additional information. + :paramtype metadata: Optional[Dict[str, str]] + :keyword content_type: Content type of the body. + :paramtype content_type: str + :return: An Assistant object. + :rtype: _models.Assistant + :raises: HttpResponseError for HTTP errors. 
+ """ + + self._validate_tools_and_tool_resources(tools, tool_resources) + + if body is not _Unset: + if isinstance(body, io.IOBase): + return super().create_assistant(body=body, content_type=content_type, **kwargs) + return super().create_assistant(body=body, **kwargs) + + if toolset is not None: + tools = toolset.definitions + tool_resources = toolset.resources + + new_assistant = super().create_assistant( + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + if toolset is not None: + self._toolset[new_assistant.id] = toolset + return new_assistant + + # pylint: disable=arguments-differ + @overload + def update_assistant( # pylint: disable=arguments-differ + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. 
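The implementation above is also where `toolset` earns its keep: its `definitions` and `resources` are unpacked into the generated `tools`/`tool_resources` parameters, and the toolset is cached under the new assistant's ID so that `create_and_process_run` can execute function calls automatically later. A sketch, assuming a samples-style `FunctionTool` helper (`FunctionTool` and `ToolSet.add` are assumptions, not shown in this diff):

```python
from azure.ai.assistants.models import FunctionTool, ToolSet  # FunctionTool/add are assumed helpers


def fetch_weather(city: str) -> str:
    """Toy user function that a function tool call would be routed to."""
    return f"Sunny in {city}"


toolset = ToolSet()
toolset.add(FunctionTool(functions={fetch_weather}))  # assumption: samples-style registration

assistant = client.create_assistant(
    model="gpt-4o",  # hypothetical model deployment name
    name="weather-helper",
    instructions="Answer weather questions using the provided function.",
    toolset=toolset,  # cached in client._toolset[assistant.id] by the code above
)
```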
Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + # pylint: disable=arguments-differ + @overload + def update_assistant( # pylint: disable=arguments-differ + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default value + is None. + :paramtype instructions: str + :keyword toolset: The collection of tools and resources (an alternative to `tools` and `tool_resources` + that adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.assistants.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None.
+ :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_assistant( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_assistant( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_assistant( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. 
+ :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword toolset: The collection of tools and resources (an alternative to `tools` and `tool_resources` + that adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.assistants.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant.
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + self._validate_tools_and_tool_resources(tools, tool_resources) + + if body is not _Unset: + if isinstance(body, io.IOBase): + return super().update_assistant(body=body, content_type=content_type, **kwargs) + return super().update_assistant(body=body, **kwargs) + + if toolset is not None: + self._toolset[assistant_id] = toolset + tools = toolset.definitions + tool_resources = toolset.resources + + return super().update_assistant( + assistant_id=assistant_id, + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + def _validate_tools_and_tool_resources( + self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources] + ): + if tool_resources is None: + return + if tools is None: + tools = [] + + if tool_resources.file_search is not None and not any( + isinstance(tool, _models.FileSearchToolDefinition) for tool in tools + ): + raise ValueError( + "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" + ) + if tool_resources.code_interpreter is not None and not any( + isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools + ): + raise ValueError( + "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" + ) + + # pylint: disable=arguments-differ + @overload + def create_run( # pylint: disable=arguments-differ + self, + thread_id: str, + *, + assistant_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. 
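`_validate_tools_and_tool_resources` above rejects resources that arrive without a matching tool definition. A sketch of the valid pairing and the failure mode; `FileSearchToolResource` and its `vector_store_ids` field are assumptions inferred from the pairing the validator enforces:

```python
from azure.ai.assistants.models import (
    FileSearchToolDefinition,
    FileSearchToolResource,  # assumption: resource model carrying vector_store_ids
    ToolResources,
)

resources = ToolResources(file_search=FileSearchToolResource(vector_store_ids=["vs_abc123"]))

# Valid: file_search resources are paired with a FileSearchToolDefinition.
assistant = client.update_assistant(
    assistant_id=assistant.id,
    tools=[FileSearchToolDefinition()],
    tool_resources=resources,
)

# Invalid: passing tool_resources=resources with tools=None raises
# ValueError("Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided").
```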
+ :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. 
+ :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + body: JSON, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + body: IO[bytes], + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. 
The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. + response = super().create_run( + thread_id, + include=include, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=False, + stream=False, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + parallel_tool_calls=parallel_tool_calls, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. 
+ content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + return response + + @distributed_trace + def create_and_process_run( + self, + thread_id: str, + *, + assistant_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: int = 1, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread and processes the run. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword model: The overridden model name that the assistant should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword toolset: The collection of tools and resources (an alternative to `tools` and + `tool_resources`). Default value is None. + :paramtype toolset: ~azure.ai.assistants.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run.
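The `create_run` dispatch above accepts three call shapes: a raw JSON dict, keyword arguments anchored on `assistant_id`, or a binary body. The two common ones, sketched with hypothetical IDs:

```python
# Keyword form: routed through the generated operation with stream=False.
run = client.create_run(thread_id="thread_123", assistant_id=assistant.id)

# Raw JSON body form: the dict is forwarded to the service as-is.
run = client.create_run("thread_123", {"assistant_id": assistant.id})
```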
The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: The time in seconds to wait between polling the service for run status. + Default value is 1. + :paramtype sleep_interval: int + :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + # Create and initiate the run with additional parameters + run = self.create_run( + thread_id=thread_id, + include=include, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=toolset.definitions if toolset else None, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + parallel_tool_calls=parallel_tool_calls, + metadata=metadata, + **kwargs, + ) + + # Monitor and process the run status + while run.status in [ + RunStatus.QUEUED, + RunStatus.IN_PROGRESS, + RunStatus.REQUIRES_ACTION, + ]: + time.sleep(sleep_interval) + run = self.get_run(thread_id=thread_id, run_id=run.id) + + if run.status == RunStatus.REQUIRES_ACTION and isinstance( + run.required_action, _models.SubmitToolOutputsAction + ): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + logger.warning("No tool calls provided - cancelling run") + self.cancel_run(thread_id=thread_id, run_id=run.id) + break + # The toolset is only needed when executing local functions. For azure_function + # tools we simply wait until the run finishes. Execute, log, and submit the + # outputs inside this branch so `tool_outputs` is never referenced unbound. + if any(tool_call.type == "function" for tool_call in tool_calls): + toolset = toolset or self._toolset.get(run.assistant_id) + if toolset is None: + raise ValueError("Toolset is not available in the client.") + tool_outputs = toolset.execute_tool_calls(tool_calls) + logger.info("Tool outputs: %s", tool_outputs) + if tool_outputs: + self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs) + + logger.info("Current run status: %s", run.status) + + return run + + @overload + def create_stream( + self, + thread_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: None = None, + **kwargs: Any, + ) -> _models.AssistantRunStream[_models.AssistantEventHandler]: + """Creates a new stream for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None.
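Since the loop above blocks until the run leaves the queued/in-progress/requires-action states, executing cached function tools along the way, a typical caller needs little more than the thread and assistant IDs. A sketch; `RunStatus.FAILED` and `last_error` are assumptions not shown in this part of the diff:

```python
from azure.ai.assistants.models import RunStatus  # re-export of the _enums member is assumed

run = client.create_and_process_run(
    thread_id="thread_123",
    assistant_id=assistant.id,
    sleep_interval=2,  # poll every two seconds instead of the default one
)
if run.status == RunStatus.FAILED:  # assumption: FAILED is a RunStatus member
    print(run.last_error)           # assumption: terminal error details on the run
```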
+ :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. 
+ :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: None + :paramtype event_handler: None. _models.AssistantEventHandler will be applied as default. + :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.assistants.models.AssistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_stream( + self, + thread_id: str, + *, + assistant_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: _models.BaseAssistantEventHandlerT, + **kwargs: Any, + ) -> _models.AssistantRunStream[_models.BaseAssistantEventHandlerT]: + """Creates a new stream for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. 
+ :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. 
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object,
+        used for storing additional information about that object in a structured format. Keys
+        may be up to 64 characters in length and values may be up to 512 characters in length.
+        Default value is None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: The event handler to use for processing events during the run.
+        Required.
+        :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler
+        :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports
+        streaming.
+        :rtype: ~azure.ai.assistants.models.AssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_stream(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]],
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        event_handler: None = None,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.AssistantRunStream[_models.AssistantEventHandler]:
+        """Creates a new run for an assistant thread.
+
+        The stream terminates when the run enters a terminal state with a ``data: [DONE]``
+        message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword include: A list of additional fields to include in the response.
+        Currently the only supported value is
+        ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+        content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword event_handler: Must be None for this overload; a default
+        ~azure.ai.assistants.models.AssistantEventHandler is applied.
+        :paramtype event_handler: None
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+        Default value is "application/json".
+        :paramtype content_type: str
+        :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports
+        streaming.
+        :rtype: ~azure.ai.assistants.models.AssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_stream(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]],
+        *,
+        event_handler: _models.BaseAssistantEventHandlerT,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.AssistantRunStream[_models.BaseAssistantEventHandlerT]:
+        """Creates a new run for an assistant thread.
+
+        The stream terminates when the run enters a terminal state with a ``data: [DONE]``
+        message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword include: A list of additional fields to include in the response.
+        Currently the only supported value is
+        ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+        content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword event_handler: The event handler to use for processing events during the run.
+        Required.
+        :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+        Default value is "application/json".
+        :paramtype content_type: str
+        :return: AssistantRunStream. 
AssistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.assistants.models.AssistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_stream( # pyright: ignore[reportInconsistentOverload] + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.BaseAssistantEventHandlerT] = None, + **kwargs: Any, + ) -> _models.AssistantRunStream[_models.BaseAssistantEventHandlerT]: + """Creates a new run for an assistant thread. + + Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler + :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.assistants.models.AssistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. 
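+            # This overload asks the service for a streaming response: stream_parameter=True
+            # sets the "stream" field in the request body, while stream=True keeps the HTTP
+            # response un-deserialized so it can be consumed as a byte iterator below.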
+ response = super().create_run( + thread_id, + include=include, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=True, + stream=True, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + parallel_tool_calls=parallel_tool_calls, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) + + if not event_handler: + event_handler = cast(_models.BaseAssistantEventHandlerT, _models.AssistantEventHandler()) + return _models.AssistantRunStream( + response_iterator=response_iterator, + submit_tool_outputs=self._handle_submit_tool_outputs, + event_handler=event_handler, + ) + + # pylint: disable=arguments-differ + @overload + def submit_tool_outputs_to_run( # pylint: disable=arguments-differ + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: Optional[_models.AssistantEventHandler] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, + run_id, + tool_outputs=tool_outputs, + stream_parameter=False, + stream=False, + **kwargs, + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + return response + + @overload + def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]], + *, + event_handler: _models.BaseAssistantEventHandler, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword event_handler: The event handler to use for processing events during the run. 
+        :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+        Default value is "application/json".
+        :paramtype content_type: str
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def submit_tool_outputs_to_stream(
+        self,
+        thread_id: str,
+        run_id: str,
+        *,
+        tool_outputs: List[_models.ToolOutput],
+        content_type: str = "application/json",
+        event_handler: _models.BaseAssistantEventHandler,
+        **kwargs: Any,
+    ) -> None:
+        """Submits outputs from tools as requested by tool calls in a stream. Runs that need
+        submitted tool outputs will have a status of 'requires_action' with a
+        required_action.type of 'submit_tool_outputs'. The stream terminates when the run enters
+        a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :keyword tool_outputs: Required.
+        :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+        Default value is "application/json".
+        :paramtype content_type: str
+        :keyword event_handler: The event handler to use for processing events during the run.
+        :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def submit_tool_outputs_to_stream(  # pyright: ignore[reportInconsistentOverload]
+        self,
+        thread_id: str,
+        run_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        tool_outputs: List[_models.ToolOutput] = _Unset,
+        event_handler: _models.BaseAssistantEventHandler,
+        **kwargs: Any,
+    ) -> None:
+        """Submits outputs from tools as requested by tool calls in a stream. Runs that need
+        submitted tool outputs will have a status of 'requires_action' with a
+        required_action.type of 'submit_tool_outputs'. The stream terminates when the run enters
+        a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword tool_outputs: Required.
+        :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput]
+        :keyword event_handler: The event handler to use for processing events during the run.
+        :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler
+        :raises ~azure.core.exceptions.HttpResponseError:
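+
+        Example -- a minimal sketch of the manual tool-output flow. ``client``, ``run`` and
+        ``handler`` are placeholders assumed to exist, and ToolOutput's keyword arguments are
+        shown as commonly generated (verify against the shipped model):
+
+        .. code-block:: python
+
+            from azure.ai.assistants.models import ToolOutput
+
+            # ``run`` is a ThreadRun in the 'requires_action' state; ``handler`` is the
+            # event handler already attached to the original stream.
+            client.submit_tool_outputs_to_stream(
+                thread_id=run.thread_id,
+                run_id=run.id,
+                tool_outputs=[ToolOutput(tool_call_id="call_abc123", output="42")],
+                event_handler=handler,
+            )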
+        """
+
+        if isinstance(body, dict):
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+        elif tool_outputs is not _Unset:
+            response = super().submit_tool_outputs_to_run(
+                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
+            )
+
+        elif isinstance(body, io.IOBase):
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+        else:
+            raise ValueError("Invalid combination of arguments provided.")
+
+        # Cast the response to Iterator[bytes] for type correctness
+        response_iterator: Iterator[bytes] = cast(Iterator[bytes], response)
+
+        event_handler.initialize(response_iterator, self._handle_submit_tool_outputs)
+
+    def _handle_submit_tool_outputs(
+        self, run: _models.ThreadRun, event_handler: _models.BaseAssistantEventHandler
+    ) -> None:
+        if isinstance(run.required_action, _models.SubmitToolOutputsAction):
+            tool_calls = run.required_action.submit_tool_outputs.tool_calls
+            if not tool_calls:
+                logger.debug("No tool calls to execute.")
+                return
+
+            # The toolset is only needed when executing local function tools. For
+            # azure_function tools we simply wait for the service to finish them.
+            if any(tool_call.type == "function" for tool_call in tool_calls):
+                toolset = self._toolset.get(run.assistant_id)
+                if toolset:
+                    tool_outputs = toolset.execute_tool_calls(tool_calls)
+                else:
+                    logger.debug("Toolset is not available in the client.")
+                    return
+
+                logger.info("Tool outputs: %s", tool_outputs)
+                if tool_outputs:
+                    self.submit_tool_outputs_to_stream(
+                        thread_id=run.thread_id,
+                        run_id=run.id,
+                        tool_outputs=tool_outputs,
+                        event_handler=event_handler,
+                    )
+
+    @distributed_trace
+    def upload_file(
+        self,
+        body: Optional[JSON] = None,
+        *,
+        file: Optional[FileType] = None,
+        file_path: Optional[str] = None,
+        purpose: Union[str, _models.FilePurpose, None] = None,
+        filename: Optional[str] = None,
+        **kwargs: Any,
+    ) -> _models.OpenAIFile:
+        """
+        Uploads a file for use by other operations, delegating to the generated operations.
+
+        :param body: JSON. Required if `file` and `purpose` are not provided.
+        :type body: Optional[JSON]
+        :keyword file: File content. Required if `body` and `purpose` are not provided.
+        :paramtype file: Optional[FileType]
+        :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided.
+        :paramtype file_path: Optional[str]
+        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+        "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file`
+        are not provided.
+        :paramtype purpose: Union[str, _models.FilePurpose, None]
+        :keyword filename: The name of the file.
+        :paramtype filename: Optional[str]
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: _models.OpenAIFile
+        :raises FileNotFoundError: If the file_path is invalid.
+        :raises IOError: If there are issues with reading the file.
+        :raises: HttpResponseError for HTTP errors.
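+
+        Example -- a minimal sketch; the path and file name are illustrative placeholders:
+
+        .. code-block:: python
+
+            uploaded_file = client.upload_file(
+                file_path="./data/notes.md",
+                purpose="assistants",
+            )
+            print(uploaded_file.id)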
+ """ + # If a JSON body is provided directly, pass it along + if body is not None: + return super()._upload_file(body=body, **kwargs) + + # Convert FilePurpose enum to string if necessary + if isinstance(purpose, FilePurpose): + purpose = purpose.value + + # If file content is passed in directly + if file is not None and purpose is not None: + return super()._upload_file(body={"file": file, "purpose": purpose, "filename": filename}, **kwargs) + + # If a file path is provided + if file_path is not None and purpose is not None: + if not os.path.isfile(file_path): + raise FileNotFoundError(f"The file path provided does not exist: {file_path}") + + try: + with open(file_path, "rb") as f: + content = f.read() + + # If no explicit filename is provided, use the base name + base_filename = filename or os.path.basename(file_path) + file_content: FileType = (base_filename, content) + + return super()._upload_file(body={"file": file_content, "purpose": purpose}, **kwargs) + except IOError as e: + raise IOError(f"Unable to read file: {file_path}") from e + + raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") + + @overload + def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file_and_poll( + self, + *, + file: FileType, + purpose: Union[str, _models.FilePurpose], + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. + :paramtype file: ~azure.ai.assistants._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file_and_poll( + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def upload_file_and_poll( + self, + body: Optional[JSON] = None, + *, + file: Optional[FileType] = None, + file_path: Optional[str] = None, + purpose: Union[str, _models.FilePurpose, None] = None, + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. Required if `file` and `purpose` are not provided. + :type body: Optional[JSON] + :keyword file: File content. Required if `body` and `purpose` are not provided. + :paramtype file: Optional[FileType] + :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. + :paramtype file_path: Optional[str] + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :paramtype purpose: Union[str, _models.FilePurpose, None] + :keyword filename: The name of the file. + :paramtype filename: Optional[str] + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: _models.OpenAIFile + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. + """ + if body is not None: + uploaded_file = self.upload_file(body=body, **kwargs) + elif file is not None and purpose is not None: + uploaded_file = self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + elif file_path is not None and purpose is not None: + uploaded_file = self.upload_file(file_path=file_path, purpose=purpose, **kwargs) + else: + raise ValueError( + "Invalid parameters for upload_file_and_poll. Please provide either 'body', " + "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." + ) + + while uploaded_file.status in ["uploaded", "pending", "running"]: + time.sleep(sleep_interval) + uploaded_file = self.get_file(uploaded_file.id) + + return uploaded_file + + @overload + def create_vector_store_and_poll( + self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_and_poll( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if body is not _Unset: + if isinstance(body, dict): + vector_store = super().create_vector_store( + body=body, content_type=content_type or "application/json", **kwargs + ) + elif isinstance(body, io.IOBase): + vector_store = super().create_vector_store(body=body, content_type=content_type, **kwargs) + else: + raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).") + else: + store_configuration = None + if data_sources: + store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) + + vector_store = super().create_vector_store( + file_ids=file_ids, + store_configuration=store_configuration, + name=name, + expires_after=expires_after, + chunking_strategy=chunking_strategy, + metadata=metadata, + **kwargs, + ) + + while vector_store.status == "in_progress": + time.sleep(sleep_interval) + vector_store = super().get_vector_store(vector_store.id) + + return vector_store + + @overload + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_vector_store_file_batch_and_poll(
+        self,
+        vector_store_id: str,
+        body: IO[bytes],
+        *,
+        content_type: str = "application/json",
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFileBatch:
+        """Create a vector store file batch and poll.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+        Default value is "application/json".
+        :paramtype content_type: str
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store.
+        Default value is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_vector_store_file_batch_and_poll(
+        self,
+        vector_store_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        file_ids: Optional[List[str]] = None,
+        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        content_type: str = "application/json",
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFileBatch:
+        """Create a vector store file batch and poll.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword file_ids: List of file identifiers. Required.
+        :paramtype file_ids: list[str]
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource]
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set,
+        will use the auto strategy. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
+        :keyword content_type: Body Parameter content-type. Default value is "application/json".
+        :paramtype content_type: str
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store.
+        Default value is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        if body is not _Unset:
+            if isinstance(body, dict):
+                vector_store_file_batch = super().create_vector_store_file_batch(
+                    vector_store_id=vector_store_id,
+                    body=body,
+                    content_type=content_type or "application/json",
+                    **kwargs,
+                )
+            elif isinstance(body, io.IOBase):
+                vector_store_file_batch = super().create_vector_store_file_batch(
+                    vector_store_id=vector_store_id,
+                    body=body,
+                    content_type=content_type,
+                    **kwargs,
+                )
+            else:
+                raise ValueError("Invalid type for 'body'. 
Must be a dict (JSON) or file-like (IO[bytes]).") + else: + vector_store_file_batch = super().create_vector_store_file_batch( + vector_store_id=vector_store_id, + file_ids=file_ids, + data_sources=data_sources, + chunking_strategy=chunking_strategy, + **kwargs, + ) + + while vector_store_file_batch.status == "in_progress": + time.sleep(sleep_interval) + vector_store_file_batch = super().get_vector_store_file_batch( + vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id + ) + + return vector_store_file_batch + + @distributed_trace + def get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: + """ + Returns file content as byte stream for given file_id. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: An iterator that yields bytes from the file content. + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. + """ + kwargs["stream"] = True + response = super()._get_file_content(file_id, **kwargs) + return cast(Iterator[bytes], response) + + @distributed_trace + def save_file( # pylint: disable=client-method-missing-kwargs + self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None + ) -> None: + """ + Synchronously saves file content retrieved using a file identifier to the specified local directory. + + :param file_id: The unique identifier for the file to retrieve. + :type file_id: str + :param file_name: The name of the file to be saved. + :type file_name: str + :param target_dir: The directory where the file should be saved. Defaults to the current working directory. + :type target_dir: Optional[Union[str, Path]] + :raises ValueError: If the target path is not a directory or the file name is invalid. + :raises RuntimeError: If file content retrieval fails or no content is found. + :raises TypeError: If retrieved chunks are not bytes-like objects. + :raises IOError: If writing to the file fails. + """ + try: + # Determine and validate the target directory + path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() + path.mkdir(parents=True, exist_ok=True) + if not path.is_dir(): + raise ValueError(f"The target path '{path}' is not a directory.") + + # Sanitize and validate the file name + sanitized_file_name = Path(file_name).name + if not sanitized_file_name: + raise ValueError("The provided file name is invalid.") + + # Retrieve the file content + file_content_stream = self.get_file_content(file_id) + if not file_content_stream: + raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") + + target_file_path = path / sanitized_file_name + + # Write the file content to disk + with target_file_path.open("wb") as file: + for chunk in file_content_stream: + if isinstance(chunk, (bytes, bytearray)): + file.write(chunk) + else: + raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") + + logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) + + except (ValueError, RuntimeError, TypeError, IOError) as e: + logger.error("An error occurred in save_file: %s", e) + raise + + @overload + def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. 
+ :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_and_poll( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if body is not _Unset: + if isinstance(body, dict): + vector_store_file = super().create_vector_store_file( + vector_store_id=vector_store_id, + body=body, + content_type=content_type or "application/json", + **kwargs, + ) + elif isinstance(body, io.IOBase): + vector_store_file = super().create_vector_store_file( + vector_store_id=vector_store_id, + body=body, + content_type=content_type, + **kwargs, + ) + else: + raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).") + else: + vector_store_file = super().create_vector_store_file( + vector_store_id=vector_store_id, + file_id=file_id, + data_source=data_source, + chunking_strategy=chunking_strategy, + **kwargs, + ) + + while vector_store_file.status == "in_progress": + time.sleep(sleep_interval) + vector_store_file = super().get_vector_store_file( + vector_store_id=vector_store_id, file_id=vector_store_file.id + ) + + return vector_store_file + + @distributed_trace + def delete_assistant( # pylint: disable=delete-operation-wrong-return-type + self, assistant_id: str, **kwargs: Any + ) -> _models.AssistantDeletionStatus: + """Deletes an assistant. + + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: AssistantDeletionStatus. 
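The `*_and_poll` variants above all follow the same convention: create the resource, then re-fetch it until its status leaves `"in_progress"`. A hedged sketch of that loop built from the non-polling primitives (the client and IDs are placeholders):

```python
import time

def create_and_wait(client, vector_store_id: str, file_id: str, sleep_interval: float = 1.0):
    vs_file = client.create_vector_store_file(vector_store_id=vector_store_id, file_id=file_id)
    while vs_file.status == "in_progress":
        time.sleep(sleep_interval)
        vs_file = client.get_vector_store_file(vector_store_id=vector_store_id, file_id=vs_file.id)
    return vs_file  # status is now terminal, e.g. "completed" or "failed"
```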
The AssistantDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + if assistant_id in self._toolset: + del self._toolset[assistant_id] + return super().delete_assistant(assistant_id, **kwargs) + + +__all__: List[str] = ["AssistantsClient"] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_serialization.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_serialization.py new file mode 100644 index 000000000000..7a0232de5ddc --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_serialization.py @@ -0,0 +1,2050 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + MutableMapping, + List, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore +from typing_extensions import Self + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... 
+    JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")
+
+    # Name used in context
+    CONTEXT_NAME = "deserialized_data"
+
+    @classmethod
+    def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any:
+        """Decode data according to content-type.
+
+        Accepts a stream of data as well, but it will be loaded into memory at once for now.
+
+        If there is no content-type, the string version (not bytes, not stream) is returned.
+
+        :param data: Input, could be bytes or stream (will be decoded with UTF8) or text
+        :type data: str or bytes or IO
+        :param str content_type: The content type.
+        :return: The deserialized data.
+        :rtype: object
+        """
+        if hasattr(data, "read"):
+            # Assume a stream
+            data = cast(IO, data).read()
+
+        if isinstance(data, bytes):
+            data_as_str = data.decode(encoding="utf-8-sig")
+        else:
+            # Explain to mypy the correct type.
+            data_as_str = cast(str, data)
+
+        # Remove Byte Order Mark if present in string
+        data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err) from err
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # On Python 2.7, "fromstring" will scream if given a unicode XML string
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError as err:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attempt(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attempt(data)
+                if success:
+                    return json_result
+                # If I'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception)
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+                _LOGGER.critical("Wasn't XML nor JSON, failing")
+                raise DeserializationError("XML is invalid") from err
+        elif content_type.startswith("text/"):
+            return data_as_str
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+        """Deserialize from HTTP response.
+
+        Use bytes and headers to NOT use any requests/aiohttp or whatever
+        specific implementation.
+        Headers will be tested for "content-type".
+
+        :param bytes body_bytes: The body of the response.
+        :param dict headers: The headers of the response.
+        :returns: The deserialized data.
+        :rtype: object
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
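To make the content-type dispatch above concrete, here is how `RawDeserializer.deserialize_from_text` behaves for the common cases. A sketch only: `_serialization` is a private module, so importing from it is shown purely for illustration.

```python
import xml.etree.ElementTree as ET
from azure.ai.assistants._serialization import RawDeserializer  # private module; illustrative import

RawDeserializer.deserialize_from_text('{"a": 1}', "application/json")  # -> {'a': 1}
RawDeserializer.deserialize_from_text("plain body", "text/plain")      # -> 'plain body'
RawDeserializer.deserialize_from_text("<root/>", "application/xml")    # -> an ET.Element
```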
+
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+TZ_UTC = datetime.timezone.utc
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+def attribute_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the Python attribute.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A key using attribute name
+    :rtype: str
+    """
+    return (key, value)
+
+
+def full_restapi_key_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the full RestAPI key path.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A list of keys using RestAPI syntax.
+    :rtype: list
+    """
+    keys = _FLATTEN.split(attr_desc["key"])
+    return ([_decode_attribute_map_key(k) for k in keys], value)
+
+
+def last_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the last RestAPI key.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: The last RestAPI key.
+    :rtype: str
+    """
+    key, value = full_restapi_key_transformer(key, attr_desc, value)
+    return (key[-1], value)
+
+
+def _create_xml_node(tag, prefix=None, ns=None):
+    """Create a XML node.
+
+    :param str tag: The tag name
+    :param str prefix: The prefix
+    :param str ns: The namespace
+    :return: The XML node
+    :rtype: xml.etree.ElementTree.Element
+    """
+    if prefix and ns:
+        ET.register_namespace(prefix, ns)
+    if ns:
+        return ET.Element("{" + ns + "}" + tag)
+    return ET.Element(tag)
+
+
+class Model:
+    """Mixin for all client request body/response body models to support
+    serialization and deserialization.
+    """
+
+    _subtype_map: Dict[str, Dict[str, Any]] = {}
+    _attribute_map: Dict[str, Dict[str, Any]] = {}
+    _validation: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.additional_properties: Optional[Dict[str, Any]] = {}
+        for k in kwargs:  # pylint: disable=consider-using-dict-items
+            if k not in self._attribute_map:
+                _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+            elif k in self._validation and self._validation[k].get("readonly", False):
+                _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+            else:
+                setattr(self, k, kwargs[k])
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are equal
+        :rtype: bool
+        """
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are not equal
+        :rtype: bool
+        """
+        return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node.
+
+        :returns: The XML node
+        :rtype: xml.etree.ElementTree.Element
+        """
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, keep_readonly=keep_readonly, **kwargs
+        )
+
+    def as_dict(
+        self,
+        keep_readonly: bool = True,
+        key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer,
+        **kwargs: Any
+    ) -> JSON:
+        """Return a dict that can be serialized using json.dump.
+
+        Advanced usage might optionally use a callback as parameter:
+
+        .. code::python
+
+            def my_key_transformer(key, attr_desc, value):
+                return key
+
+        Key is the attribute name used in Python. Attr_desc
+        is a dict of metadata. Currently contains 'type' with the
+        msrest type and 'key' with the RestAPI encoded key.
+        Value is the current value in this object.
+
+        The string returned will be used to serialize the key.
+        If the return type is a list, this is considered hierarchical
+        result dict.
+ + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: # pylint: disable=broad-exception-caught + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls, + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> Self: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. 
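Taken together, `_attribute_map`, the key transformers, and `as_dict`/`from_dict` give models a two-way mapping between Python attributes and (possibly flattened) REST keys. A small self-contained sketch, with a hypothetical `Cat` model and the private import path assumed for illustration:

```python
from azure.ai.assistants._serialization import Model, full_restapi_key_transformer  # private module

class Cat(Model):
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "age": {"key": "properties.age", "type": "int"},  # flattened REST path
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.name = kwargs.get("name")
        self.age = kwargs.get("age")

cat = Cat(name="Felix", age=3)
cat.as_dict(key_transformer=full_restapi_key_transformer)
# -> {'name': 'Felix', 'properties': {'age': 3}}
restored = Cat.from_dict({"name": "Felix", "properties": {"age": 3}})
assert restored.age == 3
```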
+ + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. + + :param str key: A key string from the generated code + :returns: The decoded key + :rtype: str + """ + return key.replace("\\.", ".") + + +class Serializer: # pylint: disable=too-many-public-methods + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + 
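The `serialize_type` table built in `__init__` is what `serialize_data` dispatches on: each wire-format name maps to one serializer. For example (import path assumed, since the module is private):

```python
import datetime
from azure.ai.assistants._serialization import Serializer  # private module; illustrative import

serializer = Serializer()
dt = datetime.datetime(2024, 5, 1, 12, 30, tzinfo=datetime.timezone.utc)
serializer.serialize_data(dt, "iso-8601")   # '2024-05-01T12:30:00.000Z'
serializer.serialize_data(dt, "rfc-1123")   # 'Wed, 01 May 2024 12:30:00 GMT'
serializer.serialize_data(dt, "unix-time")  # 1714566600
```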
def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, target_obj, data_type=None, **kwargs + ): + """Serialize data into a string according to type. + + :param object target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises SerializationError: if serialization fails. + :returns: The serialized data. + """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() # pylint: disable=protected-access + try: + attributes = target_obj._attribute_map # pylint: disable=protected-access + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. 
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises SerializationError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized request body + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :returns: The serialized URL path + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param str name: The name of the query parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, list + :raises TypeError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized query parameter + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param str name: The name of the header. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises TypeError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized header + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. + :returns: The serialized data. 
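`url`, `query`, and `header` wrap `serialize_data` with the escaping rules each request component needs. A short sketch of the differences (same assumed private import):

```python
from azure.ai.assistants._serialization import Serializer  # private module; illustrative import

s = Serializer()
s.url("name", "hello world", "str")             # 'hello%20world' (path segments are percent-encoded)
s.query("ids", ["a b", "c"], "[str]", div=",")  # 'a%20b,c' (lists joined with the div separator)
s.header("enabled", True, "bool")               # 'true' (bools become lowercase JSON literals)
```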
+        :rtype: str, int, float, bool, dict, list
+        """
+        if data is None:
+            raise ValueError("No value for given attribute")
+
+        try:
+            if data is CoreNull:
+                return None
+            if data_type in self.basic_types.values():
+                return self.serialize_basic(data, data_type, **kwargs)
+
+            if data_type in self.serialize_type:
+                return self.serialize_type[data_type](data, **kwargs)
+
+            # If dependencies is empty, try with current data class
+            # It has to be a subclass of Enum anyway
+            enum_type = self.dependencies.get(data_type, data.__class__)
+            if issubclass(enum_type, Enum):
+                return Serializer.serialize_enum(data, enum_obj=enum_type)
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.serialize_type:
+                return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
+
+        except (ValueError, TypeError) as err:
+            msg = "Unable to serialize value: {!r} as type: {!r}."
+            raise SerializationError(msg.format(data, data_type)) from err
+        return self._serialize(data, **kwargs)
+
+    @classmethod
+    def _get_custom_serializers(cls, data_type, **kwargs):  # pylint: disable=inconsistent-return-statements
+        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+        if custom_serializer:
+            return custom_serializer
+        if kwargs.get("is_xml", False):
+            return cls._xml_basic_types_serializers.get(data_type)
+
+    @classmethod
+    def serialize_basic(cls, data, data_type, **kwargs):
+        """Serialize basic builtin data type.
+        Serializes objects to str, int, float or bool.
+
+        Possible kwargs:
+        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+        - is_xml bool : If set, use xml_basic_types_serializers
+
+        :param obj data: Object to be serialized.
+        :param str data_type: Type of object in the iterable.
+        :rtype: str, int, float, bool
+        :return: serialized object
+        """
+        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+        if custom_serializer:
+            return custom_serializer(data)
+        if data_type == "str":
+            return cls.serialize_unicode(data)
+        return eval(data_type)(data)  # nosec # pylint: disable=eval-used
+
+    @classmethod
+    def serialize_unicode(cls, data):
+        """Special handling for serializing unicode strings in Py2.
+        Encode to UTF-8 if unicode, otherwise handle as a str.
+
+        :param str data: Object to be serialized.
+        :rtype: str
+        :return: serialized object
+        """
+        try:  # If I received an enum, return its value
+            return data.value
+        except AttributeError:
+            pass
+
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                # Don't change it, JSON and XML ElementTree are totally able
+                # to serialize correctly u'' strings
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    def serialize_iter(self, data, iter_type, div=None, **kwargs):
+        """Serialize iterable.
+
+        Supported kwargs:
+        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+          serialization_ctxt['type'] should be same as data_type.
+        - is_xml bool : If set, serialize as XML
+
+        :param list data: Object to be serialized.
+        :param str iter_type: Type of object in the iterable.
+        :param str div: If set, this str will be used to combine the elements
+         in the iterable into a combined string. Default is None.
+ :rtype: list, str + :return: serialized iterable + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :rtype: dict + :return: serialized dictionary + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + :return: serialized object + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + if obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError as exc: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) from exc + + @staticmethod + def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument + """Serialize bytearray into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument + """Serialize str into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Decimal object to float. + + :param decimal attr: Object to be serialized. + :rtype: float + :return: serialized decimal + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument + """Serialize long (Py2) or int (Py3). + + :param int attr: Object to be serialized. + :rtype: int/long + :return: serialized long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + :return: serialized date + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. 
+        :rtype: str
+        :return: serialized time
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_time(attr)
+        t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
+        if attr.microsecond:
+            # Zero-pad to six digits so the fractional part keeps its magnitude.
+            t += ".{:06d}".format(attr.microsecond)
+        return t
+
+    @staticmethod
+    def serialize_duration(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize TimeDelta object into ISO-8601 formatted string.
+
+        :param TimeDelta attr: Object to be serialized.
+        :rtype: str
+        :return: serialized duration
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_duration(attr)
+        return isodate.duration_isoformat(attr)
+
+    @staticmethod
+    def serialize_rfc(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into RFC-1123 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises TypeError: if format invalid.
+        :return: serialized rfc
+        """
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+        except AttributeError as exc:
+            raise TypeError("RFC1123 object must be valid Datetime object.") from exc
+
+        return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
+            Serializer.days[utc.tm_wday],
+            utc.tm_mday,
+            Serializer.months[utc.tm_mon],
+            utc.tm_year,
+            utc.tm_hour,
+            utc.tm_min,
+            utc.tm_sec,
+        )
+
+    @staticmethod
+    def serialize_iso(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into ISO-8601 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises SerializationError: if format invalid.
+        :return: serialized iso
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_datetime(attr)
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+            if utc.tm_year > 9999 or utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+
+            microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
+            if microseconds:
+                microseconds = "." + microseconds
+            date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+                utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
+            )
+            return date + microseconds + "Z"
+        except (ValueError, OverflowError) as err:
+            msg = "Unable to serialize datetime object."
+            raise SerializationError(msg) from err
+        except AttributeError as err:
+            msg = "ISO-8601 object must be valid Datetime object."
+            raise TypeError(msg) from err
+
+    @staticmethod
+    def serialize_unix(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: int
+        :raises SerializationError: if format invalid
+        :return: serialized unix
+        """
+        if isinstance(attr, int):
+            return attr
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            return int(calendar.timegm(attr.utctimetuple()))
+        except AttributeError as exc:
+            raise TypeError("Unix time object must be valid Datetime object.") from exc
+
+
+def rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    key = attr_desc["key"]
+    working_data = data
+
+    while "."
in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(List[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements + attr, attr_desc, data +): + key = attr_desc["key"] + working_data = data + + while "." in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. 
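The extractor family above resolves an `_attribute_map` key against response data; unescaped dots in the key walk nested JSON one level at a time. For instance (private import path assumed):

```python
from azure.ai.assistants._serialization import rest_key_extractor  # private module; illustrative import

data = {"properties": {"age": 3}}
rest_key_extractor("age", {"key": "properties.age"}, data)  # -> 3
rest_key_extractor("age", {"key": "properties.age"}, {})    # -> None when the path is absent
```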
+ + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = _extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not a itertype, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) + return children[0] + + +class Deserializer: + """Response object model deserializer. + + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. 
+ :rtype: object + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + if isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. 
+ :rtype: tuple + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + :return: Deserialized object. + :rtype: object + """ + try: + return self(target_obj, data, content_type=content_type) + except: # pylint: disable=bare-except + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. + + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param obj raw_data: Data to be processed. + :param str content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + :rtype: object + :return: Unpacked content. + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param Response response: The response model class. + :param dict attrs: The deserialized response attributes. + :param dict additional_properties: Additional properties to be set. + :rtype: Response + :return: The instantiated response model. 
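`failsafe_deserialize` is the piece error handlers rely on: if the error body does not parse, it returns `None` rather than replacing the original `HttpResponseError` with a `DeserializationError`. Continuing the `Deserializer` sketch above:

```python
# A malformed error body degrades to None instead of raising.
broken = deserializer.failsafe_deserialize("Cat", "<<not json>>", content_type="application/json")
assert broken is None
```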
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("readonly") + ] + const = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("constant") + ] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties # type: ignore + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) from err + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) from exp + + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment + "object", + "[]", + r"{}", + ] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :return: Deserialized iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. 
+ :return: Deserialized dictionary. + :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :return: Deserialized object. + :rtype: dict + :raises TypeError: if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :return: Deserialized basic type. + :rtype: str, int, float or bool + :raises TypeError: if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + if isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + if attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec # pylint: disable=eval-used + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :return: Deserialized string. 
+ :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :return: Deserialized enum object. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. + try: + return list(enum_obj.__members__.values())[data] + except IndexError as exc: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) from exc + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :return: Deserialized bytearray + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :return: Deserialized base64 string + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :return: Deserialized decimal + :raises DeserializationError: if string format invalid. + :rtype: decimal + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :return: Deserialized int + :rtype: long or int + :raises ValueError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. 
+        :return: Deserialized duration
+        :rtype: TimeDelta
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            duration = isodate.parse_duration(attr)
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize duration object."
+            raise DeserializationError(msg) from err
+        return duration
+
+    @staticmethod
+    def deserialize_date(attr):
+        """Deserialize ISO-8601 formatted string into Date object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized date
+        :rtype: Date
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        # This must NOT use defaultmonth/defaultday. Passing 0 for both ensures that an
+        # incomplete date raises an exception instead of being silently filled in.
+        return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+    @staticmethod
+    def deserialize_time(attr):
+        """Deserialize ISO-8601 formatted string into time object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized time
+        :rtype: datetime.time
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Time must have only digits and separators. Received: %s" % attr)
+        return isodate.parse_time(attr)
+
+    @staticmethod
+    def deserialize_rfc(attr):
+        """Deserialize RFC-1123 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized RFC datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            parsed_date = email.utils.parsedate_tz(attr)  # type: ignore
+            date_obj = datetime.datetime(
+                *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+            )
+            if not date_obj.tzinfo:
+                date_obj = date_obj.astimezone(tz=TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to rfc datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
+
+    @staticmethod
+    def deserialize_iso(attr):
+        """Deserialize ISO-8601 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized ISO datetime
+        :rtype: Datetime
+        :raises DeserializationError: if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            attr = attr.upper()  # type: ignore
+            match = Deserializer.valid_date.match(attr)
+            if not match:
+                raise ValueError("Invalid datetime string: " + attr)
+
+            check_decimal = attr.split(".")
+            if len(check_decimal) > 1:
+                decimal_str = ""
+                for digit in check_decimal[1]:
+                    if digit.isdigit():
+                        decimal_str += digit
+                    else:
+                        break
+                if len(decimal_str) > 6:
+                    attr = attr.replace(decimal_str, decimal_str[0:6])
+
+            date_obj = isodate.parse_datetime(attr)
+            test_utc = date_obj.utctimetuple()
+            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
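# NOTE (editor's illustration, not part of the generated diff): the date/time helpers
# above are static methods, so they can be exercised directly. A minimal sketch,
# assuming the private module path azure.ai.assistants._serialization:
#
#     from azure.ai.assistants._serialization import Deserializer
#
#     Deserializer.deserialize_date("2025-05-15")           # -> datetime.date(2025, 5, 15)
#     Deserializer.deserialize_iso("2025-05-15T12:30:00Z")  # -> timezone-aware datetime
#     Deserializer.deserialize_duration("PT1H30M")          # -> 1 hour 30 minute duration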
+
+    @staticmethod
+    def deserialize_unix(attr):
+        """Deserialize a UNIX timestamp into a Datetime object.
+        The timestamp is interpreted as seconds since the epoch.
+
+        :param int attr: Unix timestamp to be deserialized.
+        :return: Deserialized datetime
+        :rtype: Datetime
+        :raises DeserializationError: if format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = int(attr.text)  # type: ignore
+        try:
+            attr = int(attr)
+            date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to unix datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py
new file mode 100644
index 000000000000..af5212be9e0f
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import List, TYPE_CHECKING, Union
+
+if TYPE_CHECKING:
+    from . import models as _models
+AssistantsApiResponseFormatOption = Union[
+    str,
+    "_models.AssistantsApiResponseFormatMode",
+    "_models.AssistantsApiResponseFormat",
+    "_models.ResponseFormatJsonSchemaType",
+]
+MessageInputContent = Union[str, List["_models.MessageInputContentBlock"]]
+MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"]
+AssistantsApiToolChoiceOption = Union[
+    str, "_models.AssistantsApiToolChoiceOptionMode", "_models.AssistantsNamedToolChoice"
+]
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_vendor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_vendor.py
new file mode 100644
index 000000000000..08b71686c335
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_vendor.py
@@ -0,0 +1,66 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from abc import ABC
+import json
+from typing import Any, Dict, IO, List, Mapping, Optional, TYPE_CHECKING, Tuple, Union
+
+from ._configuration import AssistantsClientConfiguration
+from ._model_base import Model, SdkJSONEncoder
+
+if TYPE_CHECKING:
+    from azure.core import PipelineClient
+
+    from ._serialization import Deserializer, Serializer
+
+
+class AssistantsClientMixinABC(ABC):
+    """DO NOT use this class.
It is for internal typing use only.""" + + _client: "PipelineClient" + _config: AssistantsClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +# file-like tuple could be `(filename, IO (or bytes))` or `(filename, IO (or bytes), content_type)` +FileContent = Union[str, bytes, IO[str], IO[bytes]] + +FileType = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], +] + + +def serialize_multipart_data_entry(data_entry: Any) -> Any: + if isinstance(data_entry, (list, tuple, dict, Model)): + return json.dumps(data_entry, cls=SdkJSONEncoder, exclude_readonly=True) + return data_entry + + +def prepare_multipart_form_data( + body: Mapping[str, Any], multipart_fields: List[str], data_fields: List[str] +) -> Tuple[List[FileType], Dict[str, Any]]: + files: List[FileType] = [] + data: Dict[str, Any] = {} + for multipart_field in multipart_fields: + multipart_entry = body.get(multipart_field) + if isinstance(multipart_entry, list): + files.extend([(multipart_field, e) for e in multipart_entry]) + elif multipart_entry: + files.append((multipart_field, multipart_entry)) + + for data_field in data_fields: + data_entry = body.get(data_field) + if data_entry: + data[data_field] = serialize_multipart_data_entry(data_entry) + + return files, data diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_version.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_version.py new file mode 100644 index 000000000000..be71c81bd282 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py new file mode 100644 index 000000000000..4fea30ca6925 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
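# NOTE (editor's illustration, not part of the generated diff): the try/except import of
# ._patch below is the azure-sdk codegen customization hook. A handwritten _patch.py can
# add to or override the public surface without being lost on regeneration; a minimal
# sketch of such a (hypothetical) file:
#
#     # azure/ai/assistants/aio/_patch.py
#     from typing import List
#
#     __all__: List[str] = []  # names listed here are re-exported by this __init__
#
#     def patch_sdk() -> None:
#         """Runs after import; customize or monkey-patch the generated client here."""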
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._client import AssistantsClient  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AssistantsClient",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py
new file mode 100644
index 000000000000..ff7afa16a25f
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py
@@ -0,0 +1,106 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Awaitable, TYPE_CHECKING, Union
+from typing_extensions import Self
+
+from azure.core import AsyncPipelineClient
+from azure.core.credentials import AzureKeyCredential
+from azure.core.pipeline import policies
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+
+from .._serialization import Deserializer, Serializer
+from ._configuration import AssistantsClientConfiguration
+from ._operations import AssistantsClientOperationsMixin
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+
+
+class AssistantsClient(AssistantsClientOperationsMixin):
+    """AssistantsClient.
+
+    :param endpoint: Project endpoint in the form of:
+     https://<your-ai-services-account>.services.ai.azure.com/api/projects/<your-project-name>. Required.
+    :type endpoint: str
+    :param credential: Credential used to authenticate requests to the service. Is either a key
+     credential type or a token credential type. Required.
+    :type credential: ~azure.core.credentials.AzureKeyCredential or
+     ~azure.core.credentials_async.AsyncTokenCredential
+    :keyword api_version: The API version to use for this operation. Default value is
+     "2025-05-15-preview". Note that overriding this default value may result in unsupported
+     behavior.
+ :paramtype api_version: str + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + _endpoint = "{endpoint}" + self._config = AssistantsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py new file mode 100644 index 000000000000..637f56b4a09d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
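# NOTE (editor's illustration, not part of the generated diff): a minimal sketch of
# constructing the async AssistantsClient defined above. The endpoint placeholder and
# the model name "gpt-4o" are assumptions for illustration; any model available in
# the project would work.
#
#     import asyncio
#     from azure.ai.assistants.aio import AssistantsClient
#     from azure.identity.aio import DefaultAzureCredential
#
#     async def main() -> None:
#         async with AssistantsClient(
#             endpoint="https://<your-ai-services-account>.services.ai.azure.com/api/projects/<your-project-name>",
#             credential=DefaultAzureCredential(),
#         ) as client:
#             assistant = await client.create_assistant(model="gpt-4o", name="my-assistant")
#             print(assistant.id)
#
#     asyncio.run(main())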
+# --------------------------------------------------------------------------
+
+from typing import Any, TYPE_CHECKING, Union
+
+from azure.core.credentials import AzureKeyCredential
+from azure.core.pipeline import policies
+
+from .._version import VERSION
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+
+
+class AssistantsClientConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AssistantsClient.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param endpoint: Project endpoint in the form of:
+     https://<your-ai-services-account>.services.ai.azure.com/api/projects/<your-project-name>. Required.
+    :type endpoint: str
+    :param credential: Credential used to authenticate requests to the service. Is either a key
+     credential type or a token credential type. Required.
+    :type credential: ~azure.core.credentials.AzureKeyCredential or
+     ~azure.core.credentials_async.AsyncTokenCredential
+    :keyword api_version: The API version to use for this operation. Default value is
+     "2025-05-15-preview". Note that overriding this default value may result in unsupported
+     behavior.
+    :paramtype api_version: str
+    """
+
+    def __init__(
+        self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any
+    ) -> None:
+        api_version: str = kwargs.pop("api_version", "2025-05-15-preview")
+
+        if endpoint is None:
+            raise ValueError("Parameter 'endpoint' must not be None.")
+        if credential is None:
+            raise ValueError("Parameter 'credential' must not be None.")
+
+        self.endpoint = endpoint
+        self.credential = credential
+        self.api_version = api_version
+        self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"])
+        kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION))
+        self.polling_interval = kwargs.get("polling_interval", 30)
+        self._configure(**kwargs)
+
+    def _infer_policy(self, **kwargs):
+        if isinstance(self.credential, AzureKeyCredential):
+            return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs)
+        if hasattr(self.credential, "get_token"):
+            return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
+        raise TypeError(f"Unsupported credential: {self.credential}")
+
+    def _configure(self, **kwargs: Any) -> None:
+        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
+        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
+        self.authentication_policy = kwargs.get("authentication_policy")
+        if self.credential and not self.authentication_policy:
+            self.authentication_policy = self._infer_policy(**kwargs)
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/__init__.py
new file mode 100644
index 000000000000..ee3f17d82ddc
---
/dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/__init__.py @@ -0,0 +1,25 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import AssistantsClientOperationsMixin # type: ignore + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AssistantsClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py new file mode 100644 index 000000000000..a9696b0e89a2 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py @@ -0,0 +1,4781 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, AsyncIterator, Callable, Dict, IO, List, Optional, TYPE_CHECKING, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... 
import _model_base, models as _models +from ..._model_base import SdkJSONEncoder, _deserialize +from ..._operations._operations import ( + build_assistants_cancel_run_request, + build_assistants_cancel_vector_store_file_batch_request, + build_assistants_create_assistant_request, + build_assistants_create_message_request, + build_assistants_create_run_request, + build_assistants_create_thread_and_run_request, + build_assistants_create_thread_request, + build_assistants_create_vector_store_file_batch_request, + build_assistants_create_vector_store_file_request, + build_assistants_create_vector_store_request, + build_assistants_delete_assistant_request, + build_assistants_delete_file_request, + build_assistants_delete_thread_request, + build_assistants_delete_vector_store_file_request, + build_assistants_delete_vector_store_request, + build_assistants_get_assistant_request, + build_assistants_get_file_content_request, + build_assistants_get_file_request, + build_assistants_get_message_request, + build_assistants_get_run_request, + build_assistants_get_run_step_request, + build_assistants_get_thread_request, + build_assistants_get_vector_store_file_batch_request, + build_assistants_get_vector_store_file_request, + build_assistants_get_vector_store_request, + build_assistants_list_assistants_request, + build_assistants_list_files_request, + build_assistants_list_messages_request, + build_assistants_list_run_steps_request, + build_assistants_list_runs_request, + build_assistants_list_vector_store_file_batch_files_request, + build_assistants_list_vector_store_files_request, + build_assistants_list_vector_stores_request, + build_assistants_modify_vector_store_request, + build_assistants_submit_tool_outputs_to_run_request, + build_assistants_update_assistant_request, + build_assistants_update_message_request, + build_assistants_update_run_request, + build_assistants_update_thread_request, + build_assistants_upload_file_request, +) +from ..._vendor import prepare_multipart_form_data +from .._vendor import AssistantsClientMixinABC + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore + +if TYPE_CHECKING: + from ... import _types +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class AssistantsClientOperationsMixin(AssistantsClientMixinABC): # pylint: disable=too-many-public-methods + + @overload + async def create_assistant( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new assistant. Default value is None. 
+ :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is + None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_assistant( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_assistant( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_assistant( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword name: The name of the new assistant. Default value is None. + :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is + None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_assistant_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Assistant, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_assistants( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfAssistant: + """Gets a list of assistants that were previously created. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfAssistant. The OpenAIPageableListOfAssistant is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfAssistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfAssistant] = kwargs.pop("cls", None) + + _request = build_assistants_list_assistants_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfAssistant, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assistant: + """Retrieves an existing assistant. + + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) + + _request = build_assistants_get_assistant_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Assistant, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_assistant( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default + value is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. 
For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_assistant( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_assistant( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_assistant( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default + value is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. 
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.Assistant] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            body = {
+                "description": description,
+                "instructions": instructions,
+                "metadata": metadata,
+                "model": model,
+                "name": name,
+                "response_format": response_format,
+                "temperature": temperature,
+                "tool_resources": tool_resources,
+                "tools": tools,
+                "top_p": top_p,
+            }
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_assistants_update_assistant_request(
+            assistant_id=assistant_id,
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.Assistant, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus:
+        """Deletes an assistant.
+
+        :param assistant_id: Identifier of the assistant. Required.
+        :type assistant_id: str
+        :return: AssistantDeletionStatus.
The AssistantDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AssistantDeletionStatus] = kwargs.pop("cls", None) + + _request = build_assistants_delete_assistant_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AssistantDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_thread( + self, + *, + content_type: str = "application/json", + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_thread( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AssistantThread. 
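+        The AssistantThread is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.AssistantThread
+        :raises ~azure.core.exceptions.HttpResponseError:
+
+        .. admonition:: Example
+
+            An illustrative sketch, not generated code: it assumes an authenticated
+            asynchronous ``AssistantsClient`` named ``client``. The initial message
+            is optional, and a plain dict is shown in place of a
+            ``ThreadMessageOptions`` model.
+
+            .. code-block:: python
+
+                # Create a thread seeded with one user message.
+                thread = await client.create_thread(
+                    messages=[{"role": "user", "content": "Hello, assistant!"}]
+                )
+                print(thread.id)
+        """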
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources}
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_assistants_create_thread_request(
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.AssistantThread, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AssistantThread:
+        """Gets information about an existing thread.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :return: AssistantThread.
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) + + _request = build_assistants_get_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AssistantThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_thread( + self, + thread_id: str, + *, + content_type: str = "application/json", + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AssistantThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_thread( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AssistantThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_thread( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AssistantThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_thread( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AssistantThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_update_thread_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AssistantThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + """Deletes an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: ThreadDeletionStatus. 
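+        The ThreadDeletionStatus is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadDeletionStatus
+        :raises ~azure.core.exceptions.HttpResponseError:
+
+        .. admonition:: Example
+
+            An illustrative sketch, not generated code: it assumes an authenticated
+            asynchronous ``AssistantsClient`` named ``client`` and a placeholder
+            thread ID; the ``deleted`` field name follows the OpenAI-style deletion
+            status object and is shown as an assumption.
+
+            .. code-block:: python
+
+                status = await client.delete_thread(thread_id="thread_abc123")
+                print(status.deleted)  # True if the service removed the thread
+        """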
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None)
+
+        _request = build_assistants_delete_thread_request(
+            thread_id=thread_id,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.ThreadDeletionStatus, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    async def create_message(
+        self,
+        thread_id: str,
+        *,
+        role: Union[str, _models.MessageRole],
+        content: "_types.MessageInputContent",
+        content_type: str = "application/json",
+        attachments: Optional[List[_models.MessageAttachment]] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.ThreadMessage:
+        """Creates a new message on a specified thread.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :keyword role: The role of the entity that is creating the message. Allowed values include:
+         ``user``, which indicates the message is sent by an actual user (and should be used in most
+         cases to represent user-generated messages), and ``assistant``, which indicates the message
+         is generated by the assistant (use this value to insert messages from the assistant into the
+         conversation). Known values are: "user" and "assistant". Required.
+        :paramtype role: str or ~azure.ai.assistants.models.MessageRole
+        :keyword content: The content of the initial message. This may be a basic string (if you
+         only need text) or an array of typed content blocks (for example, text, image_file,
+         image_url, and so on). Is either a str type or a [MessageInputContentBlock] type. Required.
+        :paramtype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword attachments: A list of files attached to the message, and the tools they should be
+         added to. Default value is None.
+        :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment]
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up
+         to 64 characters in length and values may be up to 512 characters in length. Default value
+         is None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadMessage
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_message(
+        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.ThreadMessage:
+        """Creates a new message on a specified thread.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadMessage
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_message(
+        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.ThreadMessage:
+        """Creates a new message on a specified thread.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadMessage
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create_message(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        role: Union[str, _models.MessageRole] = _Unset,
+        content: "_types.MessageInputContent" = _Unset,
+        attachments: Optional[List[_models.MessageAttachment]] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.ThreadMessage:
+        """Creates a new message on a specified thread.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword role: The role of the entity that is creating the message. Allowed values include:
+         ``user``, which indicates the message is sent by an actual user (and should be used in most
+         cases to represent user-generated messages), and ``assistant``, which indicates the message
+         is generated by the assistant (use this value to insert messages from the assistant into the
+         conversation). Known values are: "user" and "assistant". Required.
+        :paramtype role: str or ~azure.ai.assistants.models.MessageRole
+        :keyword content: The content of the initial message. This may be a basic string (if you
+         only need text) or an array of typed content blocks (for example, text, image_file,
+         image_url, and so on). Is either a str type or a [MessageInputContentBlock] type. Required.
+ :paramtype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock] + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None. + :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + if role is _Unset: + raise TypeError("missing required argument: role") + if content is _Unset: + raise TypeError("missing required argument: content") + body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_message_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_messages( + self, + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadMessage: + """Gets a list of messages that exist on a thread. + + :param thread_id: Identifier of the thread. 
Required. + :type thread_id: str + :keyword run_id: Filter messages by the run ID that generated them. Default value is None. + :paramtype run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible + with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) + + _request = build_assistants_list_messages_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + """Gets an existing message from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. 
+ :type message_id: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + _request = build_assistants_get_message_request( + thread_id=thread_id, + message_id=message_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_message( + self, + thread_id: str, + message_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_message( + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_message( + self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_message( + self, + thread_id: str, + message_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_update_message_request( + thread_id=thread_id, + message_id=message_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_run( + self, + thread_id: str, + *, + assistant_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. 
+ :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the assistant should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. 
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + body: JSON, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + body: IO[bytes], + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword model: The overridden model name that the assistant should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. 
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where
+         the model considers the results of the tokens with top_p probability mass. So 0.1 means only
+         the tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only the number of prompt tokens
+         specified, across multiple turns of the run. If the run exceeds the number of prompt tokens
+         specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+         info. Default value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used
+         over the course of the run. The run will make a best effort to use only the number of
+         completion tokens specified, across multiple turns of the run. If the run exceeds the number
+         of completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context
+         window moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+        :keyword tool_choice: Controls whether a tool is called by the model and, if so, which tool.
+         Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+         AssistantsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or
+         ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+         ~azure.ai.assistants.models.AssistantsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat, ResponseFormatJsonSchemaType. Default value is None.
+        :paramtype response_format: str or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormat or
+         ~azure.ai.assistants.models.ResponseFormatJsonSchemaType
+        :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up
+         to 64 characters in length and values may be up to 512 characters in length. Default value
+         is None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun.
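+        The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+
+        .. admonition:: Example
+
+            An illustrative sketch, not generated code: it assumes an authenticated
+            asynchronous ``AssistantsClient`` named ``client`` and placeholder thread
+            and assistant IDs.
+
+            .. code-block:: python
+
+                # Start a run; the returned ThreadRun begins in a non-terminal state
+                # (for example "queued") and can be polled with get_run.
+                run = await client.create_run(
+                    thread_id="thread_abc123",
+                    assistant_id="asst_abc123",
+                )
+                print(run.id, run.status)
+        """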
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            if assistant_id is _Unset:
+                raise TypeError("missing required argument: assistant_id")
+            body = {
+                "additional_instructions": additional_instructions,
+                "additional_messages": additional_messages,
+                "assistant_id": assistant_id,
+                "instructions": instructions,
+                "max_completion_tokens": max_completion_tokens,
+                "max_prompt_tokens": max_prompt_tokens,
+                "metadata": metadata,
+                "model": model,
+                "parallel_tool_calls": parallel_tool_calls,
+                "response_format": response_format,
+                "stream": stream_parameter,
+                "temperature": temperature,
+                "tool_choice": tool_choice,
+                "tools": tools,
+                "top_p": top_p,
+                "truncation_strategy": truncation_strategy,
+            }
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_assistants_create_run_request(
+            thread_id=thread_id,
+            include=include,
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.ThreadRun, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def list_runs(
+        self,
+        thread_id: str,
+        *,
+        limit: Optional[int] = None,
+        order: Optional[Union[str, _models.ListSortOrder]] = None,
+        after: Optional[str] = None,
+        before: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.OpenAIPageableListOfThreadRun:
+        """Gets a list of runs for a specified thread.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :keyword limit: A limit on the number of objects to be returned. Limit can range between 1
+         and 100, and the default is 20. Default value is None.
+        :paramtype limit: int
+        :keyword order: Sort order by the created_at timestamp of the objects.
asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_assistants_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_assistants_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
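+
+ .. admonition:: Illustrative example
+
+    A hedged sketch, reusing the ``client`` from the ``create_run`` example above
+    (all IDs are placeholders): attach free-form metadata to a run so it can be
+    found and audited later::
+
+        run = await client.update_run(
+            thread_id="<thread-id>",
+            run_id="<run-id>",
+            metadata={"reviewed": "true", "reviewer": "jane"},
+        )
+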
The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update_run(
+ self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Modifies an existing thread run.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param run_id: Identifier of the run. Required.
+ :type run_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update_run(
+ self,
+ thread_id: str,
+ run_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Modifies an existing thread run.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param run_id: Identifier of the run. Required.
+ :type run_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None)
+
+ if body is _Unset:
+ body = {"metadata": metadata}
+ body = {k: v for k, v in body.items() if v is not None}
+ content_type = content_type or "application/json"
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore
+
+ _request = build_assistants_update_run_request(
+ thread_id=thread_id,
+ run_id=run_id,
+ content_type=content_type,
+ api_version=self._config.api_version,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ await response.read() # Load the body in
memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def submit_tool_outputs_to_run(
+ self,
+ thread_id: str,
+ run_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ tool_outputs: List[_models.ToolOutput] = _Unset,
+ stream_parameter: Optional[bool] = None,
+ **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param run_id: Identifier of the run. Required.
+ :type run_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required.
+ :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput]
+ :keyword stream_parameter: If true, returns a stream of events that happen during the Run as
+ server-sent events, terminating when the run enters a terminal state. Default value is None.
+ :paramtype stream_parameter: bool
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None)
+
+ if body is _Unset:
+ if tool_outputs is _Unset:
+ raise TypeError("missing required argument: tool_outputs")
+ body = {"stream": stream_parameter, "tool_outputs": tool_outputs}
+ body = {k: v for k, v in body.items() if v is not None}
+ content_type = content_type or "application/json"
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore
+
+ _request = build_assistants_submit_tool_outputs_to_run_request(
+ thread_id=thread_id,
+ run_id=run_id,
+ content_type=content_type,
+ api_version=self._config.api_version,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.ThreadRun, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun:
+ """Cancels a run that is in progress.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param run_id: Identifier of the run. Required.
+ :type run_id: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None)
+
+ _request = build_assistants_cancel_run_request(
+ thread_id=thread_id,
+ run_id=run_id,
+ api_version=self._config.api_version,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.ThreadRun, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def create_thread_and_run(
+ self,
+ *,
+ assistant_id: str,
+ content_type: str = "application/json",
+ thread: Optional[_models.AssistantThreadCreationOptions] = None,
+ model: Optional[str] = None,
+ instructions: Optional[str] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ tool_resources: Optional[_models.UpdateToolResourcesOptions] = None,
+ stream_parameter: Optional[bool] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ max_prompt_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
+ truncation_strategy: Optional[_models.TruncationObject] = None,
+ tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
+ response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+ parallel_tool_calls: Optional[bool] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new assistant thread and immediately starts a run using that new thread.
+
+ :keyword assistant_id: The ID of the assistant for which the thread should be created.
+ Required.
+ :paramtype assistant_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword thread: The details used to create the new thread. If no thread is provided, an empty
+ one will be created. Default value is None.
+ :paramtype thread: ~azure.ai.assistants.models.AssistantThreadCreationOptions
+ :keyword model: The overridden model that the assistant should use to run the thread. Default
+ value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions the assistant should use to run the
+ thread. Default value is None.
+ :paramtype instructions: str
+ :keyword tools: The overridden list of enabled tools the assistant should use to run the
+ thread. Default value is None.
+ :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+ :keyword tool_resources: Override the tools the assistant can use for this run. This is useful
+ for modifying the behavior on a per-run basis. Default value is None.
+ :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions
+ :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run
+ as server-sent events,
+ terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default
+ value is None.
+ :paramtype stream_parameter: bool
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic. Default
+ value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model
+ considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort to use only
+ the number of completion tokens specified, across multiple turns of the run. If the run
+ exceeds the number of completion tokens
+ specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+ info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+ :keyword tool_choice: Controls whether the model calls a tool and, if so, which tool. Is one of
+ the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+ AssistantsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or
+ ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+ ~azure.ai.assistants.models.AssistantsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+ AssistantsApiResponseFormat, ResponseFormatJsonSchemaType. Default value is None.
+ :paramtype response_format: str or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormat or
+ ~azure.ai.assistants.models.ResponseFormatJsonSchemaType
+ :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+ Default value is None.
+ :paramtype parallel_tool_calls: bool
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_thread_and_run(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new assistant thread and immediately starts a run using that new thread.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_thread_and_run(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new assistant thread and immediately starts a run using that new thread.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_thread_and_run(
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ assistant_id: str = _Unset,
+ thread: Optional[_models.AssistantThreadCreationOptions] = None,
+ model: Optional[str] = None,
+ instructions: Optional[str] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ tool_resources: Optional[_models.UpdateToolResourcesOptions] = None,
+ stream_parameter: Optional[bool] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ max_prompt_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
+ truncation_strategy: Optional[_models.TruncationObject] = None,
+ tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
+ response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+ parallel_tool_calls: Optional[bool] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new assistant thread and immediately starts a run using that new thread.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword assistant_id: The ID of the assistant for which the thread should be created.
+ Required.
+ :paramtype assistant_id: str
+ :keyword thread: The details used to create the new thread. If no thread is provided, an empty
+ one will be created. Default value is None.
+ :paramtype thread: ~azure.ai.assistants.models.AssistantThreadCreationOptions
+ :keyword model: The overridden model that the assistant should use to run the thread. Default
+ value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions the assistant should use to run the
+ thread. Default value is None.
+ :paramtype instructions: str
+ :keyword tools: The overridden list of enabled tools the assistant should use to run the
+ thread. Default value is None.
+ :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+ :keyword tool_resources: Override the tools the assistant can use for this run. This is useful
+ for modifying the behavior on a per-run basis. Default value is None.
+ :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions
+ :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run
+ as server-sent events,
+ terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default
+ value is None.
+ :paramtype stream_parameter: bool
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic. Default
+ value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model
+ considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort to use only
+ the number of completion tokens specified, across multiple turns of the run. If the run
+ exceeds the number of completion tokens
+ specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+ info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+ :keyword tool_choice: Controls whether the model calls a tool and, if so, which tool. Is one of
+ the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+ AssistantsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or
+ ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+ ~azure.ai.assistants.models.AssistantsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+ AssistantsApiResponseFormat, ResponseFormatJsonSchemaType. Default value is None.
+ :paramtype response_format: str or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+ ~azure.ai.assistants.models.AssistantsApiResponseFormat or
+ ~azure.ai.assistants.models.ResponseFormatJsonSchemaType
+ :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+ Default value is None.
+ :paramtype parallel_tool_calls: bool
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None)
+
+ if body is _Unset:
+ if assistant_id is _Unset:
+ raise TypeError("missing required argument: assistant_id")
+ body = {
+ "assistant_id": assistant_id,
+ "instructions": instructions,
+ "max_completion_tokens": max_completion_tokens,
+ "max_prompt_tokens": max_prompt_tokens,
+ "metadata": metadata,
+ "model": model,
+ "parallel_tool_calls": parallel_tool_calls,
+ "response_format": response_format,
+ "stream": stream_parameter,
+ "temperature": temperature,
+ "thread": thread,
+ "tool_choice": tool_choice,
+ "tool_resources": tool_resources,
+ "tools": tools,
+ "top_p": top_p,
+ "truncation_strategy": truncation_strategy,
+ }
+ body = {k: v for k, v in body.items() if v is not None}
+ content_type = content_type or "application/json"
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore
+
+ _request = build_assistants_create_thread_and_run_request(
+ content_type=content_type,
+ api_version=self._config.api_version,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError,
StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_run_step( + self, + thread_id: str, + run_id: str, + step_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + **kwargs: Any + ) -> _models.RunStep: + """Gets a single run step from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param step_id: Identifier of the run step. Required. + :type step_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :return: RunStep. The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_assistants_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + include=include, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. 
+ :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfRunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) + + _request = build_assistants_list_run_steps_request( + thread_id=thread_id, + run_id=run_id, + include=include, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_files( + self, *, purpose: 
Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any + ) -> _models.FileListResponse: + """Gets a list of previously uploaded files. + + :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is + None. + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose + :return: FileListResponse. The FileListResponse is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.FileListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + + _request = build_assistants_list_files_request( + purpose=purpose, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def _upload_file(self, body: _models._models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile: ... + @overload + async def _upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: ... + + @distributed_trace_async + async def _upload_file( + self, body: Union[_models._models.UploadFileRequest, JSON], **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Multipart body. Is either a UploadFileRequest type or a JSON type. Required. + :type body: ~azure.ai.assistants.models._models.UploadFileRequest or JSON + :return: OpenAIFile. 
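+
+ .. admonition:: Illustrative example
+
+    A hedged sketch of listing previously uploaded files, reusing the ``client``
+    from the ``create_run`` example above; it assumes ``FileListResponse`` exposes
+    a ``data`` list, mirroring the OpenAI wire format::
+
+        files = await client.list_files(purpose="assistants")
+        for f in files.data:
+            print(f.id, f.filename)
+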
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + _body = body.as_dict() if isinstance(body, _model_base.Model) else body + _file_fields: List[str] = ["file"] + _data_fields: List[str] = ["purpose", "filename"] + _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) + + _request = build_assistants_upload_file_request( + api_version=self._config.api_version, + files=_files, + data=_data, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + """Delete a previously uploaded file. + + :param file_id: The ID of the file to delete. Required. + :type file_id: str + :return: FileDeletionStatus. 
The FileDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.FileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_assistants_delete_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: + """Returns information about a specific file. Does not retrieve file content. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: OpenAIFile. 
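+
+ .. admonition:: Illustrative example
+
+    A hedged sketch of the file-metadata round-trip, reusing the ``client`` from
+    the ``create_run`` example above; it assumes ``FileDeletionStatus`` exposes a
+    ``deleted`` flag, mirroring the OpenAI wire format::
+
+        info = await client.get_file(file_id="<file-id>")
+        print(info.filename, info.purpose)
+        status = await client.delete_file(file_id="<file-id>")
+        print(status.deleted)
+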
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + _request = build_assistants_get_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def _get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[bytes]: + """Retrieves the raw content of a specific file. + + :param file_id: The ID of the file to retrieve. Required. 
+ :type file_id: str + :return: AsyncIterator[bytes] + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_assistants_get_file_content_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", True) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_stores( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStore: + """Returns a list of vector stores. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStore. 
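+
+ .. admonition:: Illustrative example
+
+    A hedged sketch of cursor pagination, reusing the ``client`` from the
+    ``create_run`` example above; it assumes the pageable model mirrors the OpenAI
+    list shape (``data``, ``has_more``, ``last_id``)::
+
+        page = await client.list_vector_stores(limit=20, order="desc")
+        while True:
+            for store in page.data:
+                print(store.id, store.name)
+            if not page.has_more:
+                break
+            # Resume after the last item of the previous page.
+            page = await client.list_vector_stores(limit=20, order="desc", after=page.last_id)
+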
The OpenAIPageableListOfVectorStore is compatible
+ with MutableMapping
+ :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None)
+
+ _request = build_assistants_list_vector_stores_request(
+ limit=limit,
+ order=order,
+ after=after,
+ before=before,
+ api_version=self._config.api_version,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def create_vector_store(
+ self,
+ *,
+ content_type: str = "application/json",
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ store_configuration: Optional[_models.VectorStoreConfiguration] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store.
+
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword store_configuration: The vector store configuration, used when the vector store is
+ created from Azure asset URIs. Default value is None.
+ :paramtype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, the
+ auto strategy will be used. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store(
+ self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_vector_store(
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ store_configuration: Optional[_models.VectorStoreConfiguration] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword store_configuration: The vector store configuration, used when the vector store is
+ created from Azure asset URIs. Default value is None.
+ :paramtype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, the
+ auto strategy will be used. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format.
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "chunking_strategy": chunking_strategy, + "configuration": store_configuration, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_vector_store_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + """Returns the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStore. 
The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)
+
+        _request = build_assistants_get_vector_store_request(
+            vector_store_id=vector_store_id,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.VectorStore, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    async def modify_vector_store(
+        self,
+        vector_store_id: str,
+        *,
+        content_type: str = "application/json",
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def modify_vector_store(
+        self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def modify_vector_store(
+        self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def modify_vector_store(
+        self,
+        vector_store_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore.
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"expires_after": expires_after, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_modify_vector_store_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: + """Deletes the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStoreDeletionStatus. 
The VectorStoreDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + + _request = build_assistants_delete_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_store_files( + self, + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.assistants.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. 
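+
+        Example (an illustrative sketch; the store ID is a placeholder, and the ``data``
+        and ``status`` attributes are assumptions based on the OpenAI-style list shapes):
+
+        .. code-block:: python
+
+            # List up to 50 successfully processed files in the store.
+            page = await client.list_vector_store_files(
+                vector_store_id="vs-example-id", filter="completed", limit=50
+            )
+            for vs_file in page.data:
+                print(vs_file.id, vs_file.status)
+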
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_assistants_list_vector_store_files_request( + vector_store_id=vector_store_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store_file( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"chunking_strategy": chunking_strategy, "data_source": data_source, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_assistants_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_assistants_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. 
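+
+        Example (an illustrative sketch using the keyword-argument overload; the store
+        and file IDs are placeholders):
+
+        .. code-block:: python
+
+            batch = await client.create_vector_store_file_batch(
+                vector_store_id="vs-example-id",
+                file_ids=["assistant-file-1", "assistant-file-2"],
+            )
+            print(batch.id)
+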
+ :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file_batch( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_assistants_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_assistants_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_assistants_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.assistants.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. 
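+
+        Example (an illustrative sketch; the polling interval and the ``status`` value,
+        assumed to mirror the file status filter values above, are not prescribed by this API):
+
+        .. code-block:: python
+
+            import asyncio
+
+            # Wait for the batch to finish processing, then page through its files.
+            batch = await client.get_vector_store_file_batch(
+                vector_store_id="vs-example-id", batch_id="vsfb-example-id"
+            )
+            while batch.status == "in_progress":
+                await asyncio.sleep(1)  # simple polling; tune for production use
+                batch = await client.get_vector_store_file_batch(
+                    vector_store_id="vs-example-id", batch_id="vsfb-example-id"
+                )
+            page = await client.list_vector_store_file_batch_files(
+                vector_store_id="vs-example-id", batch_id="vsfb-example-id"
+            )
+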
before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_assistants_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py new file mode 100644 index 000000000000..5a7115701b6b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py @@ -0,0 +1,25 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List, TYPE_CHECKING + +if TYPE_CHECKING: + from azure.core.credentials import AccessToken + from azure.core.credentials_async import AsyncTokenCredential + + +__all__: List[str] = [] + + +def patch_sdk(): + """Do not remove from this file. 
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py
new file mode 100644
index 000000000000..a51ea8cfefaf
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py
@@ -0,0 +1,2517 @@
+# pylint: disable=too-many-lines
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+import asyncio  # pylint: disable = do-not-import-asyncio
+import io
+import logging
+import os
+import time
+
+from pathlib import Path
+
+
+from typing import (
+    IO,
+    TYPE_CHECKING,
+    Any,
+    AsyncIterator,
+    Dict,
+    List,
+    MutableMapping,
+    Optional,
+    Union,
+    cast,
+    overload,
+)
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from .. import models as _models
+from .._vendor import FileType
+from ..models._enums import FilePurpose, RunStatus
+from ._client import AssistantsClient as AssistantsClientGenerated
+
+if TYPE_CHECKING:
+    from .. import _types
+
+    # pylint: disable=unused-import,ungrouped-imports
+    from azure.core.credentials import AccessToken, AzureKeyCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+
+logger = logging.getLogger(__name__)
+
+JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
+_Unset: Any = object()
+
+
+class AssistantsClient(AssistantsClientGenerated):  # pylint: disable=client-accepts-api-version-keyword
+
+    def __init__(
+        self, endpoint: str, credential: Union["AzureKeyCredential", "AsyncTokenCredential"], **kwargs: Any
+    ) -> None:
+        # TODO: Remove this custom code once the 1DP service becomes available.
+        if not endpoint:
+            raise ValueError("Connection string or 1DP endpoint is required")
+        parts = endpoint.split(";")
+        # Detect a legacy connection string (four ";"-separated parts) and build the endpoint the old way.
+        if len(parts) == 4:
+            endpoint = "https://" + parts[0]
+            subscription_id = parts[1]
+            resource_group_name = parts[2]
+            project_name = parts[3]
+            endpoint = (
+                f"{endpoint}/agents/v1.0/subscriptions"
+                f"/{subscription_id}/resourceGroups/{resource_group_name}/providers"
+                f"/Microsoft.MachineLearningServices/workspaces/{project_name}"
+            )
+            # Override the credential scope with the legacy one.
+            kwargs["credential_scopes"] = ["https://management.azure.com/.default"]
+        # End of legacy endpoint handling.
+        super().__init__(endpoint, credential, **kwargs)
+        self._toolset: Dict[str, _models.AsyncToolSet] = {}
+
+    # pylint: disable=arguments-differ
+    @overload
+    async def create_assistant(  # pylint: disable=arguments-differ
+        self,
+        *,
+        model: str,
+        content_type: str = "application/json",
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        instructions: Optional[str] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        tool_resources: Optional[_models.ToolResources] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.Assistant:
+        """Creates a new assistant.
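+
+        Example (an illustrative sketch; the model deployment name is a placeholder and
+        ``client`` is assumed to be an authenticated async ``AssistantsClient``):
+
+        .. code-block:: python
+
+            assistant = await client.create_assistant(
+                model="gpt-4o",  # placeholder deployment name
+                name="my-assistant",
+                instructions="You are a helpful assistant.",
+            )
+            print(assistant.id)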
+
+        :keyword model: The ID of the model to use. Required.
+        :paramtype model: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword name: The name of the new assistant. Default value is None.
+        :paramtype name: str
+        :keyword description: The description of the new assistant. Default value is None.
+        :paramtype description: str
+        :keyword instructions: The system instructions for the new assistant to use. Default value is None.
+        :paramtype instructions: str
+        :keyword tools: The collection of tools to enable for the new assistant. Default value is None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources
+         are specific to the type of tool. For example, the ``code_interpreter``
+         tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector
+         store IDs. Default value is None.
+        :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random,
+         while lower values like 0.2 will make it more focused and deterministic. Default value is
+         None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass.
+         So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword response_format: The response format of the tool calls used by this assistant. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: Assistant. The Assistant is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    # pylint: disable=arguments-differ
+    @overload
+    async def create_assistant(  # pylint: disable=arguments-differ
+        self,
+        *,
+        model: str,
+        content_type: str = "application/json",
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        instructions: Optional[str] = None,
+        toolset: Optional[_models.AsyncToolSet] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.Assistant:
+        """Creates a new assistant.
+
+        :keyword model: The ID of the model to use. Required.
+        :paramtype model: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword name: The name of the new assistant. Default value is None.
+        :paramtype name: str
+        :keyword description: The description of the new assistant. Default value is None.
+        :paramtype description: str
+        :keyword instructions: The system instructions for the new assistant to use. Default value is None.
+        :paramtype instructions: str
+        :keyword toolset: The collection of tools and resources (alternative to `tools` and `tool_resources`
+         and adds automatic execution logic for functions). Default value is None.
+        :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random,
+         while lower values like 0.2 will make it more focused and deterministic. Default value is
+         None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass.
+         So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword response_format: The response format of the tool calls used by this assistant. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: Assistant. The Assistant is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_assistant(
+        self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.Assistant:
+        """Creates a new assistant.
+
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Assistant. The Assistant is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_assistant(
+        self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.Assistant:
+        """Creates a new assistant.
+
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Assistant. The Assistant is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create_assistant(
+        self,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        model: str = _Unset,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        instructions: Optional[str] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        tool_resources: Optional[_models.ToolResources] = None,
+        toolset: Optional[_models.AsyncToolSet] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.Assistant:
+        """
+        Creates a new assistant with various configurations, delegating to the generated operations.
+
+        :param body: JSON or IO[bytes]. Required if `model` is not provided.
+        :type body: Union[JSON, IO[bytes]]
+        :keyword model: The ID of the model to use. Required if `body` is not provided.
+        :paramtype model: str
+        :keyword name: The name of the new assistant.
+        :paramtype name: Optional[str]
+        :keyword description: A description for the new assistant.
+        :paramtype description: Optional[str]
+        :keyword instructions: System instructions for the assistant.
+        :paramtype instructions: Optional[str]
+        :keyword tools: List of tool definitions for the assistant.
+        :paramtype tools: Optional[List[_models.ToolDefinition]]
+        :keyword tool_resources: Resources used by the assistant's tools.
+        :paramtype tool_resources: Optional[_models.ToolResources]
+        :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources`
+         and adds automatic execution logic for functions).
+        :paramtype toolset: Optional[_models.AsyncToolSet]
+        :keyword temperature: Sampling temperature for generating assistant responses.
+        :paramtype temperature: Optional[float]
+        :keyword top_p: Nucleus sampling parameter.
+        :paramtype top_p: Optional[float]
+        :keyword response_format: Response format for tool calls.
+        :paramtype response_format: Optional["_types.AssistantsApiResponseFormatOption"]
+        :keyword metadata: Key/value pairs for storing additional information.
+        :paramtype metadata: Optional[Dict[str, str]]
+        :keyword content_type: Content type of the body.
+        :paramtype content_type: str
+        :return: An Assistant object.
+        :rtype: _models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails.
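+
+        .. admonition:: Example (illustrative sketch only):
+
+            A minimal, hedged usage sketch, not verbatim product code: the async
+            client import path, constructor shape, endpoint placeholder, and model
+            deployment name below are assumptions for illustration::
+
+                import asyncio
+                from azure.identity.aio import DefaultAzureCredential
+                from azure.ai.assistants.aio import AssistantsClient  # assumed import path
+
+                async def main() -> None:
+                    async with AssistantsClient(
+                        endpoint="<your-endpoint>",  # placeholder
+                        credential=DefaultAzureCredential(),
+                    ) as client:
+                        assistant = await client.create_assistant(
+                            model="<model-deployment-name>",  # placeholder
+                            name="my-assistant",
+                            instructions="You are a helpful assistant.",
+                        )
+                        print(assistant.id)
+
+                asyncio.run(main())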
+        """
+        if body is not _Unset:
+            if isinstance(body, io.IOBase):
+                return await super().create_assistant(body=body, content_type=content_type, **kwargs)
+            return await super().create_assistant(body=body, **kwargs)
+
+        if toolset is not None:
+            tools = toolset.definitions
+            tool_resources = toolset.resources
+
+        new_assistant = await super().create_assistant(
+            model=model,
+            name=name,
+            description=description,
+            instructions=instructions,
+            tools=tools,
+            tool_resources=tool_resources,
+            temperature=temperature,
+            top_p=top_p,
+            response_format=response_format,
+            metadata=metadata,
+            **kwargs,
+        )
+
+        if toolset is not None:
+            self._toolset[new_assistant.id] = toolset
+        return new_assistant
+
+    # pylint: disable=arguments-differ
+    @overload
+    async def update_assistant(  # pylint: disable=arguments-differ
+        self,
+        assistant_id: str,
+        *,
+        content_type: str = "application/json",
+        model: Optional[str] = None,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        instructions: Optional[str] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        tool_resources: Optional[_models.ToolResources] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.Assistant:
+        """Modifies an existing assistant.
+
+        :param assistant_id: The ID of the assistant to modify. Required.
+        :type assistant_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword model: The ID of the model to use. Default value is None.
+        :paramtype model: str
+        :keyword name: The modified name for the assistant to use. Default value is None.
+        :paramtype name: str
+        :keyword description: The modified description for the assistant to use. Default value is None.
+        :paramtype description: str
+        :keyword instructions: The modified system instructions for the assistant to use. Default value
+         is None.
+        :paramtype instructions: str
+        :keyword tools: The modified collection of tools to enable for the assistant. Default value is
+         None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources
+         are specific to the type of tool. For example,
+         the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool
+         requires a list of vector store IDs. Default value is None.
+        :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random,
+         while lower values like 0.2 will make it more focused and deterministic. Default value is
+         None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass.
+         So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword response_format: The response format of the tool calls used by this assistant. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: Assistant. The Assistant is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    # pylint: disable=arguments-differ
+    @overload
+    async def update_assistant(  # pylint: disable=arguments-differ
+        self,
+        assistant_id: str,
+        *,
+        content_type: str = "application/json",
+        model: Optional[str] = None,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        instructions: Optional[str] = None,
+        toolset: Optional[_models.AsyncToolSet] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.Assistant:
+        """Modifies an existing assistant.
+
+        :param assistant_id: The ID of the assistant to modify. Required.
+        :type assistant_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword model: The ID of the model to use. Default value is None.
+        :paramtype model: str
+        :keyword name: The modified name for the assistant to use. Default value is None.
+        :paramtype name: str
+        :keyword description: The modified description for the assistant to use. Default value is None.
+        :paramtype description: str
+        :keyword instructions: The modified system instructions for the assistant to use. Default value
+         is None.
+        :paramtype instructions: str
+        :keyword toolset: The collection of tools and resources (alternative to `tools` and `tool_resources`
+         and adds automatic execution logic for functions). Default value is None.
+        :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random,
+         while lower values like 0.2 will make it more focused and deterministic. Default value is
+         None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass.
+         So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword response_format: The response format of the tool calls used by this assistant. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: Assistant. The Assistant is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def update_assistant(
+        self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.Assistant:
+        """Modifies an existing assistant.
+
+        :param assistant_id: The ID of the assistant to modify. Required.
+        :type assistant_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Assistant. The Assistant is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def update_assistant(
+        self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.Assistant:
+        """Modifies an existing assistant.
+
+        :param assistant_id: The ID of the assistant to modify. Required.
+        :type assistant_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Assistant. The Assistant is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def update_assistant(
+        self,
+        assistant_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        model: Optional[str] = None,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        instructions: Optional[str] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        tool_resources: Optional[_models.ToolResources] = None,
+        toolset: Optional[_models.AsyncToolSet] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        content_type: str = "application/json",
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.Assistant:
+        """Modifies an existing assistant.
+
+        :param assistant_id: The ID of the assistant to modify. Required.
+        :type assistant_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword model: The ID of the model to use. Default value is None.
+        :paramtype model: str
+        :keyword name: The modified name for the assistant to use. Default value is None.
+        :paramtype name: str
+        :keyword description: The modified description for the assistant to use. Default value is None.
+        :paramtype description: str
+        :keyword instructions: The modified system instructions for the assistant to use. Default value
+         is None.
+        :paramtype instructions: str
+        :keyword tools: The modified collection of tools to enable for the assistant. Default value is
+         None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources
+         are specific to the type of tool. For example,
+         the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool
+         requires a list of vector store IDs. Default value is None.
+        :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources
+        :keyword toolset: The collection of tools and resources (alternative to `tools` and `tool_resources`
+         and adds automatic execution logic for functions). Default value is None.
+        :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random,
+         while lower values like 0.2 will make it more focused and deterministic. Default value is
+         None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass.
+         So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword response_format: The response format of the tool calls used by this assistant. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: Assistant. The Assistant is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.Assistant
+        :raises ~azure.core.exceptions.HttpResponseError:
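+
+        .. admonition:: Example (illustrative sketch only):
+
+            A hedged sketch assuming an authenticated async client named
+            ``client`` and an existing ``assistant``, as in the
+            ``create_assistant`` example above; the instructions text is a
+            placeholder::
+
+                assistant = await client.update_assistant(
+                    assistant_id=assistant.id,
+                    instructions="Answer in one short paragraph.",  # placeholder
+                )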
+        """
+        self._validate_tools_and_tool_resources(tools, tool_resources)
+
+        if body is not _Unset:
+            if isinstance(body, io.IOBase):
+                return await super().update_assistant(body=body, content_type=content_type, **kwargs)
+            return await super().update_assistant(body=body, **kwargs)
+
+        if toolset is not None:
+            self._toolset[assistant_id] = toolset
+            tools = toolset.definitions
+            tool_resources = toolset.resources
+
+        return await super().update_assistant(
+            assistant_id=assistant_id,
+            model=model,
+            name=name,
+            description=description,
+            instructions=instructions,
+            tools=tools,
+            tool_resources=tool_resources,
+            temperature=temperature,
+            top_p=top_p,
+            response_format=response_format,
+            metadata=metadata,
+            **kwargs,
+        )
+
+    def _validate_tools_and_tool_resources(
+        self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]
+    ):
+        if tool_resources is None:
+            return
+        if tools is None:
+            tools = []
+
+        if tool_resources.file_search is not None and not any(
+            isinstance(tool, _models.FileSearchToolDefinition) for tool in tools
+        ):
+            raise ValueError(
+                "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided"
+            )
+        if tool_resources.code_interpreter is not None and not any(
+            isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools
+        ):
+            raise ValueError(
+                "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided"
+            )
+
+    # pylint: disable=arguments-differ
+    @overload
+    async def create_run(  # pylint: disable=arguments-differ
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        content_type: str = "application/json",
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword model: The overridden model name that the assistant should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
+        :keyword tools: The overridden list of enabled tools that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+         AssistantsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+         ~azure.ai.assistants.models.AssistantsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_run(
+        self,
+        thread_id: str,
+        body: JSON,
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_run(
+        self,
+        thread_id: str,
+        body: IO[bytes],
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create_run(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        assistant_id: str = _Unset,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword model: The overridden model name that the assistant should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
+        :keyword tools: The overridden list of enabled tools that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+         AssistantsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+         ~azure.ai.assistants.models.AssistantsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
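+
+        .. admonition:: Example (illustrative sketch only):
+
+            A hedged sketch assuming ``client``, ``thread``, and ``assistant``
+            were created earlier with this client (names are placeholders).
+            ``create_run`` only starts the run; poll ``get_run`` until a
+            terminal status, or use ``create_and_process_run`` below::
+
+                import asyncio
+
+                run = await client.create_run(
+                    thread_id=thread.id,
+                    assistant_id=assistant.id,
+                )
+                while run.status in ("queued", "in_progress"):
+                    await asyncio.sleep(1)
+                    run = await client.get_run(thread_id=thread.id, run_id=run.id)
+                print(run.status)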
+        """
+
+        if isinstance(body, dict):  # Handle overload with JSON body.
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs)
+
+        elif assistant_id is not _Unset:  # Handle overload with keyword arguments.
+            response = super().create_run(
+                thread_id,
+                include=include,
+                assistant_id=assistant_id,
+                model=model,
+                instructions=instructions,
+                additional_instructions=additional_instructions,
+                additional_messages=additional_messages,
+                tools=tools,
+                stream_parameter=False,
+                stream=False,
+                temperature=temperature,
+                top_p=top_p,
+                max_prompt_tokens=max_prompt_tokens,
+                max_completion_tokens=max_completion_tokens,
+                truncation_strategy=truncation_strategy,
+                tool_choice=tool_choice,
+                response_format=response_format,
+                parallel_tool_calls=parallel_tool_calls,
+                metadata=metadata,
+                **kwargs,
+            )
+
+        elif isinstance(body, io.IOBase):  # Handle overload with binary body.
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs)
+
+        else:
+            raise ValueError("Invalid combination of arguments provided.")
+
+        return await response
+
+    @distributed_trace_async
+    async def create_and_process_run(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
+        toolset: Optional[_models.AsyncToolSet] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        sleep_interval: int = 1,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an assistant thread and processes the run.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword model: The overridden model name that the assistant should use to run the thread.
+         Default value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the assistant should use to run
+         the thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
+        :keyword toolset: The collection of tools and resources (alternative to `tools` and
+         `tool_resources`). Default value is None.
+        :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+         AssistantsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or
+         ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+         ~azure.ai.assistants.models.AssistantsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword sleep_interval: The time in seconds to wait between polling the service for run status.
+         Default value is 1.
+        :paramtype sleep_interval: int
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
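+
+        .. admonition:: Example (illustrative sketch only):
+
+            A hedged sketch assuming ``client``, ``thread``, and ``assistant``
+            exist as in the earlier examples. This helper polls the run for you
+            and executes local function tools from the toolset when the run
+            requires them::
+
+                run = await client.create_and_process_run(
+                    thread_id=thread.id,
+                    assistant_id=assistant.id,
+                    sleep_interval=2,
+                )
+                print(run.status)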
+        """
+        # Create and initiate the run with additional parameters
+        run = await self.create_run(
+            thread_id=thread_id,
+            assistant_id=assistant_id,
+            include=include,
+            model=model,
+            instructions=instructions,
+            additional_instructions=additional_instructions,
+            additional_messages=additional_messages,
+            tools=toolset.definitions if toolset else None,
+            temperature=temperature,
+            top_p=top_p,
+            max_prompt_tokens=max_prompt_tokens,
+            max_completion_tokens=max_completion_tokens,
+            truncation_strategy=truncation_strategy,
+            tool_choice=tool_choice,
+            response_format=response_format,
+            parallel_tool_calls=parallel_tool_calls,
+            metadata=metadata,
+            **kwargs,
+        )
+
+        # Monitor and process the run status
+        while run.status in [
+            RunStatus.QUEUED,
+            RunStatus.IN_PROGRESS,
+            RunStatus.REQUIRES_ACTION,
+        ]:
+            # Use a non-blocking sleep so the event loop is not stalled for the
+            # whole polling interval (assumes `asyncio` is imported at module level).
+            await asyncio.sleep(sleep_interval)
+            run = await self.get_run(thread_id=thread_id, run_id=run.id)
+
+            if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction):
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls
+                if not tool_calls:
+                    logging.warning("No tool calls provided - cancelling run")
+                    await self.cancel_run(thread_id=thread_id, run_id=run.id)
+                    break
+                # We need the toolset only if we are executing a local function. If
+                # the tool is azure_function, we only need to wait until it finishes.
+                if any(tool_call.type == "function" for tool_call in tool_calls):
+                    toolset = toolset or self._toolset.get(run.assistant_id)
+                    if toolset:
+                        tool_outputs = await toolset.execute_tool_calls(tool_calls)
+                    else:
+                        raise ValueError("Toolset is not available in the client.")
+
+                    logging.info("Tool outputs: %s", tool_outputs)
+                    if tool_outputs:
+                        await self.submit_tool_outputs_to_run(
+                            thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs
+                        )
+
+            logging.info("Current run status: %s", run.status)
+
+        return run
+
+    @overload
+    async def create_stream(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        content_type: str = "application/json",
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: None = None,
+        **kwargs: Any,
+    ) -> _models.AsyncAssistantRunStream[_models.AsyncAssistantEventHandler]:
+        """Creates a new stream for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword model: The overridden model name that the assistant should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
+        :keyword tools: The overridden list of enabled tools that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+         AssistantsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+         ~azure.ai.assistants.models.AssistantsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: None
+        :paramtype event_handler: None. _models.AsyncAssistantEventHandler will be applied as default.
+        :return: AssistantRunStream. The AssistantRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_stream(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        content_type: str = "application/json",
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: _models.BaseAsyncAssistantEventHandlerT,
+        **kwargs: Any,
+    ) -> _models.AsyncAssistantRunStream[_models.BaseAsyncAssistantEventHandlerT]:
+        """Creates a new stream for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword model: The overridden model name that the assistant should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
+        :keyword tools: The overridden list of enabled tools that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+         AssistantsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+         ~azure.ai.assistants.models.AssistantsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: The event handler to use for processing events during the run.
+        :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler
+        :return: AssistantRunStream. The AssistantRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_stream(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]],
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        event_handler: None = None,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.AsyncAssistantRunStream[_models.AsyncAssistantEventHandler]:
+        """Creates a new run for an assistant thread.
+
+        Terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON or IO[bytes]
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword event_handler: None
+        :paramtype event_handler: None. _models.AsyncAssistantEventHandler will be applied as default.
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AssistantRunStream. The AssistantRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_stream(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]],
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        event_handler: _models.BaseAsyncAssistantEventHandlerT,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.AsyncAssistantRunStream[_models.BaseAsyncAssistantEventHandlerT]:
+        """Creates a new run for an assistant thread.
+
+        Terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON or IO[bytes]
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AssistantRunStream. The AssistantRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
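+
+        .. admonition:: Example (illustrative sketch only):
+
+            A hedged sketch assuming ``client``, ``thread``, and ``assistant``
+            exist as in the earlier examples; ``MyEventHandler`` is a
+            hypothetical subclass of ``AsyncAssistantEventHandler``, and the
+            ``async with`` / ``until_done`` pattern mirrors comparable Azure SDK
+            streaming clients::
+
+                async with await client.create_stream(
+                    thread_id=thread.id,
+                    body={"assistant_id": assistant.id},
+                    event_handler=MyEventHandler(),  # hypothetical handler
+                ) as stream:
+                    await stream.until_done()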
+ :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_stream( # pyright: ignore[reportInconsistentOverload] + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.BaseAsyncAssistantEventHandlerT] = None, + **kwargs: Any, + ) -> _models.AsyncAssistantRunStream[_models.BaseAsyncAssistantEventHandlerT]: + """Creates a new run for an assistant thread. + + Terminating when the Run enters a terminal state with a `data: [DONE]` message. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+ :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+ the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+ AssistantsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+ ~azure.ai.assistants.models.AssistantsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+ AssistantsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+ or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+ :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use.
+ Default value is None.
+ :paramtype parallel_tool_calls: bool
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler
+ :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming.
+ :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if isinstance(body, dict): # Handle overload with JSON body.
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs)
+
+ elif assistant_id is not _Unset: # Handle overload with keyword arguments.
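+ # Keyword-arguments path: the generated create_run is invoked with stream=True, so the
+ # service replies with a server-sent-event stream rather than a completed run object.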
+ response = super().create_run( + thread_id, + assistant_id=assistant_id, + include=include, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=True, + stream=True, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + parallel_tool_calls=parallel_tool_calls, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) + + if not event_handler: + event_handler = cast(_models.BaseAsyncAssistantEventHandlerT, _models.AsyncAssistantEventHandler()) + + return _models.AsyncAssistantRunStream( + response_iterator=response_iterator, + submit_tool_outputs=self._handle_submit_tool_outputs, + event_handler=event_handler, + ) + + # pylint: disable=arguments-differ + @overload + async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. 
Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def submit_tool_outputs_to_run(
+ self,
+ thread_id: str,
+ run_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ tool_outputs: List[_models.ToolOutput] = _Unset,
+ **kwargs: Any,
+ ) -> _models.ThreadRun:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword tool_outputs: Required.
+ :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if isinstance(body, dict):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ elif tool_outputs is not _Unset:
+ response = super().submit_tool_outputs_to_run(
+ thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
+ )
+
+ elif isinstance(body, io.IOBase):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ else:
+ raise ValueError("Invalid combination of arguments provided.")
+
+ return await response
+
+ @overload
+ async def submit_tool_outputs_to_stream(
+ self,
+ thread_id: str,
+ run_id: str,
+ body: Union[JSON, IO[bytes]],
+ *,
+ event_handler: _models.BaseAsyncAssistantEventHandler,
+ content_type: str = "application/json",
+ **kwargs: Any,
+ ) -> None:
+ """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'. The stream terminates when the run enters a terminal state, signaled by a
+ ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword event_handler: The event handler to use for processing events during the run.
+ Required.
+ :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def submit_tool_outputs_to_stream(
+ self,
+ thread_id: str,
+ run_id: str,
+ *,
+ tool_outputs: List[_models.ToolOutput],
+ content_type: str = "application/json",
+ event_handler: _models.BaseAsyncAssistantEventHandler,
+ **kwargs: Any,
+ ) -> None:
+ """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'. The stream terminates when the run enters a terminal state, signaled by a
+ ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :keyword tool_outputs: Required.
+ :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput]
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword event_handler: The event handler to use for processing events during the run.
+ :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload]
+ self,
+ thread_id: str,
+ run_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ tool_outputs: List[_models.ToolOutput] = _Unset,
+ event_handler: _models.BaseAsyncAssistantEventHandler,
+ **kwargs: Any,
+ ) -> None:
+ """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'. The stream terminates when the run enters a terminal state, signaled by a
+ ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword tool_outputs: Required.
+ :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput]
+ :keyword event_handler: The event handler to use for processing events during the run.
+ :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if isinstance(body, dict):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ elif tool_outputs is not _Unset:
+ response = super().submit_tool_outputs_to_run(
+ thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
+ )
+
+ elif isinstance(body, io.IOBase):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ else:
+ raise ValueError("Invalid combination of arguments provided.")
+
+ # Cast the response to AsyncIterator[bytes] for type correctness
+ response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response)
+
+ event_handler.initialize(response_iterator, self._handle_submit_tool_outputs)
+
+ async def _handle_submit_tool_outputs(
+ self, run: _models.ThreadRun, event_handler: _models.BaseAsyncAssistantEventHandler
+ ) -> None:
+ if isinstance(run.required_action, _models.SubmitToolOutputsAction):
+ tool_calls = run.required_action.submit_tool_outputs.tool_calls
+ if not tool_calls:
+ logger.debug("No tool calls to execute.")
+ return
+
+ # The tool set is only needed when executing a local function. For a tool such as
+ # azure_function, we simply wait for the service-side execution to finish.
+ if any(tool_call.type == "function" for tool_call in tool_calls):
+ toolset = self._toolset.get(run.assistant_id)
+ if toolset:
+ tool_outputs = await toolset.execute_tool_calls(tool_calls)
+ else:
+ logger.debug("Toolset is not available in the client.")
+ return
+
+ logger.info("Tool outputs: %s", tool_outputs)
+ if tool_outputs:
+ await self.submit_tool_outputs_to_stream(
+ thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler
+ )
+
+ @distributed_trace_async
+ async def upload_file(
+ self,
+ body: Optional[JSON] = None,
+ *,
+ file: Optional[FileType] = None,
+ file_path: Optional[str] = None,
+ purpose: Union[str, _models.FilePurpose, None] = None,
+ filename: Optional[str] = None,
+ **kwargs: Any,
+ ) -> _models.OpenAIFile:
+ """
+ Uploads a file for use by other operations, delegating to the generated operations.
+
+ :param body: JSON. Required if ``file`` and ``purpose`` are not provided.
+ :type body: Optional[JSON]
+ :keyword file: File content. Required if ``body`` and ``purpose`` are not provided.
+ :paramtype file: Optional[FileType]
+ :keyword file_path: Path to the file. Required if ``body`` and ``purpose`` are not provided.
+ :paramtype file_path: Optional[str]
+ :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required if ``body`` and ``file`` are not provided.
+ :paramtype purpose: Union[str, _models.FilePurpose, None]
+ :keyword filename: The name of the file.
+ :paramtype filename: Optional[str]
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: _models.OpenAIFile
+ :raises FileNotFoundError: If the file_path is invalid.
+ :raises IOError: If there are issues with reading the file.
+ :raises ~azure.core.exceptions.HttpResponseError: For HTTP errors.
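+
+ A minimal usage sketch (illustrative only; it assumes an authenticated
+ ``AssistantsClient`` named ``client`` and a local file ``data.csv``):
+
+ .. code-block:: python
+
+ # Upload a local file so assistants tools such as file_search can use it.
+ uploaded = await client.upload_file(file_path="data.csv", purpose="assistants")
+ print(uploaded.id)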
+ """ + # If a JSON body is provided directly, pass it along + if body is not None: + return await super()._upload_file(body=body, **kwargs) + + # Convert FilePurpose enum to string if necessary + if isinstance(purpose, FilePurpose): + purpose = purpose.value + + if file is not None and purpose is not None: + return await super()._upload_file(body={"file": file, "purpose": purpose, "filename": filename}, **kwargs) + + if file_path is not None and purpose is not None: + if not os.path.isfile(file_path): + raise FileNotFoundError(f"The file path provided does not exist: {file_path}") + + try: + with open(file_path, "rb") as f: + content = f.read() + + # If no explicit filename is provided, use the base name + base_filename = filename or os.path.basename(file_path) + file_content: FileType = (base_filename, content) + + return await super()._upload_file(body={"file": file_content, "purpose": purpose}, **kwargs) + except IOError as e: + raise IOError(f"Unable to read file: {file_path}.") from e + + raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") + + @overload + async def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file_and_poll( + self, + *, + file: FileType, + purpose: Union[str, _models.FilePurpose], + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. + :paramtype file: ~azure.ai.assistants._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file_and_poll( + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def upload_file_and_poll(
+ self,
+ body: Optional[JSON] = None,
+ *,
+ file: Optional[FileType] = None,
+ file_path: Optional[str] = None,
+ purpose: Union[str, _models.FilePurpose, None] = None,
+ filename: Optional[str] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.OpenAIFile:
+ """
+ Uploads a file for use by other operations, delegating to the generated operations.
+
+ :param body: JSON. Required if ``file`` and ``purpose`` are not provided.
+ :type body: Optional[JSON]
+ :keyword file: File content. Required if ``body`` and ``purpose`` are not provided.
+ :paramtype file: Optional[FileType]
+ :keyword file_path: Path to the file. Required if ``body`` and ``purpose`` are not provided.
+ :paramtype file_path: Optional[str]
+ :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required if ``body`` and ``file`` are not provided.
+ :paramtype purpose: Union[str, _models.FilePurpose, None]
+ :keyword filename: The name of the file.
+ :paramtype filename: Optional[str]
+ :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: _models.OpenAIFile
+ :raises FileNotFoundError: If the file_path is invalid.
+ :raises IOError: If there are issues with reading the file.
+ :raises ~azure.core.exceptions.HttpResponseError: For HTTP errors.
+ """
+ if body is not None:
+ uploaded_file = await self.upload_file(body=body, **kwargs)
+ elif file is not None and purpose is not None:
+ uploaded_file = await self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs)
+ elif file_path is not None and purpose is not None:
+ uploaded_file = await self.upload_file(file_path=file_path, purpose=purpose, **kwargs)
+ else:
+ raise ValueError(
+ "Invalid parameters for upload_file_and_poll. Please provide either 'body', "
+ "or both 'file' and 'purpose', or both 'file_path' and 'purpose'."
+ )
+
+ while uploaded_file.status in ["uploaded", "pending", "running"]:
+ await asyncio.sleep(sleep_interval) # Non-blocking sleep keeps the event loop responsive while polling.
+ uploaded_file = await self.get_file(uploaded_file.id)
+
+ return uploaded_file
+
+ @overload
+ async def create_vector_store_and_poll(
+ self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls until it is ready.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore.
The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_and_poll(
+ self,
+ *,
+ content_type: str = "application/json",
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls until it is ready.
+
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword data_sources: List of Azure assets. Default value is None.
+ :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource]
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_and_poll(
+ self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls until it is ready.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore.
The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_vector_store_and_poll(
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ content_type: str = "application/json",
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls until it is ready.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword data_sources: List of Azure assets. Default value is None.
+ :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource]
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore.
The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if body is not _Unset:
+ if isinstance(body, dict):
+ vector_store = await super().create_vector_store(
+ body=body, content_type=content_type or "application/json", **kwargs
+ )
+ elif isinstance(body, io.IOBase):
+ vector_store = await super().create_vector_store(body=body, content_type=content_type, **kwargs)
+ else:
+ raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).")
+ else:
+ store_configuration = None
+ if data_sources:
+ store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources)
+
+ vector_store = await super().create_vector_store(
+ file_ids=file_ids,
+ store_configuration=store_configuration,
+ name=name,
+ expires_after=expires_after,
+ chunking_strategy=chunking_strategy,
+ metadata=metadata,
+ **kwargs,
+ )
+
+ while vector_store.status == "in_progress":
+ await asyncio.sleep(sleep_interval) # Non-blocking sleep keeps the event loop responsive while polling.
+ vector_store = await super().get_vector_store(vector_store.id)
+
+ return vector_store
+
+ @overload
+ async def create_vector_store_file_batch_and_poll(
+ self,
+ vector_store_id: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch and poll.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_file_batch_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ file_ids: Optional[List[str]] = None,
+ data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
+ content_type: str = "application/json",
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch and poll.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :keyword file_ids: List of file identifiers. Required.
+ :paramtype file_ids: list[str]
+ :keyword data_sources: List of Azure assets. Default value is None.
+ :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource]
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFileBatch.
The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_file_batch_and_poll(
+ self,
+ vector_store_id: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch and poll.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_vector_store_file_batch_and_poll(
+ self,
+ vector_store_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ file_ids: Optional[List[str]] = None,
+ data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ content_type: str = "application/json",
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch and poll.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword file_ids: List of file identifiers. Required.
+ :paramtype file_ids: list[str]
+ :keyword data_sources: List of Azure assets. Default value is None.
+ :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource]
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
+ :keyword content_type: Body parameter content-type. Defaults to "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if body is not _Unset:
+ if isinstance(body, dict):
+ vector_store_file_batch = await super().create_vector_store_file_batch(
+ vector_store_id=vector_store_id,
+ body=body,
+ content_type=content_type or "application/json",
+ **kwargs,
+ )
+ elif isinstance(body, io.IOBase):
+ vector_store_file_batch = await super().create_vector_store_file_batch(
+ vector_store_id=vector_store_id,
+ body=body,
+ content_type=content_type,
+ **kwargs,
+ )
+ else:
+ raise ValueError("Invalid type for 'body'.
Must be a dict (JSON) or file-like (IO[bytes]).")
+ else:
+ vector_store_file_batch = await super().create_vector_store_file_batch(
+ vector_store_id=vector_store_id,
+ file_ids=file_ids,
+ data_sources=data_sources,
+ chunking_strategy=chunking_strategy,
+ **kwargs,
+ )
+
+ while vector_store_file_batch.status == "in_progress":
+ await asyncio.sleep(sleep_interval) # Non-blocking sleep keeps the event loop responsive while polling.
+ vector_store_file_batch = await super().get_vector_store_file_batch(
+ vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id
+ )
+
+ return vector_store_file_batch
+
+ @overload
+ async def create_vector_store_file_and_poll(
+ self,
+ vector_store_id: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_file_and_poll(
+ self,
+ vector_store_id: str,
+ *,
+ content_type: str = "application/json",
+ file_id: Optional[str] = None,
+ data_source: Optional[_models.VectorStoreDataSource] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword file_id: Identifier of the file. Default value is None.
+ :paramtype file_id: str
+ :keyword data_source: Azure asset ID. Default value is None.
+ :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_file_and_poll(
+ self,
+ vector_store_id: str,
+ body: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type.
Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_vector_store_file_and_poll(
+ self,
+ vector_store_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ content_type: str = "application/json",
+ file_id: Optional[str] = None,
+ data_source: Optional[_models.VectorStoreDataSource] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword content_type: Body Parameter content-type. Defaults to 'application/json'.
+ :paramtype content_type: str
+ :keyword file_id: Identifier of the file. Default value is None.
+ :paramtype file_id: str
+ :keyword data_source: Azure asset ID. Default value is None.
+ :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.assistants.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if body is not _Unset:
+ if isinstance(body, dict):
+ vector_store_file = await super().create_vector_store_file(
+ vector_store_id=vector_store_id,
+ body=body,
+ content_type=content_type or "application/json",
+ **kwargs,
+ )
+ elif isinstance(body, io.IOBase):
+ vector_store_file = await super().create_vector_store_file(
+ vector_store_id=vector_store_id,
+ body=body,
+ content_type=content_type,
+ **kwargs,
+ )
+ else:
+ raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).")
+ else:
+ vector_store_file = await super().create_vector_store_file(
+ vector_store_id=vector_store_id,
+ file_id=file_id,
+ data_source=data_source,
+ chunking_strategy=chunking_strategy,
+ **kwargs,
+ )
+
+ while vector_store_file.status == "in_progress":
+ await asyncio.sleep(sleep_interval) # Non-blocking sleep keeps the event loop responsive while polling.
+ vector_store_file = await super().get_vector_store_file(
+ vector_store_id=vector_store_id, file_id=vector_store_file.id
+ )
+
+ return vector_store_file
+
+ @distributed_trace_async
+ async def get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[bytes]:
+ """
+ Asynchronously returns file content as a byte stream for the given file_id.
+
+ :param file_id: The ID of the file to retrieve. Required.
+ :type file_id: str
+ :return: An async iterator that yields bytes from the file content.
+ :rtype: AsyncIterator[bytes]
+ :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails.
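+
+ A minimal usage sketch (illustrative only; it assumes an authenticated
+ ``AssistantsClient`` named ``client`` and a valid file ID):
+
+ .. code-block:: python
+
+ # Stream the file content chunk by chunk and collect it into a buffer.
+ content = bytearray()
+ async for chunk in await client.get_file_content("<file-id>"):
+ content.extend(chunk)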
+ """ + kwargs["stream"] = True + response = await super()._get_file_content(file_id, **kwargs) + return cast(AsyncIterator[bytes], response) + + @distributed_trace_async + async def save_file( # pylint: disable=client-method-missing-kwargs + self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None + ) -> None: + """ + Asynchronously saves file content retrieved using a file identifier to the specified local directory. + + :param file_id: The unique identifier for the file to retrieve. + :type file_id: str + :param file_name: The name of the file to be saved. + :type file_name: str + :param target_dir: The directory where the file should be saved. Defaults to the current working directory. + :type target_dir: str or Path + :raises ValueError: If the target path is not a directory or the file name is invalid. + :raises RuntimeError: If file content retrieval fails or no content is found. + :raises TypeError: If retrieved chunks are not bytes-like objects. + :raises IOError: If writing to the file fails. + """ + try: + # Determine and validate the target directory + path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() + path.mkdir(parents=True, exist_ok=True) + if not path.is_dir(): + raise ValueError(f"The target path '{path}' is not a directory.") + + # Sanitize and validate the file name + sanitized_file_name = Path(file_name).name + if not sanitized_file_name: + raise ValueError("The provided file name is invalid.") + + # Retrieve the file content + file_content_stream = await self.get_file_content(file_id) + if not file_content_stream: + raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") + + # Collect all chunks asynchronously + chunks = [] + async for chunk in file_content_stream: + if isinstance(chunk, (bytes, bytearray)): + chunks.append(chunk) + else: + raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") + + target_file_path = path / sanitized_file_name + + # Write the collected content to the file synchronously + def write_file(collected_chunks: list): + with open(target_file_path, "wb") as file: + for chunk in collected_chunks: + file.write(chunk) + + # Use the event loop to run the synchronous function in a thread executor + loop = asyncio.get_running_loop() + await loop.run_in_executor(None, write_file, chunks) + + logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) + + except (ValueError, RuntimeError, TypeError, IOError) as e: + logger.error("An error occurred in save_file: %s", e) + raise + + @distributed_trace_async + async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: + """Deletes an assistant. + + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: AssistantDeletionStatus. The AssistantDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + if assistant_id in self._toolset: + del self._toolset[assistant_id] + return await super().delete_assistant(assistant_id, **kwargs) + + +__all__: List[str] = ["AssistantsClient"] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py new file mode 100644 index 000000000000..48ecbadbc90e --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import TYPE_CHECKING + +from ._configuration import AssistantsClientConfiguration + +if TYPE_CHECKING: + from azure.core import AsyncPipelineClient + + from .._serialization import Deserializer, Serializer + + +class AssistantsClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: AssistantsClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py new file mode 100644 index 000000000000..5a3c3ab7000b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py @@ -0,0 +1,428 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models import ( # type: ignore + AISearchIndexResource, + Assistant, + AssistantDeletionStatus, + AssistantThread, + AssistantThreadCreationOptions, + AssistantsApiResponseFormat, + AssistantsNamedToolChoice, + AzureAISearchResource, + AzureAISearchToolDefinition, + AzureFunctionBinding, + AzureFunctionDefinition, + AzureFunctionStorageQueue, + AzureFunctionToolDefinition, + BingCustomSearchToolDefinition, + BingGroundingToolDefinition, + CodeInterpreterToolDefinition, + CodeInterpreterToolResource, + FileDeletionStatus, + FileListResponse, + FileSearchRankingOptions, + FileSearchToolCallContent, + FileSearchToolDefinition, + FileSearchToolDefinitionDetails, + FileSearchToolResource, + FunctionDefinition, + FunctionName, + FunctionToolDefinition, + IncompleteRunDetails, + MessageAttachment, + MessageContent, + MessageDelta, + MessageDeltaChunk, + MessageDeltaContent, + MessageDeltaImageFileContent, + MessageDeltaImageFileContentObject, + MessageDeltaTextAnnotation, + MessageDeltaTextContent, + MessageDeltaTextContentObject, + MessageDeltaTextFileCitationAnnotation, + MessageDeltaTextFileCitationAnnotationObject, + MessageDeltaTextFilePathAnnotation, + MessageDeltaTextFilePathAnnotationObject, + MessageDeltaTextUrlCitationAnnotation, + MessageDeltaTextUrlCitationDetails, + MessageImageFileContent, + MessageImageFileDetails, + MessageImageFileParam, + MessageImageUrlParam, + MessageIncompleteDetails, + MessageInputContentBlock, + MessageInputImageFileBlock, + MessageInputImageUrlBlock, + MessageInputTextBlock, + MessageTextAnnotation, + MessageTextContent, + MessageTextDetails, + MessageTextFileCitationAnnotation, + MessageTextFileCitationDetails, + MessageTextFilePathAnnotation, + MessageTextFilePathDetails, + MessageTextUrlCitationAnnotation, + MessageTextUrlCitationDetails, + MicrosoftFabricToolDefinition, + OpenAIFile, + OpenAIPageableListOfAssistant, + OpenAIPageableListOfRunStep, + OpenAIPageableListOfThreadMessage, + OpenAIPageableListOfThreadRun, + OpenAIPageableListOfVectorStore, + OpenAIPageableListOfVectorStoreFile, + OpenApiAnonymousAuthDetails, + OpenApiAuthDetails, + OpenApiConnectionAuthDetails, + OpenApiConnectionSecurityScheme, + OpenApiFunctionDefinition, + OpenApiManagedAuthDetails, + OpenApiManagedSecurityScheme, + OpenApiToolDefinition, + RequiredAction, + RequiredFunctionToolCall, + RequiredFunctionToolCallDetails, + RequiredToolCall, + ResponseFormatJsonSchema, + ResponseFormatJsonSchemaType, + RunCompletionUsage, + RunError, + RunStep, + RunStepAzureAISearchToolCall, + RunStepBingGroundingToolCall, + RunStepCodeInterpreterImageOutput, + RunStepCodeInterpreterImageReference, + RunStepCodeInterpreterLogOutput, + RunStepCodeInterpreterToolCall, + RunStepCodeInterpreterToolCallDetails, + RunStepCodeInterpreterToolCallOutput, + RunStepCompletionUsage, + RunStepCustomSearchToolCall, + RunStepDelta, + RunStepDeltaChunk, + RunStepDeltaCodeInterpreterDetailItemObject, + RunStepDeltaCodeInterpreterImageOutput, + RunStepDeltaCodeInterpreterImageOutputObject, + RunStepDeltaCodeInterpreterLogOutput, + RunStepDeltaCodeInterpreterOutput, + RunStepDeltaCodeInterpreterToolCall, + RunStepDeltaDetail, + RunStepDeltaFileSearchToolCall, + RunStepDeltaFunction, + RunStepDeltaFunctionToolCall, + RunStepDeltaMessageCreation, + 
RunStepDeltaMessageCreationObject, + RunStepDeltaToolCall, + RunStepDeltaToolCallObject, + RunStepDetails, + RunStepError, + RunStepFileSearchToolCall, + RunStepFileSearchToolCallResult, + RunStepFileSearchToolCallResults, + RunStepFunctionToolCall, + RunStepFunctionToolCallDetails, + RunStepMessageCreationDetails, + RunStepMessageCreationReference, + RunStepMicrosoftFabricToolCall, + RunStepOpenAPIToolCall, + RunStepSharepointToolCall, + RunStepToolCall, + RunStepToolCallDetails, + SearchConfiguration, + SearchConfigurationList, + SharepointToolDefinition, + SubmitToolOutputsAction, + SubmitToolOutputsDetails, + ThreadDeletionStatus, + ThreadMessage, + ThreadMessageOptions, + ThreadRun, + ToolConnection, + ToolConnectionList, + ToolDefinition, + ToolOutput, + ToolResources, + TruncationObject, + UpdateCodeInterpreterToolResourceOptions, + UpdateFileSearchToolResourceOptions, + UpdateToolResourcesOptions, + VectorStore, + VectorStoreAutoChunkingStrategyRequest, + VectorStoreAutoChunkingStrategyResponse, + VectorStoreChunkingStrategyRequest, + VectorStoreChunkingStrategyResponse, + VectorStoreConfiguration, + VectorStoreConfigurations, + VectorStoreDataSource, + VectorStoreDeletionStatus, + VectorStoreExpirationPolicy, + VectorStoreFile, + VectorStoreFileBatch, + VectorStoreFileCount, + VectorStoreFileDeletionStatus, + VectorStoreFileError, + VectorStoreStaticChunkingStrategyOptions, + VectorStoreStaticChunkingStrategyRequest, + VectorStoreStaticChunkingStrategyResponse, +) + +from ._enums import ( # type: ignore + AssistantStreamEvent, + AssistantsApiResponseFormatMode, + AssistantsApiToolChoiceOptionMode, + AssistantsNamedToolChoiceType, + AzureAISearchQueryType, + DoneEvent, + ErrorEvent, + FilePurpose, + FileState, + ImageDetailLevel, + IncompleteDetailsReason, + ListSortOrder, + MessageBlockType, + MessageIncompleteDetailsReason, + MessageRole, + MessageStatus, + MessageStreamEvent, + OpenApiAuthType, + ResponseFormat, + RunAdditionalFieldList, + RunStatus, + RunStepErrorCode, + RunStepStatus, + RunStepStreamEvent, + RunStepType, + RunStreamEvent, + ThreadStreamEvent, + TruncationStrategy, + VectorStoreChunkingStrategyRequestType, + VectorStoreChunkingStrategyResponseType, + VectorStoreDataSourceAssetType, + VectorStoreExpirationPolicyAnchor, + VectorStoreFileBatchStatus, + VectorStoreFileErrorCode, + VectorStoreFileStatus, + VectorStoreFileStatusFilter, + VectorStoreStatus, +) +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AISearchIndexResource", + "Assistant", + "AssistantDeletionStatus", + "AssistantThread", + "AssistantThreadCreationOptions", + "AssistantsApiResponseFormat", + "AssistantsNamedToolChoice", + "AzureAISearchResource", + "AzureAISearchToolDefinition", + "AzureFunctionBinding", + "AzureFunctionDefinition", + "AzureFunctionStorageQueue", + "AzureFunctionToolDefinition", + "BingCustomSearchToolDefinition", + "BingGroundingToolDefinition", + "CodeInterpreterToolDefinition", + "CodeInterpreterToolResource", + "FileDeletionStatus", + "FileListResponse", + "FileSearchRankingOptions", + "FileSearchToolCallContent", + "FileSearchToolDefinition", + "FileSearchToolDefinitionDetails", + "FileSearchToolResource", + "FunctionDefinition", + "FunctionName", + "FunctionToolDefinition", + "IncompleteRunDetails", + "MessageAttachment", + "MessageContent", + "MessageDelta", + "MessageDeltaChunk", + "MessageDeltaContent", + "MessageDeltaImageFileContent", + "MessageDeltaImageFileContentObject", + 
"MessageDeltaTextAnnotation", + "MessageDeltaTextContent", + "MessageDeltaTextContentObject", + "MessageDeltaTextFileCitationAnnotation", + "MessageDeltaTextFileCitationAnnotationObject", + "MessageDeltaTextFilePathAnnotation", + "MessageDeltaTextFilePathAnnotationObject", + "MessageDeltaTextUrlCitationAnnotation", + "MessageDeltaTextUrlCitationDetails", + "MessageImageFileContent", + "MessageImageFileDetails", + "MessageImageFileParam", + "MessageImageUrlParam", + "MessageIncompleteDetails", + "MessageInputContentBlock", + "MessageInputImageFileBlock", + "MessageInputImageUrlBlock", + "MessageInputTextBlock", + "MessageTextAnnotation", + "MessageTextContent", + "MessageTextDetails", + "MessageTextFileCitationAnnotation", + "MessageTextFileCitationDetails", + "MessageTextFilePathAnnotation", + "MessageTextFilePathDetails", + "MessageTextUrlCitationAnnotation", + "MessageTextUrlCitationDetails", + "MicrosoftFabricToolDefinition", + "OpenAIFile", + "OpenAIPageableListOfAssistant", + "OpenAIPageableListOfRunStep", + "OpenAIPageableListOfThreadMessage", + "OpenAIPageableListOfThreadRun", + "OpenAIPageableListOfVectorStore", + "OpenAIPageableListOfVectorStoreFile", + "OpenApiAnonymousAuthDetails", + "OpenApiAuthDetails", + "OpenApiConnectionAuthDetails", + "OpenApiConnectionSecurityScheme", + "OpenApiFunctionDefinition", + "OpenApiManagedAuthDetails", + "OpenApiManagedSecurityScheme", + "OpenApiToolDefinition", + "RequiredAction", + "RequiredFunctionToolCall", + "RequiredFunctionToolCallDetails", + "RequiredToolCall", + "ResponseFormatJsonSchema", + "ResponseFormatJsonSchemaType", + "RunCompletionUsage", + "RunError", + "RunStep", + "RunStepAzureAISearchToolCall", + "RunStepBingGroundingToolCall", + "RunStepCodeInterpreterImageOutput", + "RunStepCodeInterpreterImageReference", + "RunStepCodeInterpreterLogOutput", + "RunStepCodeInterpreterToolCall", + "RunStepCodeInterpreterToolCallDetails", + "RunStepCodeInterpreterToolCallOutput", + "RunStepCompletionUsage", + "RunStepCustomSearchToolCall", + "RunStepDelta", + "RunStepDeltaChunk", + "RunStepDeltaCodeInterpreterDetailItemObject", + "RunStepDeltaCodeInterpreterImageOutput", + "RunStepDeltaCodeInterpreterImageOutputObject", + "RunStepDeltaCodeInterpreterLogOutput", + "RunStepDeltaCodeInterpreterOutput", + "RunStepDeltaCodeInterpreterToolCall", + "RunStepDeltaDetail", + "RunStepDeltaFileSearchToolCall", + "RunStepDeltaFunction", + "RunStepDeltaFunctionToolCall", + "RunStepDeltaMessageCreation", + "RunStepDeltaMessageCreationObject", + "RunStepDeltaToolCall", + "RunStepDeltaToolCallObject", + "RunStepDetails", + "RunStepError", + "RunStepFileSearchToolCall", + "RunStepFileSearchToolCallResult", + "RunStepFileSearchToolCallResults", + "RunStepFunctionToolCall", + "RunStepFunctionToolCallDetails", + "RunStepMessageCreationDetails", + "RunStepMessageCreationReference", + "RunStepMicrosoftFabricToolCall", + "RunStepOpenAPIToolCall", + "RunStepSharepointToolCall", + "RunStepToolCall", + "RunStepToolCallDetails", + "SearchConfiguration", + "SearchConfigurationList", + "SharepointToolDefinition", + "SubmitToolOutputsAction", + "SubmitToolOutputsDetails", + "ThreadDeletionStatus", + "ThreadMessage", + "ThreadMessageOptions", + "ThreadRun", + "ToolConnection", + "ToolConnectionList", + "ToolDefinition", + "ToolOutput", + "ToolResources", + "TruncationObject", + "UpdateCodeInterpreterToolResourceOptions", + "UpdateFileSearchToolResourceOptions", + "UpdateToolResourcesOptions", + "VectorStore", + "VectorStoreAutoChunkingStrategyRequest", + 
"VectorStoreAutoChunkingStrategyResponse", + "VectorStoreChunkingStrategyRequest", + "VectorStoreChunkingStrategyResponse", + "VectorStoreConfiguration", + "VectorStoreConfigurations", + "VectorStoreDataSource", + "VectorStoreDeletionStatus", + "VectorStoreExpirationPolicy", + "VectorStoreFile", + "VectorStoreFileBatch", + "VectorStoreFileCount", + "VectorStoreFileDeletionStatus", + "VectorStoreFileError", + "VectorStoreStaticChunkingStrategyOptions", + "VectorStoreStaticChunkingStrategyRequest", + "VectorStoreStaticChunkingStrategyResponse", + "AssistantStreamEvent", + "AssistantsApiResponseFormatMode", + "AssistantsApiToolChoiceOptionMode", + "AssistantsNamedToolChoiceType", + "AzureAISearchQueryType", + "DoneEvent", + "ErrorEvent", + "FilePurpose", + "FileState", + "ImageDetailLevel", + "IncompleteDetailsReason", + "ListSortOrder", + "MessageBlockType", + "MessageIncompleteDetailsReason", + "MessageRole", + "MessageStatus", + "MessageStreamEvent", + "OpenApiAuthType", + "ResponseFormat", + "RunAdditionalFieldList", + "RunStatus", + "RunStepErrorCode", + "RunStepStatus", + "RunStepStreamEvent", + "RunStepType", + "RunStreamEvent", + "ThreadStreamEvent", + "TruncationStrategy", + "VectorStoreChunkingStrategyRequestType", + "VectorStoreChunkingStrategyResponseType", + "VectorStoreDataSourceAssetType", + "VectorStoreExpirationPolicyAnchor", + "VectorStoreFileBatchStatus", + "VectorStoreFileErrorCode", + "VectorStoreFileStatus", + "VectorStoreFileStatusFilter", + "VectorStoreStatus", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py new file mode 100644 index 000000000000..667871cc972b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py @@ -0,0 +1,544 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AssistantsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Represents the mode in which the model will handle the return format of a tool call.""" + + AUTO = "auto" + """Default value. 
Let the model handle the return format."""
+ NONE = "none"
+ """Setting the value to ``none`` will result in a 400 Bad Request."""
+
+
+ class AssistantsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Specifies how the tool choice will be used."""
+
+ NONE = "none"
+ """The model will not call a function and instead generates a message."""
+ AUTO = "auto"
+ """The model can pick between generating a message or calling a function."""
+
+
+ class AssistantsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Available tool types for assistants named tools."""
+
+ FUNCTION = "function"
+ """Tool type ``function``"""
+ CODE_INTERPRETER = "code_interpreter"
+ """Tool type ``code_interpreter``"""
+ FILE_SEARCH = "file_search"
+ """Tool type ``file_search``"""
+ BING_GROUNDING = "bing_grounding"
+ """Tool type ``bing_grounding``"""
+ MICROSOFT_FABRIC = "fabric_dataagent"
+ """Tool type ``fabric_dataagent``"""
+ SHAREPOINT = "sharepoint_grounding"
+ """Tool type ``sharepoint_grounding``"""
+ AZURE_AI_SEARCH = "azure_ai_search"
+ """Tool type ``azure_ai_search``"""
+ BING_CUSTOM_SEARCH = "bing_custom_search"
+ """Tool type ``bing_custom_search``"""
+
+
+ class AssistantStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Each event in a server-sent events stream has an ``event`` and ``data`` property:
+
+
+
+ .. code-block::
+
+ event: thread.created
+ data: {"id": "thread_123", "object": "thread", ...}
+
+ We emit events whenever a new object is created, transitions to a new state, or is being
+ streamed in parts (deltas). For example, we emit ``thread.run.created`` when a new run
+ is created, ``thread.run.completed`` when a run completes, and so on. When an Assistant chooses
+ to create a message during a run, we emit a ``thread.message.created`` event, a
+ ``thread.message.in_progress`` event, many ``thread.message.delta`` events, and finally a
+ ``thread.message.completed`` event.
+
+ We may add additional events over time, so we recommend handling unknown events gracefully
+ in your code.
+ """
+
+ THREAD_CREATED = "thread.created"
+ """Event sent when a new thread is created. The data of this event is of type AssistantThread"""
+ THREAD_RUN_CREATED = "thread.run.created"
+ """Event sent when a new run is created. The data of this event is of type ThreadRun"""
+ THREAD_RUN_QUEUED = "thread.run.queued"
+ """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun"""
+ THREAD_RUN_IN_PROGRESS = "thread.run.in_progress"
+ """Event sent when a run moves to ``in_progress`` status. The data of this event is of type
+ ThreadRun"""
+ THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action"
+ """Event sent when a run moves to ``requires_action`` status. The data of this event is of type
+ ThreadRun"""
+ THREAD_RUN_COMPLETED = "thread.run.completed"
+ """Event sent when a run is completed. The data of this event is of type ThreadRun"""
+ THREAD_RUN_INCOMPLETE = "thread.run.incomplete"
+ """Event sent when a run ends incomplete. The data of this event is of type ThreadRun"""
+ THREAD_RUN_FAILED = "thread.run.failed"
+ """Event sent when a run fails. The data of this event is of type ThreadRun"""
+ THREAD_RUN_CANCELLING = "thread.run.cancelling"
+ """Event sent when a run moves to ``cancelling`` status. The data of this event is of type
+ ThreadRun"""
+ THREAD_RUN_CANCELLED = "thread.run.cancelled"
+ """Event sent when a run is cancelled. The data of this event is of type ThreadRun"""
+ THREAD_RUN_EXPIRED = "thread.run.expired"
+ """Event sent when a run is expired. The data of this event is of type ThreadRun"""
+ THREAD_RUN_STEP_CREATED = "thread.run.step.created"
+ """Event sent when a new thread run step is created. The data of this event is of type RunStep"""
+ THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress"
+ """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type
+ RunStep"""
+ THREAD_RUN_STEP_DELTA = "thread.run.step.delta"
+ """Event sent when a run step is being streamed. The data of this event is of type
+ RunStepDeltaChunk"""
+ THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed"
+ """Event sent when a run step is completed. The data of this event is of type RunStep"""
+ THREAD_RUN_STEP_FAILED = "thread.run.step.failed"
+ """Event sent when a run step fails. The data of this event is of type RunStep"""
+ THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled"
+ """Event sent when a run step is cancelled. The data of this event is of type RunStep"""
+ THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired"
+ """Event sent when a run step is expired. The data of this event is of type RunStep"""
+ THREAD_MESSAGE_CREATED = "thread.message.created"
+ """Event sent when a new message is created. The data of this event is of type ThreadMessage"""
+ THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress"
+ """Event sent when a message moves to ``in_progress`` status. The data of this event is of type
+ ThreadMessage"""
+ THREAD_MESSAGE_DELTA = "thread.message.delta"
+ """Event sent when a message is being streamed. The data of this event is of type
+ MessageDeltaChunk"""
+ THREAD_MESSAGE_COMPLETED = "thread.message.completed"
+ """Event sent when a message is completed. The data of this event is of type ThreadMessage"""
+ THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete"
+ """Event sent when a message ends before it is completed. The data of this event is of type
+ ThreadMessage"""
+ ERROR = "error"
+ """Event sent when an error occurs, such as an internal server error or a timeout."""
+ DONE = "done"
+ """Event sent when the stream is done."""
+
+
+ class AzureAISearchQueryType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Available query types for the Azure AI Search tool."""
+
+ SIMPLE = "simple"
+ """Query type ``simple``"""
+ SEMANTIC = "semantic"
+ """Query type ``semantic``"""
+ VECTOR = "vector"
+ """Query type ``vector``"""
+ VECTOR_SIMPLE_HYBRID = "vector_simple_hybrid"
+ """Query type ``vector_simple_hybrid``"""
+ VECTOR_SEMANTIC_HYBRID = "vector_semantic_hybrid"
+ """Query type ``vector_semantic_hybrid``"""
+
+
+ class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Terminal event indicating the successful end of a stream."""
+
+ DONE = "done"
+ """Event sent when the stream is done."""
+
+
+ class ErrorEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Terminal event indicating a server-side error while streaming."""
+
+ ERROR = "error"
+ """Event sent when an error occurs, such as an internal server error or a timeout."""
+
+
+ class FilePurpose(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The possible values denoting the intended usage of a file."""
+
+ FINE_TUNE = "fine-tune"
+ """Indicates a file is used for fine tuning input."""
+ FINE_TUNE_RESULTS = "fine-tune-results"
+ """Indicates a file is used for fine tuning results."""
+ ASSISTANTS = "assistants"
+ """Indicates a file is used as input to assistants."""
+ ASSISTANTS_OUTPUT = "assistants_output"
+ """Indicates a file is used as output by assistants."""
+ BATCH = "batch"
+ """Indicates a file is used as input to a batch operation."""
+ BATCH_OUTPUT = "batch_output"
+ """Indicates a file is used as output of a batch operation."""
+ VISION = "vision"
+ """Indicates a file is used as input to a vision operation."""
+
+
+ class FileState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The state of the file."""
+
+ UPLOADED = "uploaded"
+ """The file has been uploaded but it's not yet processed. This state is not returned by Azure
+ OpenAI and is exposed only for
+ compatibility. It can be categorized as an inactive state."""
+ PENDING = "pending"
+ """The operation was created and is queued to be processed in the future. It can be
+ categorized as an inactive state."""
+ RUNNING = "running"
+ """The operation has started to be processed. It can be categorized as an active state."""
+ PROCESSED = "processed"
+ """The operation has been successfully processed and is ready for consumption. It can be
+ categorized as a terminal state."""
+ ERROR = "error"
+ """The operation has completed processing with a failure and cannot be further consumed. It can be
+ categorized as a terminal state."""
+ DELETING = "deleting"
+ """The entity is in the process of being deleted. This state is not returned by Azure OpenAI and
+ is exposed only for compatibility.
+ It can be categorized as an active state."""
+ DELETED = "deleted"
+ """The entity has been deleted but may still be referenced by other entities predating the
+ deletion. It can be categorized as a
+ terminal state."""
+
+
+ class ImageDetailLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Specifies an image's detail level.
Can be 'auto', 'low', 'high', or an unknown future value.""" + + AUTO = "auto" + """Automatically select an appropriate detail level.""" + LOW = "low" + """Use a lower detail level to reduce bandwidth or cost.""" + HIGH = "high" + """Use a higher detail level—potentially more resource-intensive.""" + + +class IncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The reason why the run is incomplete. This will point to which specific token limit was reached + over the course of the run. + """ + + MAX_COMPLETION_TOKENS = "max_completion_tokens" + """Maximum completion tokens exceeded""" + MAX_PROMPT_TOKENS = "max_prompt_tokens" + """Maximum prompt tokens exceeded""" + + +class ListSortOrder(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The available sorting options when requesting a list of response objects.""" + + ASCENDING = "asc" + """Specifies an ascending sort order.""" + DESCENDING = "desc" + """Specifies a descending sort order.""" + + +class MessageBlockType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies the kind of content block within a message. Could be text, an image file, an external + image URL, or an unknown future type. + """ + + TEXT = "text" + """Indicates a block containing text content.""" + IMAGE_FILE = "image_file" + """Indicates a block referencing an internally uploaded image file.""" + IMAGE_URL = "image_url" + """Indicates a block referencing an external image URL.""" + + +class MessageIncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """A set of reasons describing why a message is marked as incomplete.""" + + CONTENT_FILTER = "content_filter" + """The run generating the message was terminated due to content filter flagging.""" + MAX_TOKENS = "max_tokens" + """The run generating the message exhausted available tokens before completion.""" + RUN_CANCELLED = "run_cancelled" + """The run generating the message was cancelled before completion.""" + RUN_FAILED = "run_failed" + """The run generating the message failed.""" + RUN_EXPIRED = "run_expired" + """The run generating the message expired.""" + + +class MessageRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible values for roles attributed to messages in a thread.""" + + USER = "user" + """The role representing the end-user.""" + ASSISTANT = "assistant" + """The role representing the assistant.""" + + +class MessageStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible execution status values for a thread message.""" + + IN_PROGRESS = "in_progress" + """A run is currently creating this message.""" + INCOMPLETE = "incomplete" + """This message is incomplete. See incomplete_details for more information.""" + COMPLETED = "completed" + """This message was successfully completed by a run.""" + + +class MessageStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Message operation related streaming events.""" + + THREAD_MESSAGE_CREATED = "thread.message.created" + """Event sent when a new message is created. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" + """Event sent when a message moves to ``in_progress`` status. The data of this event is of type + ThreadMessage""" + THREAD_MESSAGE_DELTA = "thread.message.delta" + """Event sent when a message is being streamed. The data of this event is of type + MessageDeltaChunk""" + THREAD_MESSAGE_COMPLETED = "thread.message.completed" + """Event sent when a message is completed. 
The data of this event is of type ThreadMessage"""
+ THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete"
+ """Event sent when a message ends before it is completed. The data of this event is of type
+ ThreadMessage"""
+
+
+ class OpenApiAuthType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Authentication type for OpenApi endpoint. Allowed types are:
+
+ * Anonymous (no authentication required)
+ * Connection (requires connection_id to endpoint, as set up in AI Foundry)
+ * Managed_Identity (requires audience for identity-based auth).
+ """
+
+ ANONYMOUS = "anonymous"
+ CONNECTION = "connection"
+ MANAGED_IDENTITY = "managed_identity"
+
+
+ class ResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Possible API response formats."""
+
+ TEXT = "text"
+ """``text`` format should be used for requests involving any sort of ToolCall."""
+ JSON_OBJECT = "json_object"
+ """Using ``json_object`` format will limit the usage of ToolCall to only functions."""
+
+
+ class RunAdditionalFieldList(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """A list of additional fields to include in the response."""
+
+ FILE_SEARCH_CONTENTS = "step_details.tool_calls[*].file_search.results[*].content"
+ """File search result content."""
+
+
+ class RunStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Possible values for the status of an assistant thread run."""
+
+ QUEUED = "queued"
+ """Represents a run that is queued to start."""
+ IN_PROGRESS = "in_progress"
+ """Represents a run that is in progress."""
+ REQUIRES_ACTION = "requires_action"
+ """Represents a run that needs another operation, such as tool output submission, to continue."""
+ CANCELLING = "cancelling"
+ """Represents a run that is in the process of cancellation."""
+ CANCELLED = "cancelled"
+ """Represents a run that has been cancelled."""
+ FAILED = "failed"
+ """Represents a run that failed."""
+ COMPLETED = "completed"
+ """Represents a run that successfully completed."""
+ EXPIRED = "expired"
+ """Represents a run that expired before it could otherwise finish."""
+
+
+ class RunStepErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Possible error code values attributable to a failed run step."""
+
+ SERVER_ERROR = "server_error"
+ """Represents a server error."""
+ RATE_LIMIT_EXCEEDED = "rate_limit_exceeded"
+ """Represents an error indicating configured rate limits were exceeded."""
+
+
+ class RunStepStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Possible values for the status of a run step."""
+
+ IN_PROGRESS = "in_progress"
+ """Represents a run step still in progress."""
+ CANCELLED = "cancelled"
+ """Represents a run step that was cancelled."""
+ FAILED = "failed"
+ """Represents a run step that failed."""
+ COMPLETED = "completed"
+ """Represents a run step that successfully completed."""
+ EXPIRED = "expired"
+ """Represents a run step that expired before otherwise finishing."""
+
+
+ class RunStepStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Run step operation related streaming events."""
+
+ THREAD_RUN_STEP_CREATED = "thread.run.step.created"
+ """Event sent when a new thread run step is created. The data of this event is of type RunStep"""
+ THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress"
+ """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type
+ RunStep"""
+ THREAD_RUN_STEP_DELTA = "thread.run.step.delta"
+ """Event sent when a run step is being streamed.
The data of this event is of type + RunStepDeltaChunk""" + THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" + """Event sent when a run step is completed. The data of this event is of type RunStep""" + THREAD_RUN_STEP_FAILED = "thread.run.step.failed" + """Event sent when a run step fails. The data of this event is of type RunStep""" + THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" + """Event sent when a run step is cancelled. The data of this event is of type RunStep""" + THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" + """Event sent when a run step is expired. The data of this event is of type RunStep""" + + +class RunStepType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible types of run steps.""" + + MESSAGE_CREATION = "message_creation" + """Represents a run step to create a message.""" + TOOL_CALLS = "tool_calls" + """Represents a run step that calls tools.""" + + +class RunStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Run operation related streaming events.""" + + THREAD_RUN_CREATED = "thread.run.created" + """Event sent when a new run is created. The data of this event is of type ThreadRun""" + THREAD_RUN_QUEUED = "thread.run.queued" + """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" + THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" + """Event sent when a run moves to ``in_progress`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" + """Event sent when a run moves to ``requires_action`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_COMPLETED = "thread.run.completed" + """Event sent when a run is completed. The data of this event is of type ThreadRun""" + THREAD_RUN_INCOMPLETE = "thread.run.incomplete" + """Event sent when a run ends incomplete. The data of this event is of type ThreadRun""" + THREAD_RUN_FAILED = "thread.run.failed" + """Event sent when a run fails. The data of this event is of type ThreadRun""" + THREAD_RUN_CANCELLING = "thread.run.cancelling" + """Event sent when a run moves to ``cancelling`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_CANCELLED = "thread.run.cancelled" + """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" + THREAD_RUN_EXPIRED = "thread.run.expired" + """Event sent when a run is expired. The data of this event is of type ThreadRun""" + + +class ThreadStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Thread operation related streaming events.""" + + THREAD_CREATED = "thread.created" + """Event sent when a new thread is created. The data of this event is of type AssistantThread""" + + +class TruncationStrategy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible truncation strategies for the thread.""" + + AUTO = "auto" + """Default value. 
Messages in the middle of the thread will be dropped to fit the context length
+ of the model."""
+ LAST_MESSAGES = "last_messages"
+ """The thread will truncate to the ``lastMessages`` count of recent messages."""
+
+
+ class VectorStoreChunkingStrategyRequestType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of chunking strategy."""
+
+ AUTO = "auto"
+ STATIC = "static"
+
+
+ class VectorStoreChunkingStrategyResponseType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of chunking strategy."""
+
+ OTHER = "other"
+ STATIC = "static"
+
+
+ class VectorStoreDataSourceAssetType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of vector storage asset. For ``uri_asset``, the value should contain the asset URI ID;
+ for ``id_asset``, it should contain the data ID.
+ """
+
+ URI_ASSET = "uri_asset"
+ """Azure URI"""
+ ID_ASSET = "id_asset"
+ """The data ID"""
+
+
+ class VectorStoreExpirationPolicyAnchor(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Describes the relationship between the days and the expiration of this vector store."""
+
+ LAST_ACTIVE_AT = "last_active_at"
+ """The expiration policy is based on the last time the vector store was active."""
+
+
+ class VectorStoreFileBatchStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The status of the vector store file batch."""
+
+ IN_PROGRESS = "in_progress"
+ """The vector store is still processing this file batch."""
+ COMPLETED = "completed"
+ """The vector store file batch is ready for use."""
+ CANCELLED = "cancelled"
+ """The vector store file batch was cancelled."""
+ FAILED = "failed"
+ """The vector store file batch failed to process."""
+
+
+ class VectorStoreFileErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Error code variants for vector store file processing."""
+
+ SERVER_ERROR = "server_error"
+ """A server error occurred."""
+ INVALID_FILE = "invalid_file"
+ """The file is not valid."""
+ UNSUPPORTED_FILE = "unsupported_file"
+ """The file is of an unsupported type."""
+
+
+ class VectorStoreFileStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Vector store file status."""
+
+ IN_PROGRESS = "in_progress"
+ """The file is currently being processed."""
+ COMPLETED = "completed"
+ """The file has been successfully processed."""
+ FAILED = "failed"
+ """The file has failed to process."""
+ CANCELLED = "cancelled"
+ """The file was cancelled."""
+
+
+ class VectorStoreFileStatusFilter(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Query parameter filter for the vector store file retrieval endpoint."""
+
+ IN_PROGRESS = "in_progress"
+ """Retrieve only files that are currently being processed"""
+ COMPLETED = "completed"
+ """Retrieve only files that have been successfully processed"""
+ FAILED = "failed"
+ """Retrieve only files that have failed to process"""
+ CANCELLED = "cancelled"
+ """Retrieve only files that were cancelled"""
+
+
+ class VectorStoreStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Possible vector store statuses."""
+
+ EXPIRED = "expired"
+ """expired status indicates that this vector store has expired and is no longer available for use."""
+ IN_PROGRESS = "in_progress"
+ """in_progress status indicates that this vector store is still processing files."""
+ COMPLETED = "completed"
+ """completed status indicates that this vector store is ready for use."""
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py
new file mode 100644
index 000000000000..3a916bf99252
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py
@@ -0,0 +1,6875 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=useless-super-delegation
+
+import datetime
+from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload
+
+from .. import _model_base
+from .._model_base import rest_discriminator, rest_field
+from .._vendor import FileType
+from ._enums import (
+ MessageBlockType,
+ OpenApiAuthType,
+ RunStepType,
+ VectorStoreChunkingStrategyRequestType,
+ VectorStoreChunkingStrategyResponseType,
+)
+
+if TYPE_CHECKING:
+ from .. import _types, models as _models
+
+
+ class AISearchIndexResource(_model_base.Model):
+ """An AI Search Index resource.
+
+ :ivar index_connection_id: An index connection ID in an IndexResource attached to this
+ assistant. Required.
+ :vartype index_connection_id: str
+ :ivar index_name: The name of an index in an IndexResource attached to this assistant.
+ Required.
+ :vartype index_name: str
+ :ivar query_type: Type of query in an AIIndexResource attached to this assistant. Known values
+ are: "simple", "semantic", "vector", "vector_simple_hybrid", and "vector_semantic_hybrid".
+ :vartype query_type: str or ~azure.ai.assistants.models.AzureAISearchQueryType
+ :ivar top_k: Number of documents to retrieve from search and present to the model.
+ :vartype top_k: int
+ :ivar filter: OData filter string for search resource.
+ :vartype filter: str
+ """
+
+ index_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An index connection ID in an IndexResource attached to this assistant. Required."""
+ index_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of an index in an IndexResource attached to this assistant. Required."""
+ query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Type of query in an AIIndexResource attached to this assistant. Known values are: \"simple\",
+ \"semantic\", \"vector\", \"vector_simple_hybrid\", and \"vector_semantic_hybrid\"."""
+ top_k: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Number of documents to retrieve from search and present to the model."""
+ filter: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """OData filter string for search resource."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ index_connection_id: str,
+ index_name: str,
+ query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = None,
+ top_k: Optional[int] = None,
+ filter: Optional[str] = None, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Assistant(_model_base.Model): + """Represents an assistant that can call the model and use tools. + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always assistant. Required. Default value is + "assistant". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar name: The name of the assistant. Required. + :vartype name: str + :ivar description: The description of the assistant. Required. + :vartype description: str + :ivar model: The ID of the model to use. Required. + :vartype model: str + :ivar instructions: The system instructions for the assistant to use. Required. + :vartype instructions: str + :ivar tools: The collection of tools enabled for the assistant. Required. + :vartype tools: list[~azure.ai.assistants.models.ToolDefinition] + :ivar tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Required. + :vartype tool_resources: ~azure.ai.assistants.models.ToolResources + :ivar temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Required. + :vartype temperature: float + :ivar top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Required. + :vartype top_p: float + :ivar response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType + :vartype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["assistant"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always assistant. Required. Default value is \"assistant\".""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this object was created. 
Required.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the assistant. Required.""" + description: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The description of the assistant. Required.""" + model: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the model to use. Required.""" + instructions: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The system instructions for the assistant to use. Required.""" + tools: List["_models.ToolDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The collection of tools enabled for the assistant. Required.""" + tool_resources: "_models.ToolResources" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of resources that are used by the assistant's tools. The resources are specific to the + type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Required.""" + temperature: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, + while lower values like 0.2 will make it more focused and deterministic. Required.""" + top_p: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Required.""" + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The response format of the tool calls used by this assistant. Is one of the following types: + str, Union[str, \"_models.AssistantsApiResponseFormatMode\"], AssistantsApiResponseFormat, + ResponseFormatJsonSchemaType""" + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + name: str, + description: str, + model: str, + instructions: str, + tools: List["_models.ToolDefinition"], + tool_resources: "_models.ToolResources", + temperature: float, + top_p: float, + metadata: Dict[str, str], + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["assistant"] = "assistant" + + +class AssistantDeletionStatus(_model_base.Model): + """The status of an assistant deletion operation. + + :ivar id: The ID of the resource specified for deletion. Required. 
+ :vartype id: str
+ :ivar deleted: A value indicating whether deletion was successful. Required.
+ :vartype deleted: bool
+ :ivar object: The object type, which is always 'assistant.deleted'. Required. Default value is
+ "assistant.deleted".
+ :vartype object: str
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the resource specified for deletion. Required."""
+ deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A value indicating whether deletion was successful. Required."""
+ object: Literal["assistant.deleted"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The object type, which is always 'assistant.deleted'. Required. Default value is
+ \"assistant.deleted\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ deleted: bool,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["assistant.deleted"] = "assistant.deleted"
+
+
+ class AssistantsApiResponseFormat(_model_base.Model):
+ """An object describing the expected output of the model. If ``json_object``, only ``function``
+ type ``tools`` are allowed to be passed to the Run.
+ If ``text``, the model can return text or any value needed.
+
+ :ivar type: Must be one of ``text`` or ``json_object``. Known values are: "text" and
+ "json_object".
+ :vartype type: str or ~azure.ai.assistants.models.ResponseFormat
+ """
+
+ type: Optional[Union[str, "_models.ResponseFormat"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Must be one of ``text`` or ``json_object``. Known values are: \"text\" and \"json_object\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: Optional[Union[str, "_models.ResponseFormat"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class AssistantsNamedToolChoice(_model_base.Model):
+ """Specifies a tool the model should use. Use to force the model to call a specific tool.
+
+ :ivar type: The type of tool. If type is ``function``, the function name must be set. Required.
+ Known values are: "function", "code_interpreter", "file_search", "bing_grounding",
+ "fabric_dataagent", "sharepoint_grounding", "azure_ai_search", and "bing_custom_search".
+ :vartype type: str or ~azure.ai.assistants.models.AssistantsNamedToolChoiceType
+ :ivar function: The name of the function to call.
+ :vartype function: ~azure.ai.assistants.models.FunctionName
+ """
+
+ type: Union[str, "_models.AssistantsNamedToolChoiceType"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The type of tool. If type is ``function``, the function name must be set. Required.
Known + values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", + \"fabric_dataagent\", \"sharepoint_grounding\", \"azure_ai_search\", and + \"bing_custom_search\".""" + function: Optional["_models.FunctionName"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the function to call.""" + + @overload + def __init__( + self, + *, + type: Union[str, "_models.AssistantsNamedToolChoiceType"], + function: Optional["_models.FunctionName"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AssistantThread(_model_base.Model): + """Information about a single thread associated with an assistant. + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread'. Required. Default value is "thread". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the type + of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list + of vector store IDs. Required. + :vartype tool_resources: ~azure.ai.assistants.models.ToolResources + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'thread'. Required. Default value is \"thread\".""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + tool_resources: "_models.ToolResources" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of resources that are made available to the assistant's tools in this thread. The + resources are specific to the type + of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list + of vector store IDs. Required.""" + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + tool_resources: "_models.ToolResources", + metadata: Dict[str, str], + ) -> None: ... 
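+
+ # A minimal usage sketch (illustrative only; the identifier, timestamp, and empty
+ # ToolResources below are made-up values, not service output). Like every model in
+ # this module, AssistantThread accepts either keyword arguments or a raw JSON
+ # mapping, matching the two __init__ overloads above and below:
+ #
+ #     thread = AssistantThread(
+ #         id="thread_123",
+ #         created_at=datetime.datetime.fromtimestamp(1700000000, datetime.timezone.utc),
+ #         tool_resources=ToolResources(),
+ #         metadata={},
+ #     )
+ #     # Equivalent construction from the service's raw JSON payload:
+ #     same_thread = AssistantThread(
+ #         {"id": "thread_123", "object": "thread", "created_at": 1700000000,
+ #          "tool_resources": {}, "metadata": {}}
+ #     )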
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread"] = "thread" + + +class AssistantThreadCreationOptions(_model_base.Model): + """The details used to create a new assistant thread. + + :ivar messages: The initial messages to associate with the new thread. + :vartype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :ivar tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires + a list of vector store IDs. + :vartype tool_resources: ~azure.ai.assistants.models.ToolResources + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + messages: Optional[List["_models.ThreadMessageOptions"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The initial messages to associate with the new thread.""" + tool_resources: Optional["_models.ToolResources"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A set of resources that are made available to the assistant's tools in this thread. The + resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires + a list of vector store IDs.""" + metadata: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + messages: Optional[List["_models.ThreadMessageOptions"]] = None, + tool_resources: Optional["_models.ToolResources"] = None, + metadata: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureAISearchResource(_model_base.Model): + """A set of index resources used by the ``azure_ai_search`` tool. + + :ivar index_list: The indices attached to this assistant. There can be a maximum of 1 index + resource attached to the assistant. + :vartype index_list: list[~azure.ai.assistants.models.AISearchIndexResource] + """ + + index_list: Optional[List["_models.AISearchIndexResource"]] = rest_field( + name="indexes", visibility=["read", "create", "update", "delete", "query"] + ) + """The indices attached to this assistant. There can be a maximum of 1 index + resource attached to the assistant.""" + + @overload + def __init__( + self, + *, + index_list: Optional[List["_models.AISearchIndexResource"]] = None, + ) -> None: ... 
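+
+ # A minimal usage sketch (illustrative only; the connection ID, index name, and
+ # OData filter are placeholder values, and AISearchIndexResource comes from this
+ # module). It attaches the documented maximum of one index resource:
+ #
+ #     resource = AzureAISearchResource(
+ #         index_list=[
+ #             AISearchIndexResource(
+ #                 index_connection_id="<my-connection-id>",
+ #                 index_name="my-search-index",
+ #                 query_type="simple",  # or AzureAISearchQueryType.SIMPLE
+ #                 top_k=5,
+ #                 filter="category eq 'docs'",
+ #             )
+ #         ]
+ #     )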
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolDefinition(_model_base.Model): + """An abstract representation of an input tool definition that an assistant can use. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AzureAISearchToolDefinition, AzureFunctionToolDefinition, BingCustomSearchToolDefinition, + BingGroundingToolDefinition, CodeInterpreterToolDefinition, MicrosoftFabricToolDefinition, + FileSearchToolDefinition, FunctionToolDefinition, OpenApiToolDefinition, + SharepointToolDefinition + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureAISearchToolDefinition(ToolDefinition, discriminator="azure_ai_search"): + """The input definition information for an Azure AI search tool as used to configure an assistant. + + :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is + "azure_ai_search". + :vartype type: str + """ + + type: Literal["azure_ai_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'azure_ai_search'. Required. Default value is + \"azure_ai_search\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="azure_ai_search", **kwargs) + + +class AzureFunctionBinding(_model_base.Model): + """The structure for keeping storage queue name and URI. + + :ivar type: The type of binding, which is always 'storage_queue'. Required. Default value is + "storage_queue". + :vartype type: str + :ivar storage_queue: Storage queue. Required. + :vartype storage_queue: ~azure.ai.assistants.models.AzureFunctionStorageQueue + """ + + type: Literal["storage_queue"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The type of binding, which is always 'storage_queue'. Required. Default value is + \"storage_queue\".""" + storage_queue: "_models.AzureFunctionStorageQueue" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Storage queue. Required.""" + + @overload + def __init__( + self, + *, + storage_queue: "_models.AzureFunctionStorageQueue", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["storage_queue"] = "storage_queue"
+
+
+ class AzureFunctionDefinition(_model_base.Model):
+ """The definition of an Azure function.
+
+ :ivar function: The definition of the Azure function and its parameters. Required.
+ :vartype function: ~azure.ai.assistants.models.FunctionDefinition
+ :ivar input_binding: Input storage queue. The queue storage trigger runs a function as messages
+ are added to it. Required.
+ :vartype input_binding: ~azure.ai.assistants.models.AzureFunctionBinding
+ :ivar output_binding: Output storage queue. The function writes output to this queue when the
+ input items are processed. Required.
+ :vartype output_binding: ~azure.ai.assistants.models.AzureFunctionBinding
+ """
+
+ function: "_models.FunctionDefinition" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The definition of the Azure function and its parameters. Required."""
+ input_binding: "_models.AzureFunctionBinding" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Input storage queue. The queue storage trigger runs a function as messages are added to it.
+ Required."""
+ output_binding: "_models.AzureFunctionBinding" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Output storage queue. The function writes output to this queue when the input items are
+ processed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ function: "_models.FunctionDefinition",
+ input_binding: "_models.AzureFunctionBinding",
+ output_binding: "_models.AzureFunctionBinding",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class AzureFunctionStorageQueue(_model_base.Model):
+ """The structure for keeping storage queue name and URI.
+
+ :ivar storage_service_endpoint: URI to the Azure Storage Queue service allowing you to
+ manipulate a queue. Required.
+ :vartype storage_service_endpoint: str
+ :ivar queue_name: The name of an Azure function storage queue. Required.
+ :vartype queue_name: str
+ """
+
+ storage_service_endpoint: str = rest_field(
+ name="queue_service_endpoint", visibility=["read", "create", "update", "delete", "query"]
+ )
+ """URI to the Azure Storage Queue service allowing you to manipulate a queue. Required."""
+ queue_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of an Azure function storage queue. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ storage_service_endpoint: str,
+ queue_name: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class AzureFunctionToolDefinition(ToolDefinition, discriminator="azure_function"):
+ """The input definition information for an Azure function tool as used to configure an assistant.
+
+ :ivar type: The object type, which is always 'azure_function'. Required. Default value is
+ "azure_function".
+ :vartype type: str + :ivar azure_function: The definition of the concrete function that the function tool should + call. Required. + :vartype azure_function: ~azure.ai.assistants.models.AzureFunctionDefinition + """ + + type: Literal["azure_function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'azure_function'. Required. Default value is + \"azure_function\".""" + azure_function: "_models.AzureFunctionDefinition" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The definition of the concrete function that the function tool should call. Required.""" + + @overload + def __init__( + self, + *, + azure_function: "_models.AzureFunctionDefinition", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="azure_function", **kwargs) + + +class BingCustomSearchToolDefinition(ToolDefinition, discriminator="bing_custom_search"): + """The input definition information for a Bing custom search tool as used to configure an + assistant. + + :ivar type: The object type, which is always 'bing_custom_search'. Required. Default value is + "bing_custom_search". + :vartype type: str + :ivar bing_custom_search: The list of search configurations used by the bing custom search + tool. Required. + :vartype bing_custom_search: ~azure.ai.assistants.models.SearchConfigurationList + """ + + type: Literal["bing_custom_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'bing_custom_search'. Required. Default value is + \"bing_custom_search\".""" + bing_custom_search: "_models.SearchConfigurationList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The list of search configurations used by the bing custom search tool. Required.""" + + @overload + def __init__( + self, + *, + bing_custom_search: "_models.SearchConfigurationList", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_custom_search", **kwargs) + + +class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding"): + """The input definition information for a bing grounding search tool as used to configure an + assistant. + + :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is + "bing_grounding". + :vartype type: str + :ivar bing_grounding: The list of connections used by the bing grounding tool. Required. + :vartype bing_grounding: ~azure.ai.assistants.models.ToolConnectionList + """ + + type: Literal["bing_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'bing_grounding'. Required. Default value is + \"bing_grounding\".""" + bing_grounding: "_models.ToolConnectionList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The list of connections used by the bing grounding tool. 
Required.""" + + @overload + def __init__( + self, + *, + bing_grounding: "_models.ToolConnectionList", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_grounding", **kwargs) + + +class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"): + """The input definition information for a code interpreter tool as used to configure an assistant. + + :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is + "code_interpreter". + :vartype type: str + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'code_interpreter'. Required. Default value is + \"code_interpreter\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="code_interpreter", **kwargs) + + +class CodeInterpreterToolResource(_model_base.Model): + """A set of resources that are used by the ``code_interpreter`` tool. + + :ivar file_ids: A list of file IDs made available to the ``code_interpreter`` tool. There can + be a maximum of 20 files + associated with the tool. + :vartype file_ids: list[str] + :ivar data_sources: The data sources to be used. This option is mutually exclusive with the + ``fileIds`` property. + :vartype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + """ + + file_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A list of file IDs made available to the ``code_interpreter`` tool. There can be a maximum of + 20 files + associated with the tool.""" + data_sources: Optional[List["_models.VectorStoreDataSource"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The data sources to be used. This option is mutually exclusive with the ``fileIds`` property.""" + + @overload + def __init__( + self, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List["_models.VectorStoreDataSource"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileDeletionStatus(_model_base.Model): + """A status response from a file deletion operation. + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'file'. Required. Default value is "file". + :vartype object: str + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether deletion was successful. 
Required.""" + object: Literal["file"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'file'. Required. Default value is \"file\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class FileListResponse(_model_base.Model): + """The response data from a file list operation. + + :ivar object: The object type, which is always 'list'. Required. Default value is "list". + :vartype object: str + :ivar data: The files returned for the request. Required. + :vartype data: list[~azure.ai.assistants.models.OpenAIFile] + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'list'. Required. Default value is \"list\".""" + data: List["_models.OpenAIFile"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The files returned for the request. Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.OpenAIFile"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class FileSearchRankingOptions(_model_base.Model): + """Ranking options for file search. + + :ivar ranker: File search ranker. Required. + :vartype ranker: str + :ivar score_threshold: Ranker search threshold. Required. + :vartype score_threshold: float + """ + + ranker: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """File search ranker. Required.""" + score_threshold: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Ranker search threshold. Required.""" + + @overload + def __init__( + self, + *, + ranker: str, + score_threshold: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileSearchToolCallContent(_model_base.Model): + """The file search result content object. + + :ivar type: The type of the content. Required. Default value is "text". + :vartype type: str + :ivar text: The text content of the file. Required. + :vartype text: str + """ + + type: Literal["text"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The type of the content. Required. Default value is \"text\".""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text content of the file. Required.""" + + @overload + def __init__( + self, + *, + text: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.type: Literal["text"] = "text" + + +class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): + """The input definition information for a file search tool as used to configure an assistant. + + :ivar type: The object type, which is always 'file_search'. Required. Default value is + "file_search". + :vartype type: str + :ivar file_search: Options overrides for the file search tool. + :vartype file_search: ~azure.ai.assistants.models.FileSearchToolDefinitionDetails + """ + + type: Literal["file_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'file_search'. Required. Default value is \"file_search\".""" + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Options overrides for the file search tool.""" + + @overload + def __init__( + self, + *, + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_search", **kwargs) + + +class FileSearchToolDefinitionDetails(_model_base.Model): + """Options overrides for the file search tool. + + :ivar max_num_results: The maximum number of results the file search tool should output. The + default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 + inclusive. + + Note that the file search tool may output fewer than ``max_num_results`` results. See the file + search tool documentation for more information. + :vartype max_num_results: int + :ivar ranking_options: Ranking options for file search. + :vartype ranking_options: ~azure.ai.assistants.models.FileSearchRankingOptions + """ + + max_num_results: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The maximum number of results the file search tool should output. The default is 20 for gpt-4* + models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than ``max_num_results`` results. See the file + search tool documentation for more information.""" + ranking_options: Optional["_models.FileSearchRankingOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Ranking options for file search.""" + + @overload + def __init__( + self, + *, + max_num_results: Optional[int] = None, + ranking_options: Optional["_models.FileSearchRankingOptions"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileSearchToolResource(_model_base.Model): + """A set of resources that are used by the ``file_search`` tool. + + :ivar vector_store_ids: The ID of the vector store attached to this assistant. There can be a + maximum of 1 vector + store attached to the assistant. 
+ :vartype vector_store_ids: list[str]
+ :ivar vector_stores: The list of vector store configuration objects from Azure.
+ This list is limited to one element.
+ The only element of this list contains the list of Azure asset IDs used by the search tool.
+ :vartype vector_stores: list[~azure.ai.assistants.models.VectorStoreConfigurations]
+ """
+
+ vector_store_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the vector store attached to this assistant. There can be a maximum of 1 vector
+ store attached to the assistant."""
+ vector_stores: Optional[List["_models.VectorStoreConfigurations"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The list of vector store configuration objects from Azure.
+ This list is limited to one element.
+ The only element of this list contains the list of Azure asset IDs used by the search tool."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ vector_store_ids: Optional[List[str]] = None,
+ vector_stores: Optional[List["_models.VectorStoreConfigurations"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class FunctionDefinition(_model_base.Model):
+ """The input definition information for a function.
+
+ :ivar name: The name of the function to be called. Required.
+ :vartype name: str
+ :ivar description: A description of what the function does, used by the model to choose when
+ and how to call the function.
+ :vartype description: str
+ :ivar parameters: The parameters the function accepts, described as a JSON Schema object.
+ Required.
+ :vartype parameters: any
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to be called. Required."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A description of what the function does, used by the model to choose when and how to call the
+ function."""
+ parameters: Any = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The parameters the function accepts, described as a JSON Schema object. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ parameters: Any,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class FunctionName(_model_base.Model):
+ """The function name that will be used, if using the ``function`` tool.
+
+ :ivar name: The name of the function to call. Required.
+ :vartype name: str
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FunctionToolDefinition(ToolDefinition, discriminator="function"): + """The input definition information for a function tool as used to configure an assistant. + + :ivar type: The object type, which is always 'function'. Required. Default value is "function". + :vartype type: str + :ivar function: The definition of the concrete function that the function tool should call. + Required. + :vartype function: ~azure.ai.assistants.models.FunctionDefinition + """ + + type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'function'. Required. Default value is \"function\".""" + function: "_models.FunctionDefinition" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The definition of the concrete function that the function tool should call. Required.""" + + @overload + def __init__( + self, + *, + function: "_models.FunctionDefinition", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="function", **kwargs) + + +class IncompleteRunDetails(_model_base.Model): + """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. + + :ivar reason: The reason why the run is incomplete. This indicates which specific token limit + was reached during the run. Required. Known values are: "max_completion_tokens" and + "max_prompt_tokens". + :vartype reason: str or ~azure.ai.assistants.models.IncompleteDetailsReason + """ + + reason: Union[str, "_models.IncompleteDetailsReason"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The reason why the run is incomplete. This indicates which specific token limit was reached + during the run. Required. Known values are: \"max_completion_tokens\" and + \"max_prompt_tokens\".""" + + @overload + def __init__( + self, + *, + reason: Union[str, "_models.IncompleteDetailsReason"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageAttachment(_model_base.Model): + """This describes to which tools a file has been attached. + + :ivar file_id: The ID of the file to attach to the message. + :vartype file_id: str + :ivar data_source: Azure asset ID. + :vartype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :ivar tools: The tools to add to this file. Required. 
+ :vartype tools: list[~azure.ai.assistants.models.CodeInterpreterToolDefinition or + ~azure.ai.assistants.models.FileSearchToolDefinition] + """ + + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the file to attach to the message.""" + data_source: Optional["_models.VectorStoreDataSource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Azure asset ID.""" + tools: List["_types.MessageAttachmentToolDefinition"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The tools to add to this file. Required.""" + + @overload + def __init__( + self, + *, + tools: List["_types.MessageAttachmentToolDefinition"], + file_id: Optional[str] = None, + data_source: Optional["_models.VectorStoreDataSource"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageContent(_model_base.Model): + """An abstract representation of a single item of thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageImageFileContent, MessageTextContent + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDelta(_model_base.Model): + """Represents the typed 'delta' payload within a streaming message delta chunk. + + :ivar role: The entity that produced the message. Required. Known values are: "user" and + "assistant". + :vartype role: str or ~azure.ai.assistants.models.MessageRole + :ivar content: The content of the message as an array of text and/or images. Required. + :vartype content: list[~azure.ai.assistants.models.MessageDeltaContent] + """ + + role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The entity that produced the message. Required. Known values are: \"user\" and \"assistant\".""" + content: List["_models.MessageDeltaContent"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The content of the message as an array of text and/or images. Required.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: List["_models.MessageDeltaContent"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaChunk(_model_base.Model): + """Represents a message delta i.e. any changed fields on a message during streaming. 
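+
+ A minimal consumption sketch (the ``stream`` iterable and its ``(event_type, event_data, _)``
+ tuple shape are illustrative assumptions, not guaranteed by this model):
+
+ .. code-block:: python
+
+     for event_type, event_data, _ in stream:
+         if isinstance(event_data, MessageDeltaChunk):
+             # delta.content is a list of partial content parts; text parts carry
+             # the incremental text in part.text.value
+             for part in event_data.delta.content:
+                 if isinstance(part, MessageDeltaTextContent) and part.text:
+                     print(part.text.value or "", end="")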
+
+ :ivar id: The identifier of the message, which can be referenced in API endpoints. Required.
+ :vartype id: str
+ :ivar object: The object type, which is always ``thread.message.delta``. Required. Default
+ value is "thread.message.delta".
+ :vartype object: str
+ :ivar delta: The delta containing the fields that have changed on the Message. Required.
+ :vartype delta: ~azure.ai.assistants.models.MessageDelta
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The identifier of the message, which can be referenced in API endpoints. Required."""
+ object: Literal["thread.message.delta"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The object type, which is always ``thread.message.delta``. Required. Default value is
+ \"thread.message.delta\"."""
+ delta: "_models.MessageDelta" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The delta containing the fields that have changed on the Message. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ delta: "_models.MessageDelta",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["thread.message.delta"] = "thread.message.delta"
+
+
+ class MessageDeltaContent(_model_base.Model):
+ """The abstract base representation of a partial streamed message content payload.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ MessageDeltaImageFileContent, MessageDeltaTextContent
+
+ :ivar index: The index of the content part of the message. Required.
+ :vartype index: int
+ :ivar type: The type of content for this content part. Required. Default value is None.
+ :vartype type: str
+ """
+
+ __mapping__: Dict[str, _model_base.Model] = {}
+ index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the content part of the message. Required."""
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """The type of content for this content part. Required. Default value is None."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ index: int,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class MessageDeltaImageFileContent(MessageDeltaContent, discriminator="image_file"):
+ """Represents a streamed image file content part within a streaming message delta chunk.
+
+ :ivar index: The index of the content part of the message. Required.
+ :vartype index: int
+ :ivar type: The type of content for this content part, which is always "image_file". Required.
+ Default value is "image_file".
+ :vartype type: str
+ :ivar image_file: The image_file data.
+ :vartype image_file: ~azure.ai.assistants.models.MessageDeltaImageFileContentObject
+ """
+
+ type: Literal["image_file"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of content for this content part, which is always \"image_file\". Required. Default
+ value is \"image_file\"."""
+ image_file: Optional["_models.MessageDeltaImageFileContentObject"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The image_file data."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ index: int,
+ image_file: Optional["_models.MessageDeltaImageFileContentObject"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="image_file", **kwargs)
+
+
+ class MessageDeltaImageFileContentObject(_model_base.Model):
+ """Represents the 'image_file' payload within streaming image file content.
+
+ :ivar file_id: The file ID of the image in the message content.
+ :vartype file_id: str
+ """
+
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The file ID of the image in the message content."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class MessageDeltaTextAnnotation(_model_base.Model):
+ """The abstract base representation of a streamed text content part's text annotation.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ MessageDeltaTextFileCitationAnnotation, MessageDeltaTextFilePathAnnotation,
+ MessageDeltaTextUrlCitationAnnotation
+
+ :ivar index: The index of the annotation within a text content part. Required.
+ :vartype index: int
+ :ivar type: The type of the text content annotation. Required. Default value is None.
+ :vartype type: str
+ """
+
+ __mapping__: Dict[str, _model_base.Model] = {}
+ index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the annotation within a text content part. Required."""
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """The type of the text content annotation. Required. Default value is None."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ index: int,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class MessageDeltaTextContent(MessageDeltaContent, discriminator="text"):
+ """Represents a streamed text content part within a streaming message delta chunk.
+
+ :ivar index: The index of the content part of the message. Required.
+ :vartype index: int
+ :ivar type: The type of content for this content part, which is always "text". Required.
+ Default value is "text".
+ :vartype type: str
+ :ivar text: The text content details.
+ :vartype text: ~azure.ai.assistants.models.MessageDeltaTextContentObject
+ """
+
+ type: Literal["text"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of content for this content part, which is always \"text\". Required. Default value
+ is \"text\"."""
+ text: Optional["_models.MessageDeltaTextContentObject"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The text content details."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ index: int,
+ text: Optional["_models.MessageDeltaTextContentObject"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="text", **kwargs)
+
+
+ class MessageDeltaTextContentObject(_model_base.Model):
+ """Represents the data of a streamed text content part within a streaming message delta chunk.
+
+ :ivar value: The data that makes up the text.
+ :vartype value: str
+ :ivar annotations: Annotations for the text.
+ :vartype annotations: list[~azure.ai.assistants.models.MessageDeltaTextAnnotation]
+ """
+
+ value: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The data that makes up the text."""
+ annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Annotations for the text."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ value: Optional[str] = None,
+ annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class MessageDeltaTextFileCitationAnnotation(MessageDeltaTextAnnotation, discriminator="file_citation"):
+ """Represents a streamed file citation applied to a streaming text content part.
+
+ :ivar index: The index of the annotation within a text content part. Required.
+ :vartype index: int
+ :ivar type: The type of the text content annotation, which is always "file_citation".
+ Required. Default value is "file_citation".
+ :vartype type: str
+ :ivar file_citation: The file citation information.
+ :vartype file_citation:
+ ~azure.ai.assistants.models.MessageDeltaTextFileCitationAnnotationObject
+ :ivar text: The text in the message content that needs to be replaced.
+ :vartype text: str
+ :ivar start_index: The start index of this annotation in the content text.
+ :vartype start_index: int
+ :ivar end_index: The end index of this annotation in the content text.
+ :vartype end_index: int
+ """
+
+ type: Literal["file_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the text content annotation, which is always \"file_citation\". Required. Default
+ value is \"file_citation\"."""
+ file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The file citation information."""
+ text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text in the message content that needs to be replaced."""
+ start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The start index of this annotation in the content text."""
+ end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The end index of this annotation in the content text."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ index: int,
+ file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = None,
+ text: Optional[str] = None,
+ start_index: Optional[int] = None,
+ end_index: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="file_citation", **kwargs)
+
+
+ class MessageDeltaTextFileCitationAnnotationObject(_model_base.Model): # pylint: disable=name-too-long
+ """Represents the data of a streamed file citation as applied to a streaming text content part.
+
+ :ivar file_id: The ID of the specific file the citation is from.
+ :vartype file_id: str
+ :ivar quote: The specific quote in the cited file.
+ :vartype quote: str
+ """
+
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the specific file the citation is from."""
+ quote: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The specific quote in the cited file."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: Optional[str] = None,
+ quote: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class MessageDeltaTextFilePathAnnotation(MessageDeltaTextAnnotation, discriminator="file_path"):
+ """Represents a streamed file path annotation applied to a streaming text content part.
+
+ :ivar index: The index of the annotation within a text content part. Required.
+ :vartype index: int
+ :ivar type: The type of the text content annotation, which is always "file_path". Required.
+ Default value is "file_path".
+ :vartype type: str
+ :ivar file_path: The file path information.
+ :vartype file_path: ~azure.ai.assistants.models.MessageDeltaTextFilePathAnnotationObject
+ :ivar start_index: The start index of this annotation in the content text.
+ :vartype start_index: int
+ :ivar end_index: The end index of this annotation in the content text.
+ :vartype end_index: int
+ """
+
+ type: Literal["file_path"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the text content annotation, which is always \"file_path\". Required.
Default + value is \"file_path\".""" + file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The file path information.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The start index of this annotation in the content text.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The end index of this annotation in the content text.""" + text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text in the message content that needs to be replaced.""" + + @overload + def __init__( + self, + *, + index: int, + file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = None, + start_index: Optional[int] = None, + end_index: Optional[int] = None, + text: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_path", **kwargs) + + +class MessageDeltaTextFilePathAnnotationObject(_model_base.Model): + """Represents the data of a streamed file path annotation as applied to a streaming text content + part. + + :ivar file_id: The file ID for the annotation. + :vartype file_id: str + """ + + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The file ID for the annotation.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextUrlCitationAnnotation(MessageDeltaTextAnnotation, discriminator="url_citation"): + """A citation within the message that points to a specific URL associated with the message. + Generated when the assistant uses tools such as 'bing_grounding' to search the Internet. + + :ivar index: The index of the annotation within a text content part. Required. + :vartype index: int + :ivar type: The object type, which is always 'url_citation'. Required. Default value is + "url_citation". + :vartype type: str + :ivar url_citation: The details of the URL citation. Required. + :vartype url_citation: ~azure.ai.assistants.models.MessageDeltaTextUrlCitationDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["url_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'url_citation'. Required. Default value is \"url_citation\".""" + url_citation: "_models.MessageDeltaTextUrlCitationDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the URL citation. 
Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + index: int, + url_citation: "_models.MessageDeltaTextUrlCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="url_citation", **kwargs) + + +class MessageDeltaTextUrlCitationDetails(_model_base.Model): + """A representation of a URL citation, as used in text thread message content. + + :ivar url: The URL associated with this citation. Required. + :vartype url: str + :ivar title: The title of the URL. + :vartype title: str + """ + + url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The URL associated with this citation. Required.""" + title: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The title of the URL.""" + + @overload + def __init__( + self, + *, + url: str, + title: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageImageFileContent(MessageContent, discriminator="image_file"): + """A representation of image file content in a thread message. + + :ivar type: The object type, which is always 'image_file'. Required. Default value is + "image_file". + :vartype type: str + :ivar image_file: The image file for this thread message content item. Required. + :vartype image_file: ~azure.ai.assistants.models.MessageImageFileDetails + """ + + type: Literal["image_file"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'image_file'. Required. Default value is \"image_file\".""" + image_file: "_models.MessageImageFileDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The image file for this thread message content item. Required.""" + + @overload + def __init__( + self, + *, + image_file: "_models.MessageImageFileDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image_file", **kwargs) + + +class MessageImageFileDetails(_model_base.Model): + """An image reference, as represented in thread message content. + + :ivar file_id: The ID for the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID for the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... 
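+
+ # Usage sketch (the file ID below is a placeholder): the keyword overload and the
+ # raw-JSON mapping overload build the same model, e.g.
+ #   MessageImageFileDetails(file_id="assistant-file-123")
+ #   MessageImageFileDetails({"file_id": "assistant-file-123"})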
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageImageFileParam(_model_base.Model): + """Defines how an internally uploaded image file is referenced when creating an image-file block. + + :ivar file_id: The ID of the previously uploaded image file. Required. + :vartype file_id: str + :ivar detail: Optional detail level for the image (auto, low, or high). Known values are: + "auto", "low", and "high". + :vartype detail: str or ~azure.ai.assistants.models.ImageDetailLevel + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the previously uploaded image file. Required.""" + detail: Optional[Union[str, "_models.ImageDetailLevel"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Optional detail level for the image (auto, low, or high). Known values are: \"auto\", \"low\", + and \"high\".""" + + @overload + def __init__( + self, + *, + file_id: str, + detail: Optional[Union[str, "_models.ImageDetailLevel"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageImageUrlParam(_model_base.Model): + """Defines how an external image URL is referenced when creating an image-URL block. + + :ivar url: The publicly accessible URL of the external image. Required. + :vartype url: str + :ivar detail: Optional detail level for the image (auto, low, or high). Defaults to 'auto' if + not specified. Known values are: "auto", "low", and "high". + :vartype detail: str or ~azure.ai.assistants.models.ImageDetailLevel + """ + + url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The publicly accessible URL of the external image. Required.""" + detail: Optional[Union[str, "_models.ImageDetailLevel"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Optional detail level for the image (auto, low, or high). Defaults to 'auto' if not specified. + Known values are: \"auto\", \"low\", and \"high\".""" + + @overload + def __init__( + self, + *, + url: str, + detail: Optional[Union[str, "_models.ImageDetailLevel"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageIncompleteDetails(_model_base.Model): + """Information providing additional detail about a message entering an incomplete status. + + :ivar reason: The provided reason describing why the message was marked as incomplete. + Required. Known values are: "content_filter", "max_tokens", "run_cancelled", "run_failed", and + "run_expired". + :vartype reason: str or ~azure.ai.assistants.models.MessageIncompleteDetailsReason + """ + + reason: Union[str, "_models.MessageIncompleteDetailsReason"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The provided reason describing why the message was marked as incomplete. Required. 
Known values + are: \"content_filter\", \"max_tokens\", \"run_cancelled\", \"run_failed\", and + \"run_expired\".""" + + @overload + def __init__( + self, + *, + reason: Union[str, "_models.MessageIncompleteDetailsReason"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageInputContentBlock(_model_base.Model): + """Defines a single content block when creating a message. The 'type' field determines whether it + is text, an image file, or an external image URL, etc. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageInputImageFileBlock, MessageInputImageUrlBlock, MessageInputTextBlock + + :ivar type: Specifies which kind of content block this is (text, image_file, image_url, etc.). + Required. Known values are: "text", "image_file", and "image_url". + :vartype type: str or ~azure.ai.assistants.models.MessageBlockType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """Specifies which kind of content block this is (text, image_file, image_url, etc.). Required. + Known values are: \"text\", \"image_file\", and \"image_url\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageInputImageFileBlock(MessageInputContentBlock, discriminator="image_file"): + """An image-file block in a new message, referencing an internally uploaded image by file ID. + + :ivar type: Must be 'image_file' for an internally uploaded image block. Required. Indicates a + block referencing an internally uploaded image file. + :vartype type: str or ~azure.ai.assistants.models.IMAGE_FILE + :ivar image_file: Information about the referenced image file, including file ID and optional + detail level. Required. + :vartype image_file: ~azure.ai.assistants.models.MessageImageFileParam + """ + + type: Literal[MessageBlockType.IMAGE_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Must be 'image_file' for an internally uploaded image block. Required. Indicates a block + referencing an internally uploaded image file.""" + image_file: "_models.MessageImageFileParam" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Information about the referenced image file, including file ID and optional detail level. + Required.""" + + @overload + def __init__( + self, + *, + image_file: "_models.MessageImageFileParam", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=MessageBlockType.IMAGE_FILE, **kwargs) + + +class MessageInputImageUrlBlock(MessageInputContentBlock, discriminator="image_url"): + """An image-URL block in a new message, referencing an external image by URL. 
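+
+ A small illustrative sketch composing this block from the params defined in this
+ module (the URL shown is a placeholder):
+
+ .. code-block:: python
+
+     block = MessageInputImageUrlBlock(
+         image_url=MessageImageUrlParam(
+             url="https://example.com/image.jpg",  # placeholder URL
+             detail="low",
+         )
+     )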
+ + :ivar type: Must be 'image_url' for an externally hosted image block. Required. Indicates a + block referencing an external image URL. + :vartype type: str or ~azure.ai.assistants.models.IMAGE_URL + :ivar image_url: Information about the external image URL, including the URL and optional + detail level. Required. + :vartype image_url: ~azure.ai.assistants.models.MessageImageUrlParam + """ + + type: Literal[MessageBlockType.IMAGE_URL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Must be 'image_url' for an externally hosted image block. Required. Indicates a block + referencing an external image URL.""" + image_url: "_models.MessageImageUrlParam" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Information about the external image URL, including the URL and optional detail level. + Required.""" + + @overload + def __init__( + self, + *, + image_url: "_models.MessageImageUrlParam", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=MessageBlockType.IMAGE_URL, **kwargs) + + +class MessageInputTextBlock(MessageInputContentBlock, discriminator="text"): + """A text block in a new message, containing plain text content. + + :ivar type: Must be 'text' for a text block. Required. Indicates a block containing text + content. + :vartype type: str or ~azure.ai.assistants.models.TEXT + :ivar text: The plain text content for this block. Required. + :vartype text: str + """ + + type: Literal[MessageBlockType.TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Must be 'text' for a text block. Required. Indicates a block containing text content.""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The plain text content for this block. Required.""" + + @overload + def __init__( + self, + *, + text: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=MessageBlockType.TEXT, **kwargs) + + +class MessageTextAnnotation(_model_base.Model): + """An abstract representation of an annotation to text thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation, + MessageTextUrlCitationAnnotation + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Default value is None.""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The textual content associated with this text annotation item. Required.""" + + @overload + def __init__( + self, + *, + type: str, + text: str, + ) -> None: ... 
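+
+ # Note (illustrative): on deserialization the ``type`` discriminator selects the
+ # concrete sub-class, e.g. a payload with "type": "url_citation" is surfaced as
+ # MessageTextUrlCitationAnnotation rather than this base class.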
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextContent(MessageContent, discriminator="text"): + """A representation of a textual item of thread message content. + + :ivar type: The object type, which is always 'text'. Required. Default value is "text". + :vartype type: str + :ivar text: The text and associated annotations for this thread message content item. Required. + :vartype text: ~azure.ai.assistants.models.MessageTextDetails + """ + + type: Literal["text"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'text'. Required. Default value is \"text\".""" + text: "_models.MessageTextDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text and associated annotations for this thread message content item. Required.""" + + @overload + def __init__( + self, + *, + text: "_models.MessageTextDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="text", **kwargs) + + +class MessageTextDetails(_model_base.Model): + """The text and associated annotations for a single item of assistant thread message content. + + :ivar value: The text data. Required. + :vartype value: str + :ivar annotations: A list of annotations associated with this text. Required. + :vartype annotations: list[~azure.ai.assistants.models.MessageTextAnnotation] + """ + + value: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text data. Required.""" + annotations: List["_models.MessageTextAnnotation"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A list of annotations associated with this text. Required.""" + + @overload + def __init__( + self, + *, + value: str, + annotations: List["_models.MessageTextAnnotation"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="file_citation"): + """A citation within the message that points to a specific quote from a specific File associated + with the assistant or the message. Generated when the assistant uses the 'file_search' tool to + search files. + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'file_citation'. Required. Default value is + "file_citation". + :vartype type: str + :ivar file_citation: A citation within the message that points to a specific quote from a + specific file. + Generated when the assistant uses the "file_search" tool to search files. Required. + :vartype file_citation: ~azure.ai.assistants.models.MessageTextFileCitationDetails + :ivar start_index: The first text index associated with this text annotation. 
+ :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["file_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'file_citation'. Required. Default value is \"file_citation\".""" + file_citation: "_models.MessageTextFileCitationDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A citation within the message that points to a specific quote from a specific file. + Generated when the assistant uses the \"file_search\" tool to search files. Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + file_citation: "_models.MessageTextFileCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_citation", **kwargs) + + +class MessageTextFileCitationDetails(_model_base.Model): + """A representation of a file-based text citation, as used in a file-based annotation of text + thread message content. + + :ivar file_id: The ID of the file associated with this citation. Required. + :vartype file_id: str + :ivar quote: The specific quote cited in the associated file. Required. + :vartype quote: str + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the file associated with this citation. Required.""" + quote: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The specific quote cited in the associated file. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + quote: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_path"): + """A citation within the message that points to a file located at a specific path. + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'file_path'. Required. Default value is + "file_path". + :vartype type: str + :ivar file_path: A URL for the file that's generated when the assistant used the + code_interpreter tool to generate a file. Required. + :vartype file_path: ~azure.ai.assistants.models.MessageTextFilePathDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. 
+ :vartype end_index: int + """ + + type: Literal["file_path"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'file_path'. Required. Default value is \"file_path\".""" + file_path: "_models.MessageTextFilePathDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A URL for the file that's generated when the assistant used the code_interpreter tool to + generate a file. Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + file_path: "_models.MessageTextFilePathDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_path", **kwargs) + + +class MessageTextFilePathDetails(_model_base.Model): + """An encapsulation of an image file ID, as used by message image content. + + :ivar file_id: The ID of the specific file that the citation is from. Required. + :vartype file_id: str + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the specific file that the citation is from. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextUrlCitationAnnotation(MessageTextAnnotation, discriminator="url_citation"): + """A citation within the message that points to a specific URL associated with the message. + Generated when the assistant uses tools such as 'bing_grounding' to search the Internet. + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'url_citation'. Required. Default value is + "url_citation". + :vartype type: str + :ivar url_citation: The details of the URL citation. Required. + :vartype url_citation: ~azure.ai.assistants.models.MessageTextUrlCitationDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["url_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'url_citation'. Required. Default value is \"url_citation\".""" + url_citation: "_models.MessageTextUrlCitationDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the URL citation. 
Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + url_citation: "_models.MessageTextUrlCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="url_citation", **kwargs) + + +class MessageTextUrlCitationDetails(_model_base.Model): + """A representation of a URL citation, as used in text thread message content. + + :ivar url: The URL associated with this citation. Required. + :vartype url: str + :ivar title: The title of the URL. + :vartype title: str + """ + + url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The URL associated with this citation. Required.""" + title: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The title of the URL.""" + + @overload + def __init__( + self, + *, + url: str, + title: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="fabric_dataagent"): + """The input definition information for a Microsoft Fabric tool as used to configure an assistant. + + :ivar type: The object type, which is always 'fabric_dataagent'. Required. Default value is + "fabric_dataagent". + :vartype type: str + :ivar fabric_dataagent: The list of connections used by the Microsoft Fabric tool. Required. + :vartype fabric_dataagent: ~azure.ai.assistants.models.ToolConnectionList + """ + + type: Literal["fabric_dataagent"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'fabric_dataagent'. Required. Default value is + \"fabric_dataagent\".""" + fabric_dataagent: "_models.ToolConnectionList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The list of connections used by the Microsoft Fabric tool. Required.""" + + @overload + def __init__( + self, + *, + fabric_dataagent: "_models.ToolConnectionList", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="fabric_dataagent", **kwargs) + + +class OpenAIFile(_model_base.Model): + """Represents an assistant that can call the model and use tools. + + :ivar object: The object type, which is always 'file'. Required. Default value is "file". + :vartype object: str + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar bytes: The size of the file, in bytes. Required. 
+ :vartype bytes: int + :ivar filename: The name of the file. Required. + :vartype filename: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar purpose: The intended purpose of a file. Required. Known values are: "fine-tune", + "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". + :vartype purpose: str or ~azure.ai.assistants.models.FilePurpose + :ivar status: The state of the file. This field is available in Azure OpenAI only. Known values + are: "uploaded", "pending", "running", "processed", "error", "deleting", and "deleted". + :vartype status: str or ~azure.ai.assistants.models.FileState + :ivar status_details: The error message with details in case processing of this file failed. + This field is available in Azure OpenAI only. + :vartype status_details: str + """ + + object: Literal["file"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'file'. Required. Default value is \"file\".""" + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + bytes: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The size of the file, in bytes. Required.""" + filename: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the file. Required.""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + purpose: Union[str, "_models.FilePurpose"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The intended purpose of a file. Required. Known values are: \"fine-tune\", + \"fine-tune-results\", \"assistants\", \"assistants_output\", \"batch\", \"batch_output\", and + \"vision\".""" + status: Optional[Union[str, "_models.FileState"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The state of the file. This field is available in Azure OpenAI only. Known values are: + \"uploaded\", \"pending\", \"running\", \"processed\", \"error\", \"deleting\", and + \"deleted\".""" + status_details: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The error message with details in case processing of this file failed. This field is available + in Azure OpenAI only.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bytes: int, + filename: str, + created_at: datetime.datetime, + purpose: Union[str, "_models.FilePurpose"], + status: Optional[Union[str, "_models.FileState"]] = None, + status_details: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class OpenAIPageableListOfAssistant(_model_base.Model): + """The response data for a requested list of items. + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. 
+ :vartype data: list[~azure.ai.assistants.models.Assistant] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.Assistant"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The requested list of items. Required.""" + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first ID represented in this list. Required.""" + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last ID represented in this list. Required.""" + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.Assistant"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfRunStep(_model_base.Model): + """The response data for a requested list of items. + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.assistants.models.RunStep] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.RunStep"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The requested list of items. Required.""" + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first ID represented in this list. Required.""" + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last ID represented in this list. Required.""" + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.RunStep"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfThreadMessage(_model_base.Model): + """The response data for a requested list of items. + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.assistants.models.ThreadMessage] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.ThreadMessage"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The requested list of items. Required.""" + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first ID represented in this list. Required.""" + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last ID represented in this list. Required.""" + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.ThreadMessage"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfThreadRun(_model_base.Model): + """The response data for a requested list of items. + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.assistants.models.ThreadRun] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.ThreadRun"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The requested list of items. Required.""" + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first ID represented in this list. Required.""" + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last ID represented in this list. 
Required.""" + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.ThreadRun"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfVectorStore(_model_base.Model): + """The response data for a requested list of items. + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.assistants.models.VectorStore] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.VectorStore"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The requested list of items. Required.""" + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first ID represented in this list. Required.""" + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last ID represented in this list. Required.""" + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.VectorStore"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfVectorStoreFile(_model_base.Model): + """The response data for a requested list of items. + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.assistants.models.VectorStoreFile] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always list. Required. 
Default value is \"list\".""" + data: List["_models.VectorStoreFile"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The requested list of items. Required.""" + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first ID represented in this list. Required.""" + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last ID represented in this list. Required.""" + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.VectorStoreFile"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenApiAuthDetails(_model_base.Model): + """authentication details for OpenApiFunctionDefinition. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + OpenApiAnonymousAuthDetails, OpenApiConnectionAuthDetails, OpenApiManagedAuthDetails + + :ivar type: The type of authentication, must be anonymous/connection/managed_identity. + Required. Known values are: "anonymous", "connection", and "managed_identity". + :vartype type: str or ~azure.ai.assistants.models.OpenApiAuthType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The type of authentication, must be anonymous/connection/managed_identity. Required. Known + values are: \"anonymous\", \"connection\", and \"managed_identity\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class OpenApiAnonymousAuthDetails(OpenApiAuthDetails, discriminator="anonymous"): + """Security details for OpenApi anonymous authentication. + + :ivar type: The object type, which is always 'anonymous'. Required. + :vartype type: str or ~azure.ai.assistants.models.ANONYMOUS + """ + + type: Literal[OpenApiAuthType.ANONYMOUS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'anonymous'. Required.""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=OpenApiAuthType.ANONYMOUS, **kwargs) + + +class OpenApiConnectionAuthDetails(OpenApiAuthDetails, discriminator="connection"): + """Security details for OpenApi connection authentication. + + :ivar type: The object type, which is always 'connection'. Required. + :vartype type: str or ~azure.ai.assistants.models.CONNECTION + :ivar security_scheme: Connection auth security details. 
Required.
+ :vartype security_scheme: ~azure.ai.assistants.models.OpenApiConnectionSecurityScheme
+ """
+
+ type: Literal[OpenApiAuthType.CONNECTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'connection'. Required."""
+ security_scheme: "_models.OpenApiConnectionSecurityScheme" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Connection auth security details. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ security_scheme: "_models.OpenApiConnectionSecurityScheme",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type=OpenApiAuthType.CONNECTION, **kwargs)
+
+
+class OpenApiConnectionSecurityScheme(_model_base.Model):
+ """Security scheme for OpenApi connection authentication.
+
+ :ivar connection_id: Connection id for Connection auth type. Required.
+ :vartype connection_id: str
+ """
+
+ connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Connection id for Connection auth type. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ connection_id: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OpenApiFunctionDefinition(_model_base.Model):
+ """The input definition information for an OpenAPI function.
+
+ :ivar name: The name of the function to be called. Required.
+ :vartype name: str
+ :ivar description: A description of what the function does, used by the model to choose when
+ and how to call the function.
+ :vartype description: str
+ :ivar spec: The OpenAPI function shape, described as a JSON Schema object. Required.
+ :vartype spec: any
+ :ivar auth: OpenAPI authentication details. Required.
+ :vartype auth: ~azure.ai.assistants.models.OpenApiAuthDetails
+ :ivar default_params: List of OpenAPI spec parameters that will use user-provided defaults.
+ :vartype default_params: list[str]
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to be called. Required."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A description of what the function does, used by the model to choose when and how to call the
+ function."""
+ spec: Any = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The OpenAPI function shape, described as a JSON Schema object. Required."""
+ auth: "_models.OpenApiAuthDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """OpenAPI authentication details. Required."""
+ default_params: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """List of OpenAPI spec parameters that will use user-provided defaults."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ spec: Any,
+ auth: "_models.OpenApiAuthDetails",
+ description: Optional[str] = None,
+ default_params: Optional[List[str]] = None,
+ ) -> None: ...
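+
+ # Illustrative sketch (not part of the generated surface): one way this model
+ # could be constructed for an OpenAPI tool with anonymous auth. The name, spec,
+ # and description below are hypothetical placeholders, not a real API contract.
+ #
+ #     weather = OpenApiFunctionDefinition(
+ #         name="get_weather",
+ #         description="Fetch current weather for a location.",
+ #         spec={"openapi": "3.1.0", "info": {"title": "weather", "version": "1.0"}, "paths": {}},
+ #         auth=OpenApiAnonymousAuthDetails(),
+ #     )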
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OpenApiManagedAuthDetails(OpenApiAuthDetails, discriminator="managed_identity"):
+ """Security details for OpenApi managed_identity authentication.
+
+ :ivar type: The object type, which is always 'managed_identity'. Required.
+ :vartype type: str or ~azure.ai.assistants.models.MANAGED_IDENTITY
+ :ivar security_scheme: Managed identity auth security details. Required.
+ :vartype security_scheme: ~azure.ai.assistants.models.OpenApiManagedSecurityScheme
+ """
+
+ type: Literal[OpenApiAuthType.MANAGED_IDENTITY] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'managed_identity'. Required."""
+ security_scheme: "_models.OpenApiManagedSecurityScheme" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Managed identity auth security details. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ security_scheme: "_models.OpenApiManagedSecurityScheme",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type=OpenApiAuthType.MANAGED_IDENTITY, **kwargs)
+
+
+class OpenApiManagedSecurityScheme(_model_base.Model):
+ """Security scheme for OpenApi managed_identity authentication.
+
+ :ivar audience: Authentication scope for managed_identity auth type. Required.
+ :vartype audience: str
+ """
+
+ audience: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Authentication scope for managed_identity auth type. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ audience: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OpenApiToolDefinition(ToolDefinition, discriminator="openapi"):
+ """The input definition information for an OpenAPI tool as used to configure an assistant.
+
+ :ivar type: The object type, which is always 'openapi'. Required. Default value is "openapi".
+ :vartype type: str
+ :ivar openapi: The OpenAPI function definition. Required.
+ :vartype openapi: ~azure.ai.assistants.models.OpenApiFunctionDefinition
+ """
+
+ type: Literal["openapi"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'openapi'. Required. Default value is \"openapi\"."""
+ openapi: "_models.OpenApiFunctionDefinition" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The OpenAPI function definition. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ openapi: "_models.OpenApiFunctionDefinition",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="openapi", **kwargs) + + +class RequiredAction(_model_base.Model): + """An abstract representation of a required action for an assistant thread run to continue. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + SubmitToolOutputsAction + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RequiredToolCall(_model_base.Model): + """An abstract representation of a tool invocation needed by the model to continue a run. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RequiredFunctionToolCall + + :ivar type: The object type for the required tool call. Required. Default value is None. + :vartype type: str + :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs. + Required. + :vartype id: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type for the required tool call. Required. Default value is None.""" + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the tool call. This ID must be referenced when submitting tool outputs. Required.""" + + @overload + def __init__( + self, + *, + type: str, + id: str, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RequiredFunctionToolCall(RequiredToolCall, discriminator="function"): + """A representation of a requested call to a function tool, needed by the model to continue + evaluation of a run. + + :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs. + Required. + :vartype id: str + :ivar type: The object type of the required tool call. Always 'function' for function tools. + Required. Default value is "function". + :vartype type: str + :ivar function: Detailed information about the function to be executed by the tool that + includes name and arguments. Required. + :vartype function: ~azure.ai.assistants.models.RequiredFunctionToolCallDetails + """ + + type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type of the required tool call. Always 'function' for function tools. Required. 
+ Default value is \"function\".""" + function: "_models.RequiredFunctionToolCallDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Detailed information about the function to be executed by the tool that includes name and + arguments. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + function: "_models.RequiredFunctionToolCallDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="function", **kwargs) + + +class RequiredFunctionToolCallDetails(_model_base.Model): + """The detailed information for a function invocation, as provided by a required action invoking a + function tool, that includes the name of and arguments to the function. + + :ivar name: The name of the function. Required. + :vartype name: str + :ivar arguments: The arguments to use when invoking the named function, as provided by the + model. Arguments are presented as a JSON document that should be validated and parsed for + evaluation. Required. + :vartype arguments: str + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the function. Required.""" + arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The arguments to use when invoking the named function, as provided by the model. Arguments are + presented as a JSON document that should be validated and parsed for evaluation. Required.""" + + @overload + def __init__( + self, + *, + name: str, + arguments: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ResponseFormatJsonSchema(_model_base.Model): + """A description of what the response format is for, used by the model to determine how to respond + in the format. + + :ivar description: A description of what the response format is for, used by the model to + determine how to respond in the format. + :vartype description: str + :ivar name: The name of a schema. Required. + :vartype name: str + :ivar schema: The JSON schema object, describing the response format. Required. + :vartype schema: any + """ + + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A description of what the response format is for, used by the model to determine how to respond + in the format.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of a schema. Required.""" + schema: Any = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The JSON schema object, describing the response format. Required.""" + + @overload + def __init__( + self, + *, + name: str, + schema: Any, + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseFormatJsonSchemaType(_model_base.Model):
+ """The type of response format being defined: ``json_schema``.
+
+ :ivar type: Type. Required. Default value is "json_schema".
+ :vartype type: str
+ :ivar json_schema: The JSON schema, describing response format. Required.
+ :vartype json_schema: ~azure.ai.assistants.models.ResponseFormatJsonSchema
+ """
+
+ type: Literal["json_schema"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Type. Required. Default value is \"json_schema\"."""
+ json_schema: "_models.ResponseFormatJsonSchema" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The JSON schema, describing response format. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ json_schema: "_models.ResponseFormatJsonSchema",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["json_schema"] = "json_schema"
+
+
+class RunCompletionUsage(_model_base.Model):
+ """Usage statistics related to the run. This value will be ``null`` if the run is not in a
+ terminal state (e.g. while it is ``in_progress`` or ``queued``).
+
+ :ivar completion_tokens: Number of completion tokens used over the course of the run. Required.
+ :vartype completion_tokens: int
+ :ivar prompt_tokens: Number of prompt tokens used over the course of the run. Required.
+ :vartype prompt_tokens: int
+ :ivar total_tokens: Total number of tokens used (prompt + completion). Required.
+ :vartype total_tokens: int
+ """
+
+ completion_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Number of completion tokens used over the course of the run. Required."""
+ prompt_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Number of prompt tokens used over the course of the run. Required."""
+ total_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Total number of tokens used (prompt + completion). Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ completion_tokens: int,
+ prompt_tokens: int,
+ total_tokens: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class RunError(_model_base.Model):
+ """The details of an error as encountered by an assistant thread run.
+
+ :ivar code: The status for the error. Required.
+ :vartype code: str
+ :ivar message: The human-readable text associated with the error. Required.
+ :vartype message: str
+ """
+
+ code: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The status for the error. Required."""
+ message: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The human-readable text associated with the error. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code: str,
+ message: str,
+ ) -> None: ...
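+
+ # Illustrative sketch (not part of the generated surface): a RunError is usually
+ # read from a failed run rather than constructed directly. ``run`` here is a
+ # hypothetical ThreadRun retrieved from the service.
+ #
+ #     if run.status == "failed" and run.last_error is not None:
+ #         print(f"run failed ({run.last_error.code}): {run.last_error.message}")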
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStep(_model_base.Model): + """Detailed information about a single step of an assistant thread run. + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.run.step'. Required. Default value is + "thread.run.step". + :vartype object: str + :ivar type: The type of run step, which can be either message_creation or tool_calls. Required. + Known values are: "message_creation" and "tool_calls". + :vartype type: str or ~azure.ai.assistants.models.RunStepType + :ivar assistant_id: The ID of the assistant associated with the run step. Required. + :vartype assistant_id: str + :ivar thread_id: The ID of the thread that was run. Required. + :vartype thread_id: str + :ivar run_id: The ID of the run that this run step is a part of. Required. + :vartype run_id: str + :ivar status: The status of this run step. Required. Known values are: "in_progress", + "cancelled", "failed", "completed", and "expired". + :vartype status: str or ~azure.ai.assistants.models.RunStepStatus + :ivar step_details: The details for this run step. Required. + :vartype step_details: ~azure.ai.assistants.models.RunStepDetails + :ivar last_error: If applicable, information about the last error encountered by this run step. + Required. + :vartype last_error: ~azure.ai.assistants.models.RunStepError + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar expired_at: The Unix timestamp, in seconds, representing when this item expired. + Required. + :vartype expired_at: ~datetime.datetime + :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. + :vartype completed_at: ~datetime.datetime + :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. + Required. + :vartype cancelled_at: ~datetime.datetime + :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. + :vartype failed_at: ~datetime.datetime + :ivar usage: Usage statistics related to the run step. This value will be ``null`` while the + run step's status is ``in_progress``. + :vartype usage: ~azure.ai.assistants.models.RunStepCompletionUsage + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run.step"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'thread.run.step'. Required. Default value is + \"thread.run.step\".""" + type: Union[str, "_models.RunStepType"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The type of run step, which can be either message_creation or tool_calls. Required. 
Known + values are: \"message_creation\" and \"tool_calls\".""" + assistant_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the assistant associated with the run step. Required.""" + thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the thread that was run. Required.""" + run_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the run that this run step is a part of. Required.""" + status: Union[str, "_models.RunStepStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The status of this run step. Required. Known values are: \"in_progress\", \"cancelled\", + \"failed\", \"completed\", and \"expired\".""" + step_details: "_models.RunStepDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The details for this run step. Required.""" + last_error: "_models.RunStepError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """If applicable, information about the last error encountered by this run step. Required.""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + expired_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this item expired. Required.""" + completed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this completed. Required.""" + cancelled_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" + failed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this failed. Required.""" + usage: Optional["_models.RunStepCompletionUsage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Usage statistics related to the run step. This value will be ``null`` while the run step's + status is ``in_progress``.""" + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + type: Union[str, "_models.RunStepType"], + assistant_id: str, + thread_id: str, + run_id: str, + status: Union[str, "_models.RunStepStatus"], + step_details: "_models.RunStepDetails", + last_error: "_models.RunStepError", + created_at: datetime.datetime, + expired_at: datetime.datetime, + completed_at: datetime.datetime, + cancelled_at: datetime.datetime, + failed_at: datetime.datetime, + metadata: Dict[str, str], + usage: Optional["_models.RunStepCompletionUsage"] = None, + ) -> None: ... 
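+
+ # Illustrative sketch (not part of the generated surface): consuming run steps,
+ # branching on ``type`` and reading the optional usage. ``steps`` is a
+ # hypothetical list of RunStep instances returned by a list operation.
+ #
+ #     for step in steps:
+ #         if step.type == "tool_calls":
+ #             print("tool step", step.id, step.status)
+ #         elif step.type == "message_creation":
+ #             print("message step", step.id, step.status)
+ #         if step.usage is not None:
+ #             print("total tokens:", step.usage.total_tokens)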
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["thread.run.step"] = "thread.run.step"
+
+
+class RunStepToolCall(_model_base.Model):
+ """An abstract representation of a detailed tool call as recorded within a run step for an
+ existing run.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ RunStepAzureAISearchToolCall, RunStepCustomSearchToolCall, RunStepBingGroundingToolCall,
+ RunStepCodeInterpreterToolCall, RunStepMicrosoftFabricToolCall, RunStepFileSearchToolCall,
+ RunStepFunctionToolCall, RunStepOpenAPIToolCall, RunStepSharepointToolCall
+
+ :ivar type: The object type. Required. Default value is None.
+ :vartype type: str
+ :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
+ Required.
+ :vartype id: str
+ """
+
+ __mapping__: Dict[str, _model_base.Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """The object type. Required. Default value is None."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ id: str, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class RunStepAzureAISearchToolCall(RunStepToolCall, discriminator="azure_ai_search"):
+ """A record of a call to an Azure AI Search tool, issued by the model in evaluation of a defined
+ tool, that represents an executed Azure AI Search query.
+
+ :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
+ Required.
+ :vartype id: str
+ :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is
+ "azure_ai_search".
+ :vartype type: str
+ :ivar azure_ai_search: Reserved for future use. Required.
+ :vartype azure_ai_search: dict[str, str]
+ """
+
+ type: Literal["azure_ai_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'azure_ai_search'. Required. Default value is
+ \"azure_ai_search\"."""
+ azure_ai_search: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Reserved for future use. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ azure_ai_search: Dict[str, str],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="azure_ai_search", **kwargs)
+
+
+class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_grounding"):
+ """A record of a call to a Bing grounding tool, issued by the model in evaluation of a defined
+ tool, that represents an executed search with Bing grounding.
+ + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is + "bing_grounding". + :vartype type: str + :ivar bing_grounding: Reserved for future use. Required. + :vartype bing_grounding: dict[str, str] + """ + + type: Literal["bing_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'bing_grounding'. Required. Default value is + \"bing_grounding\".""" + bing_grounding: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bing_grounding: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_grounding", **kwargs) + + +class RunStepCodeInterpreterToolCallOutput(_model_base.Model): + """An abstract representation of an emitted output from a code interpreter tool. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepCodeInterpreterImageOutput, RunStepCodeInterpreterLogOutput + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterImageOutput(RunStepCodeInterpreterToolCallOutput, discriminator="image"): + """A representation of an image output emitted by a code interpreter tool in response to a tool + call by the model. + + :ivar type: The object type, which is always 'image'. Required. Default value is "image". + :vartype type: str + :ivar image: Referential information for the image associated with this output. Required. + :vartype image: ~azure.ai.assistants.models.RunStepCodeInterpreterImageReference + """ + + type: Literal["image"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'image'. Required. Default value is \"image\".""" + image: "_models.RunStepCodeInterpreterImageReference" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Referential information for the image associated with this output. Required.""" + + @overload + def __init__( + self, + *, + image: "_models.RunStepCodeInterpreterImageReference", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image", **kwargs) + + +class RunStepCodeInterpreterImageReference(_model_base.Model): + """An image reference emitted by a code interpreter tool in response to a tool call by the model. + + :ivar file_id: The ID of the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterLogOutput(RunStepCodeInterpreterToolCallOutput, discriminator="logs"): + """A representation of a log output emitted by a code interpreter tool in response to a tool call + by the model. + + :ivar type: The object type, which is always 'logs'. Required. Default value is "logs". + :vartype type: str + :ivar logs: The serialized log output emitted by the code interpreter. Required. + :vartype logs: str + """ + + type: Literal["logs"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'logs'. Required. Default value is \"logs\".""" + logs: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The serialized log output emitted by the code interpreter. Required.""" + + @overload + def __init__( + self, + *, + logs: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="logs", **kwargs) + + +class RunStepCodeInterpreterToolCall(RunStepToolCall, discriminator="code_interpreter"): + """A record of a call to a code interpreter tool, issued by the model in evaluation of a defined + tool, that + represents inputs and outputs consumed and emitted by the code interpreter. + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is + "code_interpreter". + :vartype type: str + :ivar code_interpreter: The details of the tool call to the code interpreter tool. Required. + :vartype code_interpreter: ~azure.ai.assistants.models.RunStepCodeInterpreterToolCallDetails + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'code_interpreter'. Required. Default value is + \"code_interpreter\".""" + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the tool call to the code interpreter tool. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails", + ) -> None: ... 
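+
+ # Illustrative sketch (not part of the generated surface): code interpreter
+ # outputs are polymorphic, so callers can branch on the concrete subclass.
+ # ``tool_call`` is a hypothetical RunStepCodeInterpreterToolCall taken from a
+ # run step's details.
+ #
+ #     for output in tool_call.code_interpreter.outputs:
+ #         if isinstance(output, RunStepCodeInterpreterLogOutput):
+ #             print("logs:", output.logs)
+ #         elif isinstance(output, RunStepCodeInterpreterImageOutput):
+ #             print("image file:", output.image.file_id)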
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="code_interpreter", **kwargs)
+
+
+class RunStepCodeInterpreterToolCallDetails(_model_base.Model):
+ """The detailed information about a code interpreter invocation by the model.
+
+ :ivar input: The input provided by the model to the code interpreter tool. Required.
+ :vartype input: str
+ :ivar outputs: The outputs produced by the code interpreter tool back to the model in response
+ to the tool call. Required.
+ :vartype outputs: list[~azure.ai.assistants.models.RunStepCodeInterpreterToolCallOutput]
+ """
+
+ input: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The input provided by the model to the code interpreter tool. Required."""
+ outputs: List["_models.RunStepCodeInterpreterToolCallOutput"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The outputs produced by the code interpreter tool back to the model in response to the tool
+ call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ input: str,
+ outputs: List["_models.RunStepCodeInterpreterToolCallOutput"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class RunStepCompletionUsage(_model_base.Model):
+ """Usage statistics related to the run step.
+
+ :ivar completion_tokens: Number of completion tokens used over the course of the run step.
+ Required.
+ :vartype completion_tokens: int
+ :ivar prompt_tokens: Number of prompt tokens used over the course of the run step. Required.
+ :vartype prompt_tokens: int
+ :ivar total_tokens: Total number of tokens used (prompt + completion). Required.
+ :vartype total_tokens: int
+ """
+
+ completion_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Number of completion tokens used over the course of the run step. Required."""
+ prompt_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Number of prompt tokens used over the course of the run step. Required."""
+ total_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Total number of tokens used (prompt + completion). Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ completion_tokens: int,
+ prompt_tokens: int,
+ total_tokens: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class RunStepCustomSearchToolCall(RunStepToolCall, discriminator="bing_custom_search"):
+ """A record of a call to a Bing custom search tool, issued by the model in evaluation of a
+ defined tool, that represents an executed search with Bing custom search.
+
+ :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
+ Required.
+ :vartype id: str
+ :ivar type: The object type, which is always 'bing_custom_search'. Required. Default value is
+ "bing_custom_search".
+ :vartype type: str + :ivar bing_custom_search: Reserved for future use. Required. + :vartype bing_custom_search: dict[str, str] + """ + + type: Literal["bing_custom_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'bing_custom_search'. Required. Default value is + \"bing_custom_search\".""" + bing_custom_search: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bing_custom_search: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_custom_search", **kwargs) + + +class RunStepDelta(_model_base.Model): + """Represents the delta payload in a streaming run step delta chunk. + + :ivar step_details: The details of the run step. + :vartype step_details: ~azure.ai.assistants.models.RunStepDeltaDetail + """ + + step_details: Optional["_models.RunStepDeltaDetail"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the run step.""" + + @overload + def __init__( + self, + *, + step_details: Optional["_models.RunStepDeltaDetail"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaChunk(_model_base.Model): + """Represents a run step delta i.e. any changed fields on a run step during streaming. + + :ivar id: The identifier of the run step, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``thread.run.step.delta``. Required. Default + value is "thread.run.step.delta". + :vartype object: str + :ivar delta: The delta containing the fields that have changed on the run step. Required. + :vartype delta: ~azure.ai.assistants.models.RunStepDelta + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier of the run step, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run.step.delta"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always ``thread.run.step.delta``. Required. Default value is + \"thread.run.step.delta\".""" + delta: "_models.RunStepDelta" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The delta containing the fields that have changed on the run step. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + delta: "_models.RunStepDelta", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["thread.run.step.delta"] = "thread.run.step.delta"
+
+
+ class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model): # pylint: disable=name-too-long
+ """Represents the Code Interpreter tool call data in a streaming run step's tool calls.
+
+ :ivar input: The input into the Code Interpreter tool call.
+ :vartype input: str
+ :ivar outputs: The outputs from the Code Interpreter tool call. Code Interpreter can output one
+ or more items, including text (``logs``) or images (``image``). Each of these is represented by
+ a different object type.
+ :vartype outputs: list[~azure.ai.assistants.models.RunStepDeltaCodeInterpreterOutput]
+ """
+
+ input: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The input into the Code Interpreter tool call."""
+ outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more
+ items, including text (``logs``) or images (``image``). Each of these is represented by a
+ different object type."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ input: Optional[str] = None,
+ outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class RunStepDeltaCodeInterpreterOutput(_model_base.Model):
+ """The abstract base representation of a streaming run step tool call's Code Interpreter tool
+ output.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ RunStepDeltaCodeInterpreterImageOutput, RunStepDeltaCodeInterpreterLogOutput
+
+ :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
+ outputs array. Required.
+ :vartype index: int
+ :ivar type: The type of the streaming run step tool call's Code Interpreter output. Required.
+ Default value is None.
+ :vartype type: str
+ """
+
+ __mapping__: Dict[str, _model_base.Model] = {}
+ index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output in the streaming run step tool call's Code Interpreter outputs array.
+ Required."""
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """The type of the streaming run step tool call's Code Interpreter output. Required. Default value
+ is None."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ index: int,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class RunStepDeltaCodeInterpreterImageOutput(RunStepDeltaCodeInterpreterOutput, discriminator="image"):
+ """Represents an image output as produced by the Code Interpreter tool and as represented in a
+ streaming run step's delta tool calls collection.
+ + :ivar index: The index of the output in the streaming run step tool call's Code Interpreter + outputs array. Required. + :vartype index: int + :ivar type: The object type, which is always "image.". Required. Default value is "image". + :vartype type: str + :ivar image: The image data for the Code Interpreter tool call output. + :vartype image: ~azure.ai.assistants.models.RunStepDeltaCodeInterpreterImageOutputObject + """ + + type: Literal["image"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"image.\". Required. Default value is \"image\".""" + image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The image data for the Code Interpreter tool call output.""" + + @overload + def __init__( + self, + *, + index: int, + image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image", **kwargs) + + +class RunStepDeltaCodeInterpreterImageOutputObject(_model_base.Model): # pylint: disable=name-too-long + """Represents the data for a streaming run step's Code Interpreter tool call image output. + + :ivar file_id: The file ID for the image. + :vartype file_id: str + """ + + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The file ID for the image.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaCodeInterpreterLogOutput(RunStepDeltaCodeInterpreterOutput, discriminator="logs"): + """Represents a log output as produced by the Code Interpreter tool and as represented in a + streaming run step's delta tool calls collection. + + :ivar index: The index of the output in the streaming run step tool call's Code Interpreter + outputs array. Required. + :vartype index: int + :ivar type: The type of the object, which is always "logs.". Required. Default value is "logs". + :vartype type: str + :ivar logs: The text output from the Code Interpreter tool call. + :vartype logs: str + """ + + type: Literal["logs"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The type of the object, which is always \"logs.\". Required. Default value is \"logs\".""" + logs: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text output from the Code Interpreter tool call.""" + + @overload + def __init__( + self, + *, + index: int, + logs: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="logs", **kwargs) + + +class RunStepDeltaToolCall(_model_base.Model): + """The abstract base representation of a single tool call within a streaming run step's delta tool + call details. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepDeltaCodeInterpreterToolCall, RunStepDeltaFileSearchToolCall, + RunStepDeltaFunctionToolCall + + :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. + :vartype index: int + :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. + :vartype id: str + :ivar type: The type of the tool call detail item in a streaming run step's details. Required. + Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The index of the tool call detail in the run step's tool_calls array. Required.""" + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the tool call, used when submitting outputs to the run. Required.""" + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The type of the tool call detail item in a streaming run step's details. Required. Default + value is None.""" + + @overload + def __init__( + self, + *, + index: int, + id: str, # pylint: disable=redefined-builtin + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaCodeInterpreterToolCall(RunStepDeltaToolCall, discriminator="code_interpreter"): + """Represents a Code Interpreter tool call within a streaming run step's tool call details. + + :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. + :vartype index: int + :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. + :vartype id: str + :ivar type: The object type, which is always "code_interpreter.". Required. Default value is + "code_interpreter". + :vartype type: str + :ivar code_interpreter: The Code Interpreter data for the tool call. + :vartype code_interpreter: + ~azure.ai.assistants.models.RunStepDeltaCodeInterpreterDetailItemObject + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"code_interpreter.\". Required. Default value is + \"code_interpreter\".""" + code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The Code Interpreter data for the tool call.""" + + @overload + def __init__( + self, + *, + index: int, + id: str, # pylint: disable=redefined-builtin + code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="code_interpreter", **kwargs) + + +class RunStepDeltaDetail(_model_base.Model): + """Represents a single run step detail item in a streaming run step's delta payload. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepDeltaMessageCreation, RunStepDeltaToolCallObject + + :ivar type: The object type for the run step detail object. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type for the run step detail object. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaFileSearchToolCall(RunStepDeltaToolCall, discriminator="file_search"): + """Represents a file search tool call within a streaming run step's tool call details. + + :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. + :vartype index: int + :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. + :vartype id: str + :ivar type: The object type, which is always "file_search.". Required. Default value is + "file_search". + :vartype type: str + :ivar file_search: Reserved for future use. + :vartype file_search: ~azure.ai.assistants.models.RunStepFileSearchToolCallResults + """ + + type: Literal["file_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"file_search.\". Required. Default value is \"file_search\".""" + file_search: Optional["_models.RunStepFileSearchToolCallResults"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Reserved for future use.""" + + @overload + def __init__( + self, + *, + index: int, + id: str, # pylint: disable=redefined-builtin + file_search: Optional["_models.RunStepFileSearchToolCallResults"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_search", **kwargs) + + +class RunStepDeltaFunction(_model_base.Model): + """Represents the function data in a streaming run step delta's function tool call. + + :ivar name: The name of the function. + :vartype name: str + :ivar arguments: The arguments passed to the function as input. + :vartype arguments: str + :ivar output: The output of the function, null if outputs have not yet been submitted. 
+ :vartype output: str + """ + + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the function.""" + arguments: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The arguments passed to the function as input.""" + output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The output of the function, null if outputs have not yet been submitted.""" + + @overload + def __init__( + self, + *, + name: Optional[str] = None, + arguments: Optional[str] = None, + output: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaFunctionToolCall(RunStepDeltaToolCall, discriminator="function"): + """Represents a function tool call within a streaming run step's tool call details. + + :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. + :vartype index: int + :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. + :vartype id: str + :ivar type: The object type, which is always "function.". Required. Default value is + "function". + :vartype type: str + :ivar function: The function data for the tool call. + :vartype function: ~azure.ai.assistants.models.RunStepDeltaFunction + """ + + type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"function.\". Required. Default value is \"function\".""" + function: Optional["_models.RunStepDeltaFunction"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The function data for the tool call.""" + + @overload + def __init__( + self, + *, + index: int, + id: str, # pylint: disable=redefined-builtin + function: Optional["_models.RunStepDeltaFunction"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="function", **kwargs) + + +class RunStepDeltaMessageCreation(RunStepDeltaDetail, discriminator="message_creation"): + """Represents a message creation within a streaming run step delta. + + :ivar type: The object type, which is always "message_creation.". Required. Default value is + "message_creation". + :vartype type: str + :ivar message_creation: The message creation data. + :vartype message_creation: ~azure.ai.assistants.models.RunStepDeltaMessageCreationObject + """ + + type: Literal["message_creation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"message_creation.\". Required. Default value is + \"message_creation\".""" + message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The message creation data.""" + + @overload + def __init__( + self, + *, + message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = None, + ) -> None: ... 
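+
+ # A minimal dispatch sketch, assuming ``chunk`` is a RunStepDeltaChunk received while
+ # streaming a run; the ``step_details`` discriminated union is narrowed by isinstance::
+ #
+ #     details = chunk.delta.step_details
+ #     if isinstance(details, RunStepDeltaMessageCreation):
+ #         if details.message_creation is not None:
+ #             print(details.message_creation.message_id)  # ID of the newly-created message
+ #     elif isinstance(details, RunStepDeltaToolCallObject):
+ #         for call in details.tool_calls or []:
+ #             print(call.index, call.type)  # e.g. "code_interpreter" or "function"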
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="message_creation", **kwargs) + + +class RunStepDeltaMessageCreationObject(_model_base.Model): + """Represents the data within a streaming run step message creation response object. + + :ivar message_id: The ID of the newly-created message. + :vartype message_id: str + """ + + message_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the newly-created message.""" + + @overload + def __init__( + self, + *, + message_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaToolCallObject(RunStepDeltaDetail, discriminator="tool_calls"): + """Represents an invocation of tool calls as part of a streaming run step. + + :ivar type: The object type, which is always "tool_calls.". Required. Default value is + "tool_calls". + :vartype type: str + :ivar tool_calls: The collection of tool calls for the tool call detail item. + :vartype tool_calls: list[~azure.ai.assistants.models.RunStepDeltaToolCall] + """ + + type: Literal["tool_calls"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"tool_calls.\". Required. Default value is \"tool_calls\".""" + tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The collection of tool calls for the tool call detail item.""" + + @overload + def __init__( + self, + *, + tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="tool_calls", **kwargs) + + +class RunStepDetails(_model_base.Model): + """An abstract representation of the details for a run step. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepMessageCreationDetails, RunStepToolCallDetails + + :ivar type: The object type. Required. Known values are: "message_creation" and "tool_calls". + :vartype type: str or ~azure.ai.assistants.models.RunStepType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Known values are: \"message_creation\" and \"tool_calls\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepError(_model_base.Model): + """The error information associated with a failed run step. + + :ivar code: The error code for this error. Required. 
Known values are: "server_error" and
+ "rate_limit_exceeded".
+ :vartype code: str or ~azure.ai.assistants.models.RunStepErrorCode
+ :ivar message: The human-readable text associated with this error. Required.
+ :vartype message: str
+ """
+
+ code: Union[str, "_models.RunStepErrorCode"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The error code for this error. Required. Known values are: \"server_error\" and
+ \"rate_limit_exceeded\"."""
+ message: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The human-readable text associated with this error. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code: Union[str, "_models.RunStepErrorCode"],
+ message: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class RunStepFileSearchToolCall(RunStepToolCall, discriminator="file_search"):
+ """A record of a call to a file search tool, issued by the model in evaluation of a defined tool,
+ that represents an executed file search.
+
+ :ivar type: The object type, which is always 'file_search'. Required. Default value is
+ "file_search".
+ :vartype type: str
+ :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
+ Required.
+ :vartype id: str
+ :ivar file_search: For now, this is always going to be an empty object. Required.
+ :vartype file_search: ~azure.ai.assistants.models.RunStepFileSearchToolCallResults
+ """
+
+ type: Literal["file_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'file_search'. Required. Default value is \"file_search\"."""
+ file_search: "_models.RunStepFileSearchToolCallResults" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """For now, this is always going to be an empty object. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ file_search: "_models.RunStepFileSearchToolCallResults",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="file_search", **kwargs)
+
+
+ class RunStepFileSearchToolCallResult(_model_base.Model):
+ """File search tool call result.
+
+ :ivar file_id: The ID of the file that the result was found in. Required.
+ :vartype file_id: str
+ :ivar file_name: The name of the file that the result was found in. Required.
+ :vartype file_name: str
+ :ivar score: The score of the result. All values must be a floating point number between 0 and
+ 1. Required.
+ :vartype score: float
+ :ivar content: The content of the result that was found. The content is only included if
+ requested via the include query parameter.
+ :vartype content: list[~azure.ai.assistants.models.FileSearchToolCallContent]
+ """
+
+ file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the file that the result was found in. Required."""
+ file_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the file that the result was found in.
Required."""
+ score: float = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The score of the result. All values must be a floating point number between 0 and 1. Required."""
+ content: Optional[List["_models.FileSearchToolCallContent"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The content of the result that was found. The content is only included if requested via the
+ include query parameter."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: str,
+ file_name: str,
+ score: float,
+ content: Optional[List["_models.FileSearchToolCallContent"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class RunStepFileSearchToolCallResults(_model_base.Model):
+ """The results of the file search.
+
+ :ivar ranking_options: Ranking options for file search.
+ :vartype ranking_options: ~azure.ai.assistants.models.FileSearchRankingOptions
+ :ivar results: The array of file search results. Required.
+ :vartype results: list[~azure.ai.assistants.models.RunStepFileSearchToolCallResult]
+ """
+
+ ranking_options: Optional["_models.FileSearchRankingOptions"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Ranking options for file search."""
+ results: List["_models.RunStepFileSearchToolCallResult"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The array of file search results. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ results: List["_models.RunStepFileSearchToolCallResult"],
+ ranking_options: Optional["_models.FileSearchRankingOptions"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class RunStepFunctionToolCall(RunStepToolCall, discriminator="function"):
+ """A record of a call to a function tool, issued by the model in evaluation of a defined tool,
+ that represents the inputs and output consumed and emitted by the specified function.
+
+ :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
+ Required.
+ :vartype id: str
+ :ivar type: The object type, which is always 'function'. Required. Default value is "function".
+ :vartype type: str
+ :ivar function: The detailed information about the function called by the model. Required.
+ :vartype function: ~azure.ai.assistants.models.RunStepFunctionToolCallDetails
+ """
+
+ type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'function'. Required. Default value is \"function\"."""
+ function: "_models.RunStepFunctionToolCallDetails" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The detailed information about the function called by the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ function: "_models.RunStepFunctionToolCallDetails",
+ ) -> None: ...
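+
+ # A minimal inspection sketch, assuming ``call`` is a RunStepFunctionToolCall taken
+ # from a run step's tool call details (``arguments`` is a string, typically JSON)::
+ #
+ #     import json
+ #     print(call.function.name)        # name of the invoked function
+ #     args = json.loads(call.function.arguments)
+ #     print(call.function.output)      # populated once tool outputs are submitted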
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="function", **kwargs)
+
+
+ class RunStepFunctionToolCallDetails(_model_base.Model):
+ """The detailed information about the function called by the model.
+
+ :ivar name: The name of the function. Required.
+ :vartype name: str
+ :ivar arguments: The arguments that the model requires to be provided to the named function.
+ Required.
+ :vartype arguments: str
+ :ivar output: The output of the function, only populated for function calls that have already
+ had their outputs submitted. Required.
+ :vartype output: str
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The arguments that the model requires to be provided to the named function. Required."""
+ output: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The output of the function, only populated for function calls that have already had their
+ outputs submitted. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ arguments: str,
+ output: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+ class RunStepMessageCreationDetails(RunStepDetails, discriminator="message_creation"):
+ """The detailed information associated with a message creation run step.
+
+ :ivar type: The object type, which is always 'message_creation'. Required. Represents a run
+ step to create a message.
+ :vartype type: str or ~azure.ai.assistants.models.MESSAGE_CREATION
+ :ivar message_creation: Information about the message creation associated with this run step.
+ Required.
+ :vartype message_creation: ~azure.ai.assistants.models.RunStepMessageCreationReference
+ """
+
+ type: Literal[RunStepType.MESSAGE_CREATION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'message_creation'. Required. Represents a run step to create
+ a message."""
+ message_creation: "_models.RunStepMessageCreationReference" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Information about the message creation associated with this run step. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ message_creation: "_models.RunStepMessageCreationReference",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type=RunStepType.MESSAGE_CREATION, **kwargs)
+
+
+ class RunStepMessageCreationReference(_model_base.Model):
+ """The details of a message created as a part of a run step.
+
+ :ivar message_id: The ID of the message created by this run step. Required.
+ :vartype message_id: str + """ + + message_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the message created by this run step. Required.""" + + @overload + def __init__( + self, + *, + message_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="fabric_dataagent"): + """A record of a call to a Microsoft Fabric tool, issued by the model in evaluation of a defined + tool, that represents + executed Microsoft Fabric operations. + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'fabric_dataagent'. Required. Default value is + "fabric_dataagent". + :vartype type: str + :ivar microsoft_fabric: Reserved for future use. Required. + :vartype microsoft_fabric: dict[str, str] + """ + + type: Literal["fabric_dataagent"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'fabric_dataagent'. Required. Default value is + \"fabric_dataagent\".""" + microsoft_fabric: Dict[str, str] = rest_field( + name="fabric_dataagent", visibility=["read", "create", "update", "delete", "query"] + ) + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + microsoft_fabric: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="fabric_dataagent", **kwargs) + + +class RunStepOpenAPIToolCall(RunStepToolCall, discriminator="openapi"): + """A record of a call to an OpenAPI tool, issued by the model in evaluation of a defined tool, + that represents + executed OpenAPI operations. + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'openapi'. Required. Default value is "openapi". + :vartype type: str + :ivar open_api: Reserved for future use. Required. + :vartype open_api: dict[str, str] + """ + + type: Literal["openapi"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'openapi'. Required. Default value is \"openapi\".""" + open_api: Dict[str, str] = rest_field(name="openapi", visibility=["read", "create", "update", "delete", "query"]) + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + open_api: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="openapi", **kwargs) + + +class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint_grounding"): + """A record of a call to a SharePoint tool, issued by the model in evaluation of a defined tool, + that represents + executed SharePoint actions. + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'sharepoint_grounding'. Required. Default value is + "sharepoint_grounding". + :vartype type: str + :ivar share_point: Reserved for future use. Required. + :vartype share_point: dict[str, str] + """ + + type: Literal["sharepoint_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'sharepoint_grounding'. Required. Default value is + \"sharepoint_grounding\".""" + share_point: Dict[str, str] = rest_field( + name="sharepoint_grounding", visibility=["read", "create", "update", "delete", "query"] + ) + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + share_point: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="sharepoint_grounding", **kwargs) + + +class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): + """The detailed information associated with a run step calling tools. + + :ivar type: The object type, which is always 'tool_calls'. Required. Represents a run step that + calls tools. + :vartype type: str or ~azure.ai.assistants.models.TOOL_CALLS + :ivar tool_calls: A list of tool call details for this run step. Required. + :vartype tool_calls: list[~azure.ai.assistants.models.RunStepToolCall] + """ + + type: Literal[RunStepType.TOOL_CALLS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'tool_calls'. Required. Represents a run step that calls + tools.""" + tool_calls: List["_models.RunStepToolCall"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A list of tool call details for this run step. Required.""" + + @overload + def __init__( + self, + *, + tool_calls: List["_models.RunStepToolCall"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) + + +class SearchConfiguration(_model_base.Model): + """A custom search configuration. + + :ivar connection_id: A connection in a ToolConnectionList attached to this tool. Required. + :vartype connection_id: str + :ivar instance_name: Name of the custom configuration instance given to config. Required. + :vartype instance_name: str + """ + + connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A connection in a ToolConnectionList attached to this tool. 
Required.""" + instance_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of the custom configuration instance given to config. Required.""" + + @overload + def __init__( + self, + *, + connection_id: str, + instance_name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchConfigurationList(_model_base.Model): + """A list of search configurations currently used by the ``bing_custom_search`` tool. + + :ivar search_configurations: The connections attached to this tool. There can be a maximum of 1 + connection + resource attached to the tool. Required. + :vartype search_configurations: list[~azure.ai.assistants.models.SearchConfiguration] + """ + + search_configurations: List["_models.SearchConfiguration"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The connections attached to this tool. There can be a maximum of 1 connection + resource attached to the tool. Required.""" + + @overload + def __init__( + self, + *, + search_configurations: List["_models.SearchConfiguration"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint_grounding"): + """The input definition information for a sharepoint tool as used to configure an assistant. + + :ivar type: The object type, which is always 'sharepoint_grounding'. Required. Default value is + "sharepoint_grounding". + :vartype type: str + :ivar sharepoint_grounding: The list of connections used by the SharePoint tool. Required. + :vartype sharepoint_grounding: ~azure.ai.assistants.models.ToolConnectionList + """ + + type: Literal["sharepoint_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'sharepoint_grounding'. Required. Default value is + \"sharepoint_grounding\".""" + sharepoint_grounding: "_models.ToolConnectionList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The list of connections used by the SharePoint tool. Required.""" + + @overload + def __init__( + self, + *, + sharepoint_grounding: "_models.ToolConnectionList", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="sharepoint_grounding", **kwargs) + + +class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): + """The details for required tool calls that must be submitted for an assistant thread run to + continue. + + :ivar type: The object type, which is always 'submit_tool_outputs'. Required. Default value is + "submit_tool_outputs". + :vartype type: str + :ivar submit_tool_outputs: The details describing tools that should be called to submit tool + outputs. Required. 
+ :vartype submit_tool_outputs: ~azure.ai.assistants.models.SubmitToolOutputsDetails + """ + + type: Literal["submit_tool_outputs"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'submit_tool_outputs'. Required. Default value is + \"submit_tool_outputs\".""" + submit_tool_outputs: "_models.SubmitToolOutputsDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details describing tools that should be called to submit tool outputs. Required.""" + + @overload + def __init__( + self, + *, + submit_tool_outputs: "_models.SubmitToolOutputsDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="submit_tool_outputs", **kwargs) + + +class SubmitToolOutputsDetails(_model_base.Model): + """The details describing tools that should be called to submit tool outputs. + + :ivar tool_calls: The list of tool calls that must be resolved for the assistant thread run to + continue. Required. + :vartype tool_calls: list[~azure.ai.assistants.models.RequiredToolCall] + """ + + tool_calls: List["_models.RequiredToolCall"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The list of tool calls that must be resolved for the assistant thread run to continue. + Required.""" + + @overload + def __init__( + self, + *, + tool_calls: List["_models.RequiredToolCall"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ThreadDeletionStatus(_model_base.Model): + """The status of a thread deletion operation. + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'thread.deleted'. Required. Default value is + "thread.deleted". + :vartype object: str + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether deletion was successful. Required.""" + object: Literal["thread.deleted"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'thread.deleted'. Required. Default value is + \"thread.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.deleted"] = "thread.deleted" + + +class ThreadMessage(_model_base.Model): + """A single, existing message within an assistant thread. + + :ivar id: The identifier, which can be referenced in API endpoints. Required. 
+ :vartype id: str + :ivar object: The object type, which is always 'thread.message'. Required. Default value is + "thread.message". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar thread_id: The ID of the thread that this message belongs to. Required. + :vartype thread_id: str + :ivar status: The status of the message. Required. Known values are: "in_progress", + "incomplete", and "completed". + :vartype status: str or ~azure.ai.assistants.models.MessageStatus + :ivar incomplete_details: On an incomplete message, details about why the message is + incomplete. Required. + :vartype incomplete_details: ~azure.ai.assistants.models.MessageIncompleteDetails + :ivar completed_at: The Unix timestamp (in seconds) for when the message was completed. + Required. + :vartype completed_at: ~datetime.datetime + :ivar incomplete_at: The Unix timestamp (in seconds) for when the message was marked as + incomplete. Required. + :vartype incomplete_at: ~datetime.datetime + :ivar role: The role associated with the assistant thread message. Required. Known values are: + "user" and "assistant". + :vartype role: str or ~azure.ai.assistants.models.MessageRole + :ivar content: The list of content items associated with the assistant thread message. + Required. + :vartype content: list[~azure.ai.assistants.models.MessageContent] + :ivar assistant_id: If applicable, the ID of the assistant that authored this message. + Required. + :vartype assistant_id: str + :ivar run_id: If applicable, the ID of the run associated with the authoring of this message. + Required. + :vartype run_id: str + :ivar attachments: A list of files attached to the message, and the tools they were added to. + Required. + :vartype attachments: list[~azure.ai.assistants.models.MessageAttachment] + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.message"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'thread.message'. Required. Default value is + \"thread.message\".""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the thread that this message belongs to. Required.""" + status: Union[str, "_models.MessageStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The status of the message. Required. Known values are: \"in_progress\", \"incomplete\", and + \"completed\".""" + incomplete_details: "_models.MessageIncompleteDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """On an incomplete message, details about why the message is incomplete. 
Required.""" + completed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the message was completed. Required.""" + incomplete_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the message was marked as incomplete. Required.""" + role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The role associated with the assistant thread message. Required. Known values are: \"user\" and + \"assistant\".""" + content: List["_models.MessageContent"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The list of content items associated with the assistant thread message. Required.""" + assistant_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """If applicable, the ID of the assistant that authored this message. Required.""" + run_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """If applicable, the ID of the run associated with the authoring of this message. Required.""" + attachments: List["_models.MessageAttachment"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A list of files attached to the message, and the tools they were added to. Required.""" + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + thread_id: str, + status: Union[str, "_models.MessageStatus"], + incomplete_details: "_models.MessageIncompleteDetails", + completed_at: datetime.datetime, + incomplete_at: datetime.datetime, + role: Union[str, "_models.MessageRole"], + content: List["_models.MessageContent"], + assistant_id: str, + run_id: str, + attachments: List["_models.MessageAttachment"], + metadata: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.message"] = "thread.message" + + +class ThreadMessageOptions(_model_base.Model): + """A single message within an agent thread, + as provided during that thread's creation for its initial state. + + :ivar role: The role of the entity that is creating the message. Allowed values include: + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the agent (use this value to insert + messages from the agent into the conversation). Required. Known values are: "user" and + "assistant". + :vartype role: str or ~azure.ai.assistants.models.MessageRole + :ivar content: The content of the initial message. 
This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). Required. Is either a str type or a [MessageInputContentBlock] type. + :vartype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock] + :ivar attachments: A list of files attached to the message, and the tools they should be added + to. + :vartype attachments: list[~azure.ai.assistants.models.MessageAttachment] + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The role of the entity that is creating the message. Allowed values include: + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the agent (use this value to insert + messages from the agent into the conversation). Required. Known values are: \"user\" and + \"assistant\".""" + content: "_types.MessageInputContent" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The content of the initial message. This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). Required. Is either a str type or a [MessageInputContentBlock] type.""" + attachments: Optional[List["_models.MessageAttachment"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A list of files attached to the message, and the tools they should be added to.""" + metadata: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: "_types.MessageInputContent", + attachments: Optional[List["_models.MessageAttachment"]] = None, + metadata: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ThreadRun(_model_base.Model): + """Data representing a single evaluation run of an assistant thread. + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.run'. Required. Default value is + "thread.run". + :vartype object: str + :ivar thread_id: The ID of the thread associated with this run. Required. + :vartype thread_id: str + :ivar assistant_id: The ID of the assistant associated with the thread this run was performed + against. Required. + :vartype assistant_id: str + :ivar status: The status of the assistant thread run. Required. 
Known values are: "queued", + "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", and + "expired". + :vartype status: str or ~azure.ai.assistants.models.RunStatus + :ivar required_action: The details of the action required for the assistant thread run to + continue. + :vartype required_action: ~azure.ai.assistants.models.RequiredAction + :ivar last_error: The last error, if any, encountered by this assistant thread run. Required. + :vartype last_error: ~azure.ai.assistants.models.RunError + :ivar model: The ID of the model to use. Required. + :vartype model: str + :ivar instructions: The overridden system instructions used for this assistant thread run. + Required. + :vartype instructions: str + :ivar tools: The overridden enabled tools used for this assistant thread run. Required. + :vartype tools: list[~azure.ai.assistants.models.ToolDefinition] + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar expires_at: The Unix timestamp, in seconds, representing when this item expires. + Required. + :vartype expires_at: ~datetime.datetime + :ivar started_at: The Unix timestamp, in seconds, representing when this item was started. + Required. + :vartype started_at: ~datetime.datetime + :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. + :vartype completed_at: ~datetime.datetime + :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. + Required. + :vartype cancelled_at: ~datetime.datetime + :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. + :vartype failed_at: ~datetime.datetime + :ivar incomplete_details: Details on why the run is incomplete. Will be ``null`` if the run is + not incomplete. Required. + :vartype incomplete_details: ~azure.ai.assistants.models.IncompleteRunDetails + :ivar usage: Usage statistics related to the run. This value will be ``null`` if the run is not + in a terminal state (i.e. ``in_progress``, ``queued``, etc.). Required. + :vartype usage: ~azure.ai.assistants.models.RunCompletionUsage + :ivar temperature: The sampling temperature used for this run. If not set, defaults to 1. + :vartype temperature: float + :ivar top_p: The nucleus sampling value used for this run. If not set, defaults to 1. + :vartype top_p: float + :ivar max_prompt_tokens: The maximum number of prompt tokens specified to have been used over + the course of the run. Required. + :vartype max_prompt_tokens: int + :ivar max_completion_tokens: The maximum number of completion tokens specified to have been + used over the course of the run. Required. + :vartype max_completion_tokens: int + :ivar truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Required. + :vartype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :ivar tool_choice: Controls whether or not and which tool is called by the model. Required. Is + one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice + :vartype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice + :ivar response_format: The response format of the tool calls used in this run. Required. 
Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType + :vartype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + :ivar tool_resources: Override the tools the assistant can use for this run. This is useful for + modifying the behavior on a per-run basis. + :vartype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions + :ivar parallel_tool_calls: Determines if tools can be executed in parallel within the run. + Required. + :vartype parallel_tool_calls: bool + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'thread.run'. Required. Default value is \"thread.run\".""" + thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the thread associated with this run. Required.""" + assistant_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the assistant associated with the thread this run was performed against. Required.""" + status: Union[str, "_models.RunStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The status of the assistant thread run. Required. Known values are: \"queued\", + \"in_progress\", \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", + and \"expired\".""" + required_action: Optional["_models.RequiredAction"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the action required for the assistant thread run to continue.""" + last_error: "_models.RunError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last error, if any, encountered by this assistant thread run. Required.""" + model: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the model to use. Required.""" + instructions: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The overridden system instructions used for this assistant thread run. Required.""" + tools: List["_models.ToolDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The overridden enabled tools used for this assistant thread run. Required.""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + expires_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this item expires. 
Required.""" + started_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this item was started. Required.""" + completed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this completed. Required.""" + cancelled_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" + failed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this failed. Required.""" + incomplete_details: "_models.IncompleteRunDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required.""" + usage: "_models.RunCompletionUsage" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Usage statistics related to the run. This value will be ``null`` if the run is not in a + terminal state (i.e. ``in_progress``, ``queued``, etc.). Required.""" + temperature: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The sampling temperature used for this run. If not set, defaults to 1.""" + top_p: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The nucleus sampling value used for this run. If not set, defaults to 1.""" + max_prompt_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The maximum number of prompt tokens specified to have been used over the course of the run. + Required.""" + max_completion_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The maximum number of completion tokens specified to have been used over the course of the run. + Required.""" + truncation_strategy: "_models.TruncationObject" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The strategy to use for dropping messages as the context windows moves forward. Required.""" + tool_choice: "_types.AssistantsApiToolChoiceOption" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Controls whether or not and which tool is called by the model. Required. Is one of the + following types: str, Union[str, \"_models.AssistantsApiToolChoiceOptionMode\"], + AssistantsNamedToolChoice""" + response_format: "_types.AssistantsApiResponseFormatOption" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The response format of the tool calls used in this run. Required. Is one of the following + types: str, Union[str, \"_models.AssistantsApiResponseFormatMode\"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType""" + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. 
Required.""" + tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Override the tools the assistant can use for this run. This is useful for modifying the + behavior on a per-run basis.""" + parallel_tool_calls: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Determines if tools can be executed in parallel within the run. Required.""" + + @overload + def __init__( # pylint: disable=too-many-locals + self, + *, + id: str, # pylint: disable=redefined-builtin + thread_id: str, + assistant_id: str, + status: Union[str, "_models.RunStatus"], + last_error: "_models.RunError", + model: str, + instructions: str, + tools: List["_models.ToolDefinition"], + created_at: datetime.datetime, + expires_at: datetime.datetime, + started_at: datetime.datetime, + completed_at: datetime.datetime, + cancelled_at: datetime.datetime, + failed_at: datetime.datetime, + incomplete_details: "_models.IncompleteRunDetails", + usage: "_models.RunCompletionUsage", + max_prompt_tokens: int, + max_completion_tokens: int, + truncation_strategy: "_models.TruncationObject", + tool_choice: "_types.AssistantsApiToolChoiceOption", + response_format: "_types.AssistantsApiResponseFormatOption", + metadata: Dict[str, str], + parallel_tool_calls: bool, + required_action: Optional["_models.RequiredAction"] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + tool_resources: Optional["_models.UpdateToolResourcesOptions"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.run"] = "thread.run" + + +class ToolConnection(_model_base.Model): + """A connection resource. + + :ivar connection_id: A connection in a ToolConnectionList attached to this tool. Required. + :vartype connection_id: str + """ + + connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A connection in a ToolConnectionList attached to this tool. Required.""" + + @overload + def __init__( + self, + *, + connection_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolConnectionList(_model_base.Model): + """A set of connection resources currently used by either the ``bing_grounding``, + ``fabric_dataagent``, or ``sharepoint_grounding`` tools. + + :ivar connection_list: The connections attached to this tool. There can be a maximum of 1 + connection + resource attached to the tool. + :vartype connection_list: list[~azure.ai.assistants.models.ToolConnection] + """ + + connection_list: Optional[List["_models.ToolConnection"]] = rest_field( + name="connections", visibility=["read", "create", "update", "delete", "query"] + ) + """The connections attached to this tool. There can be a maximum of 1 connection + resource attached to the tool.""" + + @overload + def __init__( + self, + *, + connection_list: Optional[List["_models.ToolConnection"]] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolOutput(_model_base.Model): + """The data provided during a tool outputs submission to resolve pending tool calls and allow the + model to continue. + + :ivar tool_call_id: The ID of the tool call being resolved, as provided in the tool calls of a + required action from a run. + :vartype tool_call_id: str + :ivar output: The output from the tool to be submitted. + :vartype output: str + """ + + tool_call_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the tool call being resolved, as provided in the tool calls of a required action from + a run.""" + output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The output from the tool to be submitted.""" + + @overload + def __init__( + self, + *, + tool_call_id: Optional[str] = None, + output: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolResources(_model_base.Model): + """A set of resources that are used by the assistant's tools. The resources are specific to the + type of + tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` + tool requires a list of vector store IDs. + + :ivar code_interpreter: Resources to be used by the ``code_interpreter`` tool consisting of + file IDs. + :vartype code_interpreter: ~azure.ai.assistants.models.CodeInterpreterToolResource + :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store + IDs. + :vartype file_search: ~azure.ai.assistants.models.FileSearchToolResource + :ivar azure_ai_search: Resources to be used by the ``azure_ai_search`` tool consisting of index + IDs and names. + :vartype azure_ai_search: ~azure.ai.assistants.models.AzureAISearchResource + """ + + code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Resources to be used by the ``code_interpreter`` tool consisting of file IDs.""" + file_search: Optional["_models.FileSearchToolResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Resources to be used by the ``file_search`` tool consisting of vector store IDs.""" + azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names.""" + + @overload + def __init__( + self, + *, + code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None, + file_search: Optional["_models.FileSearchToolResource"] = None, + azure_ai_search: Optional["_models.AzureAISearchResource"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class TruncationObject(_model_base.Model):
+    """Controls for how a thread will be truncated prior to the run. Use this to control the initial
+    context window of the run.
+
+    :ivar type: The truncation strategy to use for the thread. The default is ``auto``. If set to
+     ``last_messages``, the thread will
+     be truncated to the ``lastMessages`` count most recent messages in the thread. When set to
+     ``auto``, messages in the middle of the thread
+     will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known
+     values are: "auto" and "last_messages".
+    :vartype type: str or ~azure.ai.assistants.models.TruncationStrategy
+    :ivar last_messages: The number of most recent messages from the thread when constructing the
+     context for the run.
+    :vartype last_messages: int
+    """
+
+    type: Union[str, "_models.TruncationStrategy"] = rest_field(
+        visibility=["read", "create", "update", "delete", "query"]
+    )
+    """The truncation strategy to use for the thread. The default is ``auto``. If set to
+    ``last_messages``, the thread will
+    be truncated to the ``lastMessages`` count most recent messages in the thread. When set to
+    ``auto``, messages in the middle of the thread
+    will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known
+    values are: \"auto\" and \"last_messages\"."""
+    last_messages: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The number of most recent messages from the thread when constructing the context for the run."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: Union[str, "_models.TruncationStrategy"],
+        last_messages: Optional[int] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class UpdateCodeInterpreterToolResourceOptions(_model_base.Model):
+    """Request object to update ``code_interpreter`` tool resources.
+
+    :ivar file_ids: A list of file IDs to override the current list of the assistant.
+    :vartype file_ids: list[str]
+    """
+
+    file_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """A list of file IDs to override the current list of the assistant."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_ids: Optional[List[str]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class UpdateFileSearchToolResourceOptions(_model_base.Model):
+    """Request object to update ``file_search`` tool resources.
+
+    :ivar vector_store_ids: A list of vector store IDs to override the current list of the
+     assistant.
+    :vartype vector_store_ids: list[str]
+    """
+
+    vector_store_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """A list of vector store IDs to override the current list of the assistant."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        vector_store_ids: Optional[List[str]] = None,
+    ) -> None: ...
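+    # A minimal usage sketch (the IDs below are placeholders): these Update* request
+    # objects are typically nested inside UpdateToolResourcesOptions (defined below)
+    # to override an assistant's tool resources for a single run:
+    #
+    #   overrides = UpdateToolResourcesOptions(
+    #       code_interpreter=UpdateCodeInterpreterToolResourceOptions(file_ids=["<file-id>"]),
+    #       file_search=UpdateFileSearchToolResourceOptions(vector_store_ids=["<vector-store-id>"]),
+    #   )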
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class UpdateToolResourcesOptions(_model_base.Model): + """Request object. A set of resources that are used by the assistant's tools. The resources are + specific to the type of tool. + For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list of + vector store IDs. + + :ivar code_interpreter: Overrides the list of file IDs made available to the + ``code_interpreter`` tool. There can be a maximum of 20 files + associated with the tool. + :vartype code_interpreter: ~azure.ai.assistants.models.UpdateCodeInterpreterToolResourceOptions + :ivar file_search: Overrides the vector store attached to this assistant. There can be a + maximum of 1 vector store attached to the assistant. + :vartype file_search: ~azure.ai.assistants.models.UpdateFileSearchToolResourceOptions + :ivar azure_ai_search: Overrides the resources to be used by the ``azure_ai_search`` tool + consisting of index IDs and names. + :vartype azure_ai_search: ~azure.ai.assistants.models.AzureAISearchResource + """ + + code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Overrides the list of file IDs made available to the ``code_interpreter`` tool. There can be a + maximum of 20 files + associated with the tool.""" + file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Overrides the vector store attached to this assistant. There can be a maximum of 1 vector store + attached to the assistant.""" + azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and + names.""" + + @overload + def __init__( + self, + *, + code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None, + file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None, + azure_ai_search: Optional["_models.AzureAISearchResource"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class UploadFileRequest(_model_base.Model): + """UploadFileRequest. + + :ivar file: The file data, in bytes. Required. + :vartype file: ~azure.ai.assistants._vendor.FileType + :ivar purpose: The intended purpose of the uploaded file. Use ``assistants`` for Assistants and + Message files, ``vision`` for Assistants image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Required. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". + :vartype purpose: str or ~azure.ai.assistants.models.FilePurpose + :ivar filename: The name of the file. 
+    :vartype filename: str
+    """
+
+    file: FileType = rest_field(
+        visibility=["read", "create", "update", "delete", "query"], is_multipart_file_input=True
+    )
+    """The file data, in bytes. Required."""
+    purpose: Union[str, "_models.FilePurpose"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The intended purpose of the uploaded file. Use ``assistants`` for Assistants and Message files,
+    ``vision`` for Assistants image file inputs, ``batch`` for Batch API, and ``fine-tune`` for
+    Fine-tuning. Required. Known values are: \"fine-tune\", \"fine-tune-results\", \"assistants\",
+    \"assistants_output\", \"batch\", \"batch_output\", and \"vision\"."""
+    filename: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The name of the file."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file: FileType,
+        purpose: Union[str, "_models.FilePurpose"],
+        filename: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class VectorStore(_model_base.Model):
+    """A vector store is a collection of processed files that can be used by the ``file_search`` tool.
+
+    :ivar id: The identifier, which can be referenced in API endpoints. Required.
+    :vartype id: str
+    :ivar object: The object type, which is always ``vector_store``. Required. Default value is
+     "vector_store".
+    :vartype object: str
+    :ivar created_at: The Unix timestamp (in seconds) for when the vector store was created.
+     Required.
+    :vartype created_at: ~datetime.datetime
+    :ivar name: The name of the vector store. Required.
+    :vartype name: str
+    :ivar usage_bytes: The total number of bytes used by the files in the vector store. Required.
+    :vartype usage_bytes: int
+    :ivar file_counts: Files count grouped by status processed or being processed by this vector
+     store. Required.
+    :vartype file_counts: ~azure.ai.assistants.models.VectorStoreFileCount
+    :ivar status: The status of the vector store, which can be either ``expired``, ``in_progress``,
+     or ``completed``. A status of ``completed`` indicates that the vector store is ready for use.
+     Required. Known values are: "expired", "in_progress", and "completed".
+    :vartype status: str or ~azure.ai.assistants.models.VectorStoreStatus
+    :ivar expires_after: Details on when this vector store expires.
+    :vartype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
+    :ivar expires_at: The Unix timestamp (in seconds) for when the vector store will expire.
+    :vartype expires_at: ~datetime.datetime
+    :ivar last_active_at: The Unix timestamp (in seconds) for when the vector store was last
+     active. Required.
+    :vartype last_active_at: ~datetime.datetime
+    :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
+     storing additional information about that object in a structured format. Keys may be up to 64
+     characters in length and values may be up to 512 characters in length. Required.
+    :vartype metadata: dict[str, str]
+    """
+
+    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The identifier, which can be referenced in API endpoints.
Required.""" + object: Literal["vector_store"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always ``vector_store``. Required. Default value is \"vector_store\".""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the vector store was created. Required.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the vector store. Required.""" + usage_bytes: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The total number of bytes used by the files in the vector store. Required.""" + file_counts: "_models.VectorStoreFileCount" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Files count grouped by status processed or being processed by this vector store. Required.""" + status: Union[str, "_models.VectorStoreStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The status of the vector store, which can be either ``expired``, ``in_progress``, or + ``completed``. A status of ``completed`` indicates that the vector store is ready for use. + Required. Known values are: \"expired\", \"in_progress\", and \"completed\".""" + expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Details on when this vector store expires.""" + expires_at: Optional[datetime.datetime] = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the vector store will expire.""" + last_active_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the vector store was last active. Required.""" + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + name: str, + usage_bytes: int, + file_counts: "_models.VectorStoreFileCount", + status: Union[str, "_models.VectorStoreStatus"], + last_active_at: datetime.datetime, + metadata: Dict[str, str], + expires_after: Optional["_models.VectorStoreExpirationPolicy"] = None, + expires_at: Optional[datetime.datetime] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store"] = "vector_store" + + +class VectorStoreChunkingStrategyRequest(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyRequest, VectorStoreStaticChunkingStrategyRequest + + :ivar type: The object type. Required. 
Known values are: "auto" and "static". + :vartype type: str or ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequestType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Known values are: \"auto\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreAutoChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="auto"): + """The default strategy. This strategy currently uses a max_chunk_size_tokens of 800 and + chunk_overlap_tokens of 400. + + :ivar type: The object type, which is always 'auto'. Required. + :vartype type: str or ~azure.ai.assistants.models.AUTO + """ + + type: Literal[VectorStoreChunkingStrategyRequestType.AUTO] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'auto'. Required.""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.AUTO, **kwargs) + + +class VectorStoreChunkingStrategyResponse(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyResponse, VectorStoreStaticChunkingStrategyResponse + + :ivar type: The object type. Required. Known values are: "other" and "static". + :vartype type: str or ~azure.ai.assistants.models.VectorStoreChunkingStrategyResponseType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Known values are: \"other\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreAutoChunkingStrategyResponse(VectorStoreChunkingStrategyResponse, discriminator="other"): + """This is returned when the chunking strategy is unknown. Typically, this is because the file was + indexed before the chunking_strategy concept was introduced in the API. + + :ivar type: The object type, which is always 'other'. Required. + :vartype type: str or ~azure.ai.assistants.models.OTHER + """ + + type: Literal[VectorStoreChunkingStrategyResponseType.OTHER] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'other'. Required.""" + + @overload + def __init__( + self, + ) -> None: ... 
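+    # A minimal request-side sketch: per the docstrings above, the 'auto' strategy
+    # currently uses max_chunk_size_tokens=800 and chunk_overlap_tokens=400, so an
+    # explicit service-managed chunking strategy can simply be:
+    #
+    #   strategy = VectorStoreAutoChunkingStrategyRequest()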
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.OTHER, **kwargs) + + +class VectorStoreConfiguration(_model_base.Model): + """Vector storage configuration is the list of data sources, used when multiple + files can be used for the enterprise file search. + + :ivar data_sources: Data sources. Required. + :vartype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + """ + + data_sources: List["_models.VectorStoreDataSource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Data sources. Required.""" + + @overload + def __init__( + self, + *, + data_sources: List["_models.VectorStoreDataSource"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreConfigurations(_model_base.Model): + """The structure, containing the list of vector storage configurations i.e. the list of azure + asset IDs. + + :ivar store_name: Name. Required. + :vartype store_name: str + :ivar store_configuration: Configurations. Required. + :vartype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration + """ + + store_name: str = rest_field(name="name", visibility=["read", "create", "update", "delete", "query"]) + """Name. Required.""" + store_configuration: "_models.VectorStoreConfiguration" = rest_field( + name="configuration", visibility=["read", "create", "update", "delete", "query"] + ) + """Configurations. Required.""" + + @overload + def __init__( + self, + *, + store_name: str, + store_configuration: "_models.VectorStoreConfiguration", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreDataSource(_model_base.Model): + """The structure, containing Azure asset URI path and the asset type of the file used as a data + source + for the enterprise file search. + + :ivar asset_identifier: Asset URI. Required. + :vartype asset_identifier: str + :ivar asset_type: The asset type. Required. Known values are: "uri_asset" and "id_asset". + :vartype asset_type: str or ~azure.ai.assistants.models.VectorStoreDataSourceAssetType + """ + + asset_identifier: str = rest_field(name="uri", visibility=["read", "create", "update", "delete", "query"]) + """Asset URI. Required.""" + asset_type: Union[str, "_models.VectorStoreDataSourceAssetType"] = rest_field( + name="type", visibility=["read", "create", "update", "delete", "query"] + ) + """The asset type. Required. Known values are: \"uri_asset\" and \"id_asset\".""" + + @overload + def __init__( + self, + *, + asset_identifier: str, + asset_type: Union[str, "_models.VectorStoreDataSourceAssetType"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class VectorStoreDeletionStatus(_model_base.Model):
+    """Response object for deleting a vector store.
+
+    :ivar id: The ID of the resource specified for deletion. Required.
+    :vartype id: str
+    :ivar deleted: A value indicating whether deletion was successful. Required.
+    :vartype deleted: bool
+    :ivar object: The object type, which is always 'vector_store.deleted'. Required. Default value
+     is "vector_store.deleted".
+    :vartype object: str
+    """
+
+    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The ID of the resource specified for deletion. Required."""
+    deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """A value indicating whether deletion was successful. Required."""
+    object: Literal["vector_store.deleted"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The object type, which is always 'vector_store.deleted'. Required. Default value is
+    \"vector_store.deleted\"."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        deleted: bool,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.object: Literal["vector_store.deleted"] = "vector_store.deleted"
+
+
+class VectorStoreExpirationPolicy(_model_base.Model):
+    """The expiration policy for a vector store.
+
+    :ivar anchor: Anchor timestamp after which the expiration policy applies. Supported anchors:
+     ``last_active_at``. Required. "last_active_at"
+    :vartype anchor: str or ~azure.ai.assistants.models.VectorStoreExpirationPolicyAnchor
+    :ivar days: The number of days after the anchor time that the vector store will expire.
+     Required.
+    :vartype days: int
+    """
+
+    anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"] = rest_field(
+        visibility=["read", "create", "update", "delete", "query"]
+    )
+    """Anchor timestamp after which the expiration policy applies. Supported anchors:
+    ``last_active_at``. Required. \"last_active_at\""""
+    days: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The number of days after the anchor time that the vector store will expire. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"],
+        days: int,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class VectorStoreFile(_model_base.Model):
+    """Description of a file attached to a vector store.
+
+    :ivar id: The identifier, which can be referenced in API endpoints. Required.
+    :vartype id: str
+    :ivar object: The object type, which is always ``vector_store.file``. Required. Default value
+     is "vector_store.file".
+    :vartype object: str
+    :ivar usage_bytes: The total vector store usage in bytes. Note that this may be different from
+     the original file
+     size. Required.
+    :vartype usage_bytes: int
+    :ivar created_at: The Unix timestamp (in seconds) for when the vector store file was created.
+     Required.
+ :vartype created_at: ~datetime.datetime + :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. + :vartype vector_store_id: str + :ivar status: The status of the vector store file, which can be either ``in_progress``, + ``completed``, ``cancelled``, or ``failed``. The status ``completed`` indicates that the vector + store file is ready for use. Required. Known values are: "in_progress", "completed", "failed", + and "cancelled". + :vartype status: str or ~azure.ai.assistants.models.VectorStoreFileStatus + :ivar last_error: The last error associated with this vector store file. Will be ``null`` if + there are no errors. Required. + :vartype last_error: ~azure.ai.assistants.models.VectorStoreFileError + :ivar chunking_strategy: The strategy used to chunk the file. Required. + :vartype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyResponse + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store.file"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always ``vector_store.file``. Required. Default value is + \"vector_store.file\".""" + usage_bytes: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The total vector store usage in bytes. Note that this may be different from the original file + size. Required.""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the vector store file was created. Required.""" + vector_store_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the vector store that the file is attached to. Required.""" + status: Union[str, "_models.VectorStoreFileStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The status of the vector store file, which can be either ``in_progress``, ``completed``, + ``cancelled``, or ``failed``. The status ``completed`` indicates that the vector store file is + ready for use. Required. Known values are: \"in_progress\", \"completed\", \"failed\", and + \"cancelled\".""" + last_error: "_models.VectorStoreFileError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last error associated with this vector store file. Will be ``null`` if there are no errors. + Required.""" + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The strategy used to chunk the file. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + usage_bytes: int, + created_at: datetime.datetime, + vector_store_id: str, + status: Union[str, "_models.VectorStoreFileStatus"], + last_error: "_models.VectorStoreFileError", + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.object: Literal["vector_store.file"] = "vector_store.file"
+
+
+class VectorStoreFileBatch(_model_base.Model):
+    """A batch of files attached to a vector store.
+
+    :ivar id: The identifier, which can be referenced in API endpoints. Required.
+    :vartype id: str
+    :ivar object: The object type, which is always ``vector_store.files_batch``. Required. Default
+     value is "vector_store.files_batch".
+    :vartype object: str
+    :ivar created_at: The Unix timestamp (in seconds) for when the vector store files batch was
+     created. Required.
+    :vartype created_at: ~datetime.datetime
+    :ivar vector_store_id: The ID of the vector store that the file is attached to. Required.
+    :vartype vector_store_id: str
+    :ivar status: The status of the vector store files batch, which can be either ``in_progress``,
+     ``completed``, ``cancelled`` or ``failed``. Required. Known values are: "in_progress",
+     "completed", "cancelled", and "failed".
+    :vartype status: str or ~azure.ai.assistants.models.VectorStoreFileBatchStatus
+    :ivar file_counts: Files count grouped by status processed or being processed by this vector
+     store. Required.
+    :vartype file_counts: ~azure.ai.assistants.models.VectorStoreFileCount
+    """
+
+    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The identifier, which can be referenced in API endpoints. Required."""
+    object: Literal["vector_store.files_batch"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The object type, which is always ``vector_store.files_batch``. Required. Default value is
+    \"vector_store.files_batch\"."""
+    created_at: datetime.datetime = rest_field(
+        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
+    )
+    """The Unix timestamp (in seconds) for when the vector store files batch was created. Required."""
+    vector_store_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The ID of the vector store that the file is attached to. Required."""
+    status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field(
+        visibility=["read", "create", "update", "delete", "query"]
+    )
+    """The status of the vector store files batch, which can be either ``in_progress``, ``completed``,
+    ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", \"completed\",
+    \"cancelled\", and \"failed\"."""
+    file_counts: "_models.VectorStoreFileCount" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """Files count grouped by status processed or being processed by this vector store. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        created_at: datetime.datetime,
+        vector_store_id: str,
+        status: Union[str, "_models.VectorStoreFileBatchStatus"],
+        file_counts: "_models.VectorStoreFileCount",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.object: Literal["vector_store.files_batch"] = "vector_store.files_batch"
+
+
+class VectorStoreFileCount(_model_base.Model):
+    """Counts of files processed or being processed by this vector store grouped by status.
+
+    :ivar in_progress: The number of files that are currently being processed. Required.
+    :vartype in_progress: int
+    :ivar completed: The number of files that have been successfully processed. Required.
+    :vartype completed: int
+    :ivar failed: The number of files that have failed to process. Required.
+    :vartype failed: int
+    :ivar cancelled: The number of files that were cancelled. Required.
+    :vartype cancelled: int
+    :ivar total: The total number of files. Required.
+    :vartype total: int
+    """
+
+    in_progress: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The number of files that are currently being processed. Required."""
+    completed: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The number of files that have been successfully processed. Required."""
+    failed: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The number of files that have failed to process. Required."""
+    cancelled: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The number of files that were cancelled. Required."""
+    total: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The total number of files. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        in_progress: int,
+        completed: int,
+        failed: int,
+        cancelled: int,
+        total: int,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class VectorStoreFileDeletionStatus(_model_base.Model):
+    """Response object for deleting a vector store file relationship.
+
+    :ivar id: The ID of the resource specified for deletion. Required.
+    :vartype id: str
+    :ivar deleted: A value indicating whether deletion was successful. Required.
+    :vartype deleted: bool
+    :ivar object: The object type, which is always 'vector_store.file.deleted'. Required. Default
+     value is "vector_store.file.deleted".
+    :vartype object: str
+    """
+
+    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The ID of the resource specified for deletion. Required."""
+    deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """A value indicating whether deletion was successful. Required."""
+    object: Literal["vector_store.file.deleted"] = rest_field(
+        visibility=["read", "create", "update", "delete", "query"]
+    )
+    """The object type, which is always 'vector_store.file.deleted'. Required. Default value is
+    \"vector_store.file.deleted\"."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        deleted: bool,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.object: Literal["vector_store.file.deleted"] = "vector_store.file.deleted"
+
+
+class VectorStoreFileError(_model_base.Model):
+    """Details on the error that may have occurred while processing a file for this vector store.
+
+    :ivar code: One of ``server_error``, ``invalid_file``, or ``unsupported_file``. Required.
+     Known values are: "server_error", "invalid_file", and "unsupported_file".
+    :vartype code: str or ~azure.ai.assistants.models.VectorStoreFileErrorCode
+    :ivar message: A human-readable description of the error. Required.
+    :vartype message: str
+    """
+
+    code: Union[str, "_models.VectorStoreFileErrorCode"] = rest_field(
+        visibility=["read", "create", "update", "delete", "query"]
+    )
+    """One of ``server_error``, ``invalid_file``, or ``unsupported_file``. Required. Known values
+    are: \"server_error\", \"invalid_file\", and \"unsupported_file\"."""
+    message: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """A human-readable description of the error. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        code: Union[str, "_models.VectorStoreFileErrorCode"],
+        message: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class VectorStoreStaticChunkingStrategyOptions(_model_base.Model):
+    """Options to configure a vector store static chunking strategy.
+
+    :ivar max_chunk_size_tokens: The maximum number of tokens in each chunk. The default value is
+     800. The minimum value is 100 and the maximum value is 4096. Required.
+    :vartype max_chunk_size_tokens: int
+    :ivar chunk_overlap_tokens: The number of tokens that overlap between chunks. The default value
+     is 400.
+     Note that the overlap must not exceed half of max_chunk_size_tokens. Required.
+    :vartype chunk_overlap_tokens: int
+    """
+
+    max_chunk_size_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The maximum number of tokens in each chunk. The default value is 800. The minimum value is 100
+    and the maximum value is 4096. Required."""
+    chunk_overlap_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+    """The number of tokens that overlap between chunks. The default value is 400.
+    Note that the overlap must not exceed half of max_chunk_size_tokens. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        max_chunk_size_tokens: int,
+        chunk_overlap_tokens: int,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class VectorStoreStaticChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="static"):
+    """A statically configured chunking strategy.
+
+    :ivar type: The object type, which is always 'static'. Required.
+    :vartype type: str or ~azure.ai.assistants.models.STATIC
+    :ivar static: The options for the static chunking strategy. Required.
+    :vartype static: ~azure.ai.assistants.models.VectorStoreStaticChunkingStrategyOptions
+    """
+
+    type: Literal[VectorStoreChunkingStrategyRequestType.STATIC] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
+    """The object type, which is always 'static'. Required."""
+    static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field(
+        visibility=["read", "create", "update", "delete", "query"]
+    )
+    """The options for the static chunking strategy. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        static: "_models.VectorStoreStaticChunkingStrategyOptions",
+    ) -> None: ...
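+    # A minimal usage sketch, shown with the documented default values (note the
+    # documented constraint that chunk_overlap_tokens must not exceed half of
+    # max_chunk_size_tokens):
+    #
+    #   strategy = VectorStoreStaticChunkingStrategyRequest(
+    #       static=VectorStoreStaticChunkingStrategyOptions(
+    #           max_chunk_size_tokens=800,
+    #           chunk_overlap_tokens=400,
+    #       )
+    #   )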
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.STATIC, **kwargs) + + +class VectorStoreStaticChunkingStrategyResponse( + VectorStoreChunkingStrategyResponse, discriminator="static" +): # pylint: disable=name-too-long + """A statically configured chunking strategy. + + :ivar type: The object type, which is always 'static'. Required. + :vartype type: str or ~azure.ai.assistants.models.STATIC + :ivar static: The options for the static chunking strategy. Required. + :vartype static: ~azure.ai.assistants.models.VectorStoreStaticChunkingStrategyOptions + """ + + type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'static'. Required.""" + static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The options for the static chunking strategy. Required.""" + + @overload + def __init__( + self, + *, + static: "_models.VectorStoreStaticChunkingStrategyOptions", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.STATIC, **kwargs) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py new file mode 100644 index 000000000000..84a1440612ed --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py @@ -0,0 +1,1691 @@ +# pylint: disable=too-many-lines +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. 
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+import asyncio  # pylint: disable = do-not-import-asyncio
+import inspect
+import itertools
+import json
+import logging
+import re
+from abc import ABC, abstractmethod
+from typing import (
+    Any,
+    AsyncIterator,
+    Awaitable,
+    Callable,
+    Dict,
+    Generic,
+    Iterator,
+    List,
+    Mapping,
+    Optional,
+    Set,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+    get_args,
+    get_origin,
+    overload,
+)
+
+from ._enums import AssistantStreamEvent, MessageRole, AzureAISearchQueryType
+from ._models import (
+    AISearchIndexResource,
+    AzureAISearchResource,
+    AzureAISearchToolDefinition,
+    AzureFunctionDefinition,
+    AzureFunctionStorageQueue,
+    AzureFunctionToolDefinition,
+    AzureFunctionBinding,
+    BingGroundingToolDefinition,
+    CodeInterpreterToolDefinition,
+    CodeInterpreterToolResource,
+    FileSearchToolDefinition,
+    FileSearchToolResource,
+    FunctionDefinition,
+    FunctionToolDefinition,
+    MessageImageFileContent,
+    MessageTextContent,
+    MessageTextFileCitationAnnotation,
+    MessageTextUrlCitationAnnotation,
+    MessageTextFilePathAnnotation,
+    MicrosoftFabricToolDefinition,
+    OpenApiAuthDetails,
+    OpenApiToolDefinition,
+    OpenApiFunctionDefinition,
+    RequiredFunctionToolCall,
+    RunStep,
+    RunStepDeltaChunk,
+    SharepointToolDefinition,
+    SubmitToolOutputsAction,
+    ThreadRun,
+    ToolConnection,
+    ToolConnectionList,
+    ToolDefinition,
+    ToolResources,
+    MessageDeltaTextContent,
+    VectorStoreDataSource,
+)
+
+from ._models import MessageDeltaChunk as MessageDeltaChunkGenerated
+from ._models import ThreadMessage as ThreadMessageGenerated
+from ._models import OpenAIPageableListOfThreadMessage as OpenAIPageableListOfThreadMessageGenerated
+from ._models import MessageAttachment as MessageAttachmentGenerated
+
+from .. import _types
+
+logger = logging.getLogger(__name__)
+
+StreamEventData = Union["MessageDeltaChunk", "ThreadMessage", ThreadRun, RunStep, str]
+
+
+def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    Remove parameters that are not present in the class's public fields; return a shallow copy of the dictionary.
+
+    **Note:** Classes inheriting from the model check that supplied parameters are present
+    in the list of attributes and raise an error if they are not. This check is not
+    relevant for classes that do not inherit from azure.ai.assistants._model_base.Model.
+
+    :param Type model_class: The class of model to be used.
+    :param parameters: The parsed dictionary with parameters.
+    :type parameters: Dict[str, Any]
+    :return: The dictionary with all invalid parameters removed.
+    :rtype: Dict[str, Any]
+    """
+    new_params = {}
+    valid_parameters = set(
+        filter(
+            lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys()
+        )
+    )
+    for k in filter(lambda x: x in valid_parameters, parameters.keys()):
+        new_params[k] = parameters[k]
+    return new_params
+
+
+def _safe_instantiate(
+    model_class: Type, parameters: Union[str, Dict[str, Any]], *, generated_class: Optional[Type] = None
+) -> StreamEventData:
+    """
+    Instantiate class with the set of parameters from the server.
+
+    :param Type model_class: The class of model to be used.
+    :param parameters: The parsed dictionary with parameters.
+    :type parameters: Union[str, Dict[str, Any]]
+    :keyword Optional[Type] generated_class: The optional generated type.
+ :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise. + :rtype: Any + """ + if not generated_class: + generated_class = model_class + if not isinstance(parameters, dict): + return parameters + return cast(StreamEventData, model_class(**_filter_parameters(generated_class, parameters))) + + +def _parse_event(event_data_str: str) -> Tuple[str, StreamEventData]: + event_lines = event_data_str.strip().split("\n") + event_type: Optional[str] = None + event_data = "" + event_obj: StreamEventData + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + try: + parsed_data: Union[str, Dict[str, StreamEventData]] = cast(Dict[str, StreamEventData], json.loads(event_data)) + except json.JSONDecodeError: + parsed_data = event_data + + # Workaround for service bug: Rename 'expires_at' to 'expired_at' + if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: + parsed_data["expired_at"] = parsed_data.pop("expires_at") + + # Map to the appropriate class instance + if event_type in { + AssistantStreamEvent.THREAD_RUN_CREATED.value, + AssistantStreamEvent.THREAD_RUN_QUEUED.value, + AssistantStreamEvent.THREAD_RUN_INCOMPLETE.value, + AssistantStreamEvent.THREAD_RUN_IN_PROGRESS.value, + AssistantStreamEvent.THREAD_RUN_REQUIRES_ACTION.value, + AssistantStreamEvent.THREAD_RUN_COMPLETED.value, + AssistantStreamEvent.THREAD_RUN_FAILED.value, + AssistantStreamEvent.THREAD_RUN_CANCELLING.value, + AssistantStreamEvent.THREAD_RUN_CANCELLED.value, + AssistantStreamEvent.THREAD_RUN_EXPIRED.value, + }: + event_obj = _safe_instantiate(ThreadRun, parsed_data) + elif event_type in { + AssistantStreamEvent.THREAD_RUN_STEP_CREATED.value, + AssistantStreamEvent.THREAD_RUN_STEP_IN_PROGRESS.value, + AssistantStreamEvent.THREAD_RUN_STEP_COMPLETED.value, + AssistantStreamEvent.THREAD_RUN_STEP_FAILED.value, + AssistantStreamEvent.THREAD_RUN_STEP_CANCELLED.value, + AssistantStreamEvent.THREAD_RUN_STEP_EXPIRED.value, + }: + event_obj = _safe_instantiate(RunStep, parsed_data) + elif event_type in { + AssistantStreamEvent.THREAD_MESSAGE_CREATED.value, + AssistantStreamEvent.THREAD_MESSAGE_IN_PROGRESS.value, + AssistantStreamEvent.THREAD_MESSAGE_COMPLETED.value, + AssistantStreamEvent.THREAD_MESSAGE_INCOMPLETE.value, + }: + event_obj = _safe_instantiate(ThreadMessage, parsed_data, generated_class=ThreadMessageGenerated) + elif event_type == AssistantStreamEvent.THREAD_MESSAGE_DELTA.value: + event_obj = _safe_instantiate(MessageDeltaChunk, parsed_data, generated_class=MessageDeltaChunkGenerated) + + elif event_type == AssistantStreamEvent.THREAD_RUN_STEP_DELTA.value: + event_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) + else: + event_obj = str(parsed_data) + + return event_type, event_obj + + +# Define type_map to translate Python type annotations to JSON Schema types +type_map = { + "str": "string", + "int": "integer", + "float": "number", + "bool": "boolean", + "NoneType": "null", + "list": "array", + "dict": "object", +} + + +def _map_type(annotation) -> Dict[str, Any]: # pylint: disable=too-many-return-statements + if annotation == inspect.Parameter.empty: + return {"type": "string"} # Default type if annotation is missing + + origin = get_origin(annotation) + + if origin in {list, List}: + args = 
get_args(annotation) + item_type = args[0] if args else str + return {"type": "array", "items": _map_type(item_type)} + if origin in {dict, Dict}: + return {"type": "object"} + if origin is Union: + args = get_args(annotation) + # If Union contains None, it is an optional parameter + if type(None) in args: + # If Union contains only one non-None type, it is a nullable parameter + non_none_args = [arg for arg in args if arg is not type(None)] + if len(non_none_args) == 1: + schema = _map_type(non_none_args[0]) + if "type" in schema: + if isinstance(schema["type"], str): + schema["type"] = [schema["type"], "null"] + elif "null" not in schema["type"]: + schema["type"].append("null") + else: + schema["type"] = ["null"] + return schema + # If Union contains multiple types, it is a oneOf parameter + return {"oneOf": [_map_type(arg) for arg in args]} + if isinstance(annotation, type): + schema_type = type_map.get(annotation.__name__, "string") + return {"type": schema_type} + + return {"type": "string"} # Fallback to "string" if type is unrecognized + + +def is_optional(annotation) -> bool: + origin = get_origin(annotation) + if origin is Union: + args = get_args(annotation) + return type(None) in args + return False + + +class MessageDeltaChunk(MessageDeltaChunkGenerated): + @property + def text(self) -> str: + """Get the text content of the delta chunk. + + :rtype: str + """ + if not self.delta or not self.delta.content: + return "" + return "".join( + content_part.text.value or "" + for content_part in self.delta.content + if isinstance(content_part, MessageDeltaTextContent) and content_part.text + ) + + +class ThreadMessage(ThreadMessageGenerated): + @property + def text_messages(self) -> List[MessageTextContent]: + """Returns all text message contents in the messages. + + :rtype: List[MessageTextContent] + """ + if not self.content: + return [] + return [content for content in self.content if isinstance(content, MessageTextContent)] + + @property + def image_contents(self) -> List[MessageImageFileContent]: + """Returns all image file contents from image message contents in the messages. + + :rtype: List[MessageImageFileContent] + """ + if not self.content: + return [] + return [content for content in self.content if isinstance(content, MessageImageFileContent)] + + @property + def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: + """Returns all file citation annotations from text message annotations in the messages. + + :rtype: List[MessageTextFileCitationAnnotation] + """ + if not self.content: + return [] + + return [ + annotation + for content in self.content + if isinstance(content, MessageTextContent) + for annotation in content.text.annotations + if isinstance(annotation, MessageTextFileCitationAnnotation) + ] + + @property + def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: + """Returns all file path annotations from text message annotations in the messages. + + :rtype: List[MessageTextFilePathAnnotation] + """ + if not self.content: + return [] + return [ + annotation + for content in self.content + if isinstance(content, MessageTextContent) + for annotation in content.text.annotations + if isinstance(annotation, MessageTextFilePathAnnotation) + ] + + @property + def url_citation_annotations(self) -> List[MessageTextUrlCitationAnnotation]: + """Returns all URL citation annotations from text message annotations in the messages. 
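+
+        A consumption sketch (illustrative; assumes ``message`` is a ``ThreadMessage``
+        returned by the service and that each annotation exposes its details under
+        ``url_citation``):
+
+        .. code-block:: python
+
+            for annotation in message.url_citation_annotations:
+                print(annotation.url_citation.url)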
+ + :rtype: List[MessageTextUrlCitationAnnotation] + """ + if not self.content: + return [] + return [ + annotation + for content in self.content + if isinstance(content, MessageTextContent) + for annotation in content.text.annotations + if isinstance(annotation, MessageTextUrlCitationAnnotation) + ] + + +class MessageAttachment(MessageAttachmentGenerated): + @overload + def __init__( + self, + *, + tools: List["FileSearchToolDefinition"], + file_id: Optional[str] = None, + data_source: Optional["VectorStoreDataSource"] = None, + ) -> None: ... + @overload + def __init__( + self, + *, + tools: List["CodeInterpreterToolDefinition"], + file_id: Optional[str] = None, + data_source: Optional["VectorStoreDataSource"] = None, + ) -> None: ... + @overload + def __init__( + self, + *, + tools: List["_types.MessageAttachmentToolDefinition"], + file_id: Optional[str] = None, + data_source: Optional["VectorStoreDataSource"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +ToolDefinitionT = TypeVar("ToolDefinitionT", bound=ToolDefinition) +ToolT = TypeVar("ToolT", bound="Tool") + + +class Tool(ABC, Generic[ToolDefinitionT]): + """ + An abstract class representing a tool that can be used by an assistant. + """ + + @property + @abstractmethod + def definitions(self) -> List[ToolDefinitionT]: + """Get the tool definitions.""" + + @property + @abstractmethod + def resources(self) -> ToolResources: + """Get the tool resources.""" + + @abstractmethod + def execute(self, tool_call: Any) -> Any: + """ + Execute the tool with the provided tool call. + + :param Any tool_call: The tool call to execute. + :return: The output of the tool operations. + """ + + +class BaseFunctionTool(Tool[FunctionToolDefinition]): + """ + A tool that executes user-defined functions. + """ + + def __init__(self, functions: Set[Callable[..., Any]]): + """ + Initialize FunctionTool with a set of functions. + + :param functions: A set of function objects. + """ + self._functions = self._create_function_dict(functions) + self._definitions = self._build_function_definitions(self._functions) + + def add_functions(self, extra_functions: Set[Callable[..., Any]]) -> None: + """ + Add more functions into this FunctionTool’s existing function set. + If a function with the same name already exists, it is overwritten. + + :param extra_functions: A set of additional functions to be added to + the existing function set. Functions are defined as callables and + may have any number of arguments and return types. 
+        :type extra_functions: Set[Callable[..., Any]]
+        """
+        # Convert the existing dictionary of { name: function } back into a set
+        existing_functions = set(self._functions.values())
+        # Merge old + new
+        combined = existing_functions.union(extra_functions)
+        # Rebuild state
+        self._functions = self._create_function_dict(combined)
+        self._definitions = self._build_function_definitions(self._functions)
+
+    def _create_function_dict(self, functions: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]:
+        return {func.__name__: func for func in functions}
+
+    def _build_function_definitions(self, functions: Dict[str, Any]) -> List[FunctionToolDefinition]:
+        specs: List[FunctionToolDefinition] = []
+        # Flexible regex to capture ':param <name> (<type>): <description>' lines in docstrings
+        param_pattern = re.compile(
+            r"""
+            ^\s*                                      # Optional leading whitespace
+            :param                                    # Literal ':param'
+            \s+                                       # At least one whitespace character
+            (?P<name>[^:\s\(\)]+)                     # Parameter name (no spaces, colons, or parentheses)
+            (?:\s*\(\s*(?P<type>[^)]+?)\s*\))?        # Optional type in parentheses, allowing internal spaces
+            \s*:\s*                                   # Colon ':' surrounded by optional whitespace
+            (?P<description>.+)                       # Description (rest of the line)
+            """,
+            re.VERBOSE,
+        )
+
+        for name, func in functions.items():
+            sig = inspect.signature(func)
+            params = sig.parameters
+            docstring = inspect.getdoc(func) or ""
+            description = docstring.split("\n", maxsplit=1)[0] if docstring else "No description"
+
+            param_descriptions = {}
+            for line in docstring.splitlines():
+                line = line.strip()
+                match = param_pattern.match(line)
+                if match:
+                    groups = match.groupdict()
+                    param_name = groups.get("name")
+                    param_desc = groups.get("description")
+                    param_desc = param_desc.strip() if param_desc else "No description"
+                    param_descriptions[param_name] = param_desc.strip()
+
+            properties = {}
+            required = []
+            for param_name, param in params.items():
+                param_type_info = _map_type(param.annotation)
+                param_description = param_descriptions.get(param_name, "No description")
+
+                properties[param_name] = {**param_type_info, "description": param_description}
+
+                # If the parameter has no default value and is not optional, add it to the required list
+                if param.default is inspect.Parameter.empty and not is_optional(param.annotation):
+                    required.append(param_name)
+
+            function_def = FunctionDefinition(
+                name=name,
+                description=description,
+                parameters={"type": "object", "properties": properties, "required": required},
+            )
+            tool_def = FunctionToolDefinition(function=function_def)
+            specs.append(tool_def)
+
+        return specs
+
+    def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]:
+        function_name = tool_call.function.name
+        arguments = tool_call.function.arguments
+
+        if function_name not in self._functions:
+            logging.error("Function '%s' not found.", function_name)
+            raise ValueError(f"Function '{function_name}' not found.")
+
+        function = self._functions[function_name]
+
+        try:
+            parsed_arguments = json.loads(arguments)
+        except json.JSONDecodeError as e:
+            logging.error("Invalid JSON arguments for function '%s': %s", function_name, e)
+            raise ValueError(f"Invalid JSON arguments: {e}") from e
+
+        if not isinstance(parsed_arguments, dict):
+            logging.error("Arguments must be a JSON object for function '%s'.", function_name)
+            raise TypeError("Arguments must be a JSON object.")
+
+        return function, parsed_arguments
+
+    @property
+    def definitions(self) -> List[FunctionToolDefinition]:
+        """
+        Get the function definitions.
+
+        :return: A list of function definitions.
+        :rtype: List[ToolDefinition]
+        """
+        return self._definitions
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the tool resources for the assistant.
+
+        :return: An empty ToolResources, as FunctionTool doesn't have specific resources.
+        :rtype: ToolResources
+        """
+        return ToolResources()
+
+
+class FunctionTool(BaseFunctionTool):
+
+    def execute(self, tool_call: RequiredFunctionToolCall) -> Any:
+        function, parsed_arguments = self._get_func_and_args(tool_call)
+
+        try:
+            return function(**parsed_arguments) if parsed_arguments else function()
+        except TypeError as e:
+            error_message = f"Error executing function '{tool_call.function.name}': {e}"
+            logging.error(error_message)
+            # Return the error message as a JSON string so that the assistant
+            # can self-correct the function call
+            return json.dumps({"error": error_message})
+
+
+class AsyncFunctionTool(BaseFunctionTool):
+
+    async def execute(self, tool_call: RequiredFunctionToolCall) -> Any:  # pylint: disable=invalid-overridden-method
+        function, parsed_arguments = self._get_func_and_args(tool_call)
+
+        try:
+            if inspect.iscoroutinefunction(function):
+                return await function(**parsed_arguments) if parsed_arguments else await function()
+            return function(**parsed_arguments) if parsed_arguments else function()
+        except TypeError as e:
+            error_message = f"Error executing function '{tool_call.function.name}': {e}"
+            logging.error(error_message)
+            # Return the error message as a JSON string so that the assistant
+            # can self-correct the function call
+            return json.dumps({"error": error_message})
+
+
+class AzureAISearchTool(Tool[AzureAISearchToolDefinition]):
+    """
+    A tool that searches for information using Azure AI Search.
+    """
+
+    def __init__(
+        self,
+        index_connection_id: str,
+        index_name: str,
+        query_type: AzureAISearchQueryType = AzureAISearchQueryType.SIMPLE,
+        filter: str = "",
+        top_k: int = 5,
+    ):
+        """
+        Initialize AzureAISearchTool with an index_connection_id and index_name, with optional params.
+
+        :param index_connection_id: Index connection ID used by the tool. Allows only one connection.
+        :type index_connection_id: str
+        :param index_name: Name of the index in the search resource to be used by the tool.
+        :type index_name: str
+        :param query_type: Type of query in an AIIndexResource attached to this assistant.
+         Default value is AzureAISearchQueryType.SIMPLE.
+        :type query_type: AzureAISearchQueryType
+        :param filter: OData filter string for the search resource.
+        :type filter: str
+        :param top_k: Number of documents to retrieve from search and present to the model.
+        :type top_k: int
+        """
+        self.index_list = [
+            AISearchIndexResource(
+                index_connection_id=index_connection_id,
+                index_name=index_name,
+                query_type=query_type,
+                filter=filter,
+                top_k=top_k,
+            )
+        ]
+
+    @property
+    def definitions(self) -> List[AzureAISearchToolDefinition]:
+        """
+        Get the Azure AI Search tool definitions.
+
+        :return: A list of tool definitions.
+        :rtype: List[ToolDefinition]
+        """
+        return [AzureAISearchToolDefinition()]
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the Azure AI Search resources.
+
+        :return: ToolResources populated with azure_ai_search associated resources.
+        :rtype: ToolResources
+        """
+        return ToolResources(azure_ai_search=AzureAISearchResource(index_list=self.index_list))
+
+    def execute(self, tool_call: Any):
+        """
+        AI Search tool does not execute client-side.
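+
+        The search is performed service-side when the assistant invokes the tool;
+        client code only declares it. An illustrative attach sketch (assumes an
+        ``AssistantsClient`` named ``client`` and an existing search connection ID):
+
+        .. code-block:: python
+
+            ai_search = AzureAISearchTool(
+                index_connection_id=conn_id,
+                index_name="my-index",
+            )
+            assistant = client.create_assistant(
+                model="gpt-4o",
+                name="search-assistant",
+                instructions="Answer questions using the index.",
+                tools=ai_search.definitions,
+                tool_resources=ai_search.resources,
+            )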
+
+        :param Any tool_call: The tool call to execute.
+        """
+
+
+class OpenApiTool(Tool[OpenApiToolDefinition]):
+    """
+    A tool that retrieves information using OpenAPI specs.
+    Initialized with an initial API definition (name, description, spec, auth),
+    this class also supports adding and removing additional API definitions dynamically.
+    """
+
+    def __init__(self, name: str, description: str, spec: Any, auth: OpenApiAuthDetails):
+        """
+        Constructor initializes the tool with a primary API definition.
+
+        :param name: The name of the API.
+        :param description: The API description.
+        :param spec: The API specification.
+        :param auth: Authentication details for the API.
+        :type auth: OpenApiAuthDetails
+        """
+        self._default_auth = auth
+        self._definitions: List[OpenApiToolDefinition] = [
+            OpenApiToolDefinition(
+                openapi=OpenApiFunctionDefinition(name=name, description=description, spec=spec, auth=auth)
+            )
+        ]
+
+    @property
+    def definitions(self) -> List[OpenApiToolDefinition]:
+        """
+        Get the list of all API definitions for the tool.
+
+        :return: A list of OpenAPI tool definitions.
+        :rtype: List[ToolDefinition]
+        """
+        return self._definitions
+
+    def add_definition(self, name: str, description: str, spec: Any, auth: Optional[OpenApiAuthDetails] = None) -> None:
+        """
+        Adds a new API definition dynamically.
+        Raises a ValueError if a definition with the same name already exists.
+
+        :param name: The name of the API.
+        :type name: str
+        :param description: The description of the API.
+        :type description: str
+        :param spec: The API specification.
+        :type spec: Any
+        :param auth: Optional authentication details for this particular API definition.
+         If not provided, the tool's default authentication details will be used.
+        :type auth: Optional[OpenApiAuthDetails]
+        :raises ValueError: If a definition with the same name exists.
+        """
+        # Check if a definition with the same name exists.
+        if any(definition.openapi.name == name for definition in self._definitions):
+            raise ValueError(f"Definition '{name}' already exists and cannot be added again.")
+
+        # Use provided auth if specified, otherwise use default
+        auth_to_use = auth if auth is not None else self._default_auth
+
+        new_definition = OpenApiToolDefinition(
+            openapi=OpenApiFunctionDefinition(name=name, description=description, spec=spec, auth=auth_to_use)
+        )
+        self._definitions.append(new_definition)
+
+    def remove_definition(self, name: str) -> None:
+        """
+        Removes an API definition based on its name.
+
+        :param name: The name of the API definition to remove.
+        :type name: str
+        :raises ValueError: If the definition with the specified name does not exist.
+        """
+        for definition in self._definitions:
+            if definition.openapi.name == name:
+                self._definitions.remove(definition)
+                logging.info("Definition '%s' removed. Total definitions: %d.", name, len(self._definitions))
+                return
+        raise ValueError(f"Definition with the name '{name}' does not exist.")
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the tool resources for the assistant.
+
+        :return: An empty ToolResources, as OpenApiTool doesn't have specific resources.
+        :rtype: ToolResources
+        """
+        return ToolResources()
+
+    def execute(self, tool_call: Any) -> None:
+        """
+        OpenApiTool does not execute client-side.
+
+        :param Any tool_call: The tool call to execute.
+        :type tool_call: Any
+        """
+
+
+class AzureFunctionTool(Tool[AzureFunctionToolDefinition]):
+    """
+    A tool that informs the assistant about an available Azure Function.
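+
+    A minimal construction sketch (queue names and the storage endpoint are
+    placeholders; treat the queue field names as illustrative):
+
+    .. code-block:: python
+
+        azure_function_tool = AzureFunctionTool(
+            name="get_weather",
+            description="Get the weather for a location.",
+            parameters={
+                "type": "object",
+                "properties": {"location": {"type": "string"}},
+                "required": ["location"],
+            },
+            input_queue=AzureFunctionStorageQueue(
+                queue_name="weather-input",
+                storage_service_endpoint="https://<storage>.queue.core.windows.net",
+            ),
+            output_queue=AzureFunctionStorageQueue(
+                queue_name="weather-output",
+                storage_service_endpoint="https://<storage>.queue.core.windows.net",
+            ),
+        )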
+
+    :param name: The Azure Function name.
+    :param description: The Azure Function description.
+    :param parameters: The description of the function parameters.
+    :param input_queue: Input queue used by the Azure Function.
+    :param output_queue: Output queue used by the Azure Function.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        description: str,
+        parameters: Dict[str, Any],
+        input_queue: AzureFunctionStorageQueue,
+        output_queue: AzureFunctionStorageQueue,
+    ) -> None:
+        self._definitions = [
+            AzureFunctionToolDefinition(
+                azure_function=AzureFunctionDefinition(
+                    function=FunctionDefinition(
+                        name=name,
+                        description=description,
+                        parameters=parameters,
+                    ),
+                    input_binding=AzureFunctionBinding(storage_queue=input_queue),
+                    output_binding=AzureFunctionBinding(storage_queue=output_queue),
+                )
+            )
+        ]
+
+    @property
+    def definitions(self) -> List[AzureFunctionToolDefinition]:
+        """
+        Get the Azure Function tool definitions.
+
+        :rtype: List[ToolDefinition]
+        """
+        return self._definitions
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the Azure Function tool resources.
+
+        :rtype: ToolResources
+        """
+        return ToolResources()
+
+    def execute(self, tool_call: Any) -> Any:
+        pass
+
+
+class ConnectionTool(Tool[ToolDefinitionT]):
+    """
+    A tool that requires connection IDs.
+    Used as the base class for Bing Grounding, SharePoint, and Microsoft Fabric.
+    """
+
+    def __init__(self, connection_id: str):
+        """
+        Initialize ConnectionTool with a connection_id.
+
+        :param connection_id: Connection ID used by the tool. All connection tools allow only one connection.
+        """
+        self.connection_ids = [ToolConnection(connection_id=connection_id)]
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the connection tool resources.
+
+        :rtype: ToolResources
+        """
+        return ToolResources()
+
+    def execute(self, tool_call: Any) -> Any:
+        pass
+
+
+class BingGroundingTool(ConnectionTool[BingGroundingToolDefinition]):
+    """
+    A tool that searches for information using Bing.
+    """
+
+    @property
+    def definitions(self) -> List[BingGroundingToolDefinition]:
+        """
+        Get the Bing grounding tool definitions.
+
+        :rtype: List[ToolDefinition]
+        """
+        return [BingGroundingToolDefinition(bing_grounding=ToolConnectionList(connection_list=self.connection_ids))]
+
+
+class FabricTool(ConnectionTool[MicrosoftFabricToolDefinition]):
+    """
+    A tool that searches for information using Microsoft Fabric.
+    """
+
+    @property
+    def definitions(self) -> List[MicrosoftFabricToolDefinition]:
+        """
+        Get the Microsoft Fabric tool definitions.
+
+        :rtype: List[ToolDefinition]
+        """
+        return [MicrosoftFabricToolDefinition(fabric_dataagent=ToolConnectionList(connection_list=self.connection_ids))]
+
+
+class SharepointTool(ConnectionTool[SharepointToolDefinition]):
+    """
+    A tool that searches for information using SharePoint.
+    """
+
+    @property
+    def definitions(self) -> List[SharepointToolDefinition]:
+        """
+        Get the SharePoint tool definitions.
+
+        :rtype: List[ToolDefinition]
+        """
+        return [SharepointToolDefinition(sharepoint_grounding=ToolConnectionList(connection_list=self.connection_ids))]
+
+
+class FileSearchTool(Tool[FileSearchToolDefinition]):
+    """
+    A tool that searches for uploaded file information from the created vector stores.
+
+    :param vector_store_ids: A list of vector store IDs to search for files.
+    :type vector_store_ids: list[str]
+    """
+
+    def __init__(self, vector_store_ids: Optional[List[str]] = None):
+        if vector_store_ids is None:
+            self.vector_store_ids = set()
+        else:
+            self.vector_store_ids = set(vector_store_ids)
+
+    def add_vector_store(self, store_id: str) -> None:
+        """
+        Add a vector store ID to the list of vector stores to search for files.
+
+        :param store_id: The ID of the vector store to search for files.
+        :type store_id: str
+        """
+        self.vector_store_ids.add(store_id)
+
+    def remove_vector_store(self, store_id: str) -> None:
+        """
+        Remove a vector store ID from the list of vector stores to search for files.
+
+        :param store_id: The ID of the vector store to remove.
+        :type store_id: str
+        """
+        self.vector_store_ids.remove(store_id)
+
+    @property
+    def definitions(self) -> List[FileSearchToolDefinition]:
+        """
+        Get the file search tool definitions.
+
+        :rtype: List[ToolDefinition]
+        """
+        return [FileSearchToolDefinition()]
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the file search resources.
+
+        :rtype: ToolResources
+        """
+        return ToolResources(file_search=FileSearchToolResource(vector_store_ids=list(self.vector_store_ids)))
+
+    def execute(self, tool_call: Any) -> Any:
+        pass
+
+
+class CodeInterpreterTool(Tool[CodeInterpreterToolDefinition]):
+    """
+    A tool that interprets code files uploaded to the assistant.
+
+    :param file_ids: A list of file IDs to interpret.
+    :type file_ids: list[str]
+    """
+
+    def __init__(self, file_ids: Optional[List[str]] = None):
+        if file_ids is None:
+            self.file_ids = set()
+        else:
+            self.file_ids = set(file_ids)
+
+    def add_file(self, file_id: str) -> None:
+        """
+        Add a file ID to the list of files to interpret.
+
+        :param file_id: The ID of the file to interpret.
+        :type file_id: str
+        """
+        self.file_ids.add(file_id)
+
+    def remove_file(self, file_id: str) -> None:
+        """
+        Remove a file ID from the list of files to interpret.
+
+        :param file_id: The ID of the file to remove.
+        :type file_id: str
+        """
+        self.file_ids.remove(file_id)
+
+    @property
+    def definitions(self) -> List[CodeInterpreterToolDefinition]:
+        """
+        Get the code interpreter tool definitions.
+
+        :rtype: List[ToolDefinition]
+        """
+        return [CodeInterpreterToolDefinition()]
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the code interpreter resources.
+
+        :rtype: ToolResources
+        """
+        if not self.file_ids:
+            return ToolResources()
+        return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=list(self.file_ids)))
+
+    def execute(self, tool_call: Any) -> Any:
+        pass
+
+
+class BaseToolSet:
+    """
+    Abstract class for a collection of tools that can be used by an assistant.
+    """
+
+    def __init__(self) -> None:
+        self._tools: List[Tool] = []
+
+    def validate_tool_type(self, tool: Tool) -> None:
+        pass
+
+    def add(self, tool: Tool):
+        """
+        Add a tool to the tool set.
+
+        :param Tool tool: The tool to add.
+        :raises ValueError: If a tool of the same type already exists.
+        """
+        self.validate_tool_type(tool)
+
+        if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools):
+            raise ValueError(f"Tool of type {type(tool).__name__} already exists in the ToolSet.")
+        self._tools.append(tool)
+
+    def remove(self, tool_type: Type[Tool]) -> None:
+        """
+        Remove a tool of the specified type from the tool set.
+
+        :param Type[Tool] tool_type: The type of tool to remove.
+        :raises ValueError: If a tool of the specified type is not found.
+        """
+        for i, tool in enumerate(self._tools):
+            if isinstance(tool, tool_type):
+                del self._tools[i]
+                logging.info("Tool of type %s removed from the ToolSet.", tool_type.__name__)
+                return
+        raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.")
+
+    @property
+    def definitions(self) -> List[ToolDefinition]:
+        """
+        Get the definitions for all tools in the tool set.
+
+        :rtype: List[ToolDefinition]
+        """
+        tools = []
+        for tool in self._tools:
+            tools.extend(tool.definitions)
+        return tools
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the resources for all tools in the tool set.
+
+        :rtype: ToolResources
+        """
+        tool_resources: Dict[str, Any] = {}
+        for tool in self._tools:
+            resources = tool.resources
+            for key, value in resources.items():
+                if key in tool_resources:
+                    if isinstance(tool_resources[key], dict) and isinstance(value, dict):
+                        tool_resources[key].update(value)
+                else:
+                    tool_resources[key] = value
+        return self._create_tool_resources_from_dict(tool_resources)
+
+    def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources:
+        """
+        Safely converts a dictionary into a ToolResources instance.
+
+        :param resources: A dictionary of tool resources. Should be a mapping
+         accepted by ~azure.ai.assistants.models.AzureAISearchResource
+        :type resources: Dict[str, Any]
+        :return: A ToolResources instance.
+        :rtype: ToolResources
+        """
+        try:
+            return ToolResources(**resources)
+        except TypeError as e:
+            logging.error("Error creating ToolResources: %s", e)
+            raise ValueError("Invalid resources for ToolResources.") from e
+
+    def get_definitions_and_resources(self) -> Dict[str, Any]:
+        """
+        Get the definitions and resources for all tools in the tool set.
+
+        :return: A dictionary containing the tool resources and definitions.
+        :rtype: Dict[str, Any]
+        """
+        return {
+            "tool_resources": self.resources,
+            "tools": self.definitions,
+        }
+
+    def get_tool(self, tool_type: Type[ToolT]) -> ToolT:
+        """
+        Get a tool of the specified type from the tool set.
+
+        :param Type[Tool] tool_type: The type of tool to get.
+        :return: The tool of the specified type.
+        :rtype: Tool
+        :raises ValueError: If a tool of the specified type is not found.
+        """
+        for tool in self._tools:
+            if isinstance(tool, tool_type):
+                return cast(ToolT, tool)
+        raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.")
+
+
+class ToolSet(BaseToolSet):
+    """
+    A collection of tools that can be used by a synchronous assistant.
+    """
+
+    def validate_tool_type(self, tool: Tool) -> None:
+        """
+        Validate the type of the tool.
+
+        :param Tool tool: The tool to validate.
+        :raises ValueError: If the tool type is not supported by this tool set.
+        """
+        if isinstance(tool, AsyncFunctionTool):
+            raise ValueError(
+                "AsyncFunctionTool is not supported in ToolSet. "
+                + "To use async functions, use AsyncToolSet and assistants operations in azure.ai.assistants.aio."
+            )
+
+    def execute_tool_calls(self, tool_calls: List[Any]) -> Any:
+        """
+        Execute a tool of the specified type with the provided tool calls.
+
+        :param List[Any] tool_calls: A list of tool calls to execute.
+        :return: The output of the tool operations.
+ :rtype: Any + """ + tool_outputs = [] + + for tool_call in tool_calls: + try: + if tool_call.type == "function": + tool = self.get_tool(FunctionTool) + output = tool.execute(tool_call) + tool_output = { + "tool_call_id": tool_call.id, + "output": output, + } + tool_outputs.append(tool_output) + except Exception as e: # pylint: disable=broad-exception-caught + logging.error("Failed to execute tool call %s: %s", tool_call, e) + + return tool_outputs + + +class AsyncToolSet(BaseToolSet): + """ + A collection of tools that can be used by an asynchronous assistant. + """ + + def validate_tool_type(self, tool: Tool) -> None: + """ + Validate the type of the tool. + + :param Tool tool: The type of the tool to validate. + :raises ValueError: If the tool type is not a subclass of Tool. + """ + if isinstance(tool, FunctionTool): + raise ValueError( + "FunctionTool is not supported in AsyncToolSet. " + + "Please use AsyncFunctionTool instead and provide sync and/or async function(s)." + ) + + async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: + """ + Execute a tool of the specified type with the provided tool calls. + + :param List[Any] tool_calls: A list of tool calls to execute. + :return: The output of the tool operations. + :rtype: Any + """ + tool_outputs = [] + + for tool_call in tool_calls: + try: + if tool_call.type == "function": + tool = self.get_tool(AsyncFunctionTool) + output = await tool.execute(tool_call) + tool_output = { + "tool_call_id": tool_call.id, + "output": output, + } + tool_outputs.append(tool_output) + except Exception as e: # pylint: disable=broad-exception-caught + logging.error("Failed to execute tool call %s: %s", tool_call, e) + + return tool_outputs + + +EventFunctionReturnT = TypeVar("EventFunctionReturnT") +T = TypeVar("T") +BaseAsyncAssistantEventHandlerT = TypeVar("BaseAsyncAssistantEventHandlerT", bound="BaseAsyncAssistantEventHandler") +BaseAssistantEventHandlerT = TypeVar("BaseAssistantEventHandlerT", bound="BaseAssistantEventHandler") + + +async def async_chain(*iterators: AsyncIterator[T]) -> AsyncIterator[T]: + for iterator in iterators: + async for item in iterator: + yield item + + +class BaseAsyncAssistantEventHandler(AsyncIterator[T]): + + def __init__(self) -> None: + self.response_iterator: Optional[AsyncIterator[bytes]] = None + self.submit_tool_outputs: Optional[ + Callable[[ThreadRun, "BaseAsyncAssistantEventHandler[T]"], Awaitable[None]] + ] = None + self.buffer: Optional[bytes] = None + + def initialize( + self, + response_iterator: AsyncIterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, "BaseAsyncAssistantEventHandler[T]"], Awaitable[None]], + ): + self.response_iterator = ( + async_chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator + ) + self.submit_tool_outputs = submit_tool_outputs + + # cspell:disable-next-line + async def __anext__(self) -> T: + # cspell:disable-next-line + event_bytes = await self.__anext_impl__() + return await self._process_event(event_bytes.decode("utf-8")) + + # cspell:disable-next-line + async def __anext_impl__(self) -> bytes: + self.buffer = b"" if self.buffer is None else self.buffer + if self.response_iterator is None: + raise ValueError("The response handler was not initialized.") + + if not b"\n\n" in self.buffer: + async for chunk in self.response_iterator: + self.buffer += chunk + if b"\n\n" in self.buffer: + break + + if self.buffer == b"": + raise StopAsyncIteration() + + event_bytes = b"" + if b"\n\n" in self.buffer: + event_end_index 
= self.buffer.index(b"\n\n") + event_bytes = self.buffer[:event_end_index] + self.buffer = self.buffer[event_end_index:].lstrip() + else: + event_bytes = self.buffer + self.buffer = b"" + + return event_bytes + + async def _process_event(self, event_data_str: str) -> T: + raise NotImplementedError("This method needs to be implemented.") + + async def until_done(self) -> None: + """ + Iterates through all events until the stream is marked as done. + Calls the provided callback function with each event data. + """ + try: + async for _ in self: + pass + except StopAsyncIteration: + pass + + +class BaseAssistantEventHandler(Iterator[T]): + + def __init__(self) -> None: + self.response_iterator: Optional[Iterator[bytes]] = None + self.submit_tool_outputs: Optional[Callable[[ThreadRun, "BaseAssistantEventHandler[T]"], None]] = None + self.buffer: Optional[bytes] = None + + def initialize( + self, + response_iterator: Iterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, "BaseAssistantEventHandler[T]"], None], + ) -> None: + self.response_iterator = ( + itertools.chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator + ) + self.submit_tool_outputs = submit_tool_outputs + + def __next__(self) -> T: + event_bytes = self.__next_impl__() + return self._process_event(event_bytes.decode("utf-8")) + + def __next_impl__(self) -> bytes: + self.buffer = b"" if self.buffer is None else self.buffer + if self.response_iterator is None: + raise ValueError("The response handler was not initialized.") + + if not b"\n\n" in self.buffer: + for chunk in self.response_iterator: + self.buffer += chunk + if b"\n\n" in self.buffer: + break + + if self.buffer == b"": + raise StopIteration() + + event_bytes = b"" + if b"\n\n" in self.buffer: + event_end_index = self.buffer.index(b"\n\n") + event_bytes = self.buffer[:event_end_index] + self.buffer = self.buffer[event_end_index:].lstrip() + else: + event_bytes = self.buffer + self.buffer = b"" + + return event_bytes + + def _process_event(self, event_data_str: str) -> T: + raise NotImplementedError("This method needs to be implemented.") + + def until_done(self) -> None: + """ + Iterates through all events until the stream is marked as done. + Calls the provided callback function with each event data. 
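+
+        A typical consumption sketch (illustrative; assumes the client exposes a
+        ``create_stream``-style operation that returns a stream context manager
+        wired to this handler):
+
+        .. code-block:: python
+
+            with client.create_stream(
+                thread_id=thread.id,
+                assistant_id=assistant.id,
+                event_handler=MyEventHandler(),
+            ) as stream:
+                stream.until_done()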
+ """ + try: + for _ in self: + pass + except StopIteration: + pass + + +class AsyncAssistantEventHandler( + BaseAsyncAssistantEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]] +): + + async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: + event_type, event_data_obj = _parse_event(event_data_str) + if ( + isinstance(event_data_obj, ThreadRun) + and event_data_obj.status == "requires_action" + and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) + ): + await cast( + Callable[[ThreadRun, "BaseAsyncAssistantEventHandler"], Awaitable[None]], self.submit_tool_outputs + )(event_data_obj, self) + + func_rt: Optional[EventFunctionReturnT] = None + try: + if isinstance(event_data_obj, MessageDeltaChunk): + func_rt = await self.on_message_delta(event_data_obj) + elif isinstance(event_data_obj, ThreadMessage): + func_rt = await self.on_thread_message(event_data_obj) + elif isinstance(event_data_obj, ThreadRun): + func_rt = await self.on_thread_run(event_data_obj) + elif isinstance(event_data_obj, RunStep): + func_rt = await self.on_run_step(event_data_obj) + elif isinstance(event_data_obj, RunStepDeltaChunk): + func_rt = await self.on_run_step_delta(event_data_obj) + elif event_type == AssistantStreamEvent.ERROR: + func_rt = await self.on_error(event_data_obj) + elif event_type == AssistantStreamEvent.DONE: + func_rt = await self.on_done() + else: + func_rt = await self.on_unhandled_event( + event_type, event_data_obj + ) # pylint: disable=assignment-from-none + except Exception as e: # pylint: disable=broad-exception-caught + logging.error("Error in event handler for event '%s': %s", event_type, e) + return event_type, event_data_obj, func_rt + + async def on_message_delta( + self, delta: "MessageDeltaChunk" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle message delta events. + + :param MessageDeltaChunk delta: The message delta. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_thread_message( + self, message: "ThreadMessage" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle thread message events. + + :param ThreadMessage message: The thread message. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_thread_run( + self, run: "ThreadRun" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle thread run events. + + :param ThreadRun run: The thread run. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle run step events. + + :param RunStep step: The run step. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_run_step_delta( + self, delta: "RunStepDeltaChunk" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle run step delta events. + + :param RunStepDeltaChunk delta: The run step delta. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_error(self, data: str) -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle error events. + + :param str data: The error event's data. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_done( + self, + ) -> Optional[EventFunctionReturnT]: + """Handle the completion of the stream. 
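+
+        A subclassing sketch (override only the callbacks you need; the generic
+        parameter is the callback return type):
+
+        .. code-block:: python
+
+            class MyEventHandler(AsyncAssistantEventHandler[None]):
+                async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+                    print(delta.text, end="")
+
+                async def on_done(self) -> None:
+                    print("stream finished")
+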
+ :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_unhandled_event( + self, event_type: str, event_data: str # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle any unhandled event types. + + :param str event_type: The event type. + :param Any event_data: The event's data. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + +class AssistantEventHandler(BaseAssistantEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]): + + def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: + + event_type, event_data_obj = _parse_event(event_data_str) + if ( + isinstance(event_data_obj, ThreadRun) + and event_data_obj.status == "requires_action" + and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) + ): + cast(Callable[[ThreadRun, "BaseAssistantEventHandler"], Awaitable[None]], self.submit_tool_outputs)( + event_data_obj, self + ) + + func_rt: Optional[EventFunctionReturnT] = None + try: + if isinstance(event_data_obj, MessageDeltaChunk): + func_rt = self.on_message_delta(event_data_obj) # pylint: disable=assignment-from-none + elif isinstance(event_data_obj, ThreadMessage): + func_rt = self.on_thread_message(event_data_obj) # pylint: disable=assignment-from-none + elif isinstance(event_data_obj, ThreadRun): + func_rt = self.on_thread_run(event_data_obj) # pylint: disable=assignment-from-none + elif isinstance(event_data_obj, RunStep): + func_rt = self.on_run_step(event_data_obj) # pylint: disable=assignment-from-none + elif isinstance(event_data_obj, RunStepDeltaChunk): + func_rt = self.on_run_step_delta(event_data_obj) # pylint: disable=assignment-from-none + elif event_type == AssistantStreamEvent.ERROR: + func_rt = self.on_error(event_data_obj) # pylint: disable=assignment-from-none + elif event_type == AssistantStreamEvent.DONE: + func_rt = self.on_done() # pylint: disable=assignment-from-none + else: + func_rt = self.on_unhandled_event(event_type, event_data_obj) # pylint: disable=assignment-from-none + except Exception as e: # pylint: disable=broad-exception-caught + logging.error("Error in event handler for event '%s': %s", event_type, e) + return event_type, event_data_obj, func_rt + + def on_message_delta( + self, delta: "MessageDeltaChunk" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle message delta events. + + :param MessageDeltaChunk delta: The message delta. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_thread_message( + self, message: "ThreadMessage" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle thread message events. + + :param ThreadMessage message: The thread message. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_thread_run(self, run: "ThreadRun") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle thread run events. + + :param ThreadRun run: The thread run. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle run step events. + + :param RunStep step: The run step. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_run_step_delta( + self, delta: "RunStepDeltaChunk" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle run step delta events. 
+ + :param RunStepDeltaChunk delta: The run step delta. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_error(self, data: str) -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle error events. + + :param str data: The error event's data. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_done( + self, + ) -> Optional[EventFunctionReturnT]: + """Handle the completion of the stream.""" + return None + + def on_unhandled_event( + self, event_type: str, event_data: str # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle any unhandled event types. + + :param str event_type: The event type. + :param Any event_data: The event's data. + """ + return None + + +class AsyncAssistantRunStream(Generic[BaseAsyncAssistantEventHandlerT]): + def __init__( + self, + response_iterator: AsyncIterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, BaseAsyncAssistantEventHandlerT], Awaitable[None]], + event_handler: BaseAsyncAssistantEventHandlerT, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.submit_tool_outputs = submit_tool_outputs + self.event_handler.initialize( + self.response_iterator, + cast(Callable[[ThreadRun, BaseAsyncAssistantEventHandler], Awaitable[None]], submit_tool_outputs), + ) + + async def __aenter__(self): + return self.event_handler + + async def __aexit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + result = close_method() + if asyncio.iscoroutine(result): + await result + + +class AssistantRunStream(Generic[BaseAssistantEventHandlerT]): + def __init__( + self, + response_iterator: Iterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, BaseAssistantEventHandlerT], None], + event_handler: BaseAssistantEventHandlerT, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.submit_tool_outputs = submit_tool_outputs + self.event_handler.initialize( + self.response_iterator, + cast(Callable[[ThreadRun, BaseAssistantEventHandler], None], submit_tool_outputs), + ) + + def __enter__(self): + return self.event_handler + + def __exit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + close_method() + + +class OpenAIPageableListOfThreadMessage(OpenAIPageableListOfThreadMessageGenerated): + + @property + def text_messages(self) -> List[MessageTextContent]: + """Returns all text message contents in the messages. + + :rtype: List[MessageTextContent] + """ + texts = [content for msg in self.data for content in msg.text_messages] + return texts + + @property + def image_contents(self) -> List[MessageImageFileContent]: + """Returns all image file contents from image message contents in the messages. + + :rtype: List[MessageImageFileContent] + """ + return [content for msg in self.data for content in msg.image_contents] + + @property + def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: + """Returns all file citation annotations from text message annotations in the messages. + + :rtype: List[MessageTextFileCitationAnnotation] + """ + annotations = [annotation for msg in self.data for annotation in msg.file_citation_annotations] + return annotations + + @property + def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: + """Returns all file path annotations from text message annotations in the messages. 
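+
+        A consumption sketch (illustrative; assumes ``messages`` is the pageable
+        list returned by a list-messages operation and that each annotation
+        exposes its details under ``file_path``):
+
+        .. code-block:: python
+
+            for annotation in messages.file_path_annotations:
+                print(annotation.file_path.file_id)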
+
+        :rtype: List[MessageTextFilePathAnnotation]
+        """
+        annotations = [annotation for msg in self.data for annotation in msg.file_path_annotations]
+        return annotations
+
+    def get_last_message_by_role(self, role: MessageRole) -> Optional[ThreadMessage]:
+        """Returns the last message from a sender in the specified role.
+
+        :param role: The role of the sender.
+        :type role: MessageRole
+
+        :return: The last message from a sender in the specified role.
+        :rtype: ~azure.ai.assistants.models.ThreadMessage
+        """
+        for msg in self.data:
+            if msg.role == role:
+                return msg
+        return None
+
+    def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTextContent]:
+        """Returns the last text message from a sender in the specified role.
+
+        :param role: The role of the sender.
+        :type role: MessageRole
+
+        :return: The last text message from a sender in the specified role.
+        :rtype: ~azure.ai.assistants.models.MessageTextContent
+        """
+        for msg in self.data:
+            if msg.role == role:
+                for content in msg.content:
+                    if isinstance(content, MessageTextContent):
+                        return content
+        return None
+
+
+__all__: List[str] = [
+    "AssistantEventHandler",
+    "AssistantRunStream",
+    "AsyncAssistantRunStream",
+    "AsyncFunctionTool",
+    "AsyncToolSet",
+    "AzureAISearchTool",
+    "AzureFunctionTool",
+    "BaseAsyncAssistantEventHandler",
+    "BaseAssistantEventHandler",
+    "CodeInterpreterTool",
+    "AsyncAssistantEventHandler",
+    "OpenAIPageableListOfThreadMessage",
+    "FileSearchTool",
+    "FunctionTool",
+    "OpenApiTool",
+    "BingGroundingTool",
+    "StreamEventData",
+    "SharepointTool",
+    "FabricTool",
+    "Tool",
+    "ToolSet",
+    "BaseAsyncAssistantEventHandlerT",
+    "BaseAssistantEventHandlerT",
+    "ThreadMessage",
+    "MessageTextFileCitationAnnotation",
+    "MessageDeltaChunk",
+    "MessageAttachment",
+]
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/py.typed b/sdk/ai/azure-ai-assistants/azure/ai/assistants/py.typed
new file mode 100644
index 000000000000..e5aff4f83af8
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py
new file mode 100644
index 000000000000..4d638973f4c0
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py
@@ -0,0 +1,14 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# -------------------------------------------------------------------------- + +from ._ai_assistants_instrumentor import AIAssistantsInstrumentor +from ._utils import enable_telemetry +from ._trace_function import trace_function + + +__all__ = ["AIAssistantsInstrumentor", "enable_telemetry", "trace_function"] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py new file mode 100644 index 000000000000..e0512a34ca6b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py @@ -0,0 +1,1917 @@ +# pylint: disable=too-many-lines,line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import copy +import functools +import importlib +import json +import logging +import os +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast, TYPE_CHECKING +from urllib.parse import urlparse + +from azure.ai.assistants.models import AssistantRunStream, AsyncAssistantRunStream, _models +from azure.ai.assistants.models._enums import AssistantsApiResponseFormatMode, MessageRole, RunStepStatus +from azure.ai.assistants.models import ( + MessageAttachment, + MessageDeltaChunk, + MessageIncompleteDetails, + RequiredFunctionToolCall, + RunStep, + RunStepDeltaChunk, + RunStepFunctionToolCall, + RunStepToolCallDetails, + SubmitToolOutputsAction, + ThreadMessage, + ThreadRun, + ToolDefinition, + ToolOutput, + ToolResources, +) +from azure.ai.assistants.models._patch import AssistantEventHandler, AsyncAssistantEventHandler, ToolSet +from azure.ai.assistants.telemetry._utils import ( + AZ_AI_ASSISTANT_SYSTEM, + ERROR_TYPE, + GEN_AI_ASSISTANT_DESCRIPTION, + GEN_AI_ASSISTANT_ID, + GEN_AI_ASSISTANT_NAME, + GEN_AI_EVENT_CONTENT, + GEN_AI_MESSAGE_ID, + GEN_AI_MESSAGE_STATUS, + GEN_AI_RESPONSE_MODEL, + GEN_AI_SYSTEM, + GEN_AI_SYSTEM_MESSAGE, + GEN_AI_THREAD_ID, + GEN_AI_THREAD_RUN_ID, + GEN_AI_THREAD_RUN_STATUS, + GEN_AI_USAGE_INPUT_TOKENS, + GEN_AI_USAGE_OUTPUT_TOKENS, + OperationName, + start_span, +) +from azure.core import CaseInsensitiveEnumMeta # type: ignore +from azure.core.settings import settings +from azure.core.tracing import AbstractSpan + +_Unset: Any = object() + +try: + # pylint: disable = no-name-in-module + from opentelemetry.trace import Span, StatusCode + + _tracing_library_available = True +except ModuleNotFoundError: + _tracing_library_available = False + +if TYPE_CHECKING: + from .. import _types + + +__all__ = [ + "AIAssistantsInstrumentor", +] + + +_assistants_traces_enabled: bool = False +_trace_assistants_content: bool = False + + +class TraceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): # pylint: disable=C4747 + """An enumeration class to represent different types of traces.""" + + ASSISTANTS = "Assistants" + + +class AIAssistantsInstrumentor: + """ + A class for managing the trace instrumentation of AI Assistants. + + This class allows enabling or disabling tracing for AI Assistants. + and provides functionality to check whether instrumentation is active. + + """ + + def __init__(self): + if not _tracing_library_available: + raise ModuleNotFoundError( + "Azure Core Tracing Opentelemetry is not installed. 
" + "Please install it using 'pip install azure-core-tracing-opentelemetry'" + ) + # In the future we could support different versions from the same library + # and have a parameter that specifies the version to use. + self._impl = _AIAssistantsInstrumentorPreview() + + def instrument(self, enable_content_recording: Optional[bool] = None) -> None: + """ + Enable trace instrumentation for AI Assistants. + + :param enable_content_recording: Whether content recording is enabled as part + of the traces or not. Content in this context refers to chat message content + and function call tool related function names, function parameter names and + values. True will enable content recording, False will disable it. If no value + is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable + is not found, then the value will default to False. Please note that successive calls + to instrument will always apply the content recording value provided with the most + recent call to instrument (including applying the environment variable if no value is + provided and defaulting to false if the environment variable is not found), even if + instrument was already previously called without uninstrument being called in between + the instrument calls. + :type enable_content_recording: bool, optional + + """ + self._impl.instrument(enable_content_recording) + + def uninstrument(self) -> None: + """ + Remove trace instrumentation for AI Assistants. + + This method removes any active instrumentation, stopping the tracing + of AI Assistants. + """ + self._impl.uninstrument() + + def is_instrumented(self) -> bool: + """ + Check if trace instrumentation for AI Assistants is currently enabled. + + :return: True if instrumentation is active, False otherwise. + :rtype: bool + """ + return self._impl.is_instrumented() + + def is_content_recording_enabled(self) -> bool: + """This function gets the content recording value. + + :return: A bool value indicating whether content recording is enabled. + :rtype: bool + """ + return self._impl.is_content_recording_enabled() + + +class _AIAssistantsInstrumentorPreview: + # pylint: disable=R0904 + """ + A class for managing the trace instrumentation of AI Assistants. + + This class allows enabling or disabling tracing for AI Assistants. + and provides functionality to check whether instrumentation is active. + """ + + def _str_to_bool(self, s): + if s is None: + return False + return str(s).lower() == "true" + + def instrument(self, enable_content_recording: Optional[bool] = None): + """ + Enable trace instrumentation for AI Assistants. + + :param enable_content_recording: Whether content recording is enabled as part + of the traces or not. Content in this context refers to chat message content + and function call tool related function names, function parameter names and + values. True will enable content recording, False will disable it. If no value + is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable + is not found, then the value will default to False. 
+ + :type enable_content_recording: bool, optional + """ + if enable_content_recording is None: + var_value = os.environ.get("AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED") + enable_content_recording = self._str_to_bool(var_value) + if not self.is_instrumented(): + self._instrument_assistants(enable_content_recording) + else: + self._set_enable_content_recording(enable_content_recording=enable_content_recording) + + def uninstrument(self): + """ + Disable trace instrumentation for AI Assistants. + + This method removes any active instrumentation, stopping the tracing + of AI Assistants. + """ + if self.is_instrumented(): + self._uninstrument_assistants() + + def is_instrumented(self): + """ + Check if trace instrumentation for AI Assistants is currently enabled. + + :return: True if instrumentation is active, False otherwise. + :rtype: bool + """ + return self._is_instrumented() + + def set_enable_content_recording(self, enable_content_recording: bool = False) -> None: + """This function sets the content recording value. + + :param enable_content_recording: Indicates whether tracing of message content should be enabled. + This also controls whether function call tool function names, + parameter names and parameter values are traced. + :type enable_content_recording: bool + """ + self._set_enable_content_recording(enable_content_recording=enable_content_recording) + + def is_content_recording_enabled(self) -> bool: + """This function gets the content recording value. + + :return: A bool value indicating whether content tracing is enabled. + :rtype bool + """ + return self._is_content_recording_enabled() + + def _set_attributes(self, span: "AbstractSpan", *attrs: Tuple[str, Any]) -> None: + for attr in attrs: + key, value = attr + if value is not None: + span.add_attribute(key, value) + + def _parse_url(self, url): + parsed = urlparse(url) + server_address = parsed.hostname + port = parsed.port + return server_address, port + + def _remove_function_call_names_and_arguments(self, tool_calls: list) -> list: + tool_calls_copy = copy.deepcopy(tool_calls) + for tool_call in tool_calls_copy: + if "function" in tool_call: + if "name" in tool_call["function"]: + del tool_call["function"]["name"] + if "arguments" in tool_call["function"]: + del tool_call["function"]["arguments"] + if not tool_call["function"]: + del tool_call["function"] + return tool_calls_copy + + def _create_event_attributes( + self, + thread_id: Optional[str] = None, + assistant_id: Optional[str] = None, + thread_run_id: Optional[str] = None, + message_id: Optional[str] = None, + message_status: Optional[str] = None, + usage: Optional[_models.RunStepCompletionUsage] = None, + ) -> Dict[str, Any]: + attrs: Dict[str, Any] = {GEN_AI_SYSTEM: AZ_AI_ASSISTANT_SYSTEM} + if thread_id: + attrs[GEN_AI_THREAD_ID] = thread_id + + if assistant_id: + attrs[GEN_AI_ASSISTANT_ID] = assistant_id + + if thread_run_id: + attrs[GEN_AI_THREAD_RUN_ID] = thread_run_id + + if message_id: + attrs[GEN_AI_MESSAGE_ID] = message_id + + if message_status: + attrs[GEN_AI_MESSAGE_STATUS] = self._status_to_string(message_status) + + if usage: + attrs[GEN_AI_USAGE_INPUT_TOKENS] = usage.prompt_tokens + attrs[GEN_AI_USAGE_OUTPUT_TOKENS] = usage.completion_tokens + + return attrs + + def add_thread_message_event( + self, span, message: ThreadMessage, usage: Optional[_models.RunStepCompletionUsage] = None + ) -> None: + content_body = {} + if _trace_assistants_content: + for content in message.content: + typed_content = content.get(content.type, None) + if 
typed_content: + content_details = {"value": self._get_field(typed_content, "value")} + annotations = self._get_field(typed_content, "annotations") + if annotations: + content_details["annotations"] = [a.as_dict() for a in annotations] + content_body[content.type] = content_details + + self._add_message_event( + span, + self._get_role(message.role), + content_body, + attachments=message.attachments, + thread_id=message.thread_id, + assistant_id=message.assistant_id, + message_id=message.id, + thread_run_id=message.run_id, + message_status=message.status, + incomplete_details=message.incomplete_details, + usage=usage, + ) + + def _add_message_event( + self, + span, + role: str, + content: Any, + attachments: Any = None, # Optional[List[MessageAttachment]] or dict + thread_id: Optional[str] = None, + assistant_id: Optional[str] = None, + message_id: Optional[str] = None, + thread_run_id: Optional[str] = None, + message_status: Optional[str] = None, + incomplete_details: Optional[MessageIncompleteDetails] = None, + usage: Optional[_models.RunStepCompletionUsage] = None, + ) -> None: + # TODO document new fields + + event_body = {} + if _trace_assistants_content: + event_body["content"] = content + if attachments: + event_body["attachments"] = [] + for attachment in attachments: + attachment_body = {"id": attachment.file_id} + if attachment.tools: + attachment_body["tools"] = [self._get_field(tool, "type") for tool in attachment.tools] + event_body["attachments"].append(attachment_body) + + if incomplete_details: + event_body["incomplete_details"] = incomplete_details + event_body["role"] = role + + attributes = self._create_event_attributes( + thread_id=thread_id, + assistant_id=assistant_id, + thread_run_id=thread_run_id, + message_id=message_id, + message_status=message_status, + usage=usage, + ) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body, ensure_ascii=False) + span.span_instance.add_event(name=f"gen_ai.{role}.message", attributes=attributes) + + def _get_field(self, obj: Any, field: str) -> Any: + if not obj: + return None + + if isinstance(obj, dict): + return obj.get(field, None) + + return getattr(obj, field, None) + + def _add_instructions_event( + self, + span: "AbstractSpan", + instructions: Optional[str], + additional_instructions: Optional[str], + assistant_id: Optional[str] = None, + thread_id: Optional[str] = None, + ) -> None: + if not instructions: + return + + event_body: Dict[str, Any] = {} + if _trace_assistants_content and (instructions or additional_instructions): + if instructions and additional_instructions: + event_body["content"] = f"{instructions} {additional_instructions}" + else: + event_body["content"] = instructions or additional_instructions + + attributes = self._create_event_attributes(assistant_id=assistant_id, thread_id=thread_id) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body, ensure_ascii=False) + span.span_instance.add_event(name=GEN_AI_SYSTEM_MESSAGE, attributes=attributes) + + def _get_role(self, role: Optional[Union[str, MessageRole]]) -> str: + if role is None or role is _Unset: + return "user" + + if isinstance(role, MessageRole): + return role.value + + return role + + def _status_to_string(self, status: Any) -> str: + return status.value if hasattr(status, "value") else status + + def _add_tool_assistant_message_event(self, span, step: RunStep) -> None: + tool_calls = [ + { + "id": t.id, + "type": t.type, + "function": ( + {"name": t.function.name, "arguments": json.loads(t.function.arguments)} + if isinstance(t, 
RunStepFunctionToolCall) + else None + ), + } + for t in cast(RunStepToolCallDetails, step.step_details).tool_calls + ] + + attributes = self._create_event_attributes( + thread_id=step.thread_id, + assistant_id=step.assistant_id, + thread_run_id=step.run_id, + message_status=step.status, + usage=step.usage, + ) + + if _trace_assistants_content: + attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}, ensure_ascii=False) + else: + tool_calls_non_recording = self._remove_function_call_names_and_arguments(tool_calls=tool_calls) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls_non_recording}, ensure_ascii=False) + span.span_instance.add_event(name="gen_ai.assistant.message", attributes=attributes) + + def _add_tool_event_from_thread_run(self, span, run: ThreadRun) -> None: + tool_calls = [] + + for t in run.required_action.submit_tool_outputs.tool_calls: # type: ignore + try: + parsed_arguments = json.loads(t.function.arguments) + except json.JSONDecodeError: + parsed_arguments = {} + + tool_call = { + "id": t.id, + "type": t.type, + "function": ( + {"name": t.function.name, "arguments": parsed_arguments} + if isinstance(t, RequiredFunctionToolCall) + else None + ), + } + tool_calls.append(tool_call) + + attributes = self._create_event_attributes( + thread_id=run.thread_id, + assistant_id=run.assistant_id, + thread_run_id=run.id, + message_status=run.status, + ) + + if _trace_assistants_content: + attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}) + else: + tool_calls_non_recording = self._remove_function_call_names_and_arguments(tool_calls=tool_calls) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls_non_recording}) + span.span_instance.add_event(name="gen_ai.assistant.message", attributes=attributes) + + def set_end_run(self, span: "AbstractSpan", run: Optional[ThreadRun]) -> None: + if run and span and span.span_instance.is_recording: + span.add_attribute(GEN_AI_THREAD_RUN_STATUS, self._status_to_string(run.status)) + span.add_attribute(GEN_AI_RESPONSE_MODEL, run.model) + if run and run.usage: + span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, run.usage.prompt_tokens) + span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, run.usage.completion_tokens) + + @staticmethod + def assistant_api_response_to_str(response_format: Any) -> Optional[str]: + """ + Convert response_format to string. + + :param response_format: The response format. + :type response_format: ~azure.ai.assistants._types.AssistantsApiResponseFormatOption + :returns: string for the response_format. + :rtype: Optional[str] + :raises: Value error if response_format is not of type AssistantsApiResponseFormatOption. 
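As a quick illustration of the mapping this static helper performs (a sketch, assuming the mode enum defines `AUTO = "auto"`; anything outside the handled types raises `ValueError`):

```python
from azure.ai.assistants.models._enums import AssistantsApiResponseFormatMode

to_str = _AIAssistantsInstrumentorPreview.assistant_api_response_to_str

assert to_str(None) is None                     # None passes through unchanged
assert to_str("json_object") == "json_object"   # plain strings pass through
assert to_str(AssistantsApiResponseFormatMode.AUTO) == "auto"  # enum -> .value
# AssistantsApiResponseFormat / ResponseFormatJsonSchemaType map to their .type
```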
+ """ + if isinstance(response_format, str) or response_format is None: + return response_format + if isinstance(response_format, AssistantsApiResponseFormatMode): + return response_format.value + if isinstance(response_format, _models.AssistantsApiResponseFormat): + return response_format.type + if isinstance(response_format, _models.ResponseFormatJsonSchemaType): + return response_format.type + raise ValueError(f"Unknown response format {type(response_format)}") + + def start_thread_run_span( + self, + operation_name: OperationName, + project_name: str, + thread_id: Optional[str] = None, + assistant_id: Optional[str] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[ThreadMessage]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + _tools: Optional[List[ToolDefinition]] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + ) -> "Optional[AbstractSpan]": + span = start_span( + operation_name, + project_name, + thread_id=thread_id, + assistant_id=assistant_id, + model=model, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=_AIAssistantsInstrumentorPreview.assistant_api_response_to_str(response_format), + ) + if span and span.span_instance.is_recording and instructions and additional_instructions: + self._add_instructions_event( + span, instructions, additional_instructions, thread_id=thread_id, assistant_id=assistant_id + ) + + if additional_messages: + for message in additional_messages: + self.add_thread_message_event(span, message) + return span + + def start_submit_tool_outputs_span( + self, + project_name: str, + thread_id: Optional[str] = None, + run_id: Optional[str] = None, + tool_outputs: Optional[List[ToolOutput]] = None, + event_handler: Optional[Union[AssistantEventHandler, AsyncAssistantEventHandler]] = None, + ) -> "Optional[AbstractSpan]": + run_span = event_handler.span if isinstance(event_handler, _AssistantEventHandlerTraceWrapper) else None + if run_span is None: + run_span = ( + event_handler.span if isinstance(event_handler, _AsyncAssistantEventHandlerTraceWrapper) else None + ) + + if run_span: + recorded = self._add_tool_message_events(run_span, tool_outputs) + else: + recorded = False + + span = start_span(OperationName.SUBMIT_TOOL_OUTPUTS, project_name, thread_id=thread_id, run_id=run_id) + if not recorded: + self._add_tool_message_events(span, tool_outputs) + return span + + def _add_tool_message_events( + self, span: "Optional[AbstractSpan]", tool_outputs: Optional[List[ToolOutput]] + ) -> bool: + if span and span.span_instance.is_recording and tool_outputs: + for tool_output in tool_outputs: + if _trace_assistants_content: + body = {"content": tool_output["output"], "id": tool_output["tool_call_id"]} + else: + body = {"content": "", "id": tool_output["tool_call_id"]} + span.span_instance.add_event( + "gen_ai.tool.message", {"gen_ai.event.content": json.dumps(body, ensure_ascii=False)} + ) + return True + + return False + + def start_create_assistant_span( + self, + project_name: str, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + _tools: Optional[List[ToolDefinition]] = None, + _tool_resources: 
Optional[ToolResources] = None, + _toolset: Optional[ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + ) -> "Optional[AbstractSpan]": + span = start_span( + OperationName.CREATE_ASSISTANT, + project_name, + span_name=f"{OperationName.CREATE_ASSISTANT.value} {name}", + model=model, + temperature=temperature, + top_p=top_p, + response_format=_AIAssistantsInstrumentorPreview.assistant_api_response_to_str(response_format), + ) + if span and span.span_instance.is_recording: + if name: + span.add_attribute(GEN_AI_ASSISTANT_NAME, name) + if description: + span.add_attribute(GEN_AI_ASSISTANT_DESCRIPTION, description) + self._add_instructions_event(span, instructions, None) + + return span + + def start_create_thread_span( + self, + project_name: str, + messages: Optional[List[ThreadMessage]] = None, + _tool_resources: Optional[ToolResources] = None, + ) -> "Optional[AbstractSpan]": + span = start_span(OperationName.CREATE_THREAD, project_name) + if span and span.span_instance.is_recording: + for message in messages or []: + self.add_thread_message_event(span, message) + + return span + + def start_list_messages_span(self, project_name: str, thread_id: Optional[str] = None) -> "Optional[AbstractSpan]": + return start_span(OperationName.LIST_MESSAGES, project_name, thread_id=thread_id) + + def trace_create_assistant(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + name = kwargs.get("name") + model = kwargs.get("model") + description = kwargs.get("description") + instructions = kwargs.get("instructions") + tools = kwargs.get("tools") + tool_resources = kwargs.get("tool_resources") + toolset = kwargs.get("toolset") + temperature = kwargs.get("temperature") + top_p = kwargs.get("top_p") + response_format = kwargs.get("response_format") + + span = self.start_create_assistant_span( + project_name=project_name, + name=name, + model=model, + description=description, + instructions=instructions, + _tools=tools, + _tool_resources=tool_resources, + _toolset=toolset, + temperature=temperature, + top_p=top_p, + response_format=response_format, + ) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + span.add_attribute(GEN_AI_ASSISTANT_ID, result.id) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_assistant_async(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + name = kwargs.get("name") + model = kwargs.get("model") + description = kwargs.get("description") + instructions = kwargs.get("instructions") + tools = kwargs.get("tools") + tool_resources = kwargs.get("tool_resources") + toolset = kwargs.get("toolset") + temperature = kwargs.get("temperature") + top_p = kwargs.get("top_p") + 
response_format = kwargs.get("response_format") + + span = self.start_create_assistant_span( + project_name=project_name, + name=name, + model=model, + description=description, + instructions=instructions, + _tools=tools, + _tool_resources=tool_resources, + _toolset=toolset, + temperature=temperature, + top_p=top_p, + response_format=response_format, + ) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + span.add_attribute(GEN_AI_ASSISTANT_ID, result.id) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_thread(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + messages = kwargs.get("messages") + + span = self.start_create_thread_span(project_name=project_name, messages=messages) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_thread_async(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + messages = kwargs.get("messages") + + span = self.start_create_thread_span(project_name=project_name, messages=messages) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_message(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + role = kwargs.get("role") + content = kwargs.get("content") + attachments = kwargs.get("attachments") + + span = self.start_create_message_span( + 
project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments + ) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_message_async(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + role = kwargs.get("role") + content = kwargs.get("content") + attachments = kwargs.get("attachments") + + span = self.start_create_message_span( + project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments + ) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_run(self, operation_name, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + + span = self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + temperature=temperature, + _tools=tools, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format, + ) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + self.set_end_run(span, result) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + 
span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_run_async(self, operation_name, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + + span = self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + temperature=temperature, + _tools=tools, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format, + ) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + if span.span_instance.is_recording: + span.add_attribute(GEN_AI_THREAD_RUN_STATUS, self._status_to_string(result.status)) + span.add_attribute(GEN_AI_RESPONSE_MODEL, result.model) + if result.usage: + span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, result.usage.prompt_tokens) + span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, result.usage.completion_tokens) + span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + run_id = kwargs.get("run_id") + tool_outputs = kwargs.get("tool_outputs") + event_handler = kwargs.get("event_handler") + + span = self.start_submit_tool_outputs_span( + project_name=project_name, + thread_id=thread_id, + run_id=run_id, + tool_outputs=tool_outputs, + event_handler=event_handler, + ) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + if stream and event_handler: + kwargs["event_handler"] = self.wrap_handler(event_handler, span) + + result = function(*args, **kwargs) + if not isinstance(result, AssistantRunStream): + self.set_end_run(span, result) + except Exception as exc: + # Set the span 
status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_submit_tool_outputs_async(self, stream, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + run_id = kwargs.get("run_id") + tool_outputs = kwargs.get("tool_outputs") + event_handler = kwargs.get("event_handler") + + span = self.start_submit_tool_outputs_span( + project_name=project_name, + thread_id=thread_id, + run_id=run_id, + tool_outputs=tool_outputs, + event_handler=event_handler, + ) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + if stream: + kwargs["event_handler"] = self.wrap_async_handler(event_handler, span) + + result = await function(*args, **kwargs) + if not isinstance(result, AsyncAssistantRunStream): + self.set_end_run(span, result) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_handle_submit_tool_outputs(self, function, *args, **kwargs): + event_handler = kwargs.get("event_handler") + if event_handler is None: + event_handler = args[2] + span = getattr(event_handler, "span", None) + + if span is None: + return function(*args, **kwargs) + + with span.change_context(span.span_instance): + try: + result = function(*args, **kwargs) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_handle_submit_tool_outputs_async(self, function, *args, **kwargs): + event_handler = kwargs.get("event_handler") + if event_handler is None: + event_handler = args[2] + span = getattr(event_handler, "span", None) + + if span is None: + return await function(*args, **kwargs) + + with span.change_context(span.span_instance): + try: + result = await function(*args, **kwargs) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module 
if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_stream(self, function, *args, **kwargs): + operation_name = OperationName.PROCESS_THREAD_RUN + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + event_handler = kwargs.get("event_handler") + + span = self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + temperature=temperature, + _tools=tools, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format, + ) + + if span is None: + return function(*args, **kwargs) + + with span.change_context(span.span_instance): + try: + kwargs["event_handler"] = self.wrap_handler(event_handler, span) + result = function(*args, **kwargs) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_stream_async(self, function, *args, **kwargs): + operation_name = OperationName.PROCESS_THREAD_RUN + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + event_handler = kwargs.get("event_handler") + + span = self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + temperature=temperature, + _tools=tools, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format, + ) + + if span is None: + return await function(*args, 
**kwargs) + + # TODO: how to keep span active in the current context without existing? + # TODO: dummy span for none + with span.change_context(span.span_instance): + try: + kwargs["event_handler"] = self.wrap_async_handler(event_handler, span) + result = await function(*args, **kwargs) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_list_messages(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + + span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + for message in result.data: + self.add_thread_message_event(span, message) + + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_list_messages_async(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + + span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + for message in result.data: + self.add_thread_message_event(span, message) + + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def handle_run_stream_exit(self, _function, *args, **kwargs): + assistant_run_stream = args[0] + exc_type = kwargs.get("exc_type") + exc_val = kwargs.get("exc_val") + exc_tb = kwargs.get("exc_tb") + # TODO: is it a good idea? 
+ # if not, we'll need to wrap stream and call exit + if ( + assistant_run_stream.event_handler + and assistant_run_stream.event_handler.__class__.__name__ == "_AssistantEventHandlerTraceWrapper" + ): + assistant_run_stream.event_handler.__exit__(exc_type, exc_val, exc_tb) + elif ( + assistant_run_stream.event_handler + and assistant_run_stream.event_handler.__class__.__name__ == "_AsyncAssistantEventHandlerTraceWrapper" + ): + assistant_run_stream.event_handler.__aexit__(exc_type, exc_val, exc_tb) + + def wrap_handler( + self, handler: "Optional[AssistantEventHandler]" = None, span: "Optional[AbstractSpan]" = None + ) -> "Optional[AssistantEventHandler]": + # Do not create a handler wrapper if we do not have handler in the first place. + if not handler: + return None + + if isinstance(handler, _AssistantEventHandlerTraceWrapper): + return handler + + if span and span.span_instance.is_recording: + return _AssistantEventHandlerTraceWrapper(self, span, handler) + + return handler + + def wrap_async_handler( + self, handler: "Optional[AsyncAssistantEventHandler]" = None, span: "Optional[AbstractSpan]" = None + ) -> "Optional[AsyncAssistantEventHandler]": + # Do not create a handler wrapper if we do not have handler in the first place. + if not handler: + return None + + if isinstance(handler, _AsyncAssistantEventHandlerTraceWrapper): + return handler + + if span and span.span_instance.is_recording: + return _AsyncAssistantEventHandlerTraceWrapper(self, span, handler) + + return handler + + def start_create_message_span( + self, + project_name: str, + thread_id: Optional[str] = None, + content: Optional[str] = None, + role: Optional[Union[str, MessageRole]] = None, + attachments: Optional[List[MessageAttachment]] = None, + ) -> "Optional[AbstractSpan]": + role_str = self._get_role(role) + span = start_span(OperationName.CREATE_MESSAGE, project_name, thread_id=thread_id) + if span and span.span_instance.is_recording: + self._add_message_event(span, role_str, content, attachments=attachments, thread_id=thread_id) + return span + + def _trace_sync_function( + self, + function: Callable, + *, + _args_to_ignore: Optional[List[str]] = None, + _trace_type=TraceType.ASSISTANTS, + _name: Optional[str] = None, + ) -> Callable: + """ + Decorator that adds tracing to a synchronous function. + + :param function: The function to be traced. + :type function: Callable + :param args_to_ignore: A list of argument names to be ignored in the trace. + Defaults to None. + :type: args_to_ignore: [List[str]], optional + :param trace_type: The type of the trace. Defaults to TraceType.ASSISTANTS. + :type trace_type: TraceType, optional + :param name: The name of the trace, will set to func name if not provided. + :type name: str, optional + :return: The traced function. 
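Both this synchronous wrapper and its asynchronous twin below route calls by matching `function.__qualname__` prefixes against known client methods. A reduced, hypothetical sketch of that dispatch pattern (`make_tracer` and the handler table are illustrative names, not part of this file):

```python
import functools

def make_tracer(dispatch):
    """dispatch maps a __qualname__ prefix to a handler(function, *args, **kwargs)."""
    def decorate(function):
        @functools.wraps(function)
        def inner(*args, **kwargs):
            for prefix, handler in dispatch.items():
                if function.__qualname__.startswith(prefix):
                    return handler(function, *args, **kwargs)
            return None  # unmatched methods are left untraced
        return inner
    return decorate
```

In the file itself the "table" is the chain of startswith checks, and only registered methods are ever wrapped, so the None fallback is not reachable in practice.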
+ :rtype: Callable + """ + + @functools.wraps(function) + def inner(*args, **kwargs): # pylint: disable=R0911 + span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 + if span_impl_type is None: + return function(*args, **kwargs) + + class_function_name = function.__qualname__ + + if class_function_name.startswith("AssistantsClient.create_assistant"): + kwargs.setdefault("merge_span", True) + return self.trace_create_assistant(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClientOperationsMixin.create_thread"): + kwargs.setdefault("merge_span", True) + return self.trace_create_thread(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClientOperationsMixin.create_message"): + kwargs.setdefault("merge_span", True) + return self.trace_create_message(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.create_run"): + kwargs.setdefault("merge_span", True) + return self.trace_create_run(OperationName.START_THREAD_RUN, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.create_and_process_run"): + kwargs.setdefault("merge_span", True) + return self.trace_create_run(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.submit_tool_outputs_to_run"): + kwargs.setdefault("merge_span", True) + return self.trace_submit_tool_outputs(False, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.submit_tool_outputs_to_stream"): + kwargs.setdefault("merge_span", True) + return self.trace_submit_tool_outputs(True, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient._handle_submit_tool_outputs"): + return self.trace_handle_submit_tool_outputs(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.create_stream"): + kwargs.setdefault("merge_span", True) + return self.trace_create_stream(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClientOperationsMixin.list_messages"): + kwargs.setdefault("merge_span", True) + return self.trace_list_messages(function, *args, **kwargs) + if class_function_name.startswith("AssistantRunStream.__exit__"): + return self.handle_run_stream_exit(function, *args, **kwargs) + # Handle the default case (if the function name does not match) + return None # Ensure all paths return + + return inner + + def _trace_async_function( + self, + function: Callable, + *, + _args_to_ignore: Optional[List[str]] = None, + _trace_type=TraceType.ASSISTANTS, + _name: Optional[str] = None, + ) -> Callable: + """ + Decorator that adds tracing to an asynchronous function. + + :param function: The function to be traced. + :type function: Callable + :param args_to_ignore: A list of argument names to be ignored in the trace. + Defaults to None. + :type: args_to_ignore: [List[str]], optional + :param trace_type: The type of the trace. Defaults to TraceType.ASSISTANTS. + :type trace_type: TraceType, optional + :param name: The name of the trace, will set to func name if not provided. + :type name: str, optional + :return: The traced function. 
+        :rtype: Callable
+        """
+
+        @functools.wraps(function)
+        async def inner(*args, **kwargs):  # pylint: disable=R0911
+            span_impl_type = settings.tracing_implementation()  # pylint: disable=E1102
+            if span_impl_type is None:
+                # Await the wrapped coroutine even when tracing is off, so the
+                # caller gets the result rather than an unawaited coroutine.
+                return await function(*args, **kwargs)
+
+            class_function_name = function.__qualname__
+
+            if class_function_name.startswith("AssistantsClient.create_assistant"):
+                kwargs.setdefault("merge_span", True)
+                return await self.trace_create_assistant_async(function, *args, **kwargs)
+            if class_function_name.startswith("AssistantsClientOperationsMixin.create_thread"):
+                kwargs.setdefault("merge_span", True)
+                return await self.trace_create_thread_async(function, *args, **kwargs)
+            if class_function_name.startswith("AssistantsClientOperationsMixin.create_message"):
+                kwargs.setdefault("merge_span", True)
+                return await self.trace_create_message_async(function, *args, **kwargs)
+            if class_function_name.startswith("AssistantsClient.create_run"):
+                kwargs.setdefault("merge_span", True)
+                return await self.trace_create_run_async(OperationName.START_THREAD_RUN, function, *args, **kwargs)
+            if class_function_name.startswith("AssistantsClient.create_and_process_run"):
+                kwargs.setdefault("merge_span", True)
+                return await self.trace_create_run_async(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs)
+            if class_function_name.startswith("AssistantsClient.submit_tool_outputs_to_run"):
+                kwargs.setdefault("merge_span", True)
+                return await self.trace_submit_tool_outputs_async(False, function, *args, **kwargs)
+            if class_function_name.startswith("AssistantsClient.submit_tool_outputs_to_stream"):
+                kwargs.setdefault("merge_span", True)
+                return await self.trace_submit_tool_outputs_async(True, function, *args, **kwargs)
+            if class_function_name.startswith("AssistantsClient._handle_submit_tool_outputs"):
+                return await self.trace_handle_submit_tool_outputs_async(function, *args, **kwargs)
+            if class_function_name.startswith("AssistantsClient.create_stream"):
+                kwargs.setdefault("merge_span", True)
+                return await self.trace_create_stream_async(function, *args, **kwargs)
+            if class_function_name.startswith("AssistantsClientOperationsMixin.list_messages"):
+                kwargs.setdefault("merge_span", True)
+                return await self.trace_list_messages_async(function, *args, **kwargs)
+            if class_function_name.startswith("AsyncAssistantRunStream.__aexit__"):
+                return self.handle_run_stream_exit(function, *args, **kwargs)
+            # Handle the default case (if the function name does not match)
+            return None  # Ensure all paths return
+
+        return inner
+
+    def _inject_async(self, f, _trace_type, _name):
+        wrapper_fun = self._trace_async_function(f)
+        wrapper_fun._original = f  # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess]
+        return wrapper_fun
+
+    def _inject_sync(self, f, _trace_type, _name):
+        wrapper_fun = self._trace_sync_function(f)
+        wrapper_fun._original = f  # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess]
+        return wrapper_fun
+
+    def _assistants_apis(self):
+        sync_apis = (
+            ("azure.ai.assistants", "AssistantsClient", "create_assistant", TraceType.ASSISTANTS, "assistant_create"),
+            ("azure.ai.assistants", "AssistantsClient", "create_thread", TraceType.ASSISTANTS, "thread_create"),
+            ("azure.ai.assistants", "AssistantsClient", "create_message", TraceType.ASSISTANTS, "message_create"),
+            ("azure.ai.assistants", "AssistantsClient", "create_run", TraceType.ASSISTANTS, "create_run"),
+            (
+                "azure.ai.assistants",
+                "AssistantsClient",
"create_and_process_run", + TraceType.ASSISTANTS, + "create_and_process_run", + ), + ( + "azure.ai.assistants", + "AssistantsClient", + "submit_tool_outputs_to_run", + TraceType.ASSISTANTS, + "submit_tool_outputs_to_run", + ), + ( + "azure.ai.assistants", + "AssistantsClient", + "submit_tool_outputs_to_stream", + TraceType.ASSISTANTS, + "submit_tool_outputs_to_stream", + ), + ( + "azure.ai.assistants", + "AssistantsClient", + "_handle_submit_tool_outputs", + TraceType.ASSISTANTS, + "_handle_submit_tool_outputs", + ), + ("azure.ai.assistants", "AssistantsClient", "create_stream", TraceType.ASSISTANTS, "create_stream"), + ("azure.ai.assistants", "AssistantsClient", "list_messages", TraceType.ASSISTANTS, "list_messages"), + ("azure.ai.assistants.models", "AssistantRunStream", "__exit__", TraceType.ASSISTANTS, "__exit__"), + ) + async_apis = ( + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_assistant", + TraceType.ASSISTANTS, + "assistant_create", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_thread", + TraceType.ASSISTANTS, + "assistants_thread_create", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_message", + TraceType.ASSISTANTS, + "assistants_thread_message", + ), + ("azure.ai.assistants.aio", "AssistantsClient", "create_run", TraceType.ASSISTANTS, "create_run"), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_and_process_run", + TraceType.ASSISTANTS, + "create_and_process_run", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "submit_tool_outputs_to_run", + TraceType.ASSISTANTS, + "submit_tool_outputs_to_run", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "submit_tool_outputs_to_stream", + TraceType.ASSISTANTS, + "submit_tool_outputs_to_stream", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "_handle_submit_tool_outputs", + TraceType.ASSISTANTS, + "_handle_submit_tool_outputs", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_stream", + TraceType.ASSISTANTS, + "create_stream", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "list_messages", + TraceType.ASSISTANTS, + "list_messages", + ), + ("azure.ai.assistants.models", "AsyncAssistantRunStream", "__aexit__", TraceType.ASSISTANTS, "__aexit__"), + ) + return sync_apis, async_apis + + def _assistants_api_list(self): + sync_apis, async_apis = self._assistants_apis() + yield sync_apis, self._inject_sync + yield async_apis, self._inject_async + + def _generate_api_and_injector(self, apis): + for api, injector in apis: + for module_name, class_name, method_name, trace_type, name in api: + try: + module = importlib.import_module(module_name) + api = getattr(module, class_name) + if hasattr(api, method_name): + yield api, method_name, trace_type, injector, name + except AttributeError as e: + # Log the attribute exception with the missing class information + logging.warning( + "AttributeError: The module '%s' does not have the class '%s'. %s", + module_name, + class_name, + str(e), + ) + except Exception as e: # pylint: disable=broad-except + # Log other exceptions as a warning, as we are not sure what they might be + logging.warning("An unexpected error occurred: '%s'", str(e)) + + def _available_assistants_apis_and_injectors(self): + """ + Generates a sequence of tuples containing Assistants API classes, method names, and + corresponding injector functions. + + :return: A generator yielding tuples. 
+ :rtype: tuple + """ + yield from self._generate_api_and_injector(self._assistants_api_list()) + + def _instrument_assistants(self, enable_content_tracing: bool = False): + """This function modifies the methods of the Assistants API classes to + inject logic before calling the original methods. + The original methods are stored as _original attributes of the methods. + + :param enable_content_tracing: Indicates whether tracing of message content should be enabled. + This also controls whether function call tool function names, + parameter names and parameter values are traced. + :type enable_content_tracing: bool + """ + # pylint: disable=W0603 + global _assistants_traces_enabled + global _trace_assistants_content + if _assistants_traces_enabled: + raise RuntimeError("Traces already started for AI Assistants") + _assistants_traces_enabled = True + _trace_assistants_content = enable_content_tracing + for ( + api, + method, + trace_type, + injector, + name, + ) in self._available_assistants_apis_and_injectors(): + # Check if the method of the api class has already been modified + if not hasattr(getattr(api, method), "_original"): + setattr(api, method, injector(getattr(api, method), trace_type, name)) + + def _uninstrument_assistants(self): + """This function restores the original methods of the Assistants API classes + by assigning them back from the _original attributes of the modified methods. + """ + # pylint: disable=W0603 + global _assistants_traces_enabled + global _trace_assistants_content + _trace_assistants_content = False + for api, method, _, _, _ in self._available_assistants_apis_and_injectors(): + if hasattr(getattr(api, method), "_original"): + setattr(api, method, getattr(getattr(api, method), "_original")) + _assistants_traces_enabled = False + + def _is_instrumented(self): + """This function returns True if Assistants API has already been instrumented + for tracing and False if it has not been instrumented. + + :return: A value indicating whether the Assistants API is currently instrumented or not. + :rtype: bool + """ + return _assistants_traces_enabled + + def _set_enable_content_recording(self, enable_content_recording: bool = False) -> None: + """This function sets the content recording value. + + :param enable_content_recording: Indicates whether tracing of message content should be enabled. + This also controls whether function call tool function names, + parameter names and parameter values are traced. + :type enable_content_recording: bool + """ + global _trace_assistants_content # pylint: disable=W0603 + _trace_assistants_content = enable_content_recording + + def _is_content_recording_enabled(self) -> bool: + """This function gets the content recording value. + + :return: A bool value indicating whether content tracing is enabled. 
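The instrument/uninstrument pair above rests on a small monkey-patching convention: each wrapper stores the method it replaced in an `_original` attribute, which doubles as the "already instrumented" marker. A stripped-down sketch, with a hypothetical `Client.create_thing` standing in for the real client methods:

```python
import functools

class Client:
    def create_thing(self):
        return "thing"

def inject(method):
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        # A real injector would start a span here and record errors.
        return method(*args, **kwargs)
    wrapper._original = method  # marker that prevents double-patching
    return wrapper

# Instrument: patch only if not already patched.
if not hasattr(Client.create_thing, "_original"):
    Client.create_thing = inject(Client.create_thing)

# Uninstrument: restore the saved original.
Client.create_thing = getattr(Client.create_thing, "_original", Client.create_thing)
```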
+ :rtype bool + """ + return _trace_assistants_content + + +class _AssistantEventHandlerTraceWrapper(AssistantEventHandler): + def __init__( + self, + instrumentor: _AIAssistantsInstrumentorPreview, + span: "AbstractSpan", + inner_handler: Optional[AssistantEventHandler] = None, + ): + super().__init__() + self.span = span + self.inner_handler = inner_handler + self.ended = False + self.last_run: Optional[ThreadRun] = None + self.last_message: Optional[ThreadMessage] = None + self.instrumentor = instrumentor + + def initialize( + self, + response_iterator, + submit_tool_outputs, + ) -> None: + self.submit_tool_outputs = submit_tool_outputs + if self.inner_handler: + self.inner_handler.initialize(response_iterator=response_iterator, submit_tool_outputs=submit_tool_outputs) + + def __next__(self) -> Any: + if self.inner_handler: + event_bytes = self.inner_handler.__next_impl__() + return self._process_event(event_bytes.decode("utf-8")) + return None + + # pylint: disable=R1710 + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_message_delta(delta) # type: ignore + + def on_thread_message(self, message: "ThreadMessage") -> None: # type: ignore[func-returns-value] + retval = None + if self.inner_handler: + retval = self.inner_handler.on_thread_message(message) # type: ignore + + if message.status in {"completed", "incomplete"}: + self.last_message = message + + return retval # type: ignore + + def on_thread_run(self, run: "ThreadRun") -> None: # type: ignore[func-returns-value] + retval = None + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + self.instrumentor._add_tool_event_from_thread_run( # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + self.span, run + ) + + if self.inner_handler: + retval = self.inner_handler.on_thread_run(run) # type: ignore + self.last_run = run + + return retval # type: ignore + + def on_run_step(self, step: "RunStep") -> None: # type: ignore[func-returns-value] + retval = None + if self.inner_handler: + retval = self.inner_handler.on_run_step(step) # type: ignore + + # todo - report errors for failure statuses here and in run ? 
+ if step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: + self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage) + self.last_message = None + + return retval # type: ignore + + def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_run_step_delta(delta) # type: ignore + + def on_error(self, data: str) -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_error(data) # type: ignore + + def on_done(self) -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_done() # type: ignore + # it could be called multiple tines (for each step) __exit__ + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_unhandled_event(event_type, event_data) # type: ignore + + # pylint: enable=R1710 + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.ended: + self.ended = True + self.instrumentor.set_end_run(self.span, self.last_run) + + if self.last_run and self.last_run.last_error: + self.span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + self.last_run.last_error.message, + ) + self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) + + self.span.__exit__(exc_type, exc_val, exc_tb) + self.span.finish() + + +class _AsyncAssistantEventHandlerTraceWrapper(AsyncAssistantEventHandler): + def __init__( + self, + instrumentor: _AIAssistantsInstrumentorPreview, + span: "AbstractSpan", + inner_handler: Optional[AsyncAssistantEventHandler] = None, + ): + super().__init__() + self.span = span + self.inner_handler = inner_handler + self.ended = False + self.last_run: Optional[ThreadRun] = None + self.last_message: Optional[ThreadMessage] = None + self.instrumentor = instrumentor + + def initialize( + self, + response_iterator, + submit_tool_outputs, + ) -> None: + self.submit_tool_outputs = submit_tool_outputs + if self.inner_handler: + self.inner_handler.initialize(response_iterator=response_iterator, submit_tool_outputs=submit_tool_outputs) + + # cspell:disable-next-line + async def __anext__(self) -> Any: + if self.inner_handler: + # cspell:disable-next-line + event_bytes = await self.inner_handler.__anext_impl__() + return await self._process_event(event_bytes.decode("utf-8")) + + # pylint: disable=R1710 + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return await self.inner_handler.on_message_delta(delta) # type: ignore + + async def on_thread_message(self, message: "ThreadMessage") -> None: # type: ignore[func-returns-value] + retval = None + if self.inner_handler: + retval = await self.inner_handler.on_thread_message(message) # type: ignore + + if message.status in {"completed", "incomplete"}: + self.last_message = message + + return retval # type: ignore + + async def on_thread_run(self, run: "ThreadRun") -> None: # type: ignore[func-returns-value] + retval = None + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + self.instrumentor._add_tool_event_from_thread_run( # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + self.span, run + ) + + if self.inner_handler: + retval = await 
+        self.last_run = run
+
+        return retval  # type: ignore
+
+    async def on_run_step(self, step: "RunStep") -> None:  # type: ignore[func-returns-value]
+        retval = None
+        if self.inner_handler:
+            retval = await self.inner_handler.on_run_step(step)  # type: ignore
+
+        # TODO: report errors for failure statuses here and in the run?
+        if step.type == "message_creation" and step.status == RunStepStatus.COMPLETED:
+            self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage)
+            self.last_message = None
+
+        return retval  # type: ignore
+
+    async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None:  # type: ignore[func-returns-value]
+        if self.inner_handler:
+            return await self.inner_handler.on_run_step_delta(delta)  # type: ignore
+
+    async def on_error(self, data: str) -> None:  # type: ignore[func-returns-value]
+        if self.inner_handler:
+            return await self.inner_handler.on_error(data)  # type: ignore
+
+    async def on_done(self) -> None:  # type: ignore[func-returns-value]
+        if self.inner_handler:
+            return await self.inner_handler.on_done()  # type: ignore
+        # note: this may be called multiple times (once per step); the span is closed in __aexit__
+
+    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:  # type: ignore[func-returns-value]
+        if self.inner_handler:
+            return await self.inner_handler.on_unhandled_event(event_type, event_data)  # type: ignore
+
+    # pylint: enable=R1710
+
+    def __aexit__(self, exc_type, exc_val, exc_tb):
+        if not self.ended:
+            self.ended = True
+            self.instrumentor.set_end_run(self.span, self.last_run)
+
+            if self.last_run and self.last_run.last_error:
+                self.span.span_instance.set_status(
+                    StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
+                    self.last_run.last_error.message,
+                )
+                self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code)
+
+            self.span.__exit__(exc_type, exc_val, exc_tb)
+            self.span.finish()
diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py
new file mode 100644
index 000000000000..0ac5ea43c13f
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py
@@ -0,0 +1,204 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import functools
+import asyncio  # pylint: disable = do-not-import-asyncio
+from typing import Any, Callable, Optional, Dict
+
+try:
+    # pylint: disable = no-name-in-module
+    from opentelemetry import trace as opentelemetry_trace
+
+    tracer = opentelemetry_trace.get_tracer(__name__)
+    _tracing_library_available = True
+except ModuleNotFoundError:
+    _tracing_library_available = False
+
+if _tracing_library_available:
+
+    def trace_function(span_name: Optional[str] = None):
+        """
+        A decorator for tracing function calls using OpenTelemetry.
+
+        This decorator handles various data types for function parameters and return values,
+        and records them as attributes in the trace span. The supported data types include:
+        - Basic data types: str, int, float, bool
+        - Collections: list, dict, tuple, set
+
+        Special handling for collections:
+        - If a collection (list, dict, tuple, set) contains nested collections, the entire collection
+          is converted to a string before being recorded as an attribute.
+ - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes. + + Object types are omitted, and the corresponding parameter is not traced. + + :param span_name: The name of the span. If not provided, the function name is used. + :type span_name: Optional[str] + :return: The decorated function with tracing enabled. + :rtype: Callable + """ + + def decorator(func: Callable) -> Callable: + @functools.wraps(func) + async def async_wrapper(*args: Any, **kwargs: Any) -> Any: + """ + Wrapper function for asynchronous functions. + + :param args: Positional arguments passed to the function. + :type args: Tuple[Any] + :return: The result of the decorated asynchronous function. + :rtype: Any + """ + name = span_name if span_name else func.__name__ + with tracer.start_as_current_span(name) as span: + try: + # Sanitize parameters and set them as attributes + sanitized_params = sanitize_parameters(func, *args, **kwargs) + span.set_attributes(sanitized_params) + result = await func(*args, **kwargs) + sanitized_result = sanitize_for_attributes(result) + if sanitized_result is not None: + if isinstance(sanitized_result, (list, dict, tuple, set)): + if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): + sanitized_result = str(sanitized_result) + span.set_attribute("code.function.return.value", sanitized_result) # type: ignore + return result + except Exception as e: + span.record_exception(e) + span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore + raise + + @functools.wraps(func) + def sync_wrapper(*args: Any, **kwargs: Any) -> Any: + """ + Wrapper function for synchronous functions. + + :param args: Positional arguments passed to the function. + :type args: Tuple[Any] + :return: The result of the decorated synchronous function. + :rtype: Any + """ + name = span_name if span_name else func.__name__ + with tracer.start_as_current_span(name) as span: + try: + # Sanitize parameters and set them as attributes + sanitized_params = sanitize_parameters(func, *args, **kwargs) + span.set_attributes(sanitized_params) + result = func(*args, **kwargs) + sanitized_result = sanitize_for_attributes(result) + if sanitized_result is not None: + if isinstance(sanitized_result, (list, dict, tuple, set)): + if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): + sanitized_result = str(sanitized_result) + span.set_attribute("code.function.return.value", sanitized_result) # type: ignore + return result + except Exception as e: + span.record_exception(e) + span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore + raise + + # Determine if the function is async + if asyncio.iscoroutinefunction(func): + return async_wrapper + return sync_wrapper + + return decorator + +else: + # Define a no-op decorator if OpenTelemetry is not available + def trace_function(span_name: Optional[str] = None): # pylint: disable=unused-argument + """ + A no-op decorator for tracing function calls when OpenTelemetry is not available. + + :param span_name: Not used in this version. + :type span_name: Optional[str] + :return: The original function. + :rtype: Callable + """ + + def decorator(func: Callable) -> Callable: + return func + + return decorator + + +def sanitize_parameters(func, *args, **kwargs) -> Dict[str, Any]: + """ + Sanitize function parameters to include only built-in data types. + + :param func: The function being decorated. + :type func: Callable + :param args: Positional arguments passed to the function. 
+ :type args: Tuple[Any] + :return: A dictionary of sanitized parameters. + :rtype: Dict[str, Any] + """ + import inspect + + params = inspect.signature(func).parameters + sanitized_params = {} + + for i, (name, param) in enumerate(params.items()): + if param.default == inspect.Parameter.empty and i < len(args): + value = args[i] + else: + value = kwargs.get(name, param.default) + + sanitized_value = sanitize_for_attributes(value) + # Check if the collection has nested collections + if isinstance(sanitized_value, (list, dict, tuple, set)): + if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_value): + sanitized_value = str(sanitized_value) + if sanitized_value is not None: + sanitized_params["code.function.parameter." + name] = sanitized_value + + return sanitized_params + + +# pylint: disable=R0911 +def sanitize_for_attributes(value: Any, is_recursive: bool = False) -> Any: + """ + Sanitize a value to be used as an attribute. + + :param value: The value to sanitize. + :type value: Any + :param is_recursive: Indicates if the function is being called recursively. Default is False. + :type is_recursive: bool + :return: The sanitized value or None if the value is not a supported type. + :rtype: Any + """ + if isinstance(value, (str, int, float, bool)): + return value + if isinstance(value, list): + return [ + sanitize_for_attributes(item, True) + for item in value + if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) + ] + if isinstance(value, dict): + retval = { + k: sanitize_for_attributes(v, True) + for k, v in value.items() + if isinstance(v, (str, int, float, bool, list, dict, tuple, set)) + } + # dict to compatible with span attribute, so return it as a string + if is_recursive: + return retval + return str(retval) + if isinstance(value, tuple): + return tuple( + sanitize_for_attributes(item, True) + for item in value + if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) + ) + if isinstance(value, set): + retval_set = { + sanitize_for_attributes(item, True) + for item in value + if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) + } + if is_recursive: + return retval_set + return str(retval_set) + return None diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py new file mode 100644 index 000000000000..424771f27914 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py @@ -0,0 +1,293 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +from typing import Any, Optional, TextIO, Union, cast + +import io +import logging +import sys + +from enum import Enum + +from azure.core.tracing import AbstractSpan, SpanKind # type: ignore +from azure.core.settings import settings # type: ignore + +try: + from opentelemetry.trace import StatusCode, Span # noqa: F401 # pylint: disable=unused-import + + _span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable +except ModuleNotFoundError: + _span_impl_type = None + +logger = logging.getLogger(__name__) + + +GEN_AI_MESSAGE_ID = "gen_ai.message.id" +GEN_AI_MESSAGE_STATUS = "gen_ai.message.status" +GEN_AI_THREAD_ID = "gen_ai.thread.id" +GEN_AI_THREAD_RUN_ID = "gen_ai.thread.run.id" +GEN_AI_ASSISTANT_ID = "gen_ai.assistant.id" +GEN_AI_ASSISTANT_NAME = "gen_ai.assistant.name" +GEN_AI_ASSISTANT_DESCRIPTION = "gen_ai.assistant.description" +GEN_AI_OPERATION_NAME = "gen_ai.operation.name" +GEN_AI_THREAD_RUN_STATUS = "gen_ai.thread.run.status" +GEN_AI_REQUEST_MODEL = "gen_ai.request.model" +GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature" +GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p" +GEN_AI_REQUEST_MAX_INPUT_TOKENS = "gen_ai.request.max_input_tokens" +GEN_AI_REQUEST_MAX_OUTPUT_TOKENS = "gen_ai.request.max_output_tokens" +GEN_AI_RESPONSE_MODEL = "gen_ai.response.model" +GEN_AI_SYSTEM = "gen_ai.system" +SERVER_ADDRESS = "server.address" +AZ_AI_ASSISTANT_SYSTEM = "az.ai.assistants" +GEN_AI_TOOL_NAME = "gen_ai.tool.name" +GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id" +GEN_AI_REQUEST_RESPONSE_FORMAT = "gen_ai.request.response_format" +GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens" +GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens" +GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message" +GEN_AI_EVENT_CONTENT = "gen_ai.event.content" +ERROR_TYPE = "error.type" + + +class OperationName(Enum): + CREATE_ASSISTANT = "create_assistant" + CREATE_THREAD = "create_thread" + CREATE_MESSAGE = "create_message" + START_THREAD_RUN = "start_thread_run" + EXECUTE_TOOL = "execute_tool" + LIST_MESSAGES = "list_messages" + SUBMIT_TOOL_OUTPUTS = "submit_tool_outputs" + PROCESS_THREAD_RUN = "process_thread_run" + + +def trace_tool_execution( + tool_call_id: str, + tool_name: str, + thread_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow + assistant_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow + run_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow +) -> "Optional[AbstractSpan]": + span = start_span( + OperationName.EXECUTE_TOOL, + server_address=None, + span_name=f"execute_tool {tool_name}", + thread_id=thread_id, + assistant_id=assistant_id, + run_id=run_id, + gen_ai_system=None, + ) # it's a client code execution, not GenAI span + if span is not None and span.span_instance.is_recording: + span.add_attribute(GEN_AI_TOOL_CALL_ID, tool_call_id) + span.add_attribute(GEN_AI_TOOL_NAME, tool_name) + + return span + + +def start_span( + operation_name: OperationName, + server_address: Optional[str], + span_name: Optional[str] = None, + thread_id: Optional[str] = None, + assistant_id: Optional[str] = None, + run_id: Optional[str] = None, + model: Optional[str] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + response_format: Optional[str] = None, + gen_ai_system: Optional[str] = 
AZ_AI_ASSISTANT_SYSTEM, + kind: SpanKind = SpanKind.CLIENT, +) -> "Optional[AbstractSpan]": + if _span_impl_type is None: + return None + + span = _span_impl_type(name=span_name or operation_name.value, kind=kind) + + if span and span.span_instance.is_recording: + if gen_ai_system: + span.add_attribute(GEN_AI_SYSTEM, AZ_AI_ASSISTANT_SYSTEM) + + span.add_attribute(GEN_AI_OPERATION_NAME, operation_name.value) + + if server_address: + span.add_attribute(SERVER_ADDRESS, server_address) + + if thread_id: + span.add_attribute(GEN_AI_THREAD_ID, thread_id) + + if assistant_id: + span.add_attribute(GEN_AI_ASSISTANT_ID, assistant_id) + + if run_id: + span.add_attribute(GEN_AI_THREAD_RUN_ID, run_id) + + if model: + span.add_attribute(GEN_AI_REQUEST_MODEL, model) + + if temperature: + span.add_attribute(GEN_AI_REQUEST_TEMPERATURE, str(temperature)) + + if top_p: + span.add_attribute(GEN_AI_REQUEST_TOP_P, str(top_p)) + + if max_prompt_tokens: + span.add_attribute(GEN_AI_REQUEST_MAX_INPUT_TOKENS, max_prompt_tokens) + + if max_completion_tokens: + span.add_attribute(GEN_AI_REQUEST_MAX_OUTPUT_TOKENS, max_completion_tokens) + + if response_format: + span.add_attribute(GEN_AI_REQUEST_RESPONSE_FORMAT, response_format) + + return span + + +# Internal helper functions to enable OpenTelemetry, used by both sync and async clients +def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: + if isinstance(destination, str): + # `destination` is the OTLP endpoint + # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage + try: + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter # type: ignore + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "OpenTelemetry OTLP exporter is not installed. " + + "Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" + ) from e + return OTLPSpanExporter(endpoint=destination) + + if isinstance(destination, io.TextIOWrapper): + if destination is sys.stdout: + # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long + try: + from opentelemetry.sdk.trace.export import ConsoleSpanExporter + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" + ) from e + + return ConsoleSpanExporter() + raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") + + return None + + +def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: + if isinstance(destination, str): + # `destination` is the OTLP endpoint + # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage + try: + # _logs are considered beta (not internal) in OpenTelemetry Python API/SDK. + # So it's ok to use it for local development, but we'll swallow + # any errors in case of any breaking changes on OTel side. + from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter # type: ignore # pylint: disable=import-error,no-name-in-module + except Exception as ex: # pylint: disable=broad-exception-caught + # since OTel logging is still in beta in Python, we're going to swallow any errors + # and just warn about them. 
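+            # Returning None here simply disables log export; trace export,
+            # which is configured separately, is unaffected.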
+ logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) + return None + + return OTLPLogExporter(endpoint=destination) + + if isinstance(destination, io.TextIOWrapper): + if destination is sys.stdout: + # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long + try: + from opentelemetry.sdk._logs.export import ConsoleLogExporter + + return ConsoleLogExporter() + except ModuleNotFoundError as ex: + # since OTel logging is still in beta in Python, we're going to swallow any errors + # and just warn about them. + logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) + return None + raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") + + return None + + +def _configure_tracing(span_exporter: Any) -> None: + if span_exporter is None: + return + + try: + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import SimpleSpanProcessor + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" + ) from e + + # if tracing was not setup before, we need to create a new TracerProvider + if not isinstance(trace.get_tracer_provider(), TracerProvider): + # If the provider is NoOpTracerProvider, we need to create a new TracerProvider + provider = TracerProvider() + trace.set_tracer_provider(provider) + + # get_tracer_provider returns opentelemetry.trace.TracerProvider + # however, we have opentelemetry.sdk.trace.TracerProvider, which implements + # add_span_processor method, though we need to cast it to fix type checking. + provider = cast(TracerProvider, trace.get_tracer_provider()) + provider.add_span_processor(SimpleSpanProcessor(span_exporter)) + + +def _configure_logging(log_exporter: Any) -> None: + if log_exporter is None: + return + + try: + # _events and _logs are considered beta (not internal) in + # OpenTelemetry Python API/SDK. + # So it's ok to use them for local development, but we'll swallow + # any errors in case of any breaking changes on OTel side. + from opentelemetry import _logs, _events + from opentelemetry.sdk._logs import LoggerProvider # pylint: disable=import-error,no-name-in-module + from opentelemetry.sdk._events import EventLoggerProvider # pylint: disable=import-error,no-name-in-module + from opentelemetry.sdk._logs.export import ( + SimpleLogRecordProcessor, + ) # pylint: disable=import-error,no-name-in-module + + if not isinstance(_logs.get_logger_provider(), LoggerProvider): + logger_provider = LoggerProvider() + _logs.set_logger_provider(logger_provider) + + # get_logger_provider returns opentelemetry._logs.LoggerProvider + # however, we have opentelemetry.sdk._logs.LoggerProvider, which implements + # add_log_record_processor method, though we need to cast it to fix type checking. + logger_provider = cast(LoggerProvider, _logs.get_logger_provider()) + logger_provider.add_log_record_processor(SimpleLogRecordProcessor(log_exporter)) + _events.set_event_logger_provider(EventLoggerProvider(logger_provider)) + except Exception as ex: # pylint: disable=broad-exception-caught + # since OTel logging is still in beta in Python, we're going to swallow any errors + # and just warn about them. 
+ logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) + + +def enable_telemetry(destination: Union[TextIO, str, None] = None, **kwargs) -> None: # pylint: disable=unused-argument + """Enable tracing and logging to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. + + :param destination: `sys.stdout` to print telemetry to console or a string holding the + OpenTelemetry protocol (OTLP) endpoint. + If not provided, this method enables instrumentation, but does not configure OpenTelemetry + SDK to export traces and logs. + :type destination: Union[TextIO, str, None] + """ + span_exporter = _get_trace_exporter(destination) + _configure_tracing(span_exporter) + + log_exporter = _get_log_exporter(destination) + _configure_logging(log_exporter) + + try: + from azure.ai.assistants.telemetry import AIAssistantsInstrumentor + + assistants_instrumentor = AIAssistantsInstrumentor() + if not assistants_instrumentor.is_instrumented(): + assistants_instrumentor.instrument() + except Exception as exc: # pylint: disable=broad-exception-caught + logger.warning("Could not call `AIAssistantsInstrumentor().instrument()`", exc_info=exc) diff --git a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env new file mode 100644 index 000000000000..3c74e991b06b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env @@ -0,0 +1,14 @@ +# +# Environment variables required for running tests. +# +# All values should be empty by default. Fill them in locally before running live tests on your dev box, +# but do not commit these changes to the repository. +# + +######################################################################################################################## +# Agents tests +# +AZURE_AI_ASSISTANTS_TESTS_PROJECT_ENDPOINT= +AZURE_AI_ASSISTANTS_TESTS_DATA_PATH= +AZURE_AI_ASSISTANTS_TESTS_STORAGE_QUEUE= +AZURE_AI_ASSISTANTS_TESTS_SEARCH_INDEX_NAME= diff --git a/sdk/ai/azure-ai-assistants/dev_requirements.txt b/sdk/ai/azure-ai-assistants/dev_requirements.txt new file mode 100644 index 000000000000..b5272c25b382 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/dev_requirements.txt @@ -0,0 +1,8 @@ +-e ../../../tools/azure-sdk-tools +../../core/azure-core +../../identity/azure-identity +../../core/azure-core-tracing-opentelemetry +aiohttp +opentelemetry-sdk +opentelemetry-exporter-otlp-proto-grpc +azure-ai-ml diff --git a/sdk/ai/azure-ai-assistants/pyproject.toml b/sdk/ai/azure-ai-assistants/pyproject.toml new file mode 100644 index 000000000000..9bbdfd71420c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/pyproject.toml @@ -0,0 +1,36 @@ +[tool.mypy] +python_version = "3.10" +exclude = [ + "downloaded", + # In run_mypy.py python version is hardcoded to 3.8. It does not allow + # obligatory named parameters as fun(a, *, b=1, c=2). + "sample_assistants_vector_store_batch_enterprise_file_search_async\\.py", + # Error in typing caused by the typespec. 
+ "sample_assistants_with_file_search_attachment\\.py", + "sample_assistants_with_code_interpreter_file_attachment\\.py", + "sample_assistants_code_interpreter_attachment_enterprise_search\\.py", + "sample_assistants_with_file_search_attachment_async\\.py", + "sample_assistants_code_interpreter_attachment_enterprise_search_async\\.py", + "sample_assistants_code_interpreter_attachment_enterprise_search_async\\.py", + "sample_assistants_code_interpreter_attachment_async\\.py", +] +warn_unused_configs = true +ignore_missing_imports = true +follow_imports_for_stubs = false + +[tool.isort] +profile = "black" +line_length = 120 +known_first_party = ["azure"] +filter_files=true +extend_skip_glob = [ + "*/_vendor/*", + "*/_generated/*", + "*/_restclient/*", + "*/doc/*", + "*/.tox/*", +] + +[tool.azure-sdk-build] +whl_no_aio= false + diff --git a/sdk/ai/azure-ai-assistants/pyrightconfig.json b/sdk/ai/azure-ai-assistants/pyrightconfig.json new file mode 100644 index 000000000000..3af0746bdada --- /dev/null +++ b/sdk/ai/azure-ai-assistants/pyrightconfig.json @@ -0,0 +1,21 @@ +{ + "reportTypeCommentUsage": true, + "reportMissingImports": false, + "pythonVersion": "3.11", + "exclude": [ + "**/downloaded", + "**/sample_assistants_vector_store_batch_enterprise_file_search_async.py", + "**/sample_assistants_with_file_search_attachment.py", + "**/sample_assistants_with_code_interpreter_file_attachment.py", + "**/sample_assistants_code_interpreter_attachment_enterprise_search.py", + "**/sample_assistants_with_file_search_attachment_async.py", + "**/sample_assistants_code_interpreter_attachment_enterprise_search_async.py", + "**/sample_assistants_code_interpreter_attachment_enterprise_search_async.py", + "**/sample_assistants_code_interpreter_attachment_async.py" + ], + "extraPaths": [ + "./../../core/azure-core", + "./../../identity/azure-identity", + "./../../monitor/azure-monitor-opentelemetry" + ] +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/Operating_Profit_Transportation_Sector.png b/sdk/ai/azure-ai-assistants/samples/async_samples/Operating_Profit_Transportation_Sector.png new file mode 100644 index 000000000000..a5ca50cfd76f Binary files /dev/null and b/sdk/ai/azure-ai-assistants/samples/async_samples/Operating_Profit_Transportation_Sector.png differ diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/assistant-WTHWm9BcJfvM6YNr9AY1MJ_image_file.png b/sdk/ai/azure-ai-assistants/samples/async_samples/assistant-WTHWm9BcJfvM6YNr9AY1MJ_image_file.png new file mode 100644 index 000000000000..78fb3ddf9828 Binary files /dev/null and b/sdk/ai/azure-ai-assistants/samples/async_samples/assistant-WTHWm9BcJfvM6YNr9AY1MJ_image_file.png differ diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py new file mode 100644 index 000000000000..44045e154c0f --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py @@ -0,0 +1,110 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import asyncio + +""" +DESCRIPTION: + This sample demonstrates how to use azure function assistant operations from + the Azure Assistants service using a asynchronous client. 
+
+USAGE:
+    python sample_assistants_azure_functions_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    MODEL_DEPLOYMENT_NAME - the deployment name of the AI model, as found under the "Name" column in
+    the "Models + endpoints" tab in your Azure AI Foundry project.
+    STORAGE_SERVICE_ENDPOINT - the storage service queue endpoint that triggers the Azure Function.
+    Please see the Getting Started with Azure Functions page for more information on Azure Functions:
+    https://learn.microsoft.com/azure/azure-functions/functions-get-started
+"""
+
+import os
+from azure.ai.assistants.aio import AssistantsClient
+from azure.identity.aio import DefaultAzureCredential
+from azure.ai.assistants.models import (
+    AzureFunctionStorageQueue,
+    AzureFunctionTool,
+    MessageRole,
+)
+
+
+async def main():
+
+    async with DefaultAzureCredential(
+        exclude_managed_identity_credential=True, exclude_environment_credential=True
+    ) as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        ) as assistants_client:
+
+            storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPOINT"]
+            azure_function_tool = AzureFunctionTool(
+                name="foo",
+                description="Get answers from the foo bot.",
+                parameters={
+                    "type": "object",
+                    "properties": {
+                        "query": {"type": "string", "description": "The question to ask."},
+                        "outputqueueuri": {"type": "string", "description": "The full output queue uri."},
+                    },
+                },
+                input_queue=AzureFunctionStorageQueue(
+                    queue_name="azure-function-foo-input",
+                    storage_service_endpoint=storage_service_endpoint,
+                ),
+                output_queue=AzureFunctionStorageQueue(
+                    queue_name="azure-function-tool-output",
+                    storage_service_endpoint=storage_service_endpoint,
+                ),
+            )
+
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="azure-function-assistant-foo",
+                instructions=f"You are a helpful support assistant. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. Always respond with \"Foo says\" and then the response from the tool.",
+                tools=azure_function_tool.definitions,
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            # Create a thread
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            # Create a message
+            message = await assistants_client.create_message(
+                thread_id=thread.id,
+                role="user",
+                content="What is the most prevalent element in the universe? What would foo say?",
+            )
+            print(f"Created message, message ID: {message.id}")
+
+            run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+            if run.status == "failed":
+                print(f"Run failed: {run.last_error}")
+
+            # Get messages from the thread
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+            print(f"Messages: {messages}")
+
+            # Get the last message from the assistant
+            last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT)
+            if last_msg:
+                print(f"Last Message: {last_msg.text.value}")
+
+            # Delete the assistant once done
+            result = await assistants_client.delete_assistant(assistant.id)
+            if result.deleted:
+                print(f"Deleted assistant {result.id}")
+            else:
+                print(f"Failed to delete assistant {result.id}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py
new file mode 100644
index 000000000000..77a0803cb416
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py
@@ -0,0 +1,79 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use basic assistant operations from
+    the Azure Assistants service using an asynchronous client.
+
+USAGE:
+    python sample_assistants_basics_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    MODEL_DEPLOYMENT_NAME - the deployment name of the AI model, as found under the "Name" column in
+    the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+import time
+
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import (
+    MessageTextContent,
+    ListSortOrder,
+)
+from azure.identity.aio import DefaultAzureCredential
+
+import os
+
+
+async def main() -> None:
+
+    async with DefaultAzureCredential() as creds:
+        assistant_client = AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        )
+
+        async with assistant_client:
+            assistant = await assistant_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant"
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistant_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            message = await assistant_client.create_message(
+                thread_id=thread.id, role="user", content="Hello, tell me a joke"
+            )
+            print(f"Created message, message ID: {message.id}")
+
+            run = await assistant_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+
+            # Poll the run as long as the run status is queued or in progress
+            while run.status in ["queued", "in_progress", "requires_action"]:
+                # Wait for a second
+                time.sleep(1)
+                run = await assistant_client.get_run(thread_id=thread.id, run_id=run.id)
+
+                print(f"Run status: {run.status}")
+
+            print(f"Run completed with status: {run.status}")
+
+            await assistant_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistant_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING)
+            for data_point in messages.data:
+                last_message_content = data_point.content[-1]
+                if isinstance(last_message_content, MessageTextContent):
+                    print(f"{data_point.role}: {last_message_content.text.value}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py
new file mode 100644
index 000000000000..2ce29e6db236
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py
@@ -0,0 +1,90 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use basic assistant operations from
+    the Azure Assistants service using an asynchronous client with Azure Monitor tracing.
+    View the results in the "Tracing" tab in your Azure AI Foundry project page.
+
+USAGE:
+    python sample_assistants_basics_async_with_azure_monitor_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry aiohttp
+
+    Set these environment variables with your own values:
+    * PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    * MODEL_DEPLOYMENT_NAME - the deployment name of the AI model, as found under the "Name" column in
+      the "Models + endpoints" tab in your Azure AI Foundry project.
+    * AI_APPINSIGHTS_CONNECTION_STRING - the Application Insights connection string used to
+      configure Azure Monitor.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+      messages, which may contain personal data. False by default.
+"""
+import asyncio
+import time
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import ListSortOrder
+from azure.ai.assistants.telemetry import enable_telemetry
+from azure.identity.aio import DefaultAzureCredential
+from opentelemetry import trace
+import os
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+scenario = os.path.basename(__file__)
+tracer = trace.get_tracer(__name__)
+
+
+async def main() -> None:
+
+    async with DefaultAzureCredential() as creds:
+        assistants_client = AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        )
+
+        # Enable Azure Monitor tracing
+        application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"]
+        configure_azure_monitor(connection_string=application_insights_connection_string)
+
+        # Enable additional instrumentations
+        enable_telemetry()
+
+        with tracer.start_as_current_span(scenario):
+            async with assistants_client:
+                assistant = await assistants_client.create_assistant(
+                    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                    name="my-assistant",
+                    instructions="You are a helpful assistant",
+                )
+                print(f"Created assistant, assistant ID: {assistant.id}")
+
+                thread = await assistants_client.create_thread()
+                print(f"Created thread, thread ID: {thread.id}")
+
+                message = await assistants_client.create_message(
+                    thread_id=thread.id, role="user", content="Hello, tell me a joke"
+                )
+                print(f"Created message, message ID: {message.id}")
+
+                run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+
+                # Poll the run as long as the run status is queued or in progress
+                while run.status in ["queued", "in_progress", "requires_action"]:
+                    # Wait for a second
+                    time.sleep(1)
+                    run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id)
+
+                    print(f"Run status: {run.status}")
+
+                print(f"Run completed with status: {run.status}")
+
+                await assistants_client.delete_assistant(assistant.id)
+                print("Deleted assistant")
+
+                messages = await assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING)
+                print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py
new file mode 100644
index 000000000000..605391bc6ad7
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py
@@ -0,0 +1,87 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use basic assistant operations from
+    the Azure Assistants service using an asynchronous client with tracing to console.
+
+USAGE:
+    python sample_assistants_basics_async_with_console_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry aiohttp
+
+    If you want to export telemetry to an OTLP endpoint (such as the Aspire dashboard,
+    https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash),
+    install:
+
+    pip install opentelemetry-exporter-otlp-proto-grpc
+
+    Set these environment variables with your own values:
+    * PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    * MODEL_DEPLOYMENT_NAME - the deployment name of the AI model, as found under the "Name" column in
+      the "Models + endpoints" tab in your Azure AI Foundry project.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+      messages, which may contain personal data. False by default.
+"""
+import asyncio
+import time
+import sys
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import ListSortOrder
+from azure.ai.assistants.telemetry import enable_telemetry
+from azure.identity.aio import DefaultAzureCredential
+from opentelemetry import trace
+import os
+
+
+tracer = trace.get_tracer(__name__)
+
+
+@tracer.start_as_current_span(__file__)
+async def main() -> None:
+
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistant_client:
+
+            # Enable console tracing
+            # or, if you have a local OTLP endpoint running, change it to
+            # enable_telemetry(destination="http://localhost:4317")
+            enable_telemetry(destination=sys.stdout)
+
+            assistant = await assistant_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant"
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistant_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            message = await assistant_client.create_message(
+                thread_id=thread.id, role="user", content="Hello, tell me a joke"
+            )
+            print(f"Created message, message ID: {message.id}")
+
+            run = await assistant_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+
+            # Poll the run as long as the run status is queued or in progress
+            while run.status in ["queued", "in_progress", "requires_action"]:
+                # Wait for a second
+                time.sleep(1)
+                run = await assistant_client.get_run(thread_id=thread.id, run_id=run.id)
+
+                print(f"Run status: {run.status}")
+
+            print(f"Run completed with status: {run.status}")
+
+            await assistant_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistant_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING)
+            print(f"Messages: {messages}")
{messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py new file mode 100644 index 000000000000..4bab1ba8aa61 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py @@ -0,0 +1,100 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use code interpreter tool with assistant from + the Azure Assistants service using a asynchronous client. + +USAGE: + python sample_assistants_code_interpreter_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. +""" +import asyncio + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import CodeInterpreterTool, FilePurpose, ListSortOrder, MessageRole +from azure.identity.aio import DefaultAzureCredential +from pathlib import Path + +import os + + +async def main() -> None: + + async with DefaultAzureCredential() as creds: + + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: + # Upload a file and wait for it to be processed + file = await assistants_client.upload_file_and_poll( + file_path="../nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS + ) + print(f"Uploaded file, file ID: {file.id}") + + code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + messages = await assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"Messages: {messages}") + + last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for image_content in messages.image_contents: + print(f"Image File ID: {image_content.image_file.file_id}") + file_name = f"{image_content.image_file.file_id}_image_file.png" + await assistants_client.save_file(file_id=image_content.image_file.file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + for file_path_annotation in 
+                print("File Paths:")
+                print(f"Type: {file_path_annotation.type}")
+                print(f"Text: {file_path_annotation.text}")
+                print(f"File ID: {file_path_annotation.file_path.file_id}")
+                print(f"Start Index: {file_path_annotation.start_index}")
+                print(f"End Index: {file_path_annotation.end_index}")
+                file_name = Path(file_path_annotation.text).name
+                await assistants_client.save_file(file_id=file_path_annotation.file_path.file_id, file_name=file_name)
+                print(f"Saved file to: {Path.cwd() / file_name}")
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py
new file mode 100644
index 000000000000..0fabad0a180a
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py
@@ -0,0 +1,79 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with code interpreter from
+    the Azure Assistants service using an asynchronous client.
+
+USAGE:
+    python sample_assistants_code_interpreter_attachment_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    MODEL_DEPLOYMENT_NAME - the deployment name of the AI model, as found under the "Name" column in
+    the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+import os
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import CodeInterpreterTool, FilePurpose, MessageAttachment, ListSortOrder
+from azure.identity.aio import DefaultAzureCredential
+
+
+async def main():
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds
+        ) as assistants_client:
+            # Upload a file and wait for it to be processed
+            file = await assistants_client.upload_file_and_poll(
+                file_path="../product_info_1.md", purpose=FilePurpose.ASSISTANTS
+            )
+            print(f"Uploaded file, file ID: {file.id}")
+
+            code_interpreter = CodeInterpreterTool()
+
+            # Note: CodeInterpreter must be enabled when the assistant is created;
+            # otherwise the assistant will not be able to see the file attachment.
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="You are a helpful assistant",
+                tools=code_interpreter.definitions,
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            # Create a message with the attachment
+            attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions)
+            message = await assistants_client.create_message(
+                thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
+            )
+            print(f"Created message, message ID: {message.id}")
+
+            run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+            print(f"Run finished with status: {run.status}")
+
+            if run.status == "failed":
+                # "Rate limit is exceeded." here usually means the model deployment needs more quota
+                print(f"Run failed: {run.last_error}")
+
+            await assistants_client.delete_file(file.id)
+            print("Deleted file")
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING)
+            print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py
new file mode 100644
index 000000000000..78256571f64b
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py
@@ -0,0 +1,80 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with code interpreter from
+    the Azure Assistants service using an asynchronous client.
+
+USAGE:
+    python sample_assistants_code_interpreter_attachment_enterprise_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    MODEL_DEPLOYMENT_NAME - the deployment name of the AI model, as found under the "Name" column in
+    the "Models + endpoints" tab in your Azure AI Foundry project.
+    AZURE_BLOB_URI - the URI of the Azure blob used as the vector store data source.
+"""
+import asyncio
+import os
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import (
+    CodeInterpreterTool,
+    ListSortOrder,
+    MessageAttachment,
+    VectorStoreDataSource,
+    VectorStoreDataSourceAssetType,
+)
+from azure.identity.aio import DefaultAzureCredential
+
+
+async def main():
+    async with DefaultAzureCredential() as credential:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential
+        ) as assistants_client:
+
+            code_interpreter = CodeInterpreterTool()
+
+            # Note: CodeInterpreter must be enabled when the assistant is created;
+            # otherwise the assistant will not be able to see the file attachment.
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="You are a helpful assistant",
+                tools=code_interpreter.definitions,
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            # We will upload the local file to Azure and will use it for vector store creation.
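+            # AZURE_BLOB_URI must reference an existing blob that the project identity
+            # can read; this sample does not perform the upload itself.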
+ asset_uri = os.environ["AZURE_BLOB_URI"] + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + + # Create a message with the attachment + attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions) + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py new file mode 100644 index 000000000000..399e5610e771 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py @@ -0,0 +1,107 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_assistants_functions_async.py + +DESCRIPTION: + This sample demonstrates how to use assistant operations with custom functions from + the Azure Assistants service using a asynchronous client. + +USAGE: + python sample_assistants_functions_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. +""" +import asyncio +import time +import os +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput +from azure.identity.aio import DefaultAzureCredential +from user_async_functions import user_async_functions + + +async def main() -> None: + async with DefaultAzureCredential() as creds: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: + # Initialize assistant functions + functions = AsyncFunctionTool(functions=user_async_functions) + + # Create assistant + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=functions.definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + # Create thread for communication + thread = await assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create and send message + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, what's the time?" 
+ ) + print(f"Created message, ID: {message.id}") + + # Create and run assistant task + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, ID: {run.id}") + + # Polling loop for run status + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(4) + run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + await assistants_client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = await functions.execute(tool_call) + tool_outputs.append( + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + await assistants_client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print(f"Current run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + # Delete the assistant when done + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py new file mode 100644 index 000000000000..54d16fcb845c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py @@ -0,0 +1,110 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations using image file input for the + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_image_input_base64.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio +import os, time, base64 +from typing import List +from azure.ai.assistants.aio import AssistantsClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.assistants.models import ( + MessageTextContent, + MessageInputContentBlock, + MessageImageUrlParam, + MessageInputTextBlock, + MessageInputImageUrlBlock, +) + + +def image_to_base64(image_path: str) -> str: + """ + Convert an image file to a Base64-encoded string. + + :param image_path: The path to the image file (e.g. 'image_file.png') + :return: A Base64-encoded string representing the image. + :raises FileNotFoundError: If the provided file path does not exist. + :raises OSError: If there's an error reading the file. 
+ """ + if not os.path.isfile(image_path): + raise FileNotFoundError(f"File not found at: {image_path}") + + try: + with open(image_path, "rb") as image_file: + file_data = image_file.read() + return base64.b64encode(file_data).decode("utf-8") + except Exception as exc: + raise OSError(f"Error reading file '{image_path}'") from exc + + +async def main(): + async with DefaultAzureCredential() as creds: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + input_message = "Hello, what is in the image ?" + image_base64 = image_to_base64("../image_file.png") + img_url = f"data:image/png;base64,{image_base64}" + url_param = MessageImageUrlParam(url=img_url, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageUrlBlock(image_url=url_param), + ] + message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py new file mode 100644 index 000000000000..cc5ad1669d98 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py @@ -0,0 +1,93 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations using image file input for the + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_image_input_file.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" +import asyncio +import os, time +from typing import List +from azure.ai.assistants.aio import AssistantsClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.assistants.models import ( + MessageTextContent, + MessageInputContentBlock, + MessageImageFileParam, + MessageInputTextBlock, + MessageInputImageFileBlock, +) + + +async def main(): + async with DefaultAzureCredential() as creds: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + image_file = await assistants_client.upload_file_and_poll( + file_path="../image_file.png", purpose="assistants" + ) + print(f"Uploaded file, file ID: {image_file.id}") + + input_message = "Hello, what is in the image ?" + file_param = MessageImageFileParam(file_id=image_file.id, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageFileBlock(image_file=file_param), + ] + message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py new file mode 100644 index 000000000000..626c5af8c3c3 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py @@ -0,0 +1,90 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations using image url input for the + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_image_input_url.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+import os
+from typing import List
+from azure.ai.assistants.aio import AssistantsClient
+from azure.identity.aio import DefaultAzureCredential
+from azure.ai.assistants.models import (
+    MessageTextContent,
+    MessageInputContentBlock,
+    MessageImageUrlParam,
+    MessageInputTextBlock,
+    MessageInputImageUrlBlock,
+)
+
+
+async def main():
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client:
+
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="You are a helpful assistant",
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+            input_message = "Hello, what is in the image?"
+            url_param = MessageImageUrlParam(url=image_url, detail="high")
+            content_blocks: List[MessageInputContentBlock] = [
+                MessageInputTextBlock(text=input_message),
+                MessageInputImageUrlBlock(image_url=url_param),
+            ]
+            message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks)
+            print(f"Created message, message ID: {message.id}")
+
+            run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+
+            # Poll the run as long as run status is queued or in progress
+            while run.status in ["queued", "in_progress", "requires_action"]:
+                # Wait for a second without blocking the event loop
+                await asyncio.sleep(1)
+                run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id)
+                print(f"Run status: {run.status}")
+
+            if run.status == "failed":
+                print(f"Run failed: {run.last_error}")
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+
+            # The messages are listed in reverse chronological order;
+            # iterate over them and print only the text contents.
+            for data_point in reversed(messages.data):
+                last_message_content = data_point.content[-1]
+                if isinstance(last_message_content, MessageTextContent):
+                    print(f"{data_point.role}: {last_message_content.text.value}")
+
+            print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py
new file mode 100644
index 000000000000..51b1528405c8
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py
@@ -0,0 +1,114 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistants with a JSON schema output format.
+
+USAGE:
+    python sample_assistants_json_schema_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity pydantic aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import asyncio
+import os
+
+from enum import Enum
+from pydantic import BaseModel, TypeAdapter
+from azure.ai.assistants.aio import AssistantsClient
+from azure.identity.aio import DefaultAzureCredential
+from azure.ai.assistants.models import (
+    MessageTextContent,
+    MessageRole,
+    ResponseFormatJsonSchema,
+    ResponseFormatJsonSchemaType,
+    RunStatus,
+)
+
+
+# Create the pydantic model to represent the planet names and their masses.
+class Planets(str, Enum):
+    Earth = "Earth"
+    Mars = "Mars"
+    Jupiter = "Jupiter"
+
+
+class Planet(BaseModel):
+    planet: Planets
+    mass: float
+
+
+async def main():
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        ) as assistants_client:
+
+            # [START create_assistant]
+            assistant = await assistants_client.create_assistant(
+                # Note: only gpt-4o-mini-2024-07-18, gpt-4o-2024-08-06 and later
+                # model versions support structured output.
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="Extract the information about planets.",
+                headers={"x-ms-enable-preview": "true"},
+                response_format=ResponseFormatJsonSchemaType(
+                    json_schema=ResponseFormatJsonSchema(
+                        name="planet_mass",
+                        description="Extract planet mass.",
+                        schema=Planet.model_json_schema(),
+                    )
+                ),
+            )
+            # [END create_assistant]
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            # [START create_thread]
+            thread = await assistants_client.create_thread()
+            # [END create_thread]
+            print(f"Created thread, thread ID: {thread.id}")
+
+            # [START create_message]
+            message = await assistants_client.create_message(
+                thread_id=thread.id,
+                role="user",
+                content=("The mass of Mars is 6.4171E23 kg; the mass of Earth is 5.972168E24 kg;"),
+            )
+            # [END create_message]
+            print(f"Created message, message ID: {message.id}")
+
+            # [START create_run]
+            run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+            # [END create_run]
+
+            if run.status != RunStatus.COMPLETED:
+                print(f"The run did not succeed: {run.status=}.")
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            # [START list_messages]
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+
+            # The messages are listed in reverse chronological order;
+            # iterate over them and output only the text contents.
+            for data_point in reversed(messages.data):
+                last_message_content = data_point.content[-1]
+                # We will only list assistant responses here.
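+                # (Filtering on MessageRole.ASSISTANT below skips the user's input
+                # message, so only the model's JSON responses are parsed.)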
+                if isinstance(last_message_content, MessageTextContent) and data_point.role == MessageRole.ASSISTANT:
+                    planet = TypeAdapter(Planet).validate_json(last_message_content.text.value)
+                    print(f"The mass of {planet.planet} is {planet.mass} kg.")
+
+            # [END list_messages]
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py
new file mode 100644
index 000000000000..49ea7ef96fed
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py
@@ -0,0 +1,82 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with a toolset from
+    the Azure Assistants service using an asynchronous client.
+
+USAGE:
+    python sample_assistants_run_with_toolset_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os, asyncio
+from azure.ai.assistants.aio import AssistantsClient
+from azure.identity.aio import DefaultAzureCredential
+from azure.ai.assistants.models import AsyncFunctionTool, AsyncToolSet
+from user_async_functions import user_async_functions
+
+
+async def main() -> None:
+
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        ) as assistants_client:
+
+            # Initialize assistant toolset with user functions
+            # [START create_assistant_with_async_function_tool]
+            functions = AsyncFunctionTool(user_async_functions)
+
+            toolset = AsyncToolSet()
+            toolset.add(functions)
+
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="You are a helpful assistant",
+                toolset=toolset,
+            )
+            # [END create_assistant_with_async_function_tool]
+            print(f"Created assistant, ID: {assistant.id}")
+
+            # Create thread for communication
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, ID: {thread.id}")
+
+            # Create message to thread
+            message = await assistants_client.create_message(
+                thread_id=thread.id,
+                role="user",
+                content="Hello, send an email with the datetime and weather information in New York?",
+            )
+            print(f"Created message, ID: {message.id}")
+
+            # Create and process assistant run in thread with tools
+            run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+            print(f"Run finished with status: {run.status}")
+
+            if run.status == "failed":
+                print(f"Run failed: {run.last_error}")
+
+            # Delete the assistant when done
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            # Fetch and log all messages
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+            print(f"Messages: {messages}")
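+            # Note: because the functions were attached via a toolset,
+            # create_and_process_run executed the required tool calls
+            # automatically; no manual submit_tool_outputs step was needed here
+            # (contrast with the event-handler samples below, which submit tool
+            # outputs themselves).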
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py
new file mode 100644
index 000000000000..5a1d47d1f167
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py
@@ -0,0 +1,98 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with an event handler when streaming from
+    the Azure Assistants service using an asynchronous client.
+
+USAGE:
+    python sample_assistants_stream_eventhandler_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+from typing import Any, Optional
+
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import (
+    MessageDeltaChunk,
+    RunStep,
+    ThreadMessage,
+    ThreadRun,
+)
+from azure.ai.assistants.models import AsyncAssistantEventHandler
+from azure.identity.aio import DefaultAzureCredential
+
+import os
+
+
+class MyEventHandler(AsyncAssistantEventHandler[str]):
+
+    async def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]:
+        return f"Text delta received: {delta.text}"
+
+    async def on_thread_message(self, message: "ThreadMessage") -> Optional[str]:
+        return f"ThreadMessage created. ID: {message.id}, Status: {message.status}"
+
+    async def on_thread_run(self, run: "ThreadRun") -> Optional[str]:
+        return f"ThreadRun status: {run.status}"
+
+    async def on_run_step(self, step: "RunStep") -> Optional[str]:
+        return f"RunStep type: {step.type}, Status: {step.status}"
+
+    async def on_error(self, data: str) -> Optional[str]:
+        return f"An error occurred. Data: {data}"
+
+    async def on_done(self) -> Optional[str]:
+        return "Stream completed."
+
+    async def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]:
+        return f"Unhandled Event Type: {event_type}, Data: {event_data}"
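+
+
+# Because MyEventHandler is parameterized with str, the value returned by each
+# callback above is surfaced as the third element (func_return) when the stream
+# is iterated in main() below.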
+async def main() -> None:
+
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        ) as assistants_client:
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant"
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID {thread.id}")
+
+            message = await assistants_client.create_message(
+                thread_id=thread.id, role="user", content="Hello, tell me a joke"
+            )
+            print(f"Created message, message ID {message.id}")
+
+            async with await assistants_client.create_stream(
+                thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler()
+            ) as stream:
+                async for event_type, event_data, func_return in stream:
+                    print("Received data.")
+                    print(f"Streaming receive Event Type: {event_type}")
+                    print(f"Event Data: {str(event_data)[:100]}...")
+                    print(f"Event Function return: {func_return}\n")
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+            print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py
new file mode 100644
index 000000000000..e62fe546a05a
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py
@@ -0,0 +1,141 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with an event handler and function tools from
+    the Azure Assistants service using an asynchronous client.
+
+USAGE:
+    python sample_assistants_stream_eventhandler_with_functions_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+from typing import Any
+
+import os
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import (
+    AsyncAssistantEventHandler,
+    AsyncFunctionTool,
+    MessageDeltaChunk,
+    RequiredFunctionToolCall,
+    RunStep,
+    SubmitToolOutputsAction,
+    ThreadMessage,
+    ThreadRun,
+    ToolOutput,
+)
+from azure.identity.aio import DefaultAzureCredential
+from user_async_functions import user_async_functions
+
+
+class MyEventHandler(AsyncAssistantEventHandler[str]):
+
+    def __init__(self, functions: AsyncFunctionTool, assistants_client: AssistantsClient) -> None:
+        super().__init__()
+        self.functions = functions
+        self.assistants_client = assistants_client
+
+    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        print(f"Text delta received: {delta.text}")
+
+    async def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    async def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+            tool_calls = run.required_action.submit_tool_outputs.tool_calls
+
+            tool_outputs = []
+            for tool_call in tool_calls:
+                if isinstance(tool_call, RequiredFunctionToolCall):
+                    try:
+                        output = await self.functions.execute(tool_call)
+                        tool_outputs.append(
+                            ToolOutput(
+                                tool_call_id=tool_call.id,
+                                output=output,
+                            )
+                        )
+                    except Exception as e:
+                        print(f"Error executing tool_call {tool_call.id}: {e}")
+
+            print(f"Tool outputs: {tool_outputs}")
+            if tool_outputs:
+                await self.assistants_client.submit_tool_outputs_to_stream(
+                    thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self
+                )
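+            # Note: passing event_handler=self above resumes the stream on this
+            # same handler, so the events that follow the submitted tool outputs
+            # keep flowing through these callbacks.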
+
+    async def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    async def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    async def on_done(self) -> None:
+        print("Stream completed.")
+
+    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+async def main() -> None:
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        ) as assistants_client:
+
+            # [START create_assistant_with_function_tool]
+            functions = AsyncFunctionTool(functions=user_async_functions)
+
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="You are a helpful assistant",
+                tools=functions.definitions,
+            )
+            # [END create_assistant_with_function_tool]
+            print(f"Created assistant, ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID {thread.id}")
+
+            message = await assistants_client.create_message(
+                thread_id=thread.id,
+                role="user",
+                content="Hello, send an email with the datetime and weather information in New York? Also let me know the details.",
+            )
+            print(f"Created message, message ID {message.id}")
+
+            async with await assistants_client.create_stream(
+                thread_id=thread.id,
+                assistant_id=assistant.id,
+                event_handler=MyEventHandler(functions, assistants_client),
+            ) as stream:
+                await stream.until_done()
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+            print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py
new file mode 100644
index 000000000000..d1b6276fe284
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py
@@ -0,0 +1,105 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with an event handler and a toolset from
+    the Azure Assistants service using an asynchronous client.
+
+USAGE:
+    python sample_assistants_stream_eventhandler_with_toolset_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+from typing import Any
+
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun
+from azure.ai.assistants.models import AsyncAssistantEventHandler, AsyncFunctionTool, AsyncToolSet
+from azure.identity.aio import DefaultAzureCredential
+
+import os
+
+from user_async_functions import user_async_functions
+
+
+class MyEventHandler(AsyncAssistantEventHandler):
+
+    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        print(f"Text delta received: {delta.text}")
+
+    async def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    async def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+    async def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    async def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    async def on_done(self) -> None:
+        print("Stream completed.")
+
+    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+async def main() -> None:
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        ) as assistants_client:
+
+            # Initialize toolset with user functions
+            functions = AsyncFunctionTool(user_async_functions)
+            toolset = AsyncToolSet()
+            toolset.add(functions)
+
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="You are a helpful assistant",
+                toolset=toolset,
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID {thread.id}")
+
+            message = await assistants_client.create_message(
+                thread_id=thread.id,
+                role="user",
+                content="Hello, send an email with the datetime and weather information in New York? Also let me know the details",
+            )
+            print(f"Created message, message ID {message.id}")
+
+            async with await assistants_client.create_stream(
+                thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler()
+            ) as stream:
+                await stream.until_done()
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+            print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py
new file mode 100644
index 000000000000..c6a3c7f8933d
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py
@@ -0,0 +1,82 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with iteration when streaming from
+    the Azure Assistants service using an asynchronous client.
+
+USAGE:
+    python sample_assistants_stream_iteration_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import AssistantStreamEvent
+from azure.ai.assistants.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun
+from azure.identity.aio import DefaultAzureCredential
+
+import os
+
+
+async def main() -> None:
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        ) as assistants_client:
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant"
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID {thread.id}")
+
+            message = await assistants_client.create_message(
+                thread_id=thread.id, role="user", content="Hello, tell me a joke"
+            )
+            print(f"Created message, message ID {message.id}")
+
+            async with await assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream:
+                async for event_type, event_data, _ in stream:
+
+                    if isinstance(event_data, MessageDeltaChunk):
+                        print(f"Text delta received: {event_data.text}")
+
+                    elif isinstance(event_data, ThreadMessage):
+                        print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
+
+                    elif isinstance(event_data, ThreadRun):
+                        print(f"ThreadRun status: {event_data.status}")
+
+                    elif isinstance(event_data, RunStep):
+                        print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
+
+                    elif event_type == AssistantStreamEvent.ERROR:
+                        print(f"An error occurred. Data: {event_data}")
+
+                    elif event_type == AssistantStreamEvent.DONE:
+                        print("Stream completed.")
+                        break
+
+                    else:
+                        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+            print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py
new file mode 100644
index 000000000000..edc8c7bab614
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py
@@ -0,0 +1,110 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to override the base event handler, parse the events, and iterate through them.
+    You might not want to repeat iteration code like that in sample_assistants_stream_iteration_async.py
+    in every place that calls create_stream. Overriding the base event handler, as shown here, lets the
+    parsing and iteration logic be reused across multiple create_stream calls to help keep the code clean.
+
+USAGE:
+    python sample_assistants_stream_with_base_override_eventhandler_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+import json
+from typing import AsyncGenerator, Optional
+
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models._models import (
+    MessageDeltaChunk,
+    MessageDeltaTextContent,
+)
+from azure.ai.assistants.models import AssistantStreamEvent, BaseAsyncAssistantEventHandler
+from azure.identity.aio import DefaultAzureCredential
+
+import os
+
+
+# Our goal is to parse the event data string and return the text chunk for each iteration.
+# Because we want each iteration to yield a string, we parameterize BaseAsyncAssistantEventHandler
+# with Optional[str] and override the _process_event method to return the text value of a
+# message delta (or None for events we do not surface).
+# The get_stream_chunks method then yields only the non-empty string chunks.
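+# For reference, a raw server-sent event parsed by _process_event looks roughly
+# like the following (an illustrative sketch, not an exact service payload):
+#
+#     event: thread.message.delta
+#     data: {"delta": {"content": [{"type": "text", "text": {"value": "Hello"}}]}}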
+class MyEventHandler(BaseAsyncAssistantEventHandler[Optional[str]]):
+
+    async def _process_event(self, event_data_str: str) -> Optional[str]:
+
+        event_lines = event_data_str.strip().split("\n")
+        event_type: Optional[str] = None
+        event_data = ""
+        for line in event_lines:
+            if line.startswith("event:"):
+                event_type = line.split(":", 1)[1].strip()
+            elif line.startswith("data:"):
+                event_data = line.split(":", 1)[1].strip()
+
+        if not event_type:
+            raise ValueError("Event type not specified in the event data.")
+
+        if event_type == AssistantStreamEvent.THREAD_MESSAGE_DELTA.value:
+
+            event_obj: MessageDeltaChunk = MessageDeltaChunk(**json.loads(event_data))
+
+            for content_part in event_obj.delta.content:
+                if isinstance(content_part, MessageDeltaTextContent):
+                    if content_part.text is not None:
+                        return content_part.text.value
+        return None
+
+    async def get_stream_chunks(self) -> AsyncGenerator[str, None]:
+        async for chunk in self:
+            if chunk:
+                yield chunk
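+
+
+# With the handler above, callers can consume just the text deltas, e.g.
+# `async for chunk in stream.get_stream_chunks(): print(chunk)`, as main() does below.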
+""" +import asyncio +import os + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ( + FileSearchTool, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + + async with DefaultAzureCredential() as credential: + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, + ) as assistants_client: + # We will upload the local file to Azure and will use it for vector store creation. + asset_uri = os.environ["AZURE_BLOB_URI"] + ds = VectorStoreDataSource( + asset_identifier=asset_uri, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + vector_store = await assistants_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Add the file to the vector store or you can supply file ids in the vector store creation + vector_store_file_batch = await assistants_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What feature does Smart Eyewear offer?", + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + await assistants_client.update_assistant( + assistant_id=assistant.id, + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Updated assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What feature does Smart Eyewear offer?", + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + await assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py 
new file mode 100644
index 000000000000..252a62cee11b
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py
@@ -0,0 +1,109 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations to add files to an existing vector store and perform file search with
+    the Azure Assistants service using an asynchronous client.
+
+USAGE:
+    python sample_assistants_vector_store_batch_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import asyncio
+import os
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import FileSearchTool, FilePurpose
+from azure.identity.aio import DefaultAzureCredential
+
+
+async def main() -> None:
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        ) as assistants_client:
+            # Upload a file and wait for it to be processed
+            file = await assistants_client.upload_file_and_poll(
+                file_path="../product_info_1.md", purpose=FilePurpose.ASSISTANTS
+            )
+            print(f"Uploaded file, file ID: {file.id}")
+
+            # Create a vector store with no file and wait for it to be processed
+            vector_store = await assistants_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
+            print(f"Created vector store, vector store ID: {vector_store.id}")
+
+            # Add the file to the vector store; alternatively, file IDs can be
+            # supplied when the vector store is created
+            vector_store_file_batch = await assistants_client.create_vector_store_file_batch_and_poll(
+                vector_store_id=vector_store.id, file_ids=[file.id]
+            )
+            print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
+
+            # Create a file search tool
+            file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+            # Note that the FileSearchTool's definitions and resources must be passed as
+            # tools and tool_resources, or the assistant will be unable to search the file
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="You are a helpful assistant",
+                tools=file_search_tool.definitions,
+                tool_resources=file_search_tool.resources,
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            message = await assistants_client.create_message(
+                thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+            )
+            print(f"Created message, message ID: {message.id}")
+
+            run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+            print(f"Created run, run ID: {run.id}")
+
+            file_search_tool.remove_vector_store(vector_store.id)
+            print(f"Removed vector store from file search, vector store ID: {vector_store.id}")
+
+            await assistants_client.update_assistant(
+                assistant_id=assistant.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources
+            )
+            print(f"Updated assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            message = await assistants_client.create_message(
+                thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+            )
+            print(f"Created message, message ID: {message.id}")
+
+            run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+            print(f"Created run, run ID: {run.id}")
+
+            await assistants_client.delete_file(file.id)
+            print("Deleted file")
+
+            await assistants_client.delete_vector_store(vector_store.id)
+            print("Deleted vector store")
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+            print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py
new file mode 100644
index 000000000000..103825f2492a
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py
@@ -0,0 +1,77 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to add files to an assistant during vector store creation.
+
+USAGE:
+    python sample_assistants_vector_store_enterprise_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity azure-ai-ml aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) AZURE_BLOB_URI - the URI of a file previously uploaded to Azure blob storage.
+"""
+import asyncio
+import os
+
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType
+from azure.identity.aio import DefaultAzureCredential
+
+
+async def main():
+    async with DefaultAzureCredential() as credential:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=credential,
+        ) as assistants_client:
+            # AZURE_BLOB_URI points to a file previously uploaded to Azure; it is
+            # used here as the data source for the vector store.
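+            # (One way to produce such a URI, given the azure-ai-ml dependency
+            # listed above, is to upload a local file as a data asset with the
+            # azure-ai-ml client; the exact upload steps depend on your project
+            # setup and are not shown in this sample.)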
+            asset_uri = os.environ["AZURE_BLOB_URI"]
+            ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+            vector_store = await assistants_client.create_vector_store_and_poll(
+                data_sources=[ds], name="sample_vector_store"
+            )
+            print(f"Created vector store, vector store ID: {vector_store.id}")
+
+            # Create a file search tool
+            file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+            # Note that the FileSearchTool's definitions and resources must be passed as
+            # tools and tool_resources, or the assistant will be unable to search the file
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="You are a helpful assistant",
+                tools=file_search_tool.definitions,
+                tool_resources=file_search_tool.resources,
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            message = await assistants_client.create_message(
+                thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+            )
+            print(f"Created message, message ID: {message.id}")
+
+            run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+            print(f"Created run, run ID: {run.id}")
+
+            await assistants_client.delete_vector_store(vector_store.id)
+            print("Deleted vector store")
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+            print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py
new file mode 100644
index 000000000000..b9c0050348b4
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py
@@ -0,0 +1,88 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to add files to an assistant during vector store creation.
+
+USAGE:
+    python sample_assistants_vector_store_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+""" +import asyncio +import os + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import FileSearchTool, FilePurpose, MessageTextContent +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + async with DefaultAzureCredential() as credential: + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=credential, + ) as assistants_client: + # Upload a file and wait for it to be processed + file = await assistants_client.upload_file_and_poll( + file_path="../product_info_1.md", purpose=FilePurpose.ASSISTANTS + ) + print(f"Uploaded file, file ID: {file.id}") + + # Create a vector store with no file and wait for it to be processed + vector_store = await assistants_client.create_vector_store_and_poll( + file_ids=[file.id], name="sample_vector_store" + ) + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + await assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + + for message in reversed(messages.data): + # To remove characters, which are not correctly handled by print, we will encode the message + # and then decode it again. + clean_message = "\n".join( + text_msg.text.value.encode("ascii", "ignore").decode("utf-8") for text_msg in message.text_messages + ) + print(f"Role: {message.role} Message: {clean_message}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py new file mode 100644 index 000000000000..54ea32b05291 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py @@ -0,0 +1,83 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations to create messages with file search attachments from + the Azure Assistants service using a asynchronous client. 
+
+USAGE:
+    python sample_assistants_with_file_search_attachment_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import FilePurpose
+from azure.ai.assistants.models import FileSearchTool, MessageAttachment
+from azure.identity.aio import DefaultAzureCredential
+
+import os
+
+
+async def main() -> None:
+    async with DefaultAzureCredential() as creds:
+        async with AssistantsClient(
+            endpoint=os.environ["PROJECT_ENDPOINT"],
+            credential=creds,
+        ) as assistants_client:
+            # Upload a file and wait for it to be processed
+            file = await assistants_client.upload_file_and_poll(
+                file_path="../product_info_1.md", purpose=FilePurpose.ASSISTANTS
+            )
+
+            # Create assistant
+            assistant = await assistants_client.create_assistant(
+                model=os.environ["MODEL_DEPLOYMENT_NAME"],
+                name="my-assistant",
+                instructions="You are a helpful assistant",
+            )
+            print(f"Created assistant, assistant ID: {assistant.id}")
+
+            thread = await assistants_client.create_thread()
+            print(f"Created thread, thread ID: {thread.id}")
+
+            # Create a message with the file search attachment.
+            # Note that a vector store is created temporarily when attachments are used,
+            # with a default expiration policy of seven days.
+            attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
+            message = await assistants_client.create_message(
+                thread_id=thread.id,
+                role="user",
+                content="What feature does Smart Eyewear offer?",
+                attachments=[attachment],
+            )
+            print(f"Created message, message ID: {message.id}")
+
+            run = await assistants_client.create_and_process_run(
+                thread_id=thread.id, assistant_id=assistant.id, sleep_interval=4
+            )
+            print(f"Created run, run ID: {run.id}")
+
+            print(f"Run completed with status: {run.status}")
+
+            await assistants_client.delete_file(file.id)
+            print("Deleted file")
+
+            await assistants_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+
+            messages = await assistants_client.list_messages(thread_id=thread.id)
+            print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/user_async_functions.py b/sdk/ai/azure-ai-assistants/samples/async_samples/user_async_functions.py
new file mode 100644
index 000000000000..ba75eb3d7231
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/async_samples/user_async_functions.py
@@ -0,0 +1,67 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+import asyncio
+import os
+import sys
+import json
+import datetime
+from typing import Any, Callable, Set, Optional
+from azure.ai.assistants.telemetry import trace_function
+
+
+# Add parent directory to sys.path to import user_functions
+current_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.abspath(os.path.join(current_dir, ".."))
+if parent_dir not in sys.path:
+    sys.path.insert(0, parent_dir)
+from user_functions import fetch_current_datetime, fetch_weather, send_email
+
+
+async def send_email_async(recipient: str, subject: str, body: str) -> str:
+    """
+    Sends an email with the specified subject and body to the recipient.
+
+    :param recipient (str): Email address of the recipient.
+    :param subject (str): Subject of the email.
+    :param body (str): Body content of the email.
+    :return: Confirmation message.
+    :rtype: str
+    """
+    await asyncio.sleep(1)
+    return send_email(recipient, subject, body)
+
+
+# The trace_function decorator will trace the function call and enable adding additional attributes
+# to the span in the function implementation. Note that this will trace the function parameters and their values.
+@trace_function()
+async def fetch_current_datetime_async(format: Optional[str] = None) -> str:
+    """
+    Get the current time as a JSON string, optionally formatted.
+
+    :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format.
+    :return: The current time in JSON format.
+    :rtype: str
+    """
+    await asyncio.sleep(1)
+    current_time = datetime.datetime.now()
+
+    # Use the provided format if available, else use a default format
+    if format:
+        time_format = format
+    else:
+        time_format = "%Y-%m-%d %H:%M:%S"
+
+    time_json = json.dumps({"current_time": current_time.strftime(time_format)})
+    return time_json
+
+
+# Statically defined user functions for fast reference. fetch_current_datetime and
+# send_email have async wrappers here, while fetch_weather remains sync.
+user_async_functions: Set[Callable[..., Any]] = {
+    fetch_current_datetime_async,
+    fetch_weather,
+    send_email_async,
+}
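+
+# (A usage note: the samples construct AsyncFunctionTool(user_async_functions)
+# directly from this set and appear to derive each tool's definition from the
+# callables' signatures and docstrings, so keeping both accurate matters.)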
diff --git a/sdk/ai/azure-ai-assistants/samples/countries.json b/sdk/ai/azure-ai-assistants/samples/countries.json
new file mode 100644
index 000000000000..58d3df70d28d
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/countries.json
@@ -0,0 +1,46 @@
+{
+    "openapi": "3.1.0",
+    "info": {
+        "title": "RestCountries.NET API",
+        "description": "Web API version 3.1 for managing country items, based on previous implementations from restcountries.eu and restcountries.com.",
+        "version": "v3.1"
+    },
+    "servers": [
+        { "url": "https://restcountries.net" }
+    ],
+    "auth": [],
+    "paths": {
+        "/v3.1/currency": {
+            "get": {
+                "description": "Search by currency.",
+                "operationId": "LookupCountryByCurrency",
+                "parameters": [
+                    {
+                        "name": "currency",
+                        "in": "query",
+                        "description": "The currency to search for.",
+                        "required": true,
+                        "schema": {
+                            "type": "string"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "Success",
+                        "content": {
+                            "text/plain": {
+                                "schema": {
+                                    "type": "string"
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    },
+    "components": {
+        "schemes": {}
+    }
+}
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-assistants/samples/image_file.png b/sdk/ai/azure-ai-assistants/samples/image_file.png
new file mode 100644
index 000000000000..50ae6c65367a
Binary files /dev/null and b/sdk/ai/azure-ai-assistants/samples/image_file.png differ
diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py
new file mode 100644
index 000000000000..2146f780b02d
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py
@@ -0,0 +1,439 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import os
+import yaml  # type: ignore
+
+from opentelemetry import trace
+from opentelemetry.trace import StatusCode, Span  # noqa: F401 # pylint: disable=unused-import
+from typing import Any, Dict, Optional, Set, List
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import FunctionTool, ToolSet, MessageRole, Assistant, AssistantThread
+
+tracer = trace.get_tracer(__name__)
+
+
+class _AssistantTeamMember:
+    """
+    Represents an individual assistant on a team.
+
+    :param model: The model (e.g. GPT-4) used by this assistant.
+    :param name: The assistant's name.
+    :param instructions: The assistant's initial instructions or "personality".
+    :param toolset: An optional ToolSet with specialized tools for this assistant.
+    :param can_delegate: Whether this assistant has delegation capability (e.g., 'create_task').
+                         Defaults to True.
+    """
+
+    def __init__(
+        self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None, can_delegate: bool = True
+    ) -> None:
+        self.model = model
+        self.name = name
+        self.instructions = instructions
+        self.assistant_instance: Optional[Assistant] = None
+        self.toolset: Optional[ToolSet] = toolset
+        self.can_delegate = can_delegate
+
+
+class AssistantTask:
+    """
+    Encapsulates a task for an assistant to perform.
+
+    :param recipient: The name of the assistant who should receive the task.
+    :param task_description: The description of the work to be done or question to be answered.
+    :param requestor: The name of the assistant or user requesting the task.
+    """
+
+    def __init__(self, recipient: str, task_description: str, requestor: str) -> None:
+        self.recipient = recipient
+        self.task_description = task_description
+        self.requestor = requestor
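+
+
+# Example (illustrative only; the member name "Coder" is hypothetical): a team
+# leader can delegate work by queueing a task on its team, e.g.
+#
+#     team = AssistantTeam.get_team("test_team")
+#     team.add_task(AssistantTask(recipient="Coder", task_description="Write a haiku", requestor="TeamLeader"))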
+ """ + # Validate that the team_name is a non-empty string + if not isinstance(team_name, str) or not team_name: + raise ValueError("Team name must be a non-empty string.") + # Check for existing team with the same name + if team_name in AssistantTeam._teams: + raise ValueError(f"A team with the name '{team_name}' already exists.") + self.team_name = team_name + if assistants_client is None: + raise ValueError("No AssistantsClient provided.") + self._assistants_client = assistants_client + # Store the instance in the static container + AssistantTeam._teams[team_name] = self + + # Get the directory of the current file + current_dir = os.path.dirname(os.path.abspath(__file__)) + # Construct the full path to the config file + file_path = os.path.join(current_dir, "assistant_team_config.yaml") + with open(file_path, "r") as config_file: + config = yaml.safe_load(config_file) + self.TEAM_LEADER_INSTRUCTIONS = config["TEAM_LEADER_INSTRUCTIONS"] + self.TEAM_LEADER_INITIAL_REQUEST = config["TEAM_LEADER_INITIAL_REQUEST"] + self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS = config[ + "TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS" + ] + self.TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS = config["TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS"] + self.TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS = config["TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS"] + self.TEAM_LEADER_MODEL = config["TEAM_LEADER_MODEL"].strip() + + @staticmethod + def get_team(team_name: str) -> "AssistantTeam": + """Static method to fetch the AssistantTeam instance by name.""" + team = AssistantTeam._teams.get(team_name) + if team is None: + raise ValueError(f"No team found with the name '{team_name}'.") + return team + + @staticmethod + def _remove_team(team_name: str) -> None: + """Static method to remove an AssistantTeam instance by name.""" + if team_name not in AssistantTeam._teams: + raise ValueError(f"No team found with the name '{team_name}'.") + del AssistantTeam._teams[team_name] + + def add_assistant( + self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None, can_delegate: bool = True + ) -> None: + """ + Add a new assistant (team member) to this AssistantTeam. + + :param model: The model name (e.g. GPT-4) for the assistant. + :param name: The name of the assistant being added. + :param instructions: The initial instructions/personality for the assistant. + :param toolset: An optional ToolSet to configure specific tools (functions, etc.) + for this assistant. If None, we'll create a default set. + :param can_delegate: If True, the assistant can delegate tasks (via create_task). + If False, the assistant does not get 'create_task' in its ToolSet + and won't mention delegation in instructions. + """ + if toolset is None: + toolset = ToolSet() + + if can_delegate: + # If assistant can delegate, ensure it has 'create_task' + try: + function_tool = toolset.get_tool(FunctionTool) + function_tool.add_functions(assistant_team_default_functions) + except ValueError: + default_function_tool = FunctionTool(assistant_team_default_functions) + toolset.add(default_function_tool) + + member = _AssistantTeamMember( + model=model, + name=name, + instructions=instructions, + toolset=toolset, + can_delegate=can_delegate, + ) + self._members.append(member) + + def set_team_leader(self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None) -> None: + """ + Set the team leader for this AssistantTeam. + + If team leader has not been set prior to the call to assemble_team, + then a default team leader will be set. 
+
+    def set_team_leader(self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None) -> None:
+        """
+        Set the team leader for this AssistantTeam.
+
+        If a team leader has not been set prior to the call to assemble_team,
+        then a default team leader will be set.
+
+        :param model: The model name (e.g. GPT-4) for the assistant.
+        :param name: The name of the team leader.
+        :param instructions: The instructions for the team leader. These instructions
+                             are not modified by the implementation, so all required
+                             information about other team members and how to pass tasks
+                             to them should be included.
+        :param toolset: An optional ToolSet to configure specific tools (functions, etc.)
+                        for the team leader.
+        """
+        member = _AssistantTeamMember(model=model, name=name, instructions=instructions, toolset=toolset)
+        self._team_leader = member
+
+    def add_task(self, task: AssistantTask) -> None:
+        """
+        Add a new task to the team's task list.
+
+        :param task: The task to be added.
+        """
+        self._tasks.append(task)
+
+    def _create_team_leader(self) -> None:
+        """
+        Create the team leader assistant.
+        """
+        assert self._assistants_client is not None, "assistants_client must not be None"
+        assert self._team_leader is not None, "team leader has not been added"
+
+        self._team_leader.assistant_instance = self._assistants_client.create_assistant(
+            model=self._team_leader.model,
+            name=self._team_leader.name,
+            instructions=self._team_leader.instructions,
+            toolset=self._team_leader.toolset,
+        )
+
+    def _set_default_team_leader(self):
+        """
+        Set the default 'TeamLeader' assistant with awareness of all other assistants.
+        """
+        toolset = ToolSet()
+        # default_function_tool is defined at module scope below.
+        toolset.add(default_function_tool)
+        instructions = (
+            self.TEAM_LEADER_INSTRUCTIONS.format(assistant_name="TeamLeader", team_name=self.team_name) + "\n"
+        )
+        # List all team members added so far (empty if no members were added
+        # before assemble_team was called).
+        for member in self._members:
+            instructions += f"- {member.name}: {member.instructions}\n"
+
+        self._team_leader = _AssistantTeamMember(
+            model=self.TEAM_LEADER_MODEL,
+            name="TeamLeader",
+            instructions=instructions,
+            toolset=toolset,
+            can_delegate=True,
+        )
+
+    def assemble_team(self):
+        """
+        Create the team leader assistant and initialize all member assistants with
+        their configured or default toolsets.
+        """
+        assert self._assistants_client is not None, "assistants_client must not be None"
+
+        if self._team_leader is None:
+            self._set_default_team_leader()
+
+        self._create_team_leader()
+
+        for member in self._members:
+            if member is self._team_leader:
+                continue
+
+            team_description = ""
+            for other_member in self._members:
+                if other_member != member:
+                    team_description += f"- {other_member.name}: {other_member.instructions}\n"
+
+            if member.can_delegate:
+                extended_instructions = self.TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS.format(
+                    name=member.name,
+                    team_name=self._team_name,
+                    original_instructions=member.instructions,
+                    team_description=team_description,
+                )
+            else:
+                extended_instructions = self.TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS.format(
+                    name=member.name,
+                    team_name=self._team_name,
+                    original_instructions=member.instructions,
+                    team_description=team_description,
+                )
+            member.assistant_instance = self._assistants_client.create_assistant(
+                model=member.model, name=member.name, instructions=extended_instructions, toolset=member.toolset
+            )
+
+    def dismantle_team(self) -> None:
+        """
+        Delete all assistants (including the team leader) from the assistants client.
+ """ + assert self._assistants_client is not None, "assistants_client must not be None" + + if self._team_leader and self._team_leader.assistant_instance: + print(f"Deleting team leader assistant '{self._team_leader.name}'") + self._assistants_client.delete_assistant(self._team_leader.assistant_instance.id) + for member in self._members: + if member is not self._team_leader and member.assistant_instance: + print(f"Deleting assistant '{member.name}'") + self._assistants_client.delete_assistant(member.assistant_instance.id) + AssistantTeam._remove_team(self.team_name) + + def _add_task_completion_event( + self, + span: Span, + result: str, + ) -> None: + + attributes: Dict[str, Any] = {} + attributes["assistant_team.task.result"] = result + span.add_event(name=f"assistant_team.task_completed", attributes=attributes) + + def process_request(self, request: str) -> None: + """ + Handle a user's request by creating a team and delegating tasks to + the team leader. The team leader may generate additional tasks. + + :param request: The user's request or question. + """ + assert self._assistants_client is not None, "project client must not be None" + assert self._team_leader is not None, "team leader must not be None" + + if self._assistant_thread is None: + self._assistant_thread = self._assistants_client.create_thread() + print(f"Created thread with ID: {self._assistant_thread.id}") + + with tracer.start_as_current_span("assistant_team_request") as current_request_span: + self._current_request_span = current_request_span + if self._current_request_span is not None: + self._current_request_span.set_attribute("assistant_team.name", self.team_name) + team_leader_request = self.TEAM_LEADER_INITIAL_REQUEST.format(original_request=request) + _create_task( + team_name=self.team_name, + recipient=self._team_leader.name, + request=team_leader_request, + requestor="user", + ) + while self._tasks: + task = self._tasks.pop(0) + with tracer.start_as_current_span("assistant_team_task") as current_task_span: + self._current_task_span = current_task_span + if self._current_task_span is not None: + self._current_task_span.set_attribute("assistant_team.name", self.team_name) + self._current_task_span.set_attribute("assistant_team.task.recipient", task.recipient) + self._current_task_span.set_attribute("assistant_team.task.requestor", task.requestor) + self._current_task_span.set_attribute("assistant_team.task.description", task.task_description) + print( + f"Starting task for assistant '{task.recipient}'. " + f"Requestor: '{task.requestor}'. " + f"Task description: '{task.task_description}'." + ) + message = self._assistants_client.create_message( + thread_id=self._assistant_thread.id, + role="user", + content=task.task_description, + ) + print(f"Created message with ID: {message.id} for task in thread {self._assistant_thread.id}") + assistant = self._get_member_by_name(task.recipient) + if assistant and assistant.assistant_instance: + run = self._assistants_client.create_and_process_run( + thread_id=self._assistant_thread.id, assistant_id=assistant.assistant_instance.id + ) + print(f"Created and processed run for assistant '{assistant.name}', run ID: {run.id}") + messages = self._assistants_client.list_messages(thread_id=self._assistant_thread.id) + text_message = messages.get_last_text_message_by_role(role=MessageRole.ASSISTANT) + if text_message and text_message.text: + print( + f"Assistant '{assistant.name}' completed task. 
" f"Outcome: {text_message.text.value}" + ) + if self._current_task_span is not None: + self._add_task_completion_event(self._current_task_span, result=text_message.text.value) + + # If no tasks remain AND the recipient is not the TeamLeader, + # let the TeamLeader see if more delegation is needed. + if not self._tasks and not task.recipient == "TeamLeader": + team_leader_request = self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS + _create_task( + team_name=self.team_name, + recipient=self._team_leader.name, + request=team_leader_request, + requestor="user", + ) + # self._current_task_span.end() + self._current_task_span = None + # self._current_request_span.end() + self._current_request_span = None + + def _get_member_by_name(self, name) -> Optional[_AssistantTeamMember]: + """ + Retrieve a team member (assistant) by name. + If no member with the specified name is found, returns None. + + :param name: The assistant's name within this team. + """ + if name == "TeamLeader": + return self._team_leader + for member in self._members: + if member.name == name: + return member + return None + + """ + Requests another assistant in the team to complete a task. + + :param span (Span): The event will be added to this span + :param team_name (str): The name of the team. + :param recipient (str): The name of the assistant that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the assistant who is requesting the task. + :return: True if the task was successfully received, False otherwise. + :rtype: str + """ + + +def _add_create_task_event( + span: Span, + team_name: str, + requestor: str, + recipient: str, + request: str, +) -> None: + + attributes: Dict[str, Any] = {} + attributes["assistant_team.task.team_name"] = team_name + attributes["assistant_team.task.requestor"] = requestor + attributes["assistant_team.task.recipient"] = recipient + attributes["assistant_team.task.description"] = request + span.add_event(name=f"assistant_team.create_task", attributes=attributes) + + +def _create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: + """ + Requests another assistant in the team to complete a task. + + :param team_name (str): The name of the team. + :param recipient (str): The name of the assistant that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the assistant who is requesting the task. + :return: True if the task was successfully received, False otherwise. 
+def _create_task(team_name: str, recipient: str, request: str, requestor: str) -> str:
+    """
+    Requests another assistant in the team to complete a task.
+
+    :param team_name (str): The name of the team.
+    :param recipient (str): The name of the assistant that is being requested to complete the task.
+    :param request (str): A description of the task to complete. This can also be a question.
+    :param requestor (str): The name of the assistant who is requesting the task.
+    :return: "True" if the task was successfully received, "False" otherwise.
+    :rtype: str
+    """
+    task = AssistantTask(recipient=recipient, task_description=request, requestor=requestor)
+    team: Optional[AssistantTeam] = None
+    try:
+        team = AssistantTeam.get_team(team_name)
+        span: Optional[Span] = None
+        if team._current_task_span is not None:
+            span = team._current_task_span
+        elif team._current_request_span is not None:
+            span = team._current_request_span
+
+        if span is not None:
+            _add_create_task_event(
+                span=span, team_name=team_name, requestor=requestor, recipient=recipient, request=request
+            )
+    except Exception:  # pylint: disable=broad-except
+        # Tolerate unknown team names and tracing failures; the task is simply dropped.
+        pass
+    if team is not None:
+        team.add_task(task)
+        return "True"
+    return "False"
+
+
+# Any additional functions that might be used by the assistants:
+assistant_team_default_functions: Set = {
+    _create_task,
+}
+
+default_function_tool = FunctionTool(functions=assistant_team_default_functions)
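+
+
+# Typical flow (an illustrative sketch mirroring sample_assistants_assistant_team.py;
+# endpoint, credential, and model deployment name are elided here):
+#
+#     assistants_client = AssistantsClient(endpoint=..., credential=...)
+#     team = AssistantTeam("test_team", assistants_client=assistants_client)
+#     team.add_assistant(model=..., name="Coder", instructions="...")
+#     team.assemble_team()
+#     team.process_request("Write and review a small function.")
+#     team.dismantle_team()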
diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team_config.yaml b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team_config.yaml
new file mode 100644
index 000000000000..af1c9b50fba1
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team_config.yaml
@@ -0,0 +1,43 @@
+TEAM_LEADER_MODEL: |
+  gpt-4
+
+TEAM_LEADER_INSTRUCTIONS: |
+  You are an assistant named '{assistant_name}'. You are the leader of a team of assistants. The name of your team is '{team_name}'.
+  You are an assistant that is responsible for receiving requests from the user and utilizing a team of assistants to complete the task.
+  When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request.
+  You will use the provided _create_task function to create a task for the assistant that is best suited for handling the task next.
+  You will respond with the description of who you assigned the task and why. When you think that the original user request is
+  processed completely utilizing all the talent available in the team, you do not have to create any more tasks.
+  Using the skills of all the team members when applicable is highly valued.
+  Do not create parallel tasks.
+  Here are the other assistants in your team:
+
+TEAM_LEADER_INITIAL_REQUEST: |
+  Please create a task for the assistant in the team that is best suited to process the following request next.
+  Use the _create_task function available to you to create the task. The request is:
+  {original_request}
+
+TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS: |
+  Check the discussion so far, and especially the most recent assistant response in the thread; if you see a potential task
+  that could improve the final outcome, then use the _create_task function to create the task.
+  Do not ever ask the user for confirmation before creating a task.
+  If the request is completely processed, you do not have to create a task.
+
+TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS: |
+  You are an assistant named '{name}'. You are a member of a team of assistants. The name of your team is '{team_name}'.
+  {original_instructions}
+
+  - You can delegate tasks when appropriate. To delegate, call the _create_task function, using your own name as the 'requestor'.
+  - Provide a brief account of any tasks you assign and the outcome.
+  - Ask for help from other team members if you see they have the relevant expertise.
+  - Once you believe your assignment is complete, respond with your final answer or actions taken.
+  - Below are the other assistants in your team: {team_description}
+
+TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS: |
+  You are an assistant named '{name}'. You are a member of a team of assistants. The name of your team is '{team_name}'.
+  {original_instructions}
+
+  - You do not delegate tasks. Instead, focus solely on fulfilling the tasks assigned to you.
+  - If you have suggestions for tasks better suited to another assistant, simply mention it in your response, but do not call _create_task yourself.
+  - Once you believe your assignment is complete, respond with your final answer or actions taken.
+  - Below are the other assistants in your team: {team_description}
diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py
new file mode 100644
index 000000000000..98fff01843bb
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py
@@ -0,0 +1,63 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import os
+import sys
+from typing import cast
+from opentelemetry import trace
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.telemetry import enable_telemetry
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+
+class AssistantTraceConfigurator:
+    def __init__(self, assistants_client: AssistantsClient):
+        self.assistants_client = assistants_client
+
+    def enable_azure_monitor_tracing(self):
+        application_insights_connection_string = os.environ.get("AI_APPINSIGHTS_CONNECTION_STRING")
+        if not application_insights_connection_string:
+            print("AI_APPINSIGHTS_CONNECTION_STRING environment variable was not set.")
+            print("Please set AI_APPINSIGHTS_CONNECTION_STRING to the Application Insights")
+            print("connection string of your project. Application Insights must be enabled for the project.")
+            print("Enable it via the 'Tracing' tab in your AI Foundry project page.")
+            sys.exit(1)
+        configure_azure_monitor(connection_string=application_insights_connection_string)
+        enable_telemetry()
+
+    def enable_console_tracing_without_genai(self):
+        exporter = ConsoleSpanExporter()
+        trace.set_tracer_provider(TracerProvider())
+        provider = cast(TracerProvider, trace.get_tracer_provider())
+        provider.add_span_processor(SimpleSpanProcessor(exporter))
+        print("Console tracing enabled without assistant traces.")
+
+    def enable_console_tracing_with_assistant(self):
+        enable_telemetry(destination=sys.stdout)
+        print("Console tracing enabled with assistant traces.")
+
+    def display_menu(self):
+        print("Select a tracing option:")
+        print("1. Enable Azure Monitor tracing")
+        print("2. Enable console tracing without enabling gen_ai assistant traces")
+        print("3. Enable console tracing with gen_ai assistant traces")
+        print("4. Do not enable traces")
+
+    def setup_tracing(self):
+        self.display_menu()
+        choice = input("Enter your choice (1-4): ")
+
+        if choice == "1":
+            self.enable_azure_monitor_tracing()
+        elif choice == "2":
+            self.enable_console_tracing_without_genai()
+        elif choice == "3":
+            self.enable_console_tracing_with_assistant()
+        elif choice == "4":
+            print("No tracing enabled.")
+        else:
+            print("Invalid choice. Please select a valid option.")
diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py
new file mode 100644
index 000000000000..d7f7ee874227
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py
@@ -0,0 +1,62 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use multiple assistants via AssistantTeam, with tracing enabled.
+
+USAGE:
+    python sample_assistants_assistant_team.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    MODEL_DEPLOYMENT_NAME - the name of the model deployment to use.
+"""
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+from assistant_team import AssistantTeam
+from assistant_trace_configurator import AssistantTraceConfigurator
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME")
+
+if model_deployment_name is not None:
+    AssistantTraceConfigurator(assistants_client=assistants_client).setup_tracing()
+    with assistants_client:
+        assistant_team = AssistantTeam("test_team", assistants_client=assistants_client)
+        assistant_team.add_assistant(
+            model=model_deployment_name,
+            name="Coder",
+            instructions="You are a software engineer who writes great code. Your name is Coder.",
+        )
+        assistant_team.add_assistant(
+            model=model_deployment_name,
+            name="Reviewer",
+            instructions="You are a software engineer who reviews code. Your name is Reviewer.",
+        )
+        assistant_team.assemble_team()
+
+        print("A team of assistants specialized in software engineering is available for requests.")
+        while True:
+            user_input = input("Input (type 'quit' or 'exit' to exit): ")
+            if user_input.lower() in ("quit", "exit"):
+                break
+            assistant_team.process_request(request=user_input)
+
+        assistant_team.dismantle_team()
+else:
+    print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.")
diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py
new file mode 100644
index 000000000000..8c819f483d73
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py
@@ -0,0 +1,115 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use multiple assistants via AssistantTeam with a custom team leader, with tracing enabled.
+
+USAGE:
+    python sample_assistants_assistant_team_custom_team_leader.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    MODEL_DEPLOYMENT_NAME - the name of the model deployment to use.
+"""
+""" + +import os +from typing import Optional, Set +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from assistant_team import AssistantTeam, AssistantTask +from assistant_trace_configurator import AssistantTraceConfigurator +from azure.ai.assistants.models import FunctionTool, ToolSet + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + + +def create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: + """ + Requests another assistant in the team to complete a task. + + :param team_name (str): The name of the team. + :param recipient (str): The name of the assistant that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the assistant who is requesting the task. + :return: True if the task was successfully received, False otherwise. + :rtype: str + """ + task = AssistantTask(recipient=recipient, task_description=request, requestor=requestor) + team: Optional[AssistantTeam] = None + try: + team = AssistantTeam.get_team(team_name) + except: + pass + if team is not None: + team.add_task(task) + return "True" + return "False" + + +# Any additional functions that might be used by the assistants: +assistant_team_default_functions: Set = { + create_task, +} + +default_function_tool = FunctionTool(functions=assistant_team_default_functions) + + +if model_deployment_name is not None: + AssistantTraceConfigurator(assistants_client=assistants_client).setup_tracing() + with assistants_client: + assistant_team = AssistantTeam("test_team", assistants_client=assistants_client) + toolset = ToolSet() + toolset.add(default_function_tool) + assistant_team.set_team_leader( + model=model_deployment_name, + name="TeamLeader", + instructions="You are an assistant named 'TeamLeader'. You are a leader of a team of assistants. The name of your team is 'test_team'." + "You are an assistant that is responsible for receiving requests from user and utilizing a team of assistants to complete the task. " + "When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request. " + "You will use the provided create_task function to create a task for the assistant that is best suited for handling the task next. " + "You will respond with the description of who you assigned the task and why. When you think that the original user request is " + "processed completely utilizing all the talent available in the team, you do not have to create anymore tasks. " + "Using the skills of all the team members when applicable is highly valued. " + "Do not create parallel tasks. " + "Here are the other assistants in your team: " + "- Coder: You are software engineer who writes great code. Your name is Coder. " + "- Reviewer: You are software engineer who reviews code. Your name is Reviewer.", + toolset=toolset, + ) + assistant_team.add_assistant( + model=model_deployment_name, + name="Coder", + instructions="You are software engineer who writes great code. Your name is Coder.", + ) + assistant_team.add_assistant( + model=model_deployment_name, + name="Reviewer", + instructions="You are software engineer who reviews code. 
+        assistant_team.add_assistant(
+            model=model_deployment_name,
+            name="Reviewer",
+            instructions="You are a software engineer who reviews code. Your name is Reviewer.",
+        )
+        assistant_team.assemble_team()
+
+        print("A team of assistants specialized in software engineering is available for requests.")
+        while True:
+            user_input = input("Input (type 'quit' or 'exit' to exit): ")
+            if user_input.lower() in ("quit", "exit"):
+                break
+            assistant_team.process_request(request=user_input)
+
+        assistant_team.dismantle_team()
+else:
+    print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.")
diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py
new file mode 100644
index 000000000000..0e442868652d
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py
@@ -0,0 +1,99 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use multiple assistants to execute tasks.
+
+USAGE:
+    python sample_assistants_multi_assistant_team.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    MODEL_DEPLOYMENT_NAME - the name of the model deployment to use.
+"""
+
+import os
+
+# The star import brings in the user functions below, as well as the typing
+# names (e.g. Set) that this sample uses.
+from user_functions_with_traces import *
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import ToolSet, FunctionTool
+from azure.identity import DefaultAzureCredential
+from assistant_team import AssistantTeam
+from assistant_trace_configurator import AssistantTraceConfigurator
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+user_function_set_1: Set = {fetch_current_datetime, fetch_weather}
+
+user_function_set_2: Set = {send_email_using_recipient_name}
+
+user_function_set_3: Set = {convert_temperature}
+
+model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME")
+
+if model_deployment_name is not None:
+    AssistantTraceConfigurator(assistants_client=assistants_client).setup_tracing()
+    with assistants_client:
+
+        functions = FunctionTool(functions=user_function_set_1)
+        toolset1 = ToolSet()
+        toolset1.add(functions)
+
+        assistant_team = AssistantTeam("test_team", assistants_client=assistants_client)
+
+        assistant_team.add_assistant(
+            model=model_deployment_name,
+            name="TimeWeatherAssistant",
+            instructions="You are a specialized assistant for time and weather queries.",
+            toolset=toolset1,
+            can_delegate=True,
+        )
+
+        functions = FunctionTool(functions=user_function_set_2)
+        toolset2 = ToolSet()
+        toolset2.add(functions)
+
+        assistant_team.add_assistant(
+            model=model_deployment_name,
+            name="SendEmailAssistant",
+            instructions="You are a specialized assistant for sending emails.",
+            toolset=toolset2,
+            can_delegate=False,
+        )
+
+        functions = FunctionTool(functions=user_function_set_3)
+        toolset3 = ToolSet()
+        toolset3.add(functions)
+
+        assistant_team.add_assistant(
+            model=model_deployment_name,
+            name="TemperatureAssistant",
+            instructions="You are a specialized assistant for temperature conversion.",
+            toolset=toolset3,
+            can_delegate=False,
+        )
+
+        assistant_team.assemble_team()
+
+        user_request = (
+            "Hello! Please provide me the current time in '%Y-%m-%d %H:%M:%S' format, and the weather in New York. "
+            "Finally, convert the Celsius temperature to Fahrenheit and send an email to Example Recipient with a summary of the results."
+        )
" + "Finally, convert the Celsius to Fahrenheit and send an email to Example Recipient with summary of results." + ) + + # Once process_request is called, the TeamLeader will coordinate. + # The loop in process_request will pick up tasks from the queue, assign them, and so on. + assistant_team.process_request(request=user_request) + + assistant_team.dismantle_team() +else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/user_functions_with_traces.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/user_functions_with_traces.py new file mode 100644 index 000000000000..5c1df046571d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/user_functions_with_traces.py @@ -0,0 +1,111 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime +from typing import Any, Callable, Set, Optional +from opentelemetry import trace + + +tracer = trace.get_tracer(__name__) + + +# These are the user-defined functions that can be called by the assistant. +@tracer.start_as_current_span("fetch_current_datetime") # type: ignore +def fetch_current_datetime(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. + :rtype: str + """ + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +@tracer.start_as_current_span("fetch_weather") # type: ignore +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. + mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +@tracer.start_as_current_span("send_email_using_recipient_name") # type: ignore +def send_email_using_recipient_name(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Name of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. 
+ print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +@tracer.start_as_current_span("convert_temperature") # type: ignore +def convert_temperature(celsius: float) -> str: + """Converts temperature from Celsius to Fahrenheit. + + :param celsius (float): Temperature in Celsius. + :rtype: float + + :return: Temperature in Fahrenheit. + :rtype: str + """ + fahrenheit = (celsius * 9 / 5) + 32 + return json.dumps({"fahrenheit": fahrenheit}) + + +# Example User Input for Each Function +# 1. Fetch Current DateTime +# User Input: "What is the current date and time?" +# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" + +# 2. Fetch Weather +# User Input: "Can you provide the weather information for New York?" + +# 3. Send Email Using Recipient Name +# User Input: "Send an email to John Doe with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" + +# 4. Convert Temperature +# User Input: "Convert 25 degrees Celsius to Fahrenheit." + + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_current_datetime, + fetch_weather, + send_email_using_recipient_name, + convert_temperature, +} diff --git a/sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv b/sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv new file mode 100644 index 000000000000..e02068e09042 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv @@ -0,0 +1,502 @@ +name,NSE_code,BSE_code,sector,industry,revenue,operating_expenses,operating_profit,operating_profit_margin,depreciation,interest,profit_before_tax,tax,net_profit,EPS,profit_TTM,EPS_TTM +3M India Ltd.,3MINDIA,523395,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,"1,057",847.4,192.1,18.48%,12.9,0.7,195.9,49.8,146.1,129.7,535.9,475.7 +ACC Ltd.,ACC,500410,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"4,644.8","3,885.4",549.3,12.39%,212.8,28.9,517.7,131.5,387.9,20.7,"1,202.7",64 +AIA Engineering Ltd.,AIAENG,532683,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,"1,357.1",912.7,382.1,29.51%,24.5,7.4,412.5,88.4,323.1,34.3,"1,216.1",128.9 +APL Apollo Tubes Ltd.,APLAPOLLO,533758,METALS & MINING,IRON & STEEL PRODUCTS,"4,65","4,305.4",325,7.02%,41.3,26.6,276.7,73.8,202.9,7.3,767.5,27.7 +Au Small Finance Bank Ltd.,AUBANK,540611,BANKING AND FINANCE,BANKS,"2,956.5","1,026.7",647.7,25.59%,0,"1,282.1",533.4,131.5,401.8,6,"1,606.2",24 +Adani Ports & Special Economic Zone Ltd.,ADANIPORTS,532921,TRANSPORTATION,MARINE PORT & SERVICES,"6,951.9","2,982.4","3,664",55.13%,974.5,520.1,"2,474.9",759,"1,747.8",8.1,"6,337",29.3 +Adani Energy Solutions Ltd.,ADANIENSOL,ASM,UTILITIES,ELECTRIC UTILITIES,"3,766.5","2,169.3","1,504.6",40.95%,432.1,640.8,369.9,84.9,275.9,2.5,"1,315.1",11.8 +Aditya Birla Fashion and Retail Ltd.,ABFRL,535755,RETAILING,DEPARTMENT STORES,"3,272.2","2,903.6",322.9,10.01%,388.8,208.4,-228.6,-28.2,-179.2,-1.9,-491.7,-5.2 +Aegis Logistics Ltd.,AEGISCHEM,500003,OIL & GAS,OIL MARKETING & DISTRIBUTION,"1,279.3","1,026.5",208.3,16.87%,34.1,26.6,192,42,127,3.6,509,14.5 +Ajanta Pharma Ltd.,AJANTPHARM,532331,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,049.8",737.8,290.7,28.26%,33.7,2.3,275.9,80.6,195.3,15.5,660.2,52.3 +Alembic Pharmaceuticals Ltd.,APLLTD,533573,PHARMACEUTICALS & 
BIOTECHNOLOGY,PHARMACEUTICALS,"1,605.1","1,386.7",208.2,13.06%,67.6,15.7,135.1,-1.9,136.6,7,531.7,27 +Alkem Laboratories Ltd.,ALKEM,539523,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"3,503.4","2,693.4",746.7,21.71%,73.9,30.3,648,33.1,620.5,51.9,"1,432.9",119.9 +Amara Raja Energy & Mobility Ltd.,ARE&M,500008,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,988.6","2,556.9",402.5,13.60%,115.7,6.2,309.8,83.5,226.3,13.2,779.8,45.7 +Ambuja Cements Ltd.,AMBUJACEM,500425,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"7,9","6,122.1","1,301.8",17.54%,380.9,61.2,"1,335.7",352.5,793,4,"2,777.9",14 +Apollo Hospitals Enterprise Ltd.,APOLLOHOSP,508869,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"4,869.1","4,219.4",627.5,12.95%,163.4,111.3,376.9,130.2,232.9,16.2,697.5,48.5 +Apollo Tyres Ltd.,APOLLOTYRE,500877,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"6,304.9","5,119.8","1,159.8",18.47%,360.3,132.8,679.9,205.8,474.3,7.5,"1,590.7",25 +Ashok Leyland Ltd.,ASHOKLEY,500477,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,"11,463","9,558.6","1,870.4",16.37%,226.6,715.1,924.4,358,526,1.8,"2,141.5",7.3 +Asian Paints Ltd.,ASIANPAINT,500820,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"8,643.8","6,762.3","1,716.2",20.24%,208.7,50.9,"1,621.8",418.6,"1,205.4",12.6,"5,062.6",52.8 +Astral Ltd.,ASTRAL,532830,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,"1,376.4","1,142.9",220.1,16.15%,48.7,8,176.8,45.1,131.2,4.9,549.7,20.4 +Atul Ltd.,ATUL,500027,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,215.8","1,038.5",155.2,13.00%,54,1.9,121.5,32.5,90.3,30.6,392.3,132.9 +Aurobindo Pharma Ltd.,AUROPHARMA,524804,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"7,406.4","5,846","1,373.4",19.02%,417.5,68.2,"1,074.7",323.7,757.2,12.8,"2,325.5",39.7 +Avanti Feeds Ltd.,AVANTIFEED,512573,FOOD BEVERAGES & TOBACCO,OTHER FOOD PRODUCTS,"1,312","1,184.5",94,7.35%,14.3,0.2,113,30.5,74.2,5.5,336.4,24.7 +Avenue Supermarts Ltd.,DMART,540376,RETAILING,DEPARTMENT STORES,"12,661.3","11,619.4","1,005",7.96%,174.4,15.6,851.9,228.6,623.6,9.6,"2,332.1",35.8 +Axis Bank Ltd.,AXISBANK,532215,BANKING AND FINANCE,BANKS,"33,122.2","9,207.3","9,166",33.43%,0,"14,749","8,313.8","2,096.1","6,204.1",20.1,"13,121",42.6 +Bajaj Auto Ltd.,BAJAJ-AUTO,532977,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"11,206.8","8,708.1","2,130.1",19.65%,91.8,6.5,"2,400.4",564,"2,02",71.4,"6,841.6",241.8 +Bajaj Finance Ltd.,BAJFINANCE,500034,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"13,381.8","2,851.5","9,449.7",70.63%,158.5,"4,537.1","4,757.6","1,207","3,550.8",58.7,"13,118.5",216.7 +Bajaj Finserv Ltd.,BAJAJFINSV,532978,DIVERSIFIED,HOLDING COMPANIES,"26,022.7","14,992.2","9,949.9",38.24%,208.8,"4,449.1","5,292","1,536.5","1,929",12.1,"7,422.6",46.6 +Bajaj Holdings & Investment Ltd.,BAJAJHLDNG,500490,DIVERSIFIED,HOLDING COMPANIES,240.1,33.5,191.2,85.08%,8.4,0.2,197.9,73.9,"1,491.2",134,"5,545.1",498.3 +Balkrishna Industries Ltd.,BALKRISIND,502355,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"2,360.3","1,720.5",532.7,23.64%,160.4,23.9,455.5,108.1,347.4,18,"1,047.5",54.2 +Balrampur Chini Mills Ltd.,BALRAMCHIN,500038,FOOD BEVERAGES & TOBACCO,SUGAR,"1,649","1,374.6",164.9,10.71%,41.2,17.2,215.9,56.6,166.3,8.2,540.5,26.8 +Bank of Baroda,BANKBARODA,532134,BANKING AND FINANCE,BANKS,"35,766","8,430.4","9,807.9",33.52%,0,"17,527.7","6,022.8","1,679.7","4,458.4",8.5,"18,602.9",35.9 +Bank of India,BANKINDIA,532149,BANKING AND 
FINANCE,BANKS,"16,779.4","3,704.9","3,818.8",25.35%,0,"9,255.7","2,977.4","1,488.6","1,498.5",3.6,"5,388.7",13.1 +Bata India Ltd.,BATAINDIA,500043,RETAILING,FOOTWEAR,834.6,637.5,181.7,22.18%,81.7,28.4,46.1,12.1,34,2.6,289.7,22.5 +Berger Paints (India) Ltd.,BERGEPAINT,509480,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"2,782.6","2,293.7",473.6,17.12%,82.9,21.1,385,96.7,291.6,2.5,"1,032.6",8.9 +Bharat Electronics Ltd.,BEL,500049,GENERAL INDUSTRIALS,DEFENCE,"4,146.1","2,994.9","1,014.2",25.30%,108.3,1.5,"1,041.5",260.7,789.4,1.1,"3,323",4.5 +Bharat Forge Ltd.,BHARATFORG,500493,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"3,826.7","3,152.8",621.4,16.47%,211.3,124.3,336.1,121.8,227.2,4.9,783.7,16.8 +Bharat Heavy Electricals Ltd.,BHEL,500103,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"5,305.4","5,513",-387.7,-7.56%,59.9,180.4,-447.9,-197.9,-238.1,-0.7,71.3,0.2 +Bharat Petroleum Corporation Ltd.,BPCL,500547,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"103,72","90,103.9","12,940.5",12.56%,"1,605.3",973.2,"10,755.7","2,812.2","8,243.5",38.7,"27,505.3",129.2 +Bharti Airtel Ltd.,BHARTIARTL,532454,TELECOM SERVICES,TELECOM SERVICES,"37,374.2","17,530.1","19,513.7",52.68%,"9,734.3","5,185.8","3,353.7","1,846.5","1,340.7",2.4,"7,547",13.2 +Indus Towers Ltd.,INDUSTOWER,534816,TELECOM SERVICES,OTHER TELECOM SERVICES,"7,229.7","3,498.8","3,633.7",50.95%,"1,525.6",458.6,"1,746.7",452,"1,294.7",4.8,"3,333.5",12.4 +Biocon Ltd.,BIOCON,532523,PHARMACEUTICALS & BIOTECHNOLOGY,BIOTECHNOLOGY,"3,620.2","2,720.7",741.6,21.42%,389.3,247.7,238.5,41.6,125.6,1.1,498.4,4.2 +Birla Corporation Ltd.,BIRLACORPN,500335,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,313.2","1,997",288.9,12.64%,143.5,95.4,77.1,18.8,58.4,7.6,153.1,19.9 +Blue Dart Express Ltd.,BLUEDART,526612,TRANSPORTATION,TRANSPORTATION - LOGISTICS,"1,329.7","1,101.8",222.7,16.82%,110.6,19.5,97.9,24.8,73.1,30.8,292.4,123.2 +Blue Star Ltd.,BLUESTARCO,500067,CONSUMER DURABLES,CONSUMER ELECTRONICS,"1,903.4","1,767.7",122.7,6.49%,23,17.6,95,24.3,70.7,3.6,437.7,21.3 +Bombay Burmah Trading Corporation Ltd.,BBTC,501425,FOOD BEVERAGES & TOBACCO,TEA & COFFEE,"4,643.5","3,664.7",859.2,18.99%,74.7,154.6,697.1,212.6,122,17.5,"-1,499.5",-214.8 +Bosch Ltd.,BOSCHLTD,500530,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"4,284.3","3,638.8",491.3,11.90%,101.3,12.2,"1,317",318.1,999.8,339,"2,126.9",721 +Brigade Enterprises Ltd.,BRIGADE,532929,REALTY,REALTY,"1,407.9","1,041.8",324.8,23.77%,75.7,110,180.3,67.8,133.5,5.8,298.2,12.9 +Britannia Industries Ltd.,BRITANNIA,500825,FMCG,PACKAGED FOODS,"4,485.2","3,560.5",872.4,19.68%,71.7,53.4,799.7,212.1,587.6,24.4,"2,536.2",105.3 +CCL Products India Ltd.,CCL,519600,FOOD BEVERAGES & TOBACCO,TEA & COFFEE,608.3,497.7,109.9,18.09%,22.6,18.4,69.7,8.8,60.9,4.6,279.9,21 +Crisil Ltd.,CRISIL,500092,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,771.8,544.2,191.7,26.05%,26.5,0.8,200.3,48.3,152,20.8,606.3,82.9 +Zydus Lifesciences Ltd.,ZYDUSLIFE,532321,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"4,422.8","3,222.7","1,146.1",26.23%,184.2,8.7,"1,007.2",226.4,800.7,7.9,"2,807.1",27.7 +Can Fin Homes Ltd.,CANFINHOME,511196,BANKING AND FINANCE,HOUSING FINANCE,871,49.7,749.2,86.01%,2.8,548.4,198,39.9,158.1,11.9,658.8,49.5 +Canara Bank,CANBK,532483,BANKING AND FINANCE,BANKS,"33,891.2","8,250.3","7,706.6",28.24%,0,"17,934.3","5,098","1,420.6","3,86",20.9,"13,968.4",77 +Carborundum Universal Ltd.,CARBORUNIV,513375,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"1,166",978.8,167.5,14.61%,45.9,4.9,136.4,43.7,101.9,5.4,461.3,24.3 +Castrol 
India Ltd.,CASTROLIND,500870,OIL & GAS,OIL MARKETING & DISTRIBUTION,"1,203.2",914.4,268.6,22.70%,22.9,2.4,263.5,69.1,194.4,2,815.5,8.2 +Ceat Ltd.,CEATLTD,500878,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"3,063.8","2,597.2",456.1,14.94%,124.5,71.7,270.4,68.3,208,51.4,521.7,129 +Central Bank of India,CENTRALBK,532885,BANKING AND FINANCE,BANKS,"8,438.5","2,565.4","1,535.4",20.81%,0,"4,337.7",567.2,-41.5,622,0.7,"2,181.4",2.5 +Century Plyboards (India) Ltd.,CENTURYPLY,532548,FOREST MATERIALS,FOREST PRODUCTS,"1,011.4",852.5,144.3,14.47%,23.4,6.1,129.4,32.2,96.9,4.4,380.7,17.1 +Cera Sanitaryware Ltd.,CERA,532443,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,476.2,387.2,76.5,16.49%,8.9,1.4,77.2,19.8,56.9,43.8,232.4,178.7 +Chambal Fertilisers & Chemicals Ltd.,CHAMBLFERT,500085,FERTILIZERS,FERTILIZERS,"5,467.3","4,770.5",615,11.42%,78.4,45.8,572.6,200.2,381,9.2,"1,137.7",27.3 +Cholamandalam Investment & Finance Company Ltd.,CHOLAFIN,511243,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"4,695.2",987.6,"3,235.1",69.99%,38.5,"2,204.2","1,065",288.8,772.9,9.4,"3,022.8",36.7 +Cipla Ltd.,CIPLA,500087,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"6,854.5","4,944.4","1,733.8",25.96%,290,25.8,"1,594.2",438.4,"1,130.9",14,"3,449.1",42.7 +City Union Bank Ltd.,CUB,532210,BANKING AND FINANCE,BANKS,"1,486.1",333.9,386.6,29.65%,0,765.6,330.6,50,280.6,3.8,943.8,12.7 +Coal India Ltd.,COALINDIA,533278,METALS & MINING,COAL,"34,760.3","24,639.4","8,137",24.83%,"1,178.2",182.5,"8,760.2","2,036.5","6,799.8",11,"28,059.6",45.5 +Colgate-Palmolive (India) Ltd.,COLPAL,500830,FMCG,PERSONAL PRODUCTS,"1,492.1",989,482.1,32.77%,44.3,1.1,457.8,117.8,340.1,12.5,"1,173.2",43.1 +Container Corporation of India Ltd.,CONCOR,531344,COMMERCIAL SERVICES & SUPPLIES,WAREHOUSING AND LOGISTICS,"2,299.8","1,648.4",546.5,24.90%,153.1,16.5,481.8,119,367.4,6,"1,186.2",19.5 +Coromandel International Ltd.,COROMANDEL,506395,FERTILIZERS,FERTILIZERS,"7,032.9","5,929.4","1,058.7",15.15%,54,46.2,"1,003.3",245,756.9,25.7,"2,024.2",68.8 +Crompton Greaves Consumer Electricals Ltd.,CROMPTON,539876,CONSUMER DURABLES,HOUSEHOLD APPLIANCES,"1,797.2","1,607.8",174.5,9.79%,32.1,21.5,135.8,34.9,97.2,1.5,432,6.7 +Cummins India Ltd.,CUMMINSIND,500480,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,"2,011.3","1,575.4",346.2,18.02%,38.3,6.8,390.9,99.6,329.1,11.9,"1,445.5",52.1 +Cyient Ltd.,CYIENT,532175,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,792","1,452.7",325.8,18.32%,65.8,27,240.3,56.7,178.3,16.3,665.6,60.1 +DCM Shriram Ltd.,DCMSHRIRAM,523367,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"2,73","2,593.9",114.1,4.21%,74,14.7,47.5,15.2,32.2,2.1,617.6,39.4 +DLF Ltd.,DLF,532868,REALTY,REALTY,"1,476.4",885.3,462.4,34.31%,37,90.2,464,112.2,622.8,2.5,"2,239",9 +Dabur India Ltd.,DABUR,500096,FMCG,PERSONAL PRODUCTS,"3,320.2","2,543",660.9,20.63%,98.3,28.1,650.8,144.3,515,2.9,"1,755.7",9.9 +Delta Corp Ltd.,DELTACORP,532848,COMMERCIAL SERVICES & SUPPLIES,MISC. COMMERCIAL SERVICES,282.6,170.5,100.1,36.99%,16.9,2.7,92.4,23,69.4,2.6,273.3,10.2 +Divi's Laboratories Ltd.,DIVISLAB,532488,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,995","1,43",479,25.09%,95,1,469,121,348,13.1,"1,331.8",50.3 +Dr. Lal Pathlabs Ltd.,LALPATHLAB,539524,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE SERVICES,619.4,423.5,177.8,29.57%,35.9,7.8,152.2,41.5,109.3,13.2,301.4,36.1 +Dr. 
Reddy's Laboratories Ltd.,DRREDDY,500124,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"7,217.6","4,888.8","2,008.3",29.09%,375.5,35.3,"1,912.5",434.5,"1,482.2",89.1,"5,091.2",305.2 +EID Parry (India) Ltd.,EIDPARRY,500125,FOOD BEVERAGES & TOBACCO,OTHER FOOD PRODUCTS,"9,210.3","8,002","1,057.5",11.67%,101.2,74.2,"1,032.8",246.8,452.3,25.5,991,55.8 +Eicher Motors Ltd.,EICHERMOT,505200,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"4,388.3","3,027.4","1,087.2",26.42%,142.5,12.7,"1,205.7",291.1,"1,016.2",37.1,"3,581",130.8 +Emami Ltd.,EMAMILTD,531162,FMCG,PERSONAL PRODUCTS,876,631.2,233.7,27.02%,46.1,2.2,196.4,15.8,178.5,4.1,697.8,16 +Endurance Technologies Ltd.,ENDURANCE,540153,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,560.5","2,226.7",318.3,12.51%,118.4,9.8,205.6,51.1,154.6,11,562.8,40 +Engineers India Ltd.,ENGINERSIN,532178,COMMERCIAL SERVICES & SUPPLIES,CONSULTING SERVICES,833.6,691.3,98.5,12.47%,8.3,0.4,133.6,32.2,127.5,2.3,472.7,8.4 +Escorts Kubota Ltd.,ESCORTS,500495,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,"2,154.4","1,798.6",260.7,12.66%,40.8,3.1,311.9,79.7,223.3,20.6,910.5,82.4 +Exide Industries Ltd.,EXIDEIND,500086,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"4,408.9","3,872.4",499.1,11.42%,141.5,29.7,365.3,95.2,269.4,3.2,872.7,10.3 +Federal Bank Ltd.,FEDERALBNK,500469,BANKING AND FINANCE,BANKS,"6,548.2","1,603.8","1,400.3",24.18%,0,"3,544.1","1,342.7",342.6,994.1,4.3,"3,671.4",15.6 +Finolex Cables Ltd.,FINCABLES,500144,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,229.3","1,041.3",146.1,12.30%,10.8,0.4,176.7,52.3,154.2,10.1,643.9,42.1 +Finolex Industries Ltd.,FINPIPE,500940,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,944.5,780.2,103,11.66%,27.4,12.5,124.5,35.4,98,1.6,459.3,7.4 +Firstsource Solutions Ltd.,FSL,532809,SOFTWARE & SERVICES,BPO/KPO,"1,556.9","1,311.2",228.8,14.86%,65.4,26.1,154.3,27.8,126.5,1.9,551.7,7.9 +GAIL (India) Ltd.,GAIL,532155,UTILITIES,UTILITIES,"33,191","29,405.5","3,580.2",10.85%,837.3,199.6,"2,748.7",696.3,"2,444.1",3.7,"5,283.8",8 +GlaxoSmithKline Pharmaceuticals Ltd.,GLAXO,500660,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,985.2,667.5,289.5,30.25%,18.1,0.4,299.2,81.7,217.5,12.8,647.8,38.2 +Glenmark Pharmaceuticals Ltd.,GLENMARK,532296,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"3,209.1","2,745.1",462.3,14.41%,141.5,121.5,-124.4,55.9,-81.9,-2.9,-196.3,-7 +Godrej Consumer Products Ltd.,GODREJCP,532424,FMCG,PERSONAL PRODUCTS,"3,667.9","2,897.8",704.2,19.55%,60.9,77.3,619.4,186.6,432.8,4.2,"1,750.1",17.1 +Godrej Industries Ltd.,GODREJIND,500164,DIVERSIFIED,DIVERSIFIED,"4,256.9","3,672.1",265.5,6.74%,89.3,333.1,162.4,75.9,87.3,2.6,880,26.1 +Godrej Properties Ltd.,GODREJPROP,533150,REALTY,REALTY,605.1,404.7,-61.7,-17.98%,7.4,48,145.1,38.8,66.8,2.4,662.6,23.8 +Granules India Ltd.,GRANULES,532482,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,191",976.5,213,17.90%,52.5,26,136,33.9,102.1,4.2,393.9,16.3 +Great Eastern Shipping Company Ltd.,GESHIP,500620,TRANSPORTATION,SHIPPING,"1,461.5",585.6,643.4,52.35%,186.7,77.1,611.9,17.3,594.7,41.6,"2,520.1",176.5 +Gujarat Alkalies & Chemicals Ltd.,GUJALKALI,530001,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"1,042.3",926.1,45.2,4.65%,95.2,10.8,10.2,-0.1,-18.4,-2.5,82.7,11.3 +Gujarat Gas Ltd.,GUJGASLTD,539336,UTILITIES,UTILITIES,"4,019.3","3,494.5",496.6,12.44%,117.9,7.8,399.1,102.9,296.2,4.3,"1,254.3",18.2 +Gujarat Narmada Valley Fertilizers & Chemicals Ltd.,GNFC,500670,FERTILIZERS,FERTILIZERS,"2,232","1,911",169,8.12%,78,1,242,64,182,11.7,932,60.1 +Gujarat Pipavav Port 
Ltd.,GPPL,533248,TRANSPORTATION,MARINE PORT & SERVICES,270.4,102,150.6,59.64%,28.8,2.2,141.1,53.4,92.3,1.9,341.8,7.1 +Gujarat State Fertilizer & Chemicals Ltd.,GSFC,500690,FERTILIZERS,FERTILIZERS,"3,313.2","2,881.4",237.3,7.61%,45.7,1.6,387,78.1,308.9,7.8,"1,056.2",26.5 +Gujarat State Petronet Ltd.,GSPL,532702,UTILITIES,UTILITIES,"4,455.9","3,497.2",913.7,20.72%,165,14.5,779.2,198.7,454.6,8.1,"1,522",27 +HCL Technologies Ltd.,HCLTECH,532281,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"27,037","20,743","5,929",22.23%,"1,01",156,"5,128","1,295","3,832",14.2,"15,445",56.9 +HDFC Bank Ltd.,HDFCBANK,500180,BANKING AND FINANCE,BANKS,"107,566.6","42,037.6","24,279.1",32.36%,0,"41,249.9","20,967.4","3,655","16,811.4",22.2,"54,474.6",71.8 +Havells India Ltd.,HAVELLS,517354,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"3,952.8","3,527",373.4,9.57%,81.2,9.3,335.3,86.2,249.1,4,"1,177.7",18.8 +Hero MotoCorp Ltd.,HEROMOTOCO,500182,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"9,741.2","8,173.5","1,359.5",14.26%,187.1,25,"1,355.6",353.1,"1,006.3",50.3,"3,247.6",162.5 +HFCL Ltd.,HFCL,500183,TELECOMMUNICATIONS EQUIPMENT,TELECOM CABLES,"1,128.7",978.9,132.6,11.93%,21.4,34.8,93.5,24,69.4,0.5,305.5,2.1 +Hindalco Industries Ltd.,HINDALCO,500440,METALS & MINING,ALUMINIUM AND ALUMINIUM PRODUCTS,"54,632","48,557","5,612",10.36%,"1,843","1,034","3,231","1,035","2,196",9.9,"8,423",37.9 +Hindustan Copper Ltd.,HINDCOPPER,513599,METALS & MINING,COPPER,392.6,260.2,121.2,31.77%,45.6,4.1,82.6,21.9,60.7,0.6,320.5,3.3 +Hindustan Petroleum Corporation Ltd.,HINDPETRO,500104,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"96,093.4","87,512","8,24",8.61%,"1,247.3",590,"6,744.1","1,616","5,827",41.1,"16,645",117.3 +Hindustan Unilever Ltd.,HINDUNILVR,500696,FMCG,PERSONAL PRODUCTS,"15,806","11,826","3,797",24.30%,297,88,"3,59",931,"2,656",11.3,"10,284",43.8 +Hindustan Zinc Ltd.,HINDZINC,500188,METALS & MINING,ZINC,"7,014","3,652","3,139",46.22%,825,232,"2,305",576,"1,729",4.1,"8,432",20 +Housing and Urban Development Corporation Ltd.,HUDCO,540530,BANKING AND FINANCE,HOUSING FINANCE,"1,880.8",82.7,"1,809.6",97.04%,2.4,"1,216.8",606.4,154.7,451.6,2.3,"1,790.7",8.9 +ITC Ltd.,ITC,500875,FOOD BEVERAGES & TOBACCO,CIGARETTES-TOBACCO PRODUCTS,"18,439.3","11,320.2","6,454.2",36.31%,453,9.9,"6,656.2","1,700.3","4,898.1",3.9,"20,185.1",16.2 +ICICI Bank Ltd.,ICICIBANK,532174,BANKING AND FINANCE,BANKS,"57,292.3","23,911","15,473.2",39.74%,0,"17,908","14,824.2","3,808.8","11,805.6",15.6,"41,086.8",58.7 +ICICI Prudential Life Insurance Company Ltd.,ICICIPRULI,540133,BANKING AND FINANCE,LIFE INSURANCE,"17,958.1","17,612.3",-229.6,-1.32%,0,0,340.2,32.5,243.9,1.7,906.9,6.3 +IDBI Bank Ltd.,IDBI,500116,BANKING AND FINANCE,BANKS,"7,063.7","1,922.3","2,175.3",36.02%,0,"2,966.1","2,396.9","1,003.7","1,385.4",1.3,"4,776.3",4.4 +IDFC First Bank Ltd.,IDFCFIRSTB,539437,BANKING AND FINANCE,BANKS,"8,765.8","3,849","1,511.2",20.54%,0,"3,405.6",982.8,236,746.9,1.1,"2,911.1",4.3 +IDFC Ltd.,IDFC,532659,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),36.7,6,30.6,83.56%,0,0,30.6,6.6,223.5,1.4,"4,147.1",25.9 +IRB Infrastructure Developers Ltd.,IRB,532947,CEMENT AND CONSTRUCTION,ROADS & HIGHWAYS,"1,874.5",950.4,794.6,45.54%,232.7,434.6,256.9,85.8,95.7,0.2,501,0.8 +ITI Ltd.,ITI,523610,TELECOMMUNICATIONS EQUIPMENT,TELECOM EQUIPMENT,256.1,299.3,-52.8,-21.42%,13.3,69.3,-125.8,0,-126,-1.3,-388.4,-4 +Vodafone Idea Ltd.,IDEA,532822,TELECOM SERVICES,TELECOM SERVICES,"10,750.8","6,433.5","4,282.8",39.97%,"5,667.3","6,569","-7,919",817.7,"-8,737.9",-1.8,"-30,986.8",-6.4 +India 
Cements Ltd.,INDIACEM,530005,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"1,272.4","1,26",4.4,0.35%,55,60.4,-103,-17.4,-80.1,-2.6,-261.1,-8.4 +Indiabulls Housing Finance Ltd.,IBULHSGFIN,535789,BANKING AND FINANCE,HOUSING FINANCE,"2,242.3",190.6,"1,779.2",79.88%,22.9,"1,349.8",421.6,123.6,298,6.5,"1,146",24.3 +Indian Bank,INDIANB,532814,BANKING AND FINANCE,BANKS,"15,929.4","3,599.1","4,327.7",31.44%,0,"8,002.6","2,776.7",768.6,"2,068.5",16.6,"6,893.3",55.3 +Indian Hotels Company Ltd.,INDHOTEL,500850,HOTELS RESTAURANTS & TOURISM,HOTELS,"1,480.9","1,078.4",354.8,24.75%,111.2,59,232.2,72.3,166.9,1.2,"1,100.3",7.7 +Indian Oil Corporation Ltd.,IOC,530965,OIL & GAS,OIL MARKETING & DISTRIBUTION,"179,752.1","156,013.1","23,328.4",13.01%,"3,609.6","2,135","18,090.2","4,699.7","13,114.3",9.5,"38,614.3",27.3 +Indian Overseas Bank,IOB,532388,BANKING AND FINANCE,BANKS,"6,941.5","1,785.1","1,679.8",28.84%,0,"3,476.6",635.5,8.3,627.2,0.3,"2,341.9",1.2 +Indraprastha Gas Ltd.,IGL,532514,UTILITIES,UTILITIES,"3,520.2","2,801.6",656.9,18.99%,102.2,2.5,613.9,151.4,552.7,7.9,"1,806.2",25.8 +IndusInd Bank Ltd.,INDUSINDBK,532187,BANKING AND FINANCE,BANKS,"13,529.7","3,449.9","3,908.7",34.75%,0,"6,171.1","2,934.9",732.9,"2,202.2",28.4,"8,333.7",107.2 +Info Edge (India) Ltd.,NAUKRI,532777,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,792,421.2,204.7,32.70%,25.9,8.2,382.8,68.7,205.1,15.9,-25.6,-2 +InterGlobe Aviation Ltd.,INDIGO,539448,TRANSPORTATION,AIRLINES,"15,502.9","12,743.6","2,200.3",14.72%,"1,549","1,021.3",189.1,0.2,188.9,4.9,"5,621.3",145.7 +Ipca Laboratories Ltd.,IPCALAB,524494,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"2,072.5","1,712.7",321.3,15.80%,90.3,44.1,225.4,87.9,145.1,5.7,492.2,19.4 +J B Chemicals & Pharmaceuticals Ltd.,JBCHEPHARM,506943,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,889.4,638.2,243.5,27.62%,32.2,10.4,208.7,58.1,150.6,9.7,486.6,31.4 +JK Cement Ltd.,JKCEMENT,532644,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,782.1","2,285.8",467,16.96%,137.1,115,244.2,65.7,178.1,23.1,444,57.5 +JK Lakshmi Cement Ltd.,JKLAKSHMI,500380,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"1,588.5","1,357.3",217.3,13.80%,56.6,33.6,141,45.1,92.7,7.9,357.6,30.4 +JM Financial Ltd.,JMFINANCIL,523405,DIVERSIFIED,HOLDING COMPANIES,"1,214",407.9,662.6,55.34%,13.2,388.1,277.9,72.4,194.9,2,608.1,6.4 +JSW Energy Ltd.,JSWENERGY,533148,UTILITIES,ELECTRIC UTILITIES,"3,387.4","1,379","1,880.4",57.69%,408.7,513.7,"1,085.9",235.1,850.2,5.2,"1,591.7",9.7 +JSW Steel Ltd.,JSWSTEEL,500228,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"44,821","36,698","7,886",17.69%,"2,019","2,084","4,609","1,812","2,76",11.4,"9,252",38.1 +Jindal Stainless Ltd.,JSL,532508,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"9,829","8,566.5","1,230.6",12.56%,221.9,155.6,985.7,229.1,774.3,9.4,"2,600.2",31.6 +Jindal Steel & Power Ltd.,JINDALSTEL,532286,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"12,282","9,964.5","2,285.7",18.66%,603.7,329.4,"1,384.5",-5.8,"1,387.8",13.8,"4,056",40.4 +Jubilant Foodworks Ltd.,JUBLFOOD,533155,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,"1,375.7","1,091.4",277.2,20.25%,141.9,56.8,85.5,23.3,97.2,1.5,235,3.6 +Just Dial Ltd.,JUSTDIAL,535648,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,318.5,211.8,48.8,18.71%,12.2,2.4,92.1,20.3,71.8,8.4,314.1,36.9 +Jyothy Labs Ltd.,JYOTHYLAB,532926,FMCG,PERSONAL PRODUCTS,745.6,597,135.4,18.48%,12.3,1.2,135.1,31.1,104.2,2.8,326.9,8.9 +KRBL Ltd.,KRBL,530813,FMCG,PACKAGED FOODS,"1,246.5","1,018.9",194.5,16.03%,19.9,0.8,206.8,53.6,153.3,6.5,671.4,29.3 +Kajaria Ceramics 
Ltd.,KAJARIACER,500233,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"1,129.9",941.9,179.7,16.02%,36.1,4.3,147.7,36.6,108,6.8,397.8,25 +Kalpataru Projects International Ltd.,KPIL,522287,UTILITIES,ELECTRIC UTILITIES,"4,53","4,148",370,8.19%,113,137,132,42,89,5.5,478,29.9 +Kansai Nerolac Paints Ltd.,KANSAINER,500165,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"1,978.6","1,683.3",273.2,13.97%,47.4,7.6,240.3,64.8,177.2,2.2,"1,118.8",13.8 +Karur Vysya Bank Ltd.,KARURVYSYA,590003,BANKING AND FINANCE,BANKS,"2,336",616.4,637.9,31.94%,0,"1,081.7",511.5,133.1,378.4,4.7,"1,364.2",17 +KEC International Ltd.,KEC,532714,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"4,514.9","4,224.7",274.3,6.10%,46.5,177.8,65.8,9.9,55.8,2.2,187.9,7.3 +Kotak Mahindra Bank Ltd.,KOTAKBANK,500247,BANKING AND FINANCE,BANKS,"21,559.5","9,681","6,343",46.24%,0,"5,535.5","5,888.3","1,465.5","4,461",22.4,"17,172.7",86.4 +L&T Finance Holdings Ltd.,L&TFH,533519,DIVERSIFIED,HOLDING COMPANIES,"3,482.1",935.3,"1,882.4",58.57%,28.3,"1,324.9",797.4,203.2,595.1,2.4,"2,080.8",8.4 +L&T Technology Services Ltd.,LTTS,540115,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"2,427.7","1,910.9",475.6,19.93%,68.1,12.6,436.1,120.2,315.4,29.8,"1,239.7",117.5 +LIC Housing Finance Ltd.,LICHSGFIN,500253,BANKING AND FINANCE,HOUSING FINANCE,"6,765.9",250.6,"6,095.7",90.10%,13.2,"4,599.9","1,483",291.2,"1,193.5",21.7,"4,164.5",75.7 +Lakshmi Machine Works Ltd.,LAXMIMACH,500252,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,"1,355.5","1,184.5",136,10.30%,23.6,0,147.4,32.3,115.1,107.8,416,389.5 +Laurus Labs Ltd.,LAURUSLABS,540222,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,226.2","1,036.6",187.9,15.34%,93.4,42.4,53.9,14.6,37,0.7,367.8,6.8 +Lupin Ltd.,LUPIN,500257,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"5,079","4,120.8",917.8,18.21%,247.8,80.6,629.7,134.3,489.5,10.8,"1,331.2",29.2 +MMTC Ltd.,MMTC,513377,COMMERCIAL SERVICES & SUPPLIES,COMMODITY TRADING & DISTRIBUTION,-167.2,-180.1,-30.4,14.42%,0.8,1.1,12.1,1.5,52,0.3,174.1,1.2 +MRF Ltd.,MRF,500290,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"6,287.8","5,060.2","1,156.9",18.61%,351.5,85.5,790.6,203.9,586.7,1383.3,"1,690.9",3988 +Mahanagar Gas Ltd.,MGL,539957,UTILITIES,UTILITIES,"1,772.7","1,250.1",478.9,27.70%,65.8,2.5,454.3,115.8,338.5,34.3,"1,147.8",116.2 +Mahindra & Mahindra Financial Services Ltd.,M&MFIN,532720,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"3,863.5","1,077.5","2,109.3",55.03%,67.1,"1,703.4",369.1,96,281.1,2.3,"1,982.5",16 +Mahindra & Mahindra Ltd.,M&M,500520,AUTOMOBILES & AUTO COMPONENTS,CARS & UTILITY VEHICLES,"35,027.2","28,705.9","5,729.6",16.64%,"1,138.6","1,835.2","3,347.5","1,083.7","2,347.8",21.1,"11,169.4",100.2 +Mahindra Holidays & Resorts India Ltd.,MHRIL,533088,HOTELS RESTAURANTS & TOURISM,HOTELS,672.2,519.3,136,20.76%,83.8,33.3,35.8,14,21.3,1.1,66,3.3 +Manappuram Finance Ltd.,MANAPPURAM,531213,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"2,174",555.6,"1,481.3",68.68%,62.5,689.4,746.7,186.1,558.4,6.6,"1,859.8",22 +Mangalore Refinery And Petrochemicals Ltd.,MRPL,500109,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"22,904.7","20,705.6","2,138.2",9.36%,296,311.2,"1,592",546.2,"1,051.7",6,"3,784.9",21.6 +Marico Ltd.,MARICO,531642,FMCG,PERSONAL PRODUCTS,"2,514","1,979",497,20.07%,39,20,476,116,353,2.7,"1,41",10.9 +Maruti Suzuki India Ltd.,MARUTI,532500,AUTOMOBILES & AUTO COMPONENTS,CARS & UTILITY VEHICLES,"37,902.1","32,282.5","4,790.3",12.92%,794.4,35.1,"4,790.1","1,083.8","3,764.3",124.6,"11,351.8",375.9 +Max Financial Services 
Ltd.,MFSL,500271,BANKING AND FINANCE,LIFE INSURANCE,"10,189.1","10,024.6",143.9,1.42%,0.8,9.4,158.2,-12.1,147.9,4.3,506.4,14.7 +UNO Minda Ltd.,UNOMINDA,532539,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"3,630.2","3,219.8",401.6,11.09%,125.4,27.2,257.9,73.3,225,3.9,742.4,13 +Motilal Oswal Financial Services Ltd.,MOTILALOFS,532892,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,"1,650.7",724.1,904.5,55.18%,17.3,241.1,657.6,124.2,531.2,35.9,"1,449.3",97.8 +MphasiS Ltd.,MPHASIS,526299,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"3,325.5","2,680.9",595.6,18.18%,89,34,521.7,129.7,391.9,20.8,"1,605.6",85.1 +Muthoot Finance Ltd.,MUTHOOTFIN,533398,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"3,631.9",723.4,"2,801.6",77.69%,22.2,"1,335","1,470.2",374.9,"1,059.6",26.4,"3,982.9",99.2 +Natco Pharma Ltd.,NATCOPHARM,524816,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,060.8",573.4,458,44.41%,43.6,4.2,439.6,70.6,369,20.6,"1,127.4",63 +NBCC (India) Ltd.,NBCC,534309,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"2,129.1","1,957.7",95.5,4.65%,1.3,0,104.6,22.9,79.6,0.4,332.2,1.8 +NCC Ltd.,NCC,500294,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"4,746.4","4,415.9",303.7,6.44%,53.2,153.5,123.8,38.8,77.3,1.2,599.4,9.5 +NHPC Ltd.,NHPC,533098,UTILITIES,ELECTRIC UTILITIES,"3,113.8","1,173.9","1,757.4",59.95%,294.9,104.8,"1,618.3",-75,"1,545.8",1.5,"3,897.8",3.9 +Coforge Ltd.,COFORGE,532541,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"2,285.1","1,935.3",340.9,14.98%,77.2,31.9,240.7,52.8,187.9,29.6,696.2,113.2 +NLC India Ltd.,NLCINDIA,513683,UTILITIES,ELECTRIC UTILITIES,"3,234","2,143",834.6,28.03%,455.1,213.9,"1,700.6",614.7,"1,084.7",7.8,"1,912.3",13.8 +NTPC Ltd.,NTPC,532555,UTILITIES,ELECTRIC UTILITIES,"45,384.6","32,303.2","12,680.2",28.19%,"4,037.7","2,920.5","6,342.9","2,019.7","4,614.6",4.8,"19,125.2",19.7 +Narayana Hrudayalaya Ltd.,NH,539551,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"1,323.6",997.1,308.1,23.61%,55.3,22.9,248.4,21.7,226.6,11.2,737.5,36.1 +National Aluminium Company Ltd.,NATIONALUM,532234,METALS & MINING,ALUMINIUM AND ALUMINIUM PRODUCTS,"3,112","2,646.9",396.5,13.03%,186.2,4,275,68.7,187.3,1,"1,272.4",6.9 +Navin Fluorine International Ltd.,NAVINFLUOR,532504,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,494.9,373.4,98.3,20.84%,24.2,20,77.2,16.6,60.6,12.2,365,73.7 +Oberoi Realty Ltd.,OBEROIRLTY,533273,REALTY,REALTY,"1,243.8",579.2,638.2,52.42%,11.3,56.5,596.8,142.1,456.8,12.6,"1,961.3",53.9 +Oil And Natural Gas Corporation Ltd.,ONGC,500312,OIL & GAS,EXPLORATION & PRODUCTION,"149,388.5","118,618.4","28,255.3",19.24%,"6,698.1","2,603.3","21,564.9","5,633.6","13,734.1",10.9,"43,072.5",34.2 +Oil India Ltd.,OIL,533106,OIL & GAS,EXPLORATION & PRODUCTION,"9,200.1","5,293.3","3,523.2",39.96%,499,278.9,762,67.6,420.7,3.9,"5,874.5",54.2 +Oracle Financial Services Software Ltd.,OFSS,532466,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,509.6",886.4,558.1,38.64%,19,8,596.2,178.8,417.4,48.2,"1,835.1",211.9 +PI Industries Ltd.,PIIND,523642,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,"2,163.8","1,565.5",551.4,26.05%,80.3,7.8,510.2,31.7,480.5,31.7,"1,495.8",98.4 +PNB Housing Finance Ltd.,PNBHOUSING,540173,BANKING AND FINANCE,HOUSING FINANCE,"1,779.4",158.8,"1,574.1",88.54%,11.3,"1,057.3",507.1,124.1,383,14.8,"1,278.7",49.3 +PNC Infratech Ltd.,PNCINFRA,539150,CEMENT AND CONSTRUCTION,ROADS & HIGHWAYS,"1,932.4","1,511.6",399.8,20.92%,40.9,161.3,218.6,70.7,147.9,5.8,614.3,23.9 +PVR INOX Ltd.,PVRINOX,532689,RETAILING,SPECIALTY 
RETAIL,"2,023.7","1,293.1",706.8,35.34%,308.6,200.3,221.7,55.5,166.3,17,-232.5,-23.7 +Page Industries Ltd.,PAGEIND,532827,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,"1,126.8",891.6,233.5,20.76%,24.6,11.2,199.4,49.1,150.3,134.7,510.7,457.9 +Persistent Systems Ltd.,PERSISTENT,533179,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"2,449","2,006.5",405.2,16.80%,74.4,12.3,355.8,92.5,263.3,35,981.5,127.6 +Petronet LNG Ltd.,PETRONET,532522,OIL & GAS,OIL MARKETING & DISTRIBUTION,"12,686.2","11,317.9","1,214.7",9.69%,194.8,74.7,"1,098.8",283.9,855.7,5.7,"3,490.3",23.3 +Pfizer Ltd.,PFIZER,500680,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,611.3,392.6,182.6,31.75%,15.4,2.7,200.5,51.6,149,32.6,522.8,114.3 +Phoenix Mills Ltd.,PHOENIXLTD,503100,REALTY,REALTY,906.6,361.2,506,57.82%,65.9,96.5,375.2,71.4,252.6,14.2,923.6,51.7 +Pidilite Industries Ltd.,PIDILITIND,500331,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"3,107.6","2,396.3",679.7,22.10%,75.2,13.1,623,163.1,450.1,8.8,"1,505.5",29.6 +Power Finance Corporation Ltd.,PFC,532810,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"22,403.7",315.4,"22,941.9",102.46%,12.7,"14,313.1","8,628.8","2,000.6","4,833.1",14.7,"17,946.4",54.4 +Power Grid Corporation of India Ltd.,POWERGRID,532898,UTILITIES,ELECTRIC UTILITIES,"11,530.4","1,358.7","9,908.4",87.94%,"3,277","2,341.3","4,393.4",573.7,"3,781.4",4.1,"15,344.4",16.5 +Prestige Estates Projects Ltd.,PRESTIGE,ASM,REALTY,REALTY,"3,256","1,643.9",592.5,26.49%,174.1,263.9,"1,174.1",256.4,850.9,21.2,"1,714",42.8 +Prism Johnson Ltd.,PRSMJOHNSN,500338,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"1,846","1,745.4",92.4,5.03%,95.2,43.5,210,30.4,182.7,3.6,154.2,3.1 +Procter & Gamble Hygiene & Healthcare Ltd.,PGHH,500459,FMCG,PERSONAL PRODUCTS,"1,154.1",853.5,284.9,25.03%,14.3,1.9,284.5,73.8,210.7,64.9,734.4,226.3 +Punjab National Bank,PNB,532461,BANKING AND FINANCE,BANKS,"29,857","6,798.1","6,239.1",23.23%,0,"16,819.8","2,778.3","1,013.8","1,990.2",1.8,"5,904.8",5.4 +Quess Corp Ltd.,QUESS,539978,SOFTWARE & SERVICES,BPO/KPO,"4,763.5","4,584.8",163.6,3.44%,69.7,28.1,79.3,8.3,71.9,4.8,240.9,16.2 +RBL Bank Ltd.,RBLBANK,540065,BANKING AND FINANCE,BANKS,"3,720.6","1,422.6",765.4,25.45%,0,"1,532.6",125,-206.1,331.1,5.5,"1,173.9",19.5 +Radico Khaitan Ltd.,RADICO,532497,FOOD BEVERAGES & TOBACCO,BREWERIES & DISTILLERIES,925.7,803.8,121.2,13.10%,26.1,12.5,83.3,21.4,64.8,4.8,237,17.7 +Rain Industries Ltd.,RAIN,500339,CHEMICALS & PETROCHEMICALS,PETROCHEMICALS,"4,208.9","3,794.3",366,8.80%,192.5,241.7,-19.5,46.2,-90.2,-2.7,270.4,8 +Rajesh Exports Ltd.,RAJESHEXPO,531500,TEXTILES APPARELS & ACCESSORIES,GEMS & JEWELLERY,"38,079.4","38,015.8",50.1,0.13%,10.7,0,53,7.7,45.3,1.5,"1,142.2",38.7 +Rallis India Ltd.,RALLIS,500355,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,837,699,133,15.99%,26,3,110,28,82,4.2,98.4,5.2 +Rashtriya Chemicals & Fertilizers Ltd.,RCF,524230,FERTILIZERS,FERTILIZERS,"4,222.1","4,049.3",105.9,2.55%,56.1,44,72.8,21.1,51,0.9,523.6,9.5 +Redington Ltd.,REDINGTON,532805,COMMERCIAL SERVICES & SUPPLIES,COMMODITY TRADING & DISTRIBUTION,"22,296.6","21,738.7",481.4,2.17%,43.7,105.8,408.3,96.7,303.5,3.9,"1,242",15.9 +Relaxo Footwears Ltd.,RELAXO,530517,RETAILING,FOOTWEAR,725.9,623.8,91.5,12.79%,36.9,4.7,60.4,16.2,44.2,1.8,193.9,7.8 +Reliance Industries Ltd.,RELIANCE,500325,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"238,797","193,988","40,968",17.44%,"12,585","5,731","26,493","6,673","17,394",25.7,"68,496",101.2 +REC Ltd.,RECLTD,532955,BANKING AND FINANCE,FINANCE (INCLUDING 
NBFCS),"11,701.3",275.1,"12,180.5",104.21%,6.1,"7,349.8","4,837.6","1,047.7","3,789.9",14.4,"12,738.6",48.4 +SJVN Ltd.,SJVN,533206,UTILITIES,ELECTRIC UTILITIES,951.6,172.2,706.2,80.40%,101.9,124.2,567.7,129.2,439.6,1.1,"1,016",2.6 +SKF India Ltd.,SKFINDIA,500472,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,"1,145.5","1,003.7",121.5,10.80%,19.3,0.5,122,31.7,90,18.2,484,97.9 +SRF Ltd.,SRF,503806,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"3,206.5","2,551.2",626.2,19.71%,161.2,79.3,414.8,114,300.8,10.2,"1,733.4",58.5 +Sanofi India Ltd.,SANOFI,500674,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,726.4,506.1,208.5,29.17%,9.9,0.3,210.1,57.9,152.1,66.1,596.3,259.3 +Schaeffler India Ltd.,SCHAEFFLER,505790,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,879.2","1,506.3",342,18.50%,55.6,1.6,315.7,80.7,235,15,922.6,59 +Shree Cements Ltd.,SHREECEM,500387,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"4,932.1","3,914.1",886,18.46%,411.7,67,539.2,92.6,446.6,123.8,"1,826.8",506.3 +Shriram Finance Ltd.,SHRIRAMFIN,511218,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"8,893","1,409.4","6,334.3",71.30%,141.4,"3,798","2,404.2",614.9,"1,786.1",47.6,"6,575.4",175.2 +Siemens Ltd.,SIEMENS,500550,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"5,953.2","5,107.5",700.2,12.06%,78.6,4.9,762.2,190.5,571.3,16.1,"1,960.9",55.1 +Sobha Ltd.,SOBHA,532784,REALTY,REALTY,773.6,665.8,75.4,10.18%,19.3,63.9,24.7,9.7,14.9,1.6,107.4,11.3 +Solar Industries India Ltd.,SOLARINDS,532725,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"1,355.2","1,011.3",336.1,24.95%,33.7,24.9,285.3,75.5,200.1,22.1,808.2,89.3 +Sonata Software Ltd.,SONATSOFTW,532221,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,935.8","1,715.2",197.3,10.32%,33.3,20.7,166.5,42.3,124.2,9,475.7,34.3 +State Bank of India,SBIN,500112,BANKING AND FINANCE,BANKS,"144,256.1","58,597.6","22,703.3",21.14%,0,"62,955.2","21,935.7","5,552.5","17,196.2",18,"69,304.1",77.7 +Steel Authority of India (SAIL) Ltd.,SAIL,500113,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"29,858.2","25,836.7","3,875.4",13.04%,"1,326.6",605.2,"1,674.7",464.2,"1,305.6",3.2,"3,219.5",7.8 +Sun Pharma Advanced Research Company Ltd.,SPARC,532872,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,29.7,112.7,-91.5,-431.87%,3.2,0.3,-86.4,0,-86.4,-2.7,-253.6,-7.8 +Sun Pharmaceutical Industries Ltd.,SUNPHARMA,524715,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"12,486","9,013","3,179.4",26.08%,632.8,49.3,"2,790.9",390.1,"2,375.5",9.9,"8,548.5",35.6 +Sun TV Network Ltd.,SUNTV,532733,MEDIA,BROADCASTING & CABLE TV,"1,160.2",320.6,727.8,69.42%,218.8,1.7,619.1,154.4,464.7,11.8,"1,861.8",47.2 +Sundram Fasteners Ltd.,SUNDRMFAST,500403,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,429.1","1,191.1",230.7,16.23%,54.5,7.4,176.2,43.1,131.9,6.3,502.9,23.9 +Sunteck Realty Ltd.,SUNTECK,512179,REALTY,REALTY,36.2,39.1,-14.1,-56.70%,2.2,15.8,-20.9,-6.4,-13.9,-1,-46.5,-3.3 +Supreme Industries Ltd.,SUPREMEIND,509930,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,"2,321.4","1,952.5",356.2,15.43%,71.9,1.6,295.4,76.3,243.2,19.1,"1,028.2",80.9 +Suzlon Energy Ltd.,SUZLON,ASM,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"1,428.7","1,196.4",225,15.83%,51.2,43.7,102.4,0.1,102.3,0.1,561.4,0.4 +Syngene International Ltd.,SYNGENE,539268,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,931.7,656,254.1,27.92%,104.6,13,150.7,34.2,116.5,2.9,498.3,12.4 +TTK Prestige Ltd.,TTKPRESTIG,517506,CONSUMER DURABLES,HOUSEWARE,747.2,648.6,80.8,11.08%,15.9,3.1,79.5,20.5,59.3,4.3,224.3,16.2 +TV18 Broadcast Ltd.,TV18BRDCST,532800,MEDIA,BROADCASTING & 
CABLE TV,"1,989","1,992.2",-198.1,-11.04%,50.1,33.8,-87.1,-6.5,-28.9,-0.2,92.2,0.5 +TVS Motor Company Ltd.,TVSMOTOR,532343,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"9,983.8","8,576.9","1,355.9",13.65%,237.1,483.3,686.4,259.8,386.3,8.1,"1,457.6",30.7 +Tata Consultancy Services Ltd.,TCS,532540,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"60,698","43,946","15,746",26.38%,"1,263",159,"15,33","3,95","11,342",31,"44,654",122 +Tata Elxsi Ltd.,TATAELXSI,500408,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,912.8,618.2,263.5,29.89%,25,5.8,263.9,63.8,200,32.1,785.1,126.1 +Tata Consumer Products Ltd.,TATACONSUM,500800,FMCG,PACKAGED FOODS,"3,823.6","3,196.7",537.1,14.38%,93.9,27.6,490.9,131.7,338.2,3.6,"1,275.2",13.7 +Tata Motors Limited (DVR),TATAMTRDVR,570001,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,,,,,,,,,,,, +Tata Motors Ltd.,TATAMOTORS,500570,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,"106,759","91,361.3","13,766.9",13.10%,"6,636.4","2,651.7","5,985.9","2,202.8","3,764",9.8,"15,332.3",40 +Tata Power Company Ltd.,TATAPOWER,500400,UTILITIES,ELECTRIC UTILITIES,"16,029.5","12,647","3,091",19.64%,925.9,"1,181.8",979.2,213.3,875.5,2.7,"3,570.8",11.2 +Tata Steel Ltd.,TATASTEEL,500470,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"55,910.2","51,414.1","4,267.8",7.66%,"2,479.8","1,959.4","-6,842.1",-228,"-6,196.2",-5.1,"-6,081.3",-5 +Tech Mahindra Ltd.,TECHM,532755,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"13,128.1","11,941.1",922.8,7.17%,465.7,97.5,623.8,110,493.9,5.6,"3,600.7",40.9 +The Ramco Cements Ltd.,RAMCOCEM,500260,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,352.1","1,935",405.6,17.33%,162.8,116.5,137.8,37,72,3.1,348.9,14.8 +Thermax Ltd.,THERMAX,500411,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"2,368.3","2,097.8",204.6,8.89%,33,19.8,217.7,58.9,157.7,14,498.8,44.3 +Timken India Ltd.,TIMKEN,522113,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,692.1,546.5,135.5,19.87%,21.1,0.9,123.6,30.6,93,12.4,358.3,47.6 +Titan Company Ltd.,TITAN,500114,TEXTILES APPARELS & ACCESSORIES,GEMS & JEWELLERY,"12,653","11,118","1,411",11.26%,144,140,"1,251",336,915,10.3,"3,302",37.1 +Torrent Pharmaceuticals Ltd.,TORNTPHARM,500420,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"2,686","1,835",825,31.02%,201,91,559,173,386,11.4,"1,334",39.4 +Torrent Power Ltd.,TORNTPOWER,532779,UTILITIES,ELECTRIC UTILITIES,"7,069.1","5,739.5","1,221.4",17.55%,341.7,247.2,740.7,198.1,525.9,10.9,"2,176.8",45.3 +Trent Ltd.,TRENT,500251,RETAILING,DEPARTMENT STORES,"3,062.5","2,525.8",456.6,15.31%,152.2,95.5,288.9,86.3,234.7,6.6,629.4,17.7 +Trident Ltd.,TRIDENT,521064,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"1,812","1,557.3",240.3,13.37%,89.4,35,130.4,40.1,90.7,0.2,458.1,0.9 +UPL Ltd.,UPL,512070,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,"10,275","8,807","1,325",13.03%,657,871,-185,-96,-189,-2.5,"1,856",24.7 +UltraTech Cement Ltd.,ULTRACEMCO,532538,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"16,179.3","13,461.2","2,550.9",15.93%,797.8,233.9,"1,686.2",409.4,"1,281.5",44.5,"5,694.1",197.2 +Union Bank of India,UNIONBANK,532477,BANKING AND FINANCE,BANKS,"28,952.5","6,189.3","7,265",29.38%,0,"15,498.2","5,492.3","1,944","3,571.8",5.1,"11,918.9",16.1 +United Breweries Ltd.,UBL,532478,FOOD BEVERAGES & TOBACCO,BREWERIES & DISTILLERIES,"1,902.1","1,705.8",184.3,9.75%,50.9,1.4,144,36.9,107.3,4.1,251.3,9.5 +United Spirits Ltd.,MCDOWELL-N,532432,FOOD BEVERAGES & TOBACCO,BREWERIES & DISTILLERIES,"6,776.6","6,269.8",466.7,6.93%,65.3,26.2,446,106.3,339.3,4.8,"1,133",15.6 +V-Guard Industries Ltd.,VGUARD,532953,CONSUMER 
DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,147.9","1,041.3",92.5,8.16%,19.8,9.3,77.5,18.6,59,1.4,215.2,5 +Vardhman Textiles Ltd.,VTL,502986,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"2,487","2,192.1",205.4,8.57%,103.7,22,169.2,41.7,134.3,4.7,531.9,18.7 +Varun Beverages Ltd.,VBL,540180,FOOD BEVERAGES & TOBACCO,NON-ALCOHOLIC BEVERAGES,"3,889","2,988.4",882.1,22.79%,170.8,62.5,667.3,152.9,501.1,3.9,"1,998.7",15.4 +Vinati Organics Ltd.,VINATIORGA,524200,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,464.4,337.3,110.8,24.73%,13.7,0.3,113,28.9,84.2,8.2,408.2,39.7 +Voltas Ltd.,VOLTAS,500575,CONSUMER DURABLES,CONSUMER ELECTRONICS,"2,363.7","2,222.5",70.3,3.06%,11.7,11.4,118.1,49.3,36.7,1.1,199.5,6 +ZF Commercial Vehicle Control Systems India Ltd.,ZFCVINDIA,533023,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,015.8",846.2,145.5,14.67%,27.1,1.3,141.2,35.5,105.7,55.7,392,206.7 +Welspun Corp Ltd.,WELCORP,ASM,METALS & MINING,IRON & STEEL PRODUCTS,"4,161.4","3,659.9",399.5,9.84%,85.7,75,340.8,79,384.7,14.7,809.2,30.9 +Welspun Living Ltd.,WELSPUNLIV,514162,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"2,542.4","2,151.1",358,14.27%,98.5,33.8,258.9,58.7,196.7,2,526.1,5.4 +Whirlpool of India Ltd.,WHIRLPOOL,500238,CONSUMER DURABLES,CONSUMER ELECTRONICS,"1,555.5","1,448.4",73.2,4.81%,49.2,5.6,52.3,14.1,36.6,2.9,198.8,15.7 +Wipro Ltd.,WIPRO,507685,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"23,255.7","18,543.2","3,972.7",17.64%,897,303.3,"3,512.2",841.9,"2,646.3",5.1,"11,643.8",22.3 +Zee Entertainment Enterprises Ltd.,ZEEL,505537,MEDIA,BROADCASTING & CABLE TV,"2,509.6","2,105",332.8,13.65%,77.2,23.4,184.2,54.4,123,1.3,-102.2,-1.1 +eClerx Services Ltd.,ECLERX,532927,SOFTWARE & SERVICES,BPO/KPO,735.9,517,204.7,28.37%,30.3,6.1,182.4,46.3,136,28.2,506,105 +Sterlite Technologies Ltd.,STLTECH,532374,TELECOMMUNICATIONS EQUIPMENT,TELECOM CABLES,"1,497","1,281",213,14.26%,85,95,36,12,34,0.9,203,5.1 +HEG Ltd.,HEG,509631,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,642.2,512.3,101.9,16.58%,38.5,8.5,82.9,21.7,96,24.9,439.5,113.9 +SBI Life Insurance Company Ltd.,SBILIFE,540719,BANKING AND FINANCE,LIFE INSURANCE,"28,816.2","28,183.8",609.9,2.12%,0,0,621.5,43.9,380.2,3.8,"1,842.2",18.4 +General Insurance Corporation of India,GICRE,540755,BANKING AND FINANCE,GENERAL INSURANCE,"13,465.9","11,574","1,464.6",11.20%,0,0,"1,855.4",243.7,"1,689",15.2,"6,628",37.8 +Tube Investments of India Ltd.,TIINDIA,540762,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,005.4","1,718.2",251.4,12.76%,34.6,7.7,244.8,63.4,181.4,9.4,717.5,37.1 +Honeywell Automation India Ltd.,HONAUT,517174,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,144.3",965.9,138.3,12.52%,13.8,0.7,163.9,42,121.9,137.8,443.4,503.9 +Indian Energy Exchange Ltd.,IEX,540750,BANKING AND FINANCE,EXCHANGE,133,16.6,92,84.73%,5.1,0.7,110.6,27.9,86.5,1,327.8,3.7 +ICICI Lombard General Insurance Company Ltd.,ICICIGI,540716,BANKING AND FINANCE,GENERAL INSURANCE,"5,271.1","4,612.4",743.5,14.16%,0,0,763.6,186.4,577.3,11.8,"1,757.1",35.8 +Aster DM Healthcare Ltd.,ASTERDM,540975,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"3,325.2","2,939.4",377.3,11.38%,227.2,101.9,2.1,10.2,-30.8,-0.6,284.3,5.7 +Central Depository Services (India) Ltd.,CDSL,CDSL,OTHERS,INVESTMENT COMPANIES,230.1,77.9,129.4,62.40%,6.5,0,145.6,35.8,108.9,10.4,320.2,30.6 +Graphite India Ltd.,GRAPHITE,509488,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,884,823,-30,-3.78%,19,4,992,190,804,41.1,856,43.9 +Grasim Industries Ltd.,GRASIM,500300,CEMENT AND CONSTRUCTION,CEMENT & CEMENT 
PRODUCTS,"30,505.3","25,995.9","4,224.8",13.98%,"1,245.2",397.8,"2,866.4",837.7,"1,163.8",17.7,"6,624.9",100.6 +KNR Constructions Ltd.,KNRCON,532942,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"1,043.8",806.9,231.6,22.30%,39.2,20.6,177.1,34.6,147.4,5.2,537.5,19.1 +Aditya Birla Capital Ltd.,ABCAPITAL,540691,DIVERSIFIED,HOLDING COMPANIES,"7,730.4","4,550.1","2,821.9",36.55%,48,"1,827",956.8,284.1,705,2.7,"5,231.9",20.1 +Dixon Technologies (India) Ltd.,DIXON,540699,CONSUMER DURABLES,CONSUMER ELECTRONICS,"4,943.9","4,744.3",198.9,4.02%,36.4,17.1,146.1,35.2,107.3,19,308.7,51.8 +Cholamandalam Financial Holdings Ltd.,CHOLAHLDNG,504973,DIVERSIFIED,HOLDING COMPANIES,"6,372.2","2,495.1","3,404.8",54.05%,52.1,"2,209.4","1,215.8",324.6,420.9,22.4,"1,532.3",81.6 +Cochin Shipyard Ltd.,COCHINSHIP,540678,TRANSPORTATION,MARINE PORT & SERVICES,"1,100.4",820.5,191.2,18.90%,18.9,9.6,251.4,69.9,181.5,13.8,429.9,32.7 +Bharat Dynamics Ltd.,BDL,541143,GENERAL INDUSTRIALS,DEFENCE,694.1,481.8,134,21.77%,17.4,0.8,194.1,47,147.1,8,425.4,23.2 +Lux Industries Ltd.,LUXIND,539542,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,643.6,584.2,55,8.61%,5.9,5.4,48,12.1,37.1,12.3,103.1,32.9 +Zensar Technologies Ltd.,ZENSARTECH,504067,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,277.1","1,009.9",230.9,18.61%,36.6,5.7,224.9,51,173.9,7.7,525.8,23.2 +PCBL Ltd.,PCBL,506590,CHEMICALS & PETROCHEMICALS,CARBON BLACK,"1,489.4","1,248.6",238.1,16.02%,48.2,21,171.6,48.8,122.6,3.2,431.6,11.4 +Zydus Wellness Ltd.,ZYDUSWELL,531335,FMCG,PACKAGED FOODS,444,423.1,16.8,3.82%,5.8,6.5,8.6,2.7,5.9,0.9,281.2,44.2 +Linde India Ltd.,LINDEINDIA,523457,GENERAL INDUSTRIALS,INDUSTRIAL GASES,729.9,537.7,173.6,24.41%,49.7,1.2,141.3,34.6,108.7,12.8,417.9,49 +FDC Ltd.,FDC,531599,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,513.6,409.9,76.4,15.71%,9.9,1.1,92.7,22.9,69.8,4.2,251.2,15.4 +The New India Assurance Company Ltd.,NIACL,540769,BANKING AND FINANCE,GENERAL INSURANCE,"10,571","10,773.4",-246.5,-2.33%,0,0,-242,-46.7,-176.1,-1.1,947,5.7 +Sundaram Finance Ltd.,SUNDARMFIN,590071,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"1,710.6",322.5,"1,332.1",77.98%,43.6,820.3,470.6,142.8,365.4,33.2,"1,506.7",135.6 +TeamLease Services Ltd.,TEAMLEASE,539658,COMMERCIAL SERVICES & SUPPLIES,MISC. 
COMMERCIAL SERVICES,"2,285.6","2,240.8",31.8,1.40%,12.9,2.5,29.4,1.8,27.3,16.3,106.6,63.5 +Galaxy Surfactants Ltd.,GALAXYSURF,540935,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,985.8,858.2,124.9,12.70%,24.7,5.4,97.5,20.1,77.4,21.8,349.3,98.5 +Bandhan Bank Ltd.,BANDHANBNK,541153,BANKING AND FINANCE,BANKS,"5,032.2","1,400.2","1,583.4",35.25%,0,"2,048.6",947.2,226.1,721.2,4.5,"2,541.1",15.8 +ICICI Securities Ltd.,ISEC,541179,BANKING AND FINANCE,CAPITAL MARKETS,"1,249",433.5,810.2,64.87%,25.8,215.1,569.4,145.7,423.6,13.1,"1,238.1",38.3 +V-Mart Retail Ltd.,VMART,534976,RETAILING,DEPARTMENT STORES,551.4,548.8,0.7,0.12%,53.2,35.9,-86.4,-22.3,-64.1,-32.4,-103.1,-52.1 +Nippon Life India Asset Management Ltd.,NAM-INDIA,540767,BANKING AND FINANCE,ASSET MANAGEMENT COS.,475.4,156.1,241.4,60.73%,7.2,1.7,310.4,66.1,244.4,3.9,883.3,14.1 +Grindwell Norton Ltd.,GRINDWELL,506076,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,690,536,131.4,19.69%,16.9,1.8,135.3,33.1,101.9,9.2,378.3,34.2 +HDFC Life Insurance Company Ltd.,HDFCLIFE,540777,BANKING AND FINANCE,LIFE INSURANCE,"23,276.6","23,659.3",-508.1,-2.20%,0,0,-373.1,-657.5,378.2,1.8,"1,472.8",6.9 +Elgi Equipments Ltd.,ELGIEQUIP,522074,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,817.8,663.4,142.7,17.71%,18.7,6.6,129.2,38.8,91.3,2.9,401.9,12.7 +Hindustan Aeronautics Ltd.,HAL,541154,GENERAL INDUSTRIALS,DEFENCE,"6,105.1","4,108.1","1,527.6",27.11%,349.6,0.3,"1,647",414.8,"1,236.7",18.5,"6,037.3",90.3 +BSE Ltd.,BSE,BSE,BANKING AND FINANCE,EXCHANGE,367,172.8,189.2,52.26%,22.7,8.5,163,63.6,120.5,8.8,706,52.1 +Rites Ltd.,RITES,541556,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,608.8,444.5,137.8,23.67%,14.1,1.4,148.8,40.1,101.2,4.2,488.1,20.3 +Fortis Healthcare Ltd.,FORTIS,532843,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"1,783.5","1,439.8",330.2,18.65%,84.1,31.8,231.4,48.8,173.7,2.3,547.6,7.3 +Varroc Engineering Ltd.,VARROC,541578,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,893.5","1,692.6",194.3,10.30%,84.9,50.3,65.9,18.2,54.2,3.5,146.5,9.6 +Adani Green Energy Ltd.,ADANIGREEN,ASM,UTILITIES,ELECTRIC UTILITIES,"2,589",521,"1,699",76.53%,474,"1,165",413,119,372,2.2,"1,305",8.2 +VIP Industries Ltd.,VIPIND,507880,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,548.7,493.2,52.9,9.68%,23.8,12.4,19.3,6,13.3,0.9,110.9,7.8 +CreditAccess Grameen Ltd.,CREDITACC,541770,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"1,247.6",248.8,902.3,72.36%,12.3,423.9,466.8,119.7,347,21.8,"1,204.2",75.7 +CESC Ltd.,CESC,500084,UTILITIES,ELECTRIC UTILITIES,"4,414","3,706",646,14.84%,303,305,461,98,348,2.6,"1,447",10.9 +Jamna Auto Industries Ltd.,JAMNAAUTO,520051,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,608.7,528.2,79.1,13.03%,10.9,0.8,68.7,18.6,50.1,2.4,189.3,4.7 +Suprajit Engineering Ltd.,SUPRAJIT,532509,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,727.6,639.1,69.8,9.85%,25.7,13.6,49.2,14.5,34.8,2.5,146.9,10.6 +JK Paper Ltd.,JKPAPER,532162,COMMERCIAL SERVICES & SUPPLIES,PAPER & PAPER PRODUCTS,"1,708.8","1,242.8",407.3,24.68%,83.5,42,340.6,34.9,302.4,17.9,"1,220.6",72.1 +Bank of Maharashtra,MAHABANK,532525,BANKING AND FINANCE,BANKS,"5,735.5","1,179.4","1,920.5",37.90%,0,"2,635.7",935.7,16,919.8,1.3,"3,420.8",4.8 +Aavas Financiers Ltd.,AAVAS,541988,BANKING AND FINANCE,HOUSING FINANCE,497.6,123.5,367.8,74.03%,7.6,203.6,157.4,35.7,121.7,15.4,465.4,58.8 +HDFC Asset Management Company Ltd.,HDFCAMC,541729,BANKING AND FINANCE,ASSET MANAGEMENT COS.,765.4,162,481.1,74.81%,13,2.3,588.1,151.6,436.5,20.4,"1,659.3",77.7 +KEI Industries 
Ltd.,KEI,517569,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,954.2","1,742.7",203.9,10.47%,15.6,7.5,188.4,48.2,140.2,15.5,528.3,58.5 +Orient Electric Ltd.,ORIENTELEC,541301,CONSUMER DURABLES,CONSUMER ELECTRONICS,570.3,546.2,20.7,3.65%,14.2,5.2,23.4,4.9,18.4,0.9,95.3,4.5 +Deepak Nitrite Ltd.,DEEPAKNTR,506401,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"1,795.1","1,475.8",302.3,17.00%,39.4,2.7,277.2,72.1,205.1,15,797.9,58.5 +Fine Organic Industries Ltd.,FINEORG,541557,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,557.6,409.4,131.1,24.25%,14.4,0.7,133.1,28.9,103.4,33.7,458.8,149.6 +LTIMindtree Ltd.,LTIM,540005,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"9,048.6","7,274.1","1,631.3",18.32%,208.2,47,"1,519.3",357,"1,161.8",39.3,"4,427.5",149.6 +Dalmia Bharat Ltd.,DALBHARAT,542216,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"3,234","2,56",589,18.70%,401,101,172,48,118,6.3,"1,041",54.8 +Godfrey Phillips India Ltd.,GODFRYPHLP,500163,FOOD BEVERAGES & TOBACCO,CIGARETTES-TOBACCO PRODUCTS,"1,412.5","1,151",223.6,16.27%,36.5,6.6,218.5,55.5,202.1,38.9,802.9,154.4 +Vaibhav Global Ltd.,VAIBHAVGBL,532156,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,708.4,641.5,63.5,9.01%,22.6,2.9,41.4,12.4,29.4,1.8,121.3,7.3 +Abbott India Ltd.,ABBOTINDIA,500488,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,549.7","1,113.3",380.9,25.49%,17.8,3.1,415.4,102.5,312.9,147.3,"1,081.4",508.9 +Adani Total Gas Ltd.,ATGL,ASM,UTILITIES,UTILITIES,"1,104.8",815.7,279.9,25.55%,37.6,27.3,224.2,57.2,172.7,1.6,571,5.2 +Nestle India Ltd.,NESTLEIND,500790,FMCG,PACKAGED FOODS,"5,070.1","3,811.9","1,224.9",24.32%,111.2,31.4,"1,222",313.9,908.1,94.2,"2,971.1",308.2 +Bayer Cropscience Ltd.,BAYERCROP,506285,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,"1,633.3","1,312.3",304.9,18.85%,11.6,3.7,305.7,82.8,222.9,49.6,844.4,188.1 +Amber Enterprises India Ltd.,AMBER,540902,CONSUMER DURABLES,CONSUMER ELECTRONICS,939.8,867.5,59.6,6.43%,45.2,36.6,-9.5,-3.8,-6.9,-2.1,156.8,46.5 +Rail Vikas Nigam Ltd.,RVNL,542649,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"5,210.3","4,616",298.3,6.07%,6.2,132.7,455.4,85.2,394.3,1.9,"1,478.8",7.1 +Metropolis Healthcare Ltd.,METROPOLIS,542650,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE SERVICES,309.7,233.7,74.8,24.25%,22.2,5.7,48.1,12.5,35.5,6.9,133.4,26 +Polycab India Ltd.,POLYCAB,542652,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"4,253","3,608.8",608.9,14.44%,60.3,26.8,557.2,127.4,425.6,28.4,"1,607.2",107.1 +Multi Commodity Exchange of India Ltd.,MCX,534091,BANKING AND FINANCE,EXCHANGE,184,193.8,-28.7,-17.38%,6.6,0.1,-16.4,1.6,-19.1,-3.7,44.8,8.8 +IIFL Finance Ltd.,IIFL,532636,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,"2,533.7",788.3,"1,600.8",64.66%,43.3,932.1,683.5,158,474.3,12.4,"1,690.7",44.4 +Ratnamani Metals & Tubes Ltd.,RATNAMANI,520111,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"1,141.9",886.3,244.9,21.65%,23.6,10.8,221.1,56.8,163.9,23.4,622.6,88.8 +RHI Magnesita India Ltd.,RHIM,534076,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,989.7,839,147.9,14.98%,44.2,8.5,97.9,26.3,71.3,3.5,-502.2,-24.3 +Birlasoft Ltd.,BSOFT,532400,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,325.4","1,102.7",207.1,15.81%,21.5,5.7,195.5,50.4,145.1,5.2,378.4,13.7 +EIH Ltd.,EIHOTEL,500840,HOTELS RESTAURANTS & TOURISM,HOTELS,552.5,387.6,142.9,26.94%,33.2,5.6,126.1,36.2,93.1,1.5,424.1,6.8 +Affle (India) Ltd.,AFFLE,542752,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,441.2,344.1,87.2,20.22%,18.4,5.5,73.2,6.4,66.8,5,264.3,19.8 +Westlife Foodworld Ltd.,WESTLIFE,505533,HOTELS RESTAURANTS 
& TOURISM,RESTAURANTS,618,516.5,98.2,15.98%,43.9,27.4,30.2,7.8,22.4,1.4,107.7,6.9 +IndiaMART InterMESH Ltd.,INDIAMART,542726,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,329.3,214.7,80,27.15%,8,2.3,104.3,23.9,69.4,11.4,321.1,53.6 +Infosys Ltd.,INFY,500209,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"39,626","29,554","9,44",24.21%,"1,166",138,"8,768","2,553","6,212",15,"24,871",60.1 +Sterling and Wilson Renewable Energy Ltd.,SWSOLAR,542760,COMMERCIAL SERVICES & SUPPLIES,CONSULTING SERVICES,776.7,758,1.5,0.19%,4.3,64.3,-50,4.6,-54.2,-2.9,-668.4,-35.2 +ABB India Ltd.,ABB,500002,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"2,846","2,330.7",438.5,15.84%,30.3,0.9,484.2,122.2,362.9,17.1,"1,208.7",57 +Poly Medicure Ltd.,POLYMED,531768,HEALTHCARE EQUIPMENT & SUPPLIES,HEALTHCARE SUPPLIES,351.4,253.1,84.2,24.97%,16,2.2,80.9,18.8,62.2,6.5,233.7,24.4 +GMM Pfaudler Ltd.,GMMPFAUDLR,505255,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,946,795.5,142,15.15%,32.2,21.5,96.8,26.5,71.1,15.8,183.2,40.8 +Gujarat Fluorochemicals Ltd.,FLUOROCHEM,542812,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,960.3,783.7,163.1,17.23%,67.5,34.2,74.8,22.1,52.7,4.8,915.2,83.3 +360 One Wam Ltd.,360ONE,542772,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,617.1,235.6,317.8,57.31%,13.7,139.9,226.8,40.8,186,5.2,696.8,19.5 +Tata Communications Ltd.,TATACOMM,500483,TELECOM SERVICES,OTHER TELECOM SERVICES,"4,897.9","3,857.1","1,015.5",20.84%,605.1,137.4,298.3,77.9,220.7,7.7,"1,322.3",46.4 +Alkyl Amines Chemicals Ltd.,ALKYLAMINE,506767,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,354.5,303.9,48.3,13.71%,12.5,1.7,36.4,9.2,27.2,5.3,171.3,33.5 +CSB Bank Ltd.,CSBBANK,542867,BANKING AND FINANCE,BANKS,835.8,317.5,174.6,25.41%,0,343.6,178,44.8,133.2,7.7,577.7,33.3 +Indian Railway Catering & Tourism Corporation Ltd.,IRCTC,542830,DIVERSIFIED CONSUMER SERVICES,TRAVEL SUPPORT SERVICES,"1,042.4",628.8,366.6,36.83%,14,4.4,395.2,100.5,294.7,3.7,"1,061.2",13.3 +Sumitomo Chemical India Ltd.,SUMICHEM,542920,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,928,715.5,187.9,20.80%,15.8,1.2,195.5,52,143.4,2.9,367.7,7.4 +Century Textiles & Industries Ltd.,CENTURYTEX,500040,COMMERCIAL SERVICES & SUPPLIES,PAPER & PAPER PRODUCTS,"1,114.9","1,069.2",33.8,3.07%,59.2,17,-30.5,-3.3,-30.4,-2.8,117.7,10.5 +SBI Cards and Payment Services Ltd.,SBICARD,543066,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"4,221.4","2,018.8","1,327",32.47%,46.8,604.9,809.4,206.4,603,6.4,"2,302.2",24.3 +Hitachi Energy India Ltd.,POWERINDIA,543187,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"1,228.2","1,162.6",65.3,5.32%,22.5,10.7,32.4,7.6,24.7,5.8,82.5,19.5 +Suven Pharmaceuticals Ltd.,SUVENPHAR,543064,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,250.9,133.1,98,42.40%,11.9,0.5,105.4,25.8,79.6,3.1,431.8,17 +Tata Chemicals Ltd.,TATACHEM,500770,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"4,083","3,179",819,20.49%,234,145,627,120,428,16.8,"2,06",80.8 +Aarti Drugs Ltd.,AARTIDRUGS,524348,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,642.2,565.1,76.4,11.92%,12.6,8.2,56.3,16.7,39.6,4.3,180.2,19.6 +Gujarat Ambuja Exports Ltd.,GAEL,524226,FMCG,EDIBLE OILS,"1,157.7","1,012.2",103.3,9.26%,30.5,5.9,109.1,26.3,82.8,3.6,305.1,13.3 +Polyplex Corporation Ltd.,POLYPLEX,524051,COMMERCIAL SERVICES & SUPPLIES,CONTAINERS & PACKAGING,"1,595.7","1,451.5",120.6,7.67%,75.1,9.9,59.1,10.9,27.9,8.9,71.1,22.6 +Chalet Hotels Ltd.,CHALET,542399,HOTELS RESTAURANTS & TOURISM,HOTELS,318.2,188.6,126,40.04%,35,50.1,44.5,8,36.4,1.8,266.7,13 +Adani Enterprises Ltd.,ADANIENT,512599,COMMERCIAL SERVICES & SUPPLIES,COMMODITY 
TRADING & DISTRIBUTION,"23,066","20,087.2","2,430.1",10.79%,757,"1,342.8",791,397.8,227.8,2,"2,444.3",21.4 +YES Bank Ltd.,YESBANK,532648,BANKING AND FINANCE,BANKS,"7,980.6","2,377.1",810,12.06%,0,"4,793.6",304.4,75.7,228.6,0.1,836.6,0.3 +EPL Ltd.,EPL,500135,COMMERCIAL SERVICES & SUPPLIES,CONTAINERS & PACKAGING,"1,011.2",820.6,181,18.07%,83.6,30.6,76.4,25.4,50.5,1.6,251.9,7.9 +Network18 Media & Investments Ltd.,NETWORK18,532798,MEDIA,BROADCASTING & CABLE TV,"2,052.2","2,083.8",-218.3,-11.70%,56.8,66.2,-154.5,-6.5,-61,-0.6,-144.2,-1.4 +CIE Automotive India Ltd.,CIEINDIA,532756,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,299.4","1,934",345.4,15.15%,78.3,31,256.1,69.1,375.4,9.9,298.4,7.9 +Vedanta Ltd.,VEDL,500295,METALS & MINING,ALUMINIUM AND ALUMINIUM PRODUCTS,"39,585","27,466","11,479",29.47%,"2,642","2,523","8,177","9,092","-1,783",-4.8,"5,202",14 +Rossari Biotech Ltd.,ROSSARI,543213,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,484.8,419.9,63.6,13.15%,15.1,5,44.8,11.9,32.9,6,116.8,21.2 +KPIT Technologies Ltd.,KPITTECH,542651,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,208.6",959.2,239.9,20.01%,48.1,13.6,187.7,46.3,140.9,5.2,486.9,18 +Intellect Design Arena Ltd.,INTELLECT,538835,SOFTWARE & SERVICES,IT SOFTWARE PRODUCTS,631.7,497.2,121.9,19.69%,33.7,0.8,96.5,25.7,70.4,5.2,316.6,23.2 +Balaji Amines Ltd.,BALAMINES,530999,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,387.3,326.8,53.8,14.13%,10.8,1.8,48,11.6,34.7,10.7,197.3,60.9 +UTI Asset Management Company Ltd.,UTIAMC,543238,BANKING AND FINANCE,ASSET MANAGEMENT COS.,405.6,172.5,231.5,57.30%,10.4,2.8,219.8,37,182.8,14.4,562.9,44.3 +Mazagon Dock Shipbuilders Ltd.,MAZDOCK,543237,TRANSPORTATION,SHIPPING,"2,079.2","1,651.1",176.6,9.66%,20.2,1.3,406.6,102.8,332.9,16.5,"1,327.6",65.8 +Computer Age Management Services Ltd.,CAMS,543232,BANKING AND FINANCE,CAPITAL MARKETS,284.7,153,122.1,44.39%,17.4,2,112.4,28.6,84.5,17.2,309.2,62.9 +Happiest Minds Technologies Ltd.,HAPPSTMNDS,543227,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,428.8,324,82.6,20.32%,14.6,11.2,79.1,20.7,58.5,3.9,232,15.6 +Triveni Turbine Ltd.,TRITURBINE,533655,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,402.3,313.4,74.3,19.17%,5.1,0.6,83.2,19,64.2,2,233.1,7.3 +Angel One Ltd.,ANGELONE,ASM,BANKING AND FINANCE,CAPITAL MARKETS,"1,049.3",602.6,443.4,42.31%,11.2,26.4,407.2,102.7,304.5,36.3,"1,020.2",121.7 +Tanla Platforms Ltd.,TANLA,532790,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"1,014.9",811.8,196.8,19.51%,22.6,1.8,178.7,36.2,142.5,10.6,514.7,38.3 +Max Healthcare Institute Ltd.,MAXHEALTH,543220,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"1,408.6",975.8,387.4,28.42%,57.9,8.5,366.4,89.7,276.7,2.9,990.1,10.2 +Asahi India Glass Ltd.,ASAHIINDIA,515030,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,122.6",934,185.6,16.58%,43,34.4,111.3,30.2,86.9,3.6,343.5,14.1 +Prince Pipes & Fittings Ltd.,PRINCEPIPE,542907,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,660.4,562.3,94.2,14.35%,22.5,0.7,92.8,22.2,70.6,5.2,219.8,19.9 +Route Mobile Ltd.,ROUTE,543228,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"1,018.3",886.5,128.1,12.63%,21.4,6.5,103.8,15.5,88.8,14.2,365.3,58.3 +KPR Mill Ltd.,KPRMILL,532889,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"1,533","1,212.9",298,19.72%,46,18.1,256,54.2,201.8,5.9,788.8,23.1 +Infibeam Avenues Ltd.,INFIBEAM,539807,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,792.6,719.7,70.2,8.89%,17.1,0.5,55.2,14.7,41,0.1,142.2,0.5 +Restaurant Brands Asia Ltd.,RBA,543248,HOTELS RESTAURANTS & 
TOURISM,RESTAURANTS,628.2,568.7,56.2,9.00%,78.6,31.5,-50.7,0,-46,-0.9,-220.3,-4.5 +Larsen & Toubro Ltd.,LT,500510,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"52,157","45,392.1","5,632",11.04%,909.9,864,"4,991.1","1,135.5","3,222.6",22.9,"12,255.3",89.2 +Gland Pharma Ltd.,GLAND,543245,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,426.6","1,049.3",324.1,23.60%,81.3,6,289.9,95.8,194.1,11.8,698.8,42.4 +Macrotech Developers Ltd.,LODHA,543287,REALTY,REALTY,"1,755.1","1,333.5",416.1,23.78%,29.3,123.1,269.2,62.4,201.9,2.1,"1,529.2",15.9 +Poonawalla Fincorp Ltd.,POONAWALLA,524000,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),745.3,178.9,531.7,71.98%,14.7,215.5,"1,124.6",270,860.2,11.2,"1,466.4",19.1 +The Fertilisers and Chemicals Travancore Ltd.,FACT,590024,FERTILIZERS,FERTILIZERS,"1,713.6","1,530.8",132.4,7.96%,5.3,61.2,105.2,0,105.2,1.6,508.4,7.9 +Home First Finance Company India Ltd.,HOMEFIRST,543259,BANKING AND FINANCE,HOUSING FINANCE,278,53.7,211.6,77.43%,2.8,117,96.4,22.1,74.3,8.4,266.2,30.2 +CG Power and Industrial Solutions Ltd.,CGPOWER,500093,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"2,019","1,692.9",308.6,15.42%,22.9,0.4,329.9,86.2,242.3,1.6,"1,1",7.2 +Laxmi Organic Industries Ltd.,LXCHEM,543277,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,660.5,613.3,38.9,5.97%,27.5,2.1,17.5,6.8,10.7,0.4,100.6,3.8 +Anupam Rasayan India Ltd.,ANURAS,543275,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,395.6,284.7,107.5,27.41%,19.8,20.4,70.7,22,40.7,3.8,178.9,16.6 +Kalyan Jewellers India Ltd.,KALYANKJIL,ASM,TEXTILES APPARELS & ACCESSORIES,GEMS & JEWELLERY,"4,427.7","4,100.9",313.7,7.11%,66.9,81.7,178.1,43.3,135.2,1.3,497.9,4.8 +Jubilant Pharmova Ltd.,JUBLPHARMA,530019,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,690.2","1,438.5",241.8,14.39%,96.6,66.1,89,35.9,62.5,3.9,-44.6,-2.8 +Indigo Paints Ltd.,INDIGOPNTS,543258,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,273.4,228.7,41.8,15.45%,10,0.5,34.3,8.2,26.1,5.5,132.4,27.8 +Indian Railway Finance Corporation Ltd.,IRFC,543257,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"6,767.5",33.5,"6,732.4",99.50%,2.1,"5,181.5","1,549.9",0,"1,549.9",1.2,"6,067.6",4.6 +Mastek Ltd.,MASTEK,523704,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,770.4,642.5,123,16.07%,20.9,12.6,90.3,25,62.8,20.5,269.7,88 +Equitas Small Finance Bank Ltd.,EQUITASBNK,543243,BANKING AND FINANCE,BANKS,"1,540.4",616.8,330.2,24.30%,0,593.4,267,68.9,198.1,1.8,749.5,6.7 +Tata Teleservices (Maharashtra) Ltd.,TTML,532371,TELECOM SERVICES,TELECOM SERVICES,288.6,159.3,127.5,44.45%,36.3,403.2,-310.2,0,-310.2,-1.6,"-1,168.3",-6 +Praj Industries Ltd.,PRAJIND,522205,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,893.3,798.4,84,9.52%,9.1,1,84.8,22.4,62.4,3.4,271.4,14.8 +Nazara Technologies Ltd.,NAZARA,543280,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,309.5,269.4,26.7,8.98%,15.1,2.7,21.2,-1.3,19.8,3,60,9.1 +Jubilant Ingrevia Ltd.,JUBLINGREA,543271,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,028.5",902.3,117.7,11.54%,33.9,12.5,79.8,22.4,57.5,3.6,258.9,16.4 +Sona BLW Precision Forgings Ltd.,SONACOMS,543300,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,796.9,567.5,223.3,28.24%,53.4,6,164.1,40.1,123.8,2.1,462.8,7.9 +Chemplast Sanmar Ltd.,CHEMPLASTS,543336,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,025",941.8,46,4.65%,35.3,38.6,9.2,-16.8,26.1,1.6,35.3,2.2 +Aptus Value Housing Finance India Ltd.,APTUS,543335,BANKING AND FINANCE,HOUSING FINANCE,344.5,50.6,277.5,83.18%,2.6,96.1,189.6,41.5,148,3,551.1,11.1 +Clean Science & Technology Ltd.,CLEAN,543318,CHEMICALS & 
PETROCHEMICALS,SPECIALTY CHEMICALS,187.1,106.3,74.8,41.32%,11.1,0.3,69.5,17.3,52.2,4.9,275.5,25.9 +Medplus Health Services Ltd.,MEDPLUS,543427,HEALTHCARE EQUIPMENT & SUPPLIES,HEALTHCARE SUPPLIES,"1,419","1,323.5",85.1,6.04%,55.5,23.5,16.4,1.9,14.6,1.2,58.3,4.9 +Nuvoco Vistas Corporation Ltd.,NUVOCO,543334,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,578.9","2,243",329.9,12.82%,225.6,139.9,-29.6,-31.1,1.5,0,141.8,4 +Star Health and Allied Insurance Company Ltd.,STARHEALTH,543412,BANKING AND FINANCE,GENERAL INSURANCE,"3,463.2","3,295.8",165.7,4.79%,0,0,167.1,41.8,125.3,2.1,725.4,12.4 +Go Fashion (India) Ltd.,GOCOLORS,543401,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,192.8,132.2,56.6,29.98%,25.8,8.9,25.8,5.7,20,3.7,85.4,15.8 +PB Fintech Ltd.,POLICYBZR,543390,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,909.1,900.7,-89.1,-10.98%,22.3,7.2,-21.1,-0.3,-20.2,-0.5,-127.9,-2.8 +FSN E-Commerce Ventures Ltd.,NYKAA,543384,SOFTWARE & SERVICES,INTERNET & CATALOGUE RETAIL,"1,515.6","1,426.4",80.6,5.35%,54.6,21.3,13.3,4,5.8,0,19.8,0.1 +Krishna Institute of Medical Sciences Ltd.,KIMS,543308,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,655.4,475.2,177.3,27.17%,32.6,8.9,138.6,37.3,92,11.5,342.1,42.7 +Zomato Ltd.,ZOMATO,543320,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"3,06","2,895",-47,-1.65%,128,16,21,-15,36,0,-496.8,-0.6 +Brightcom Group Ltd.,BCG,532368,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"1,690.5","1,172.3",518,30.65%,72.3,0.1,445.8,124.3,321.5,1.6,"1,415.2",7 +Shyam Metalics and Energy Ltd.,SHYAMMETL,543299,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"2,978.9","2,633.6",307.1,10.44%,176.5,35.4,133.4,-348.6,484.1,18.9,"1,049.9",41.2 +G R Infraprojects Ltd.,GRINFRA,543317,CEMENT AND CONSTRUCTION,ROADS & HIGHWAYS,"1,909.2","1,415.7",467.1,24.81%,61.7,144.6,287.1,69.9,217.2,22.5,"1,240.3",128.3 +RattanIndia Enterprises Ltd.,RTNINDIA,534597,UTILITIES,ELECTRIC UTILITIES,"1,618.1","1,392.8",1.5,0.11%,4.3,28.8,142.2,1.7,140.9,1,147.6,1.1 +Borosil Renewables Ltd.,BORORENEW,502219,CONSUMER DURABLES,HOUSEWARE,406.3,369.2,32.5,8.09%,31,9.6,28.9,-1.1,25.1,1.9,32.1,2.5 +HLE Glascoat Ltd.,HLEGLAS,522215,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,227.8,198,26.5,11.79%,6.1,5.8,16.1,5.3,10,1.6,54.4,8 +Tata Investment Corporation Ltd.,TATAINVEST,501301,DIVERSIFIED,HOLDING COMPANIES,125,10.1,113.8,91.88%,0.2,4.7,110.1,-1.3,124.4,24.6,326.1,64.4 +Sapphire Foods India Ltd.,SAPPHIRE,543397,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,650.1,527.5,115.1,17.91%,76.8,24.5,21.4,6.2,15.3,2.4,208.5,32.7 +Devyani International Ltd.,DEVYANI,543330,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,826,665,154.4,18.84%,86.3,41.7,19,-16.8,33.4,0.3,177.5,1.5 +Vijaya Diagnostic Centre Ltd.,VIJAYA,543350,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE SERVICES,145.6,81.5,57.4,41.31%,13.7,5.9,44.6,11,33.3,3.3,103.4,10.1 +C.E. 
Info Systems Ltd.,MAPMYINDIA,543425,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,99.3,50.1,41,44.98%,3.7,0.7,44.7,11.1,33,6.1,122.9,22.7 +Latent View Analytics Ltd.,LATENTVIEW,543398,SOFTWARE & SERVICES,DATA PROCESSING SERVICES,172.7,124.9,30.8,19.78%,2.3,0.8,44.7,10.6,34,1.7,153.6,7.5 +Metro Brands Ltd.,METROBRAND,543426,RETAILING,FOOTWEAR,571.9,400.3,155.4,27.96%,57.2,19.7,94.7,27.5,66.7,2.5,340,12.5 +Easy Trip Planners Ltd.,EASEMYTRIP,543272,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,144.6,76.9,64.8,45.71%,1,2,64.7,17.7,47.2,0.3,146,0.8 +Shree Renuka Sugars Ltd.,RENUKA,532670,FOOD BEVERAGES & TOBACCO,SUGAR,"2,564.7","2,491",63.7,2.49%,64.1,216.8,-207.2,-1.6,-204.9,-1,-286,-1.3 +One97 Communications Ltd.,PAYTM,543396,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"2,662.5","2,749.6",-231,-9.17%,180.1,7,-279.9,12.7,-290.5,-5,"-1,207.9",-19 +MTAR Technologies Ltd.,MTARTECH,543270,GENERAL INDUSTRIALS,DEFENCE,167.7,130.7,36.1,21.64%,5.8,5.5,25.7,5.2,20.5,6.7,103.3,33.6 +Capri Global Capital Ltd.,CGCL,531595,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),557.4,229.3,304.8,54.70%,23.1,195.8,86,20.8,65.2,3.2,231.2,11.2 +GMR Airports Infrastructure Ltd.,GMRINFRA,ASM,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"2,185","1,336.8",726.7,35.22%,373,695.8,-252,54.9,-91,-0.1,-370.9,-0.6 +Triveni Engineering & Industries Ltd.,TRIVENI,532356,FOOD BEVERAGES & TOBACCO,SUGAR,"1,629.7","1,554.5",62.9,3.89%,25.8,10.2,39.3,10.1,29.1,1.3,434.3,19.8 +Delhivery Ltd.,DELHIVERY,543529,TRANSPORTATION,TRANSPORTATION - LOGISTICS,"2,043","1,957.3",-15.6,-0.80%,171.2,19.6,-105.2,-2.1,-102.9,-1.4,-546.7,-7.5 +Life Insurance Corporation of India,LICI,543526,BANKING AND FINANCE,LIFE INSURANCE,"202,394.9","193,612.5","8,445",4.18%,0,0,"8,696.5","1,083.9","8,030.3",12.7,"37,204.8",58.8 +Campus Activewear Ltd.,CAMPUS,543523,RETAILING,FOOTWEAR,259.1,234.2,24.5,9.46%,18.1,6.5,0.4,0.1,0.3,0,103.1,3.4 +Motherson Sumi Wiring India Ltd.,MSUMI,543498,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,110.2","1,856.5",248.1,11.79%,36.4,7.4,210,54.1,155.9,0.3,523.6,1.2 +Olectra Greentech Ltd.,OLECTRA,532439,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,310.3,266.6,40.5,13.20%,8.8,9.7,25.2,8,18.6,2.2,78.5,9.6 +Patanjali Foods Ltd.,PATANJALI,500368,FMCG,EDIBLE OILS,"7,845.8","7,426.6",395.3,5.05%,60.1,24,335.1,80.5,254.5,7,875.2,24.2 +Raymond Ltd.,RAYMOND,500330,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"2,320.7","1,938.8",314.6,13.96%,65.4,89.3,204.2,50.7,159.8,24,"1,514.2",227.5 +Swan Energy Ltd.,SWANENERGY,503310,REALTY,REALTY,"1,230.1",966.3,257,21.01%,27.1,58.3,178.4,12.8,84.6,6.7,308.4,11.7 +Samvardhana Motherson International Ltd.,MOTHERSON,517334,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"23,639.2","21,585","1,888.8",8.05%,867.4,487.9,449.5,229.2,201.6,0.3,"1,910.3",2.8 +Vedant Fashions Ltd.,MANYAVAR,543463,RETAILING,SPECIALTY RETAIL,233.4,125.5,92.8,42.51%,32.5,10.7,64.8,16.1,48.7,2,399.9,16.5 +Adani Wilmar Ltd.,AWL,543458,FMCG,EDIBLE OILS,"12,331.2","12,123.5",143.7,1.17%,95.7,220.2,-161.8,-31.5,-130.7,-1,130.1,1 +Mahindra Lifespace Developers Ltd.,MAHLIFE,532313,REALTY,REALTY,25.7,52.7,-34.9,-196.45%,3.1,0.2,-30.3,-10.8,-18.9,-1.2,10.5,0.7 +Tejas Networks Ltd.,TEJASNET,540595,TELECOM SERVICES,OTHER TELECOM SERVICES,413.9,383,13,3.28%,41.7,7,-17.7,-5.1,-12.6,-0.7,-61.3,-3.5 +Aether Industries Ltd.,AETHER,543534,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,178.3,118.2,46,28.00%,9.7,1.6,48.7,12.1,36.7,2.8,139.1,10.5 +JBM Auto Ltd.,JBMA,ASM,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & 
EQUIPMENT,"1,238.8","1,091.3",139.7,11.35%,41.2,47.9,58.3,11.3,44.2,3.7,136.8,11.6 +Deepak Fertilisers & Petrochemicals Corporation Ltd.,DEEPAKFERT,500645,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"2,443.2","2,138.1",286.1,11.80%,81.2,107.1,116.8,53.3,60.1,4.8,674.5,53.4 +Sharda Cropchem Ltd.,SHARDACROP,538666,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,604.3,559.6,21.2,3.65%,74,4.6,-33.8,-6.3,-27.6,-3.1,191,21.2 +Shoppers Stop Ltd.,SHOPERSTOP,532638,RETAILING,DEPARTMENT STORES,"1,049.7",878.2,160.9,15.49%,108.2,54.9,3.5,0.8,2.7,0.2,94.2,8.6 +BEML Ltd.,BEML,500048,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,924,855.3,61.5,6.70%,15.8,10.8,42.2,-9.6,51.8,12.4,200.8,48.2 +Lemon Tree Hotels Ltd.,LEMONTREE,541233,HOTELS RESTAURANTS & TOURISM,HOTELS,230.1,125.3,101.9,44.84%,22.6,47.3,34.8,8.6,22.6,0.3,130.1,1.6 +Rainbow Childrens Medicare Ltd.,RAINBOW,543524,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,340.5,215.1,117.6,35.34%,26.8,13.3,85.2,22.1,62.9,6.2,215.4,21.2 +UCO Bank,UCOBANK,532505,BANKING AND FINANCE,BANKS,"5,865.6","1,581.5",981.9,18.81%,0,"3,302.3",639.8,238.1,403.5,0.3,"1,84",1.5 +Piramal Pharma Ltd.,PPLPHARMA,543635,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,960.6","1,645.7",265.6,13.90%,184.5,109.9,20.4,34.5,5,0,-133.6,-1 +KSB Ltd.,KSB,500249,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,572.2,493.4,70.3,12.47%,12.3,2,64.5,17.1,50.1,14.4,209.7,60.3 +Data Patterns (India) Ltd.,DATAPATTNS,543428,GENERAL INDUSTRIALS,DEFENCE,119.2,67.5,40.8,37.63%,3.1,2.3,46.3,12.5,33.8,6,148.3,26.5 +Global Health Ltd.,MEDANTA,543654,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,864.7,631.1,212.9,25.22%,42.9,20.1,170.6,45.4,125.2,4.7,408.9,15.2 +Aarti Industries Ltd.,AARTIIND,524208,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,454","1,221.2",232.8,16.01%,93,58.2,81.6,-9.1,90.7,2.5,446.2,12.3 +BLS International Services Ltd.,BLS,540073,DIVERSIFIED CONSUMER SERVICES,TRAVEL SUPPORT SERVICES,416.4,321,86.7,21.27%,7.3,1,87.2,5.2,78.7,1.9,267.6,6.5 +Archean Chemical Industries Ltd.,ACI,543657,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,301.7,195,95.5,32.86%,17.5,1.9,87.3,21.3,66,5.4,394.4,32.1 +Adani Power Ltd.,ADANIPOWER,ASM,UTILITIES,ELECTRIC UTILITIES,"14,935.7","7,819.2","5,171.4",39.81%,"1,004.5",888.4,"5,223.6","-1,370.6","6,594.2",16.5,"20,604.8",53.4 +Craftsman Automation Ltd.,CRAFTSMAN,543276,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,183.8",941.6,237.5,20.14%,66.8,41.6,133.8,29.6,94.5,44.1,298.3,141.2 +NMDC Ltd.,NMDC,526371,METALS & MINING,MINING,"4,335","2,823.6","1,190.4",29.66%,88.8,18.6,"1,404.1",379,"1,026.2",3.5,"5,862.2",20 +Epigral Ltd.,EPIGRAL,543332,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,479.1,370.2,107.9,22.57%,31.5,21.3,56.1,17.9,38,9.1,223.4,53.8 +Apar Industries Ltd.,APARINDS,532259,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"3,944.7","3,576.2",349.8,8.91%,28.2,103.1,237.3,62.9,173.9,45.4,783.9,204.8 +Bikaji Foods International Ltd.,BIKAJI,543653,FMCG,PACKAGED FOODS,614.7,521,87.7,14.41%,15.6,2.9,75.2,15.4,61.2,2.5,173.6,6.9 +Five-Star Business Finance Ltd.,FIVESTAR,543663,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),522.4,133.2,375,72.28%,5.7,105.9,267,67.6,199.4,6.8,703,24.1 +Ingersoll-Rand (India) Ltd.,INGERRAND,500210,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,282.8,210.7,65.7,23.76%,4.6,0.6,67,17.2,49.7,15.8,218.5,69.2 +KFIN Technologies Ltd.,KFINTECH,543720,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,215.3,115.3,93.7,44.82%,12.6,3.2,84.2,22.3,61.4,3.6,215.1,12.6 +Piramal Enterprises Ltd.,PEL,500302,BANKING AND 
FINANCE,FINANCE (INCLUDING NBFCS),"2,205.2","1,320.1","1,117.9",50.97%,38.3,"1,038.9",-11.8,10.7,48.2,2,"3,906.5",173.9 +NMDC Steel Ltd.,NSLNISP,543768,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,290.3,349.6,-72.2,-26.04%,74.5,40.8,-174.7,-43.6,-131.1,-0.5,, +Eris Lifesciences Ltd.,ERIS,540596,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,508.8,324.2,181.1,35.85%,42.1,16.3,126.2,3.9,123.4,9.1,385.6,28.3 +Mankind Pharma Ltd.,MANKIND,543904,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"2,768.1","2,025.5",682.6,25.21%,96.5,8.6,637.5,129.8,501,12.5,"1,564.8",39.1 +Kaynes Technology India Ltd.,KAYNES,ASM,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,369.8,312.1,48.8,13.52%,6.5,11.8,39.4,7.1,32.3,5.5,143.2,24.6 +Safari Industries (India) Ltd.,SAFARI,523025,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,372.9,306.6,63.5,17.15%,12.2,2.2,51.9,12.1,39.8,16.7,162.3,68.2 +Saregama India Ltd.,SAREGAMA,532163,MEDIA,MOVIES & ENTERTAINMENT,185.6,111.5,60.9,35.32%,8.2,0.2,65.6,17.6,48.1,2.5,193.4,10 +Syrma SGS Technology Ltd.,SYRMA,543573,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,720.6,662.7,49,6.88%,11.6,8,37,6.4,28.3,1.6,132.4,7.5 +Jindal Saw Ltd.,JINDALSAW,ASM,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"5,488.9","4,662",804.2,14.71%,142.5,188.7,495.6,139.6,375.7,11.8,"1,135.8",35.5 +Godawari Power & Ispat Ltd.,GPIL,532734,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"1,314.2",929.6,361.4,28.00%,34.8,10.2,339.6,86.1,256.9,20.6,785.5,63 +Gillette India Ltd.,GILLETTE,507815,FMCG,PERSONAL PRODUCTS,676.2,530.8,136.7,20.48%,20.1,0.1,125.2,32.5,92.7,28.4,361.6,111 +Symphony Ltd.,SYMPHONY,517385,CONSUMER DURABLES,CONSUMER ELECTRONICS,286,234,41,14.91%,7,2,43,8,35,5.1,114,16.5 +Glenmark Life Sciences Ltd.,GLS,543322,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,600.7,428.3,167.1,28.06%,13.1,0.4,158.9,40.2,118.7,9.7,505.5,41.3 +Usha Martin Ltd.,USHAMART,517146,METALS & MINING,IRON & STEEL PRODUCTS,806,640.4,144.3,18.39%,18,6.4,141.2,35,109.5,3.6,399.4,13.1 +Ircon International Ltd.,IRCON,541956,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"3,136.3","2,771.2",215.7,7.22%,27.1,36.9,301.2,77.6,250.7,2.7,884.6,9.4 +Ujjivan Small Finance Bank Ltd.,UJJIVANSFB,542904,BANKING AND FINANCE,BANKS,"1,579.8",528.6,483.4,34.75%,0,567.8,436.4,108.7,327.7,1.7,"1,254.5",6.4 +Procter & Gamble Health Ltd.,PGHL,500126,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,311,216.3,88.7,29.08%,6.5,0.2,88,22.5,65.6,39.5,231.4,139.4 +Allcargo Logistics Ltd.,ALLCARGO,532749,TRANSPORTATION,TRANSPORTATION - LOGISTICS,"3,336.3","3,188.8",118,3.57%,106.7,36.7,14.2,1.3,21.8,0.9,361.9,14.7 +Sheela Foam Ltd.,SFL,540203,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,637.6,547,66.2,10.80%,21.9,8.6,60.2,15.6,44,4.5,192.4,17.7 +Alok Industries Ltd.,ALOKINDS,521070,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"1,369.3","1,323.1",35.9,2.64%,78.6,142.2,-174.6,0,-174.8,-0.3,-948.4,-1.9 +Minda Corporation Ltd.,MINDACORP,538962,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,197.9","1,064.5",131.3,10.98%,41.4,14.9,77,18.7,58.8,2.5,278.2,11.6 +Concord Biotech Ltd.,CONCORDBIO,543960,PHARMACEUTICALS & BIOTECHNOLOGY,BIOTECHNOLOGY,270.5,143.2,119.2,45.43%,13.3,0.8,113.2,28.7,81,7.7,, \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/product_info_1.md b/sdk/ai/azure-ai-assistants/samples/product_info_1.md new file mode 100644 index 000000000000..041155831d53 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/product_info_1.md @@ -0,0 +1,51 @@ +# Information about product 
item_number: 1
+
+## Brand
+Contoso Galaxy Innovations
+
+## Category
+Smart Eyewear
+
+## Features
+- Augmented Reality interface
+- Voice-controlled AI assistant
+- HD video recording with 3D audio
+- UV protection and blue light filtering
+- Wireless charging with extended battery life
+
+## User Guide
+
+### 1. Introduction
+Introduction to your new SmartView Glasses
+
+### 2. Product Overview
+Overview of features and controls
+
+### 3. Sizing and Fit
+Finding your perfect fit and style adjustments
+
+### 4. Proper Care and Maintenance
+Cleaning and caring for your SmartView Glasses
+
+### 5. Break-in Period
+Adjusting to the augmented reality experience
+
+### 6. Safety Tips
+Safety guidelines for public and private spaces
+
+### 7. Troubleshooting
+Quick fixes for common issues
+
+## Warranty Information
+Two-year limited warranty on all electronic components
+
+## Contact Information
+Customer Support at support@contoso-galaxy-innovations.com
+
+## Return Policy
+30-day return policy with no questions asked
+
+## FAQ
+- How to sync your SmartView Glasses with your devices
+- Troubleshooting connection issues
+- Customizing your augmented reality environment
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py
new file mode 100644
index 000000000000..54f4485098f3
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py
@@ -0,0 +1,135 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with the
+    Azure AI Search tool from the Azure Assistants service using a synchronous client.
+
+PREREQUISITES:
+    You will need an Azure AI Search resource.
+    If you already have one, you must create an assistant that can use an existing Azure AI Search index:
+    https://learn.microsoft.com/azure/ai-services/assistants/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search
+
+    If you do not already have an assistant set up with an Azure AI Search resource, follow the guide for a Standard assistant setup:
+    https://learn.microsoft.com/azure/ai-services/assistants/quickstart?pivots=programming-language-python-azure
+
+USAGE:
+    python sample_assistants_azure_ai_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint, as found in the overview page of your
+       Azure AI Foundry project.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) AI_AZURE_AI_CONNECTION_ID - The connection ID of the Azure AI Search connection to your Foundry project,
+       as found under the "Name" column in the "Connected Resources" tab in your Azure AI Foundry project.
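+
+    For example, you might set them from a bash shell as follows (placeholder
+    values for illustration only; substitute the values from your own project):
+
+    export PROJECT_ENDPOINT="<your Azure AI Foundry project endpoint>"
+    export MODEL_DEPLOYMENT_NAME="<your model deployment name>"
+    export AI_AZURE_AI_CONNECTION_ID="<resource ID of your Azure AI Search connection>"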
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import AzureAISearchQueryType, AzureAISearchTool, ListSortOrder, MessageRole + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# [START create_assistant_with_azure_ai_search_tool] +conn_id = os.environ["AI_AZURE_AI_CONNECTION_ID"] + +print(conn_id) + +# Initialize assistant AI search tool and add the search index connection id +ai_search = AzureAISearchTool( + index_connection_id=conn_id, index_name="sample_index", query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter="" +) + +# Create assistant with AI search tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=ai_search.definitions, + tool_resources=ai_search.resources, + ) + # [END create_assistant_with_azure_ai_search_tool] + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What is the temperature rating of the cozynights sleeping bag?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Fetch run steps to get the details of the assistant run + run_steps = assistants_client.list_run_steps(thread_id=thread.id, run_id=run.id) + for step in run_steps.data: + print(f"Step {step['id']} status: {step['status']}") + step_details = step.get("step_details", {}) + tool_calls = step_details.get("tool_calls", []) + + if tool_calls: + print(" Tool calls:") + for call in tool_calls: + print(f" Tool Call ID: {call.get('id')}") + print(f" Type: {call.get('type')}") + + azure_ai_search_details = call.get("azure_ai_search", {}) + if azure_ai_search_details: + print(f" azure_ai_search input: {azure_ai_search_details.get('input')}") + print(f" azure_ai_search output: {azure_ai_search_details.get('output')}") + print() # add an extra newline between steps + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # [START populate_references_assistant_with_azure_ai_search_tool] + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for message in messages.data: + if message.role == MessageRole.ASSISTANT and message.url_citation_annotations: + placeholder_annotations = { + annotation.text: f" [see {annotation.url_citation.title}] ({annotation.url_citation.url})" + for annotation in message.url_citation_annotations + } + for message_text in message.text_messages: + message_str = message_text.text.value + for k, v in placeholder_annotations.items(): + message_str = message_str.replace(k, v) + print(f"{message.role}: {message_str}") + else: + for message_text in message.text_messages: + print(f"{message.role}: {message_text.text.value}") + # [END 
populate_references_assistant_with_azure_ai_search_tool] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py new file mode 100644 index 000000000000..089cd4e5679b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py @@ -0,0 +1,102 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use azure function assistant operations from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_azure_functions.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. + Please see Getting Started with Azure Functions page for more information on Azure Functions: + https://learn.microsoft.com/azure/azure-functions/functions-get-started +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import AzureFunctionStorageQueue, AzureFunctionTool, MessageRole +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + + storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPONT"] + + # [START create_assistant_with_azure_function_tool] + azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_service_endpoint, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_service_endpoint, + ), + ) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="azure-function-assistant-foo", + instructions=f"You are a helpful support assistant. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. Always responds with \"Foo says\" and then the response from the tool.", + tools=azure_function_tool.definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + # [END create_assistant_with_azure_function_tool] + + # Create a thread + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What is the most prevalent element in the universe? 
What would foo say?", + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Get messages from the thread + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + # Get the last message from assistant + last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + # Delete the assistant once done + result = assistants_client.delete_assistant(assistant.id) + if result.deleted: + print(f"Deleted assistant {result.id}") + else: + print(f"Failed to delete assistant {result.id}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py new file mode 100644 index 000000000000..b9d19c473807 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py @@ -0,0 +1,81 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_basics.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os, time +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ListSortOrder, MessageTextContent + +# [START create_project_client] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) +# [END create_project_client] + +with assistants_client: + + # [START create_assistant] + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + # [END create_assistant] + print(f"Created assistant, assistant ID: {assistant.id}") + + # [START create_thread] + thread = assistants_client.create_thread() + # [END create_thread] + print(f"Created thread, thread ID: {thread.id}") + + # [START create_message] + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + # [END create_message] + print(f"Created message, message ID: {message.id}") + + # [START create_run] + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + # [END create_run] + print(f"Run status: {run.status}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # [START list_messages] + messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for data_point in messages.data: + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + # [END list_messages] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..19ead3c9bd9b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py @@ -0,0 +1,81 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a synchronous client with Azure Monitor tracing. + View the results in the "Tracing" tab in your Azure AI Foundry project page. + +USAGE: + python sample_assistants_basics_with_azure_monitor_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity azure-monitor-opentelemetry + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" + +import os, time +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.telemetry import enable_telemetry +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# [START enable_tracing] +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor + +# Enable Azure Monitor tracing +application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] +configure_azure_monitor(connection_string=application_insights_connection_string) + +# enable additional instrumentations +enable_telemetry() + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: + # [END enable_tracing] + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a hilarious joke" + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py new file mode 100644 index 000000000000..daa5f8cf0f02 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py @@ -0,0 +1,78 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a synchronous client with tracing to console. + +USAGE: + python sample_assistants_basics_with_console_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" + +import os, sys, time +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.telemetry import enable_telemetry +from azure.identity import DefaultAzureCredential +from opentelemetry import trace + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# assistants_client.telemetry.enable(destination="http://localhost:4317") +enable_telemetry(destination=sys.stdout) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py new file mode 100644 index 000000000000..3f0f8be8055b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py @@ -0,0 +1,106 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a synchronous client with tracing to console and adding + custom attributes to the span. + +USAGE: + python sample_assistants_basics_with_console_tracing_custom_attributes.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" + +import os, sys, time +from typing import cast +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ListSortOrder +from azure.ai.assistants.telemetry import enable_telemetry +from azure.identity import DefaultAzureCredential +from opentelemetry import trace +from opentelemetry.sdk.trace import SpanProcessor, ReadableSpan, Span, TracerProvider + + +# Define the custom span processor that is used for adding the custom +# attributes to spans when they are started. +class CustomAttributeSpanProcessor(SpanProcessor): + def __init__(self): + pass + + def on_start(self, span: Span, parent_context=None): + # Add this attribute to all spans + span.set_attribute("trace_sample.sessionid", "123") + + # Add another attribute only to create_message spans + if span.name == "create_message": + span.set_attribute("trace_sample.message.context", "abc") + + def on_end(self, span: ReadableSpan): + # Clean-up logic can be added here if necessary + pass + + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# enable_telemetry(destination="http://localhost:4317") +enable_telemetry(destination=sys.stdout) + +# Add the custom span processor to the global tracer provider +provider = cast(TracerProvider, trace.get_tracer_provider()) +provider.add_span_processor(CustomAttributeSpanProcessor()) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py new file mode 100644 index 000000000000..b70e5db51aaa --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py @@ -0,0 +1,89 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with the Bing grounding tool from + the Azure Assistants service using a synchronous client. 
+
+USAGE:
+    python sample_assistants_bing_grounding.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, as found in the
+       "Connected resources" tab in your Azure AI Foundry project.
+"""
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import MessageRole, BingGroundingTool
+from azure.identity import DefaultAzureCredential
+
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+# [START create_assistant_with_bing_grounding_tool]
+conn_id = os.environ["AZURE_BING_CONNECTION_ID"]
+
+print(conn_id)
+
+# Initialize assistant bing tool and add the connection id
+bing = BingGroundingTool(connection_id=conn_id)
+
+# Create assistant with the bing tool and process assistant run
+with assistants_client:
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=bing.definitions,
+        headers={"x-ms-enable-preview": "true"},
+    )
+    # [END create_assistant_with_bing_grounding_tool]
+
+    print(f"Created assistant, ID: {assistant.id}")
+
+    # Create thread for communication
+    thread = assistants_client.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = assistants_client.create_message(
+        thread_id=thread.id,
+        role=MessageRole.USER,
+        content="How does Wikipedia explain Euler's Identity?",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process assistant run in thread with tools
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the assistant when done
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    # Print the Assistant's response message with optional citation
+    response_message = assistants_client.list_messages(thread_id=thread.id).get_last_message_by_role(
+        MessageRole.ASSISTANT
+    )
+    if response_message:
+        for text_message in response_message.text_messages:
+            print(f"Assistant response: {text_message.text.value}")
+        for annotation in response_message.url_citation_annotations:
+            print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py
new file mode 100644
index 000000000000..fc5fc10c7441
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py
@@ -0,0 +1,105 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with code interpreter from
+    the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_code_interpreter.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import CodeInterpreterTool
+from azure.ai.assistants.models import FilePurpose, MessageRole
+from azure.identity import DefaultAzureCredential
+from pathlib import Path
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+
+    # Upload a file and wait for it to be processed
+    # [START upload_file_and_create_assistant_with_code_interpreter]
+    file = assistants_client.upload_file_and_poll(
+        file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS
+    )
+    print(f"Uploaded file, file ID: {file.id}")
+
+    code_interpreter = CodeInterpreterTool(file_ids=[file.id])
+
+    # Create assistant with code interpreter tool and tools_resources
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=code_interpreter.definitions,
+        tool_resources=code_interpreter.resources,
+    )
+    # [END upload_file_and_create_assistant_with_code_interpreter]
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    # Create a message
+    message = assistants_client.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Could you please create a bar chart of the operating profit in the TRANSPORTATION sector from the uploaded CSV file, and provide the file to me?",
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        # Check if you got "Rate limit is exceeded.", then you want to get more quota
+        print(f"Run failed: {run.last_error}")
+
+    assistants_client.delete_file(file.id)
+    print("Deleted file")
+
+    # [START get_messages_and_save_files]
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
+
+    for image_content in messages.image_contents:
+        file_id = image_content.image_file.file_id
+        print(f"Image File ID: {file_id}")
+        file_name = f"{file_id}_image_file.png"
+        assistants_client.save_file(file_id=file_id, file_name=file_name)
+        print(f"Saved image file to: {Path.cwd() / file_name}")
+
+    for file_path_annotation in messages.file_path_annotations:
+        print("File Paths:")
+        print(f"Type: {file_path_annotation.type}")
+        print(f"Text: {file_path_annotation.text}")
+        print(f"File ID: {file_path_annotation.file_path.file_id}")
+        print(f"Start Index: {file_path_annotation.start_index}")
+        print(f"End Index: {file_path_annotation.end_index}")
+    # [END get_messages_and_save_files]
+
+    last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT)
+    if last_msg:
+        print(f"Last Message: {last_msg.text.value}")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py
new file mode 100644
index 000000000000..846541c92dfd
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py
@@ -0,0 +1,82 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with a code interpreter message
+    attachment backed by an enterprise data source (an Azure blob URI), from the Azure
+    Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_code_interpreter_attachment_enterprise_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) AZURE_BLOB_URI - the URI of the blob storage asset to attach to the message.
+"""
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import (
+    CodeInterpreterTool,
+    MessageAttachment,
+    VectorStoreDataSource,
+    VectorStoreDataSourceAssetType,
+)
+from azure.identity import DefaultAzureCredential
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+
+    # [START create_assistant]
+    code_interpreter = CodeInterpreterTool()
+
+    # Note that the code interpreter tool must be enabled when the assistant is created;
+    # otherwise the assistant will not be able to see the file attachment
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=code_interpreter.definitions,
+    )
+    # [END create_assistant]
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    # [START upload_file_and_create_message_with_code_interpreter]
+    # We will upload the local file to Azure and use it for vector store creation.
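+    # AZURE_BLOB_URI is assumed to be the URI of a data asset already uploaded to your
+    # project's storage; a hypothetical example of the expected form is
+    # "azureml://subscriptions/{sub-id}/resourcegroups/{rg}/workspaces/{ws}/datastores/{name}/paths/{file-path}".
+    # Check your project's data assets for the exact value.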
+    asset_uri = os.environ["AZURE_BLOB_URI"]
+    ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+
+    # Create a message with the attachment
+    attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions)
+    message = assistants_client.create_message(
+        thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
+    )
+    # [END upload_file_and_create_message_with_code_interpreter]
+
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        # Check if you got "Rate limit is exceeded.", then you want to get more quota
+        print(f"Run failed: {run.last_error}")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py
new file mode 100644
index 000000000000..262de38b8d1c
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py
@@ -0,0 +1,75 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to add files to the assistant during vector store creation.
+
+USAGE:
+    python sample_assistants_enterprise_file_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity azure-ai-ml
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) AZURE_BLOB_URI - the URI of the blob storage asset used to create the vector store.
+"""
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType
+from azure.identity import DefaultAzureCredential
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+
+    # [START upload_file_and_create_assistant_with_file_search]
+    # We will upload the local file to Azure and use it for vector store creation.
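+    # create_vector_store_and_poll below blocks until the service has finished ingesting
+    # the data source, so the file search tool is ready to use as soon as the call returns.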
+    asset_uri = os.environ["AZURE_BLOB_URI"]
+
+    # Create a vector store from the blob data source and wait for it to be processed
+    ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+    vector_store = assistants_client.create_vector_store_and_poll(data_sources=[ds], name="sample_vector_store")
+    print(f"Created vector store, vector store ID: {vector_store.id}")
+
+    # Create a file search tool
+    file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+    # Note that the FileSearchTool must be added as a tool, and tool_resources must be set;
+    # otherwise the assistant will be unable to search the file
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=file_search_tool.definitions,
+        tool_resources=file_search_tool.resources,
+    )
+    # [END upload_file_and_create_assistant_with_file_search]
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = assistants_client.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Created run, run ID: {run.id}")
+
+    assistants_client.delete_vector_store(vector_store.id)
+    print("Deleted vector store")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py
new file mode 100644
index 000000000000..7f00ceda4050
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py
@@ -0,0 +1,79 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_assistants_fabric.py
+
+DESCRIPTION:
+    This sample demonstrates how to use Assistant operations with the Microsoft Fabric grounding tool from
+    the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_fabric.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) FABRIC_CONNECTION_ID - the connection ID of the Microsoft Fabric connection, as found in the
+       "Connected resources" tab in your Azure AI Foundry project.
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import FabricTool + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# [START create_assistant_with_fabric_tool] +conn_id = os.environ["FABRIC_CONNECTION_ID"] + +print(conn_id) + +# Initialize an Assistant Fabric tool and add the connection id +fabric = FabricTool(connection_id=conn_id) + +# Create an Assistant with the Fabric tool and process an Assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=fabric.definitions, + headers={"x-ms-enable-preview": "true"}, + ) + # [END create_assistant_with_fabric_tool] + print(f"Created Assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="", + ) + print(f"Created message, ID: {message.id}") + + # Create and process an Assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the Assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py new file mode 100644 index 000000000000..206b2893f9a2 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py @@ -0,0 +1,96 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with file searching from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_file_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + FileSearchTool, +) +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + + # Upload file and create vector store + # [START upload_file_create_vector_store_and_assistant_with_file_search_tool] + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") + + vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating assistant + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + # [END upload_file_create_vector_store_and_assistant_with_file_search_tool] + + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + # [START teardown] + # Delete the file when done + assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + assistants_client.delete_file(file_id=file.id) + print("Deleted file") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + # [END teardown] + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + + # Print messages from the thread + for text_message in messages.text_messages: + print(text_message) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py new file mode 100644 index 000000000000..39f548dc7af6 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py @@ -0,0 +1,102 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with custom functions from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_functions.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import os, time
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.assistants.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput
+from user_functions import user_functions
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+# Initialize function tool with user functions
+functions = FunctionTool(functions=user_functions)
+
+with assistants_client:
+    # Create an assistant and run user's request with function calls
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=functions.definitions,
+    )
+    print(f"Created assistant, ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    message = assistants_client.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, please send an email with the current datetime and the weather information for New York.",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Created run, ID: {run.id}")
+
+    while run.status in ["queued", "in_progress", "requires_action"]:
+        time.sleep(1)
+        run = assistants_client.get_run(thread_id=thread.id, run_id=run.id)
+
+        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+            tool_calls = run.required_action.submit_tool_outputs.tool_calls
+            if not tool_calls:
+                print("No tool calls provided - cancelling run")
+                assistants_client.cancel_run(thread_id=thread.id, run_id=run.id)
+                break
+
+            tool_outputs = []
+            for tool_call in tool_calls:
+                if isinstance(tool_call, RequiredFunctionToolCall):
+                    try:
+                        print(f"Executing tool call: {tool_call}")
+                        output = functions.execute(tool_call)
+                        tool_outputs.append(
+                            ToolOutput(
+                                tool_call_id=tool_call.id,
+                                output=output,
+                            )
+                        )
+                    except Exception as e:
+                        print(f"Error executing tool_call {tool_call.id}: {e}")
+
+            print(f"Tool outputs: {tool_outputs}")
+            if tool_outputs:
+                assistants_client.submit_tool_outputs_to_run(
+                    thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+                )
+
+        print(f"Current run status: {run.status}")
+
+    print(f"Run completed with status: {run.status}")
+
+    # Delete the assistant when done
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    # Fetch and log all messages
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py
new file mode 100644
index 000000000000..161636bd3a12
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py
@@ -0,0 +1,149 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use basic assistant operations with function tools from
+    the Azure Assistants service using a synchronous client with Azure Monitor tracing.
+    View the results in the "Tracing" tab in your Azure AI Foundry project page.
+
+USAGE:
+    python sample_assistants_functions_with_azure_monitor_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) AI_APPINSIGHTS_CONNECTION_STRING - The Application Insights connection string used to configure
+       Azure Monitor tracing.
+    4) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+       messages, which may contain personal data. False by default.
+"""
+from typing import Any, Callable, Set
+
+import os, time, json
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.telemetry import trace_function, enable_telemetry
+from azure.identity import DefaultAzureCredential
+from azure.ai.assistants.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput
+from opentelemetry import trace
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+# Enable Azure Monitor tracing
+application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"]
+configure_azure_monitor(connection_string=application_insights_connection_string)
+
+# enable additional instrumentations if needed
+enable_telemetry()
+
+scenario = os.path.basename(__file__)
+tracer = trace.get_tracer(__name__)
+
+
+# The trace_function decorator will trace the function call and enable adding additional attributes
+# to the span in the function implementation. Note that this will trace the function parameters and their values.
+@trace_function()
+def fetch_weather(location: str) -> str:
+    """
+    Fetches the weather information for the specified location.
+
+    :param location: The location to fetch weather for.
+    :type location: str
+    :return: Weather information as a JSON string.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd integrate with a weather API.
+    # Here, we'll mock the response.
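+    # A real implementation might call a public weather API instead; a hypothetical sketch,
+    # assuming the requests package and the wttr.in JSON endpoint:
+    #   import requests
+    #   resp = requests.get(f"https://wttr.in/{location}?format=j1", timeout=10)
+    #   weather = resp.json()["current_condition"][0]["weatherDesc"][0]["value"]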
+ mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + + # Adding attributes to the current span + span = trace.get_current_span() + span.set_attribute("requested_location", location) + + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_weather, +} + +# Initialize function tool with user function +functions = FunctionTool(functions=user_functions) + +with tracer.start_as_current_span(scenario): + with assistants_client: + # Create an assistant and run user's request with function calls + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, + ) + print(f"Created assistant, ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, what is the weather in New York?", + ) + print(f"Created message, ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, ID: {run.id}") + + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + assistants_client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append( + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + assistants_client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print(f"Current run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py new file mode 100644 index 000000000000..04398187aa35 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py @@ -0,0 +1,151 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations with function tools from + the Azure Assistants service using a synchronous client with tracing to console. 
+
+USAGE:
+    python sample_assistants_functions_with_console_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry
+
+    If you want to export telemetry to an OTLP endpoint (such as Aspire dashboard
+    https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash)
+    install:
+
+    pip install opentelemetry-exporter-otlp-proto-grpc
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+       messages, which may contain personal data. False by default.
+"""
+from typing import Any, Callable, Set
+
+import os, sys, time, json
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.assistants.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput
+from azure.ai.assistants.telemetry import trace_function, enable_telemetry
+from opentelemetry import trace
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+# Enable console tracing
+# or, if you have a local OTLP endpoint running, change it to
+# enable_telemetry(destination="http://localhost:4317")
+enable_telemetry(destination=sys.stdout)
+
+scenario = os.path.basename(__file__)
+tracer = trace.get_tracer(__name__)
+
+
+# The trace_function decorator will trace the function call and enable adding additional attributes
+# to the span in the function implementation. Note that this will trace the function parameters and their values.
+@trace_function()
+def fetch_weather(location: str) -> str:
+    """
+    Fetches the weather information for the specified location.
+
+    :param location: The location to fetch weather for.
+    :type location: str
+    :return: Weather information as a JSON string.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd integrate with a weather API.
+    # Here, we'll mock the response.
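+    # Note: the @trace_function decorator wraps this call in its own span, so
+    # trace.get_current_span() below is expected to return that span, and the
+    # custom attribute is recorded on this function's span.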
+    mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"}
+
+    # Adding attributes to the current span
+    span = trace.get_current_span()
+    span.set_attribute("requested_location", location)
+
+    weather = mock_weather_data.get(location, "Weather data not available for this location.")
+    weather_json = json.dumps({"weather": weather})
+    return weather_json
+
+
+# Statically defined user functions for fast reference
+user_functions: Set[Callable[..., Any]] = {
+    fetch_weather,
+}
+
+# Initialize function tool with user function
+functions = FunctionTool(functions=user_functions)
+
+with tracer.start_as_current_span(scenario):
+    with assistants_client:
+        # Create an assistant and run user's request with function calls
+        assistant = assistants_client.create_assistant(
+            model=os.environ["MODEL_DEPLOYMENT_NAME"],
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+            tools=functions.definitions,
+        )
+        print(f"Created assistant, ID: {assistant.id}")
+
+        thread = assistants_client.create_thread()
+        print(f"Created thread, ID: {thread.id}")
+
+        message = assistants_client.create_message(
+            thread_id=thread.id,
+            role="user",
+            content="Hello, what is the weather in New York?",
+        )
+        print(f"Created message, ID: {message.id}")
+
+        run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+        print(f"Created run, ID: {run.id}")
+
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            time.sleep(1)
+            run = assistants_client.get_run(thread_id=thread.id, run_id=run.id)
+
+            if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls
+                if not tool_calls:
+                    print("No tool calls provided - cancelling run")
+                    assistants_client.cancel_run(thread_id=thread.id, run_id=run.id)
+                    break
+
+                tool_outputs = []
+                for tool_call in tool_calls:
+                    if isinstance(tool_call, RequiredFunctionToolCall):
+                        try:
+                            output = functions.execute(tool_call)
+                            tool_outputs.append(
+                                ToolOutput(
+                                    tool_call_id=tool_call.id,
+                                    output=output,
+                                )
+                            )
+                        except Exception as e:
+                            print(f"Error executing tool_call {tool_call.id}: {e}")
+
+                print(f"Tool outputs: {tool_outputs}")
+                if tool_outputs:
+                    assistants_client.submit_tool_outputs_to_run(
+                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+                    )
+
+            print(f"Current run status: {run.status}")
+
+        print(f"Run completed with status: {run.status}")
+
+        # Delete the assistant when done
+        assistants_client.delete_assistant(assistant.id)
+        print("Deleted assistant")
+
+        # Fetch and log all messages
+        messages = assistants_client.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py
new file mode 100644
index 000000000000..36291d015ef8
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py
@@ -0,0 +1,109 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use basic assistant operations using image file input with
+    the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_image_input_base64.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os, time, base64
+from typing import List
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.assistants.models import (
+    MessageTextContent,
+    MessageInputContentBlock,
+    MessageImageUrlParam,
+    MessageInputTextBlock,
+    MessageInputImageUrlBlock,
+)
+
+
+def image_to_base64(image_path: str) -> str:
+    """
+    Convert an image file to a Base64-encoded string.
+
+    :param image_path: The path to the image file (e.g. 'image_file.png')
+    :return: A Base64-encoded string representing the image.
+    :raises FileNotFoundError: If the provided file path does not exist.
+    :raises OSError: If there's an error reading the file.
+    """
+    if not os.path.isfile(image_path):
+        raise FileNotFoundError(f"File not found at: {image_path}")
+
+    try:
+        with open(image_path, "rb") as image_file:
+            file_data = image_file.read()
+        return base64.b64encode(file_data).decode("utf-8")
+    except Exception as exc:
+        raise OSError(f"Error reading file '{image_path}'") from exc
+
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+    )
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    input_message = "Hello, what is in the image?"
+    image_base64 = image_to_base64("image_file.png")
+    img_url = f"data:image/png;base64,{image_base64}"
+    url_param = MessageImageUrlParam(url=img_url, detail="high")
+    content_blocks: List[MessageInputContentBlock] = [
+        MessageInputTextBlock(text=input_message),
+        MessageInputImageUrlBlock(image_url=url_param),
+    ]
+    message = assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks)
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+
+    # Poll the run while it is queued, in progress, or requires action
+    while run.status in ["queued", "in_progress", "requires_action"]:
+        # Wait for a second
+        time.sleep(1)
+        run = assistants_client.get_run(thread_id=thread.id, run_id=run.id)
+        print(f"Run status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    messages = assistants_client.list_messages(thread_id=thread.id)
+
+    # The messages are listed in reverse chronological order, so iterate in
+    # reverse to print them oldest-first, outputting only the text contents.
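+    # Each message may hold several content blocks; only the last block of each
+    # message is inspected here, and it is printed when it is text.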
+    for data_point in reversed(messages.data):
+        last_message_content = data_point.content[-1]
+        if isinstance(last_message_content, MessageTextContent):
+            print(f"{data_point.role}: {last_message_content.text.value}")
+
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py
new file mode 100644
index 000000000000..35662aa3d4b3
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py
@@ -0,0 +1,90 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use basic assistant operations with image file input for
+    the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_image_input_file.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os, time
+from typing import List
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.assistants.models import (
+    MessageTextContent,
+    MessageInputContentBlock,
+    MessageImageFileParam,
+    MessageInputTextBlock,
+    MessageInputImageFileBlock,
+)
+
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+    )
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    image_file = assistants_client.upload_file_and_poll(file_path="image_file.png", purpose="assistants")
+    print(f"Uploaded file, file ID: {image_file.id}")
+
+    input_message = "Hello, what is in the image?"
+    file_param = MessageImageFileParam(file_id=image_file.id, detail="high")
+    content_blocks: List[MessageInputContentBlock] = [
+        MessageInputTextBlock(text=input_message),
+        MessageInputImageFileBlock(image_file=file_param),
+    ]
+    message = assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks)
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+
+    # Poll the run while it is queued, in progress, or requires action
+    while run.status in ["queued", "in_progress", "requires_action"]:
+        # Wait for a second
+        time.sleep(1)
+        run = assistants_client.get_run(thread_id=thread.id, run_id=run.id)
+        print(f"Run status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    messages = assistants_client.list_messages(thread_id=thread.id)
+
+    # The messages are listed in reverse chronological order, so iterate in
+    # reverse to print them oldest-first, outputting only the text contents.
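+    # (For the user message created above, the last content block is the image, so
+    # this loop effectively prints only the assistant's text reply.)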
+    for data_point in reversed(messages.data):
+        last_message_content = data_point.content[-1]
+        if isinstance(last_message_content, MessageTextContent):
+            print(f"{data_point.role}: {last_message_content.text.value}")
+
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py
new file mode 100644
index 000000000000..e8f16cf4aff4
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py
@@ -0,0 +1,89 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use basic assistant operations with image URL input for
+    the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_image_input_url.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os, time
+from typing import List
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.assistants.models import (
+    MessageTextContent,
+    MessageInputContentBlock,
+    MessageImageUrlParam,
+    MessageInputTextBlock,
+    MessageInputImageUrlBlock,
+)
+
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+    )
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+    input_message = "Hello, what is in the image?"
+    url_param = MessageImageUrlParam(url=image_url, detail="high")
+    content_blocks: List[MessageInputContentBlock] = [
+        MessageInputTextBlock(text=input_message),
+        MessageInputImageUrlBlock(image_url=url_param),
+    ]
+    message = assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks)
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+
+    # Poll the run while it is queued, in progress, or requires action
+    while run.status in ["queued", "in_progress", "requires_action"]:
+        # Wait for a second
+        time.sleep(1)
+        run = assistants_client.get_run(thread_id=thread.id, run_id=run.id)
+        print(f"Run status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    messages = assistants_client.list_messages(thread_id=thread.id)
+
+    # The messages are listed in reverse chronological order, so iterate in
+    # reverse to print them oldest-first, outputting only the text contents.
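+    # (Both user and assistant messages are printed; the data_point.role prefix
+    # distinguishes them in the transcript.)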
+    for data_point in reversed(messages.data):
+        last_message_content = data_point.content[-1]
+        if isinstance(last_message_content, MessageTextContent):
+            print(f"{data_point.role}: {last_message_content.text.value}")
+
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py
new file mode 100644
index 000000000000..f44455a7b45e
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py
@@ -0,0 +1,113 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistants with JSON schema output format.
+
+USAGE:
+    python sample_assistants_json_schema.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity pydantic
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os
+
+from enum import Enum
+from pydantic import BaseModel, TypeAdapter
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.assistants.models import (
+    MessageTextContent,
+    MessageRole,
+    ResponseFormatJsonSchema,
+    ResponseFormatJsonSchemaType,
+    RunStatus,
+)
+
+# [START create_assistants_client]
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+# [END create_assistants_client]
+
+
+# Create the pydantic model to represent the planet names and their masses.
+class Planets(str, Enum):
+    Earth = "Earth"
+    Mars = "Mars"
+    Jupiter = "Jupiter"
+
+
+class Planet(BaseModel):
+    planet: Planets
+    mass: float
+
+
+with assistants_client:
+
+    # [START create_assistant]
+    assistant = assistants_client.create_assistant(
+        # Note: only gpt-4o-mini-2024-07-18, gpt-4o-2024-08-06, and later
+        # models support structured output.
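+        # The response_format below constrains replies to JSON that validates against
+        # the Planet schema generated by pydantic's model_json_schema().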
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="Extract the information about planets.",
+        headers={"x-ms-enable-preview": "true"},
+        response_format=ResponseFormatJsonSchemaType(
+            json_schema=ResponseFormatJsonSchema(
+                name="planet_mass",
+                description="Extract planet mass.",
+                schema=Planet.model_json_schema(),
+            )
+        ),
+    )
+    # [END create_assistant]
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    # [START create_thread]
+    thread = assistants_client.create_thread()
+    # [END create_thread]
+    print(f"Created thread, thread ID: {thread.id}")
+
+    # [START create_message]
+    message = assistants_client.create_message(
+        thread_id=thread.id,
+        role="user",
+        content=("The mass of Mars is 6.4171E23 kg; the mass of Earth is 5.972168E24 kg;"),
+    )
+    # [END create_message]
+    print(f"Created message, message ID: {message.id}")
+
+    # [START create_run]
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    # [END create_run]
+
+    if run.status != RunStatus.COMPLETED:
+        print(f"The run did not succeed: {run.status=}.")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    # [START list_messages]
+    messages = assistants_client.list_messages(thread_id=thread.id)
+
+    # The messages are listed in reverse chronological order; iterate them in
+    # reverse and output only the text contents.
+    for data_point in reversed(messages.data):
+        last_message_content = data_point.content[-1]
+        # We will only list assistant responses here.
+        if isinstance(last_message_content, MessageTextContent) and data_point.role == MessageRole.ASSISTANT:
+            planet = TypeAdapter(Planet).validate_json(last_message_content.text.value)
+            print(f"The mass of {planet.planet} is {planet.mass} kg.")
+
+    # [END list_messages]
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py
new file mode 100644
index 000000000000..7ce16ce374f4
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py
@@ -0,0 +1,121 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistants with Logic Apps to execute the task of sending an email.
+
+PREREQUISITES:
+    1) Create a Logic App within the same resource group as your Azure AI Project in Azure Portal
+    2) To configure your Logic App to send emails, you must include an HTTP request trigger that is
+       configured to accept JSON with 'to', 'subject', and 'body'. The guide to creating a Logic App Workflow
+       can be found here:
+       https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling
+
+USAGE:
+    python sample_assistants_logic_apps.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) SUBSCRIPTION_ID - the ID of the Azure subscription that contains your Logic App.
+    4) RESOURCE_GROUP_NAME - the resource group that contains your Logic App.
+
+    Replace the following values in the sample with your own values:
+    1) <LOGIC_APP_NAME> - The name of the Logic App you created.
+    2) <TRIGGER_NAME> - The name of the trigger in the Logic App you created (the default name for HTTP
+       triggers in the Azure Portal is "When_a_HTTP_request_is_received").
+    3) <RECIPIENT_EMAIL> - The email address of the recipient.
+"""
+
+
+import os
+from typing import Set
+
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import ToolSet, FunctionTool
+from azure.identity import DefaultAzureCredential
+
+# Example user function
+from user_functions import fetch_current_datetime
+
+# Import AzureLogicAppTool and the function factory from user_logic_apps
+from user_logic_apps import AzureLogicAppTool, create_send_email_function
+
+# [START register_logic_app]
+
+# Create the assistants client
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+# Read the subscription ID and resource group of the Logic App from environment variables
+subscription_id = os.environ["SUBSCRIPTION_ID"]
+resource_group = os.environ["RESOURCE_GROUP_NAME"]
+
+# Logic App details
+logic_app_name = "<LOGIC_APP_NAME>"
+trigger_name = "<TRIGGER_NAME>"
+
+# Create and initialize AzureLogicAppTool utility
+logic_app_tool = AzureLogicAppTool(subscription_id, resource_group)
+logic_app_tool.register_logic_app(logic_app_name, trigger_name)
+print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.")
+
+# Create the specialized "send_email_via_logic_app" function for your assistant tools
+send_email_func = create_send_email_function(logic_app_tool, logic_app_name)
+
+# Prepare the function tools for the assistant
+functions_to_use: Set = {
+    fetch_current_datetime,
+    send_email_func,  # This references the AzureLogicAppTool instance via closure
+}
+# [END register_logic_app]
+
+with assistants_client:
+    # Create an assistant
+    functions = FunctionTool(functions=functions_to_use)
+    toolset = ToolSet()
+    toolset.add(functions)
+
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="SendEmailAssistant",
+        instructions="You are a specialized assistant for sending emails.",
+        toolset=toolset,
+    )
+    print(f"Created assistant, ID: {assistant.id}")
+
+    # Create a thread for communication
+    thread = assistants_client.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create a message in the thread
+    message = assistants_client.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, please send an email to <RECIPIENT_EMAIL> with the date and time in '%Y-%m-%d %H:%M:%S' format.",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process an assistant run in the thread
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the assistant when done
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    # Fetch and log all messages
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py
new file mode 100644
index 000000000000..9344ca64e2dd
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py
@@ -0,0 +1,114 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with the + OpenAPI tool from the Azure Assistants service using a synchronous client. + To learn more about OpenAPI specs, visit https://learn.microsoft.com/openapi + +USAGE: + python sample_assistants_openapi.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity jsonref + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +import jsonref +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import OpenApiTool, OpenApiAnonymousAuthDetails + + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) +# [START create_assistant_with_openapi] + +with open("./weather_openapi.json", "r") as f: + openapi_weather = jsonref.loads(f.read()) + +with open("./countries.json", "r") as f: + openapi_countries = jsonref.loads(f.read()) + +# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) +auth = OpenApiAnonymousAuthDetails() + +# Initialize assistant OpenApi tool using the read in OpenAPI spec +openapi_tool = OpenApiTool( + name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth +) +openapi_tool.add_definition( + name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth +) + +# Create assistant with OpenApi tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=openapi_tool.definitions, + ) + # [END create_assistant_with_openapi] + + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What's the weather in Seattle and What is the name and population of the country that uses currency with abbreviation THB?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + run_steps = assistants_client.list_run_steps(thread_id=thread.id, run_id=run.id) + + # Loop through each step + for step in run_steps.data: + print(f"Step {step['id']} status: {step['status']}") + + # Check if there are tool calls in the step details + step_details = step.get("step_details", {}) + tool_calls = step_details.get("tool_calls", []) + + if tool_calls: + print(" Tool calls:") + for call in tool_calls: + print(f" Tool Call ID: {call.get('id')}") + print(f" Type: {call.get('type')}") + + function_details = call.get("function", {}) + if function_details: + print(f" Function name: {function_details.get('name')}") + print() # add an extra newline 
between steps
+
+    # Delete the assistant when done
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    # Fetch and log all messages
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py
new file mode 100644
index 000000000000..b460379733d0
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py
@@ -0,0 +1,96 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_assistants_openapi_connection_auth.py
+
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with the
+    OpenAPI tool from the Azure Assistants service using a synchronous client, using
+    custom key authentication against the TripAdvisor API.
+    To learn more about OpenAPI specs, visit https://learn.microsoft.com/openapi
+
+USAGE:
+    python sample_assistants_openapi_connection_auth.py
+
+    Before running the sample:
+
+    Set up an account at https://www.tripadvisor.com/developers and get an API key.
+
+    Set up a custom key connection following the steps at
+    https://aka.ms/azsdk/azure-ai-assistants/custom-key-setup
+
+    Save the ID of that connection as the OPENAPI_CONNECTION_ID environment variable
+
+    pip install azure-ai-assistants azure-identity jsonref
+
+    Set these environment variables with your own values:
+    PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    OPENAPI_CONNECTION_ID - the connection ID for the OpenAPI connection, taken from Azure AI Foundry.
+    MODEL_DEPLOYMENT_NAME - the name of the model deployment in the project to run the Assistant against.
+"""
+
+import os
+import jsonref
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.assistants.models import OpenApiTool, OpenApiConnectionAuthDetails, OpenApiConnectionSecurityScheme
+
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+model_name = os.environ["MODEL_DEPLOYMENT_NAME"]
+connection_id = os.environ["OPENAPI_CONNECTION_ID"]
+
+print(f"Using OpenAPI connection: {connection_id}")
+
+with open("./tripadvisor_openapi.json", "r") as f:
+    openapi_spec = jsonref.loads(f.read())
+
+# Create Auth object for the OpenApiTool (note that connection or managed identity auth requires additional setup in Azure)
+auth = OpenApiConnectionAuthDetails(security_scheme=OpenApiConnectionSecurityScheme(connection_id=connection_id))
+
+# Initialize an Assistant OpenApi tool using the TripAdvisor OpenAPI spec loaded above
+openapi = OpenApiTool(
+    name="get_tripadvisor_content", spec=openapi_spec, description="Retrieve TripAdvisor content such as hotel reviews", auth=auth
+)
+
+# Create an Assistant with OpenApi tool and process Assistant run
+with assistants_client:
+    assistant = assistants_client.create_assistant(
+        model=model_name, name="my-assistant", instructions="You are a helpful assistant", tools=openapi.definitions
+    )
+    print(f"Created assistant, ID: {assistant.id}")
+
+    # Create thread for communication
+    thread = assistants_client.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = assistants_client.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Summarize the reviews for the top rated hotel in Paris",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process an Assistant run in thread with tools
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the Assistant when done
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    # Fetch and log all messages
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py
new file mode 100644
index 000000000000..bd958ae0f360
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py
@@ -0,0 +1,82 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with a toolset from
+    the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_run_with_toolset.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import FunctionTool, ToolSet, CodeInterpreterTool +from user_functions import user_functions + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Create assistant with toolset and process assistant run +with assistants_client: + # Initialize assistant toolset with user functions and code interpreter + # [START create_assistant_toolset] + functions = FunctionTool(user_functions) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # [END create_assistant_toolset] + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + # [START create_and_process_run] + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + # [END create_and_process_run] + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py new file mode 100644 index 000000000000..c8cadc69cd73 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py @@ -0,0 +1,80 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_assistants_sharepoint.py + +DESCRIPTION: + This sample demonstrates how to use assistant operations with the + Sharepoint tool from the Azure Assistants service using a synchronous client. + The sharepoint tool is currently available only to whitelisted customers. + For access and onboarding instructions, please contact azureassistants-preview@microsoft.com. + +USAGE: + python sample_assistants_sharepoint.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set this environment variables with your own values: + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import SharepointTool + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
+# Sign in to your Azure subscription via the Azure CLI (az login) and set the
+# environment variables listed in the USAGE section before running.
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+# Initialize the SharePoint tool with your SharePoint connection ID (replace the placeholder value below)
+sharepoint = SharepointTool(connection_id="sharepoint_connection_name")
+
+# Create assistant with SharePoint tool and process assistant run
+with assistants_client:
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=sharepoint.definitions,
+        headers={"x-ms-enable-preview": "true"},
+    )
+    print(f"Created assistant, ID: {assistant.id}")
+
+    # Create thread for communication
+    thread = assistants_client.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = assistants_client.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, summarize the key points of the <document_name>",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process assistant run in thread with tools
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the assistant when done
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    # Fetch and log all messages
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py
new file mode 100644
index 000000000000..a8aac8f644ea
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py
@@ -0,0 +1,101 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with an event handler in streaming from
+    the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_stream_eventhandler.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+
+from azure.ai.assistants.models import (
+    AssistantEventHandler,
+    MessageDeltaChunk,
+    ThreadMessage,
+    ThreadRun,
+    RunStep,
+)
+
+from typing import Any, Optional
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+
+# [START stream_event_handler]
+# With AssistantEventHandler[str], the return type of each event callback is an optional string.
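+# A value returned from a callback is surfaced by the stream iterator as the third
+# element of each (event_type, event_data, func_return) tuple, as shown below.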
+class MyEventHandler(AssistantEventHandler[str]): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: + return f"Text delta received: {delta.text}" + + def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: + return f"ThreadMessage created. ID: {message.id}, Status: {message.status}" + + def on_thread_run(self, run: "ThreadRun") -> Optional[str]: + return f"ThreadRun status: {run.status}" + + def on_run_step(self, step: "RunStep") -> Optional[str]: + return f"RunStep type: {step.type}, Status: {step.status}" + + def on_error(self, data: str) -> Optional[str]: + return f"An error occurred. Data: {data}" + + def on_done(self) -> Optional[str]: + return "Stream completed." + + def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: + return f"Unhandled Event Type: {event_type}, Data: {event_data}" + + +# [END stream_event_handler] + + +with assistants_client: + # Create an assistant and run stream with event handler + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created assistant, assistant ID {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + # [START create_stream] + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + for event_type, event_data, func_return in stream: + print(f"Received data.") + print(f"Streaming receive Event Type: {event_type}") + print(f"Event Data: {str(event_data)[:100]}...") + print(f"Event Function return: {func_return}\n") + # [END create_stream] + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..24cf30480aef --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py @@ -0,0 +1,111 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler in streaming from + the Azure Assistants service using a synchronous client with Azure Monitor tracing. + View the results in the "Tracing" tab in your Azure AI Foundry project page. + +USAGE: + python sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat
+       messages, which may contain personal data. False by default.
+    4) AI_APPINSIGHTS_CONNECTION_STRING - the Application Insights connection string used to
+       send traces to your Azure Monitor resource.
+
+"""
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.assistants.models import (
+    AssistantEventHandler,
+    MessageDeltaChunk,
+    ThreadMessage,
+    ThreadRun,
+    RunStep,
+)
+from azure.ai.assistants.telemetry import enable_telemetry
+from typing import Any
+from opentelemetry import trace
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+
+class MyEventHandler(AssistantEventHandler):
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        print(f"Text delta received: {delta.text}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        if len(message.content):
+            print(
+                f"ThreadMessage created. ID: {message.id}, "
+                f"Status: {message.status}, Content: {message.content[0].as_dict()}"
+            )
+        else:
+            print(f"ThreadMessage created. ID: {message.id}, " f"Status: {message.status}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    def on_done(self) -> None:
+        print("Stream completed.")
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+# Enable Azure Monitor tracing
+application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"]
+configure_azure_monitor(connection_string=application_insights_connection_string)
+
+scenario = os.path.basename(__file__)
+tracer = trace.get_tracer(__name__)
+
+# Enable additional instrumentation
+enable_telemetry()
+
+with tracer.start_as_current_span(scenario):
+    with assistants_client:
+        # Create an assistant and run stream with event handler
+        assistant = assistants_client.create_assistant(
+            model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created assistant, assistant ID {assistant.id}")
+
+        thread = assistants_client.create_thread()
+        print(f"Created thread, thread ID {thread.id}")
+
+        message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        print(f"Created message, message ID {message.id}")
+
+        with assistants_client.create_stream(
+            thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler()
+        ) as stream:
+            stream.until_done()
+
+        assistants_client.delete_assistant(assistant.id)
+        print("Deleted assistant")
+
+        messages = assistants_client.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py
new file mode 100644
index 000000000000..a495b46dd07b
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py
@@ -0,0 +1,127 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use Assistant operations with an event handler and + the Bing grounding tool. It uses a synchronous client. + +USAGE: + python sample_assistants_stream_eventhandler_with_bing_grounding.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_BING_CONNECTION_ID - The connection id of the Bing connection, as found in the "Connected resources" tab + in your Azure AI Foundry project. +""" + +import os +from typing import Any +from azure.identity import DefaultAzureCredential +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, + AssistantEventHandler, + BingGroundingTool, + MessageRole, + MessageDeltaTextUrlCitationAnnotation, + MessageDeltaTextContent, +) + + +# When using FunctionTool with ToolSet in assistant creation, the tool call events are handled inside the create_stream +# method and functions gets automatically called by default. +class MyEventHandler(AssistantEventHandler): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + if delta.delta.content and isinstance(delta.delta.content[0], MessageDeltaTextContent): + delta_text_content = delta.delta.content[0] + if delta_text_content.text and delta_text_content.text.annotations: + for delta_annotation in delta_text_content.text.annotations: + if isinstance(delta_annotation, MessageDeltaTextUrlCitationAnnotation): + print( + f"URL citation delta received: [{delta_annotation.url_citation.title}]({delta_annotation.url_citation.url})" + ) + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + + bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] + print(f"Bing Connection ID: {bing_connection_id}") + + # Initialize assistant bing tool and add the connection id + bing = BingGroundingTool(connection_id=bing_connection_id) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=bing.definitions, + ) + print(f"Created assistant, ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, + role=MessageRole.USER, + content="How does wikipedia explain Euler's Identity?", + ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + response_message = assistants_client.list_messages(thread_id=thread.id).get_last_message_by_role( + MessageRole.ASSISTANT + ) + if response_message: + for text_message in response_message.text_messages: + print(f"Assistant response: {text_message.text.value}") + for annotation in response_message.url_citation_annotations: + print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py new file mode 100644 index 000000000000..16a77a2e7ccc --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py @@ -0,0 +1,112 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler in streaming from + the Azure Assistants service using a synchronous client with tracing to console. + +USAGE: + python sample_assistants_stream_eventhandler_with_console_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" + +import os, sys +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ( + AssistantEventHandler, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) +from azure.ai.assistants.telemetry import enable_telemetry +from typing import Any +from opentelemetry import trace + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + + +class MyEventHandler(AssistantEventHandler): + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + if len(message.content): + print( + f"ThreadMessage created. ID: {message.id}, " + f"Status: {message.status}, Content: {message.content[0].as_dict()}" + ) + else: + print(f"ThreadMessage created. ID: {message.id}, " f"Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +# Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# enable_telemetry(destination="http://localhost:4317") +enable_telemetry(destination=sys.stdout) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: + # Create an assistant and run stream with event handler + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created assistant, assistant ID {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py new file mode 100644 index 000000000000..8803da00427d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py @@ -0,0 +1,137 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler and toolset from + the Azure Assistants service using a synchronous client. 
+
+USAGE:
+    python sample_assistants_stream_eventhandler_with_functions.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+from typing import Any
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import (
+    AssistantEventHandler,
+    FunctionTool,
+    MessageDeltaChunk,
+    RequiredFunctionToolCall,
+    RunStep,
+    SubmitToolOutputsAction,
+    ThreadMessage,
+    ThreadRun,
+    ToolOutput,
+)
+from azure.identity import DefaultAzureCredential
+from user_functions import user_functions
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+
+class MyEventHandler(AssistantEventHandler):
+
+    def __init__(self, functions: FunctionTool) -> None:
+        super().__init__()
+        self.functions = functions
+
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        print(f"Text delta received: {delta.text}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+            tool_calls = run.required_action.submit_tool_outputs.tool_calls
+
+            tool_outputs = []
+            for tool_call in tool_calls:
+                if isinstance(tool_call, RequiredFunctionToolCall):
+                    try:
+                        output = self.functions.execute(tool_call)
+                        tool_outputs.append(
+                            ToolOutput(
+                                tool_call_id=tool_call.id,
+                                output=output,
+                            )
+                        )
+                    except Exception as e:
+                        print(f"Error executing tool_call {tool_call.id}: {e}")
+
+            print(f"Tool outputs: {tool_outputs}")
+            if tool_outputs:
+                # Once we receive 'requires_action' status, the next event will be DONE.
+                # Here we associate our existing event handler with the next stream.
+                assistants_client.submit_tool_outputs_to_stream(
+                    thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self
+                )
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    def on_done(self) -> None:
+        print("Stream completed.")
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+with assistants_client:
+
+    # [START create_assistant_with_function_tool]
+    functions = FunctionTool(user_functions)
+
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=functions.definitions,
+    )
+    # [END create_assistant_with_function_tool]
+    print(f"Created assistant, ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID {thread.id}")
+
+    message = assistants_client.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, send an email with the datetime and weather information in New York?
Also let me know the details.", + ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler(functions) + ) as stream: + stream.until_done() + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py new file mode 100644 index 000000000000..4f9bb8fc9e59 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py @@ -0,0 +1,109 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler and toolset from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_eventhandler_with_toolset.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, +) +from azure.ai.assistants.models import AssistantEventHandler +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import FunctionTool, ToolSet + +import os +from typing import Any +from user_functions import user_functions + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + + +# When using FunctionTool with ToolSet in assistant creation, the tool call events are handled inside the create_stream +# method and functions gets automatically called by default. +class MyEventHandler(AssistantEventHandler): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +with assistants_client: + # [START create_assistant_with_function_tool] + functions = FunctionTool(user_functions) + toolset = ToolSet() + toolset.add(functions) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # [END create_assistant_with_function_tool] + print(f"Created assistant, ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? Also let me know the details", + ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py new file mode 100644 index 000000000000..7f55066d9c97 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py @@ -0,0 +1,85 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations in streaming from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_iteration.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ( + AssistantStreamEvent, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + # Create an assistant and run stream with iteration + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created assistant, ID {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + # [START iterate_stream] + with assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + # [END iterate_stream] + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py new file mode 100644 index 000000000000..b90c016a58fe --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py @@ -0,0 +1,117 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use Assistant operations with the Bing grounding + tool, and iteration in streaming. It uses a synchronous client. + +USAGE: + python sample_assistants_stream_iteration_with_bing_grounding.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, as found in the "Connected resources" tab + in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import AssistantStreamEvent, RunStepDeltaChunk +from azure.ai.assistants.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, + BingGroundingTool, + MessageRole, + MessageDeltaTextContent, + MessageDeltaTextUrlCitationAnnotation, +) +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] + bing = BingGroundingTool(connection_id=bing_connection_id) + print(f"Bing Connection ID: {bing_connection_id}") + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=bing.definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role=MessageRole.USER, content="How does wikipedia explain Euler's Identity?" + ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + if event_data.delta.content and isinstance(event_data.delta.content[0], MessageDeltaTextContent): + delta_text_content = event_data.delta.content[0] + if delta_text_content.text and delta_text_content.text.annotations: + for delta_annotation in delta_text_content.text.annotations: + if isinstance(delta_annotation, MessageDeltaTextUrlCitationAnnotation): + print( + f"URL citation delta received: [{delta_annotation.url_citation.title}]({delta_annotation.url_citation.url})" + ) + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + response_message = assistants_client.list_messages(thread_id=thread.id).get_last_message_by_role( + MessageRole.ASSISTANT + ) + if response_message: + for text_message in response_message.text_messages: + print(f"Assistant response: {text_message.text.value}") + for annotation in response_message.url_citation_annotations: + print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py new file mode 100644 index 000000000000..5c2d76b09573 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py @@ -0,0 +1,105 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with file search tools and iteration in streaming from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_iteration_with_file_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import AssistantStreamEvent, FileSearchTool, RunStepDeltaChunk +from azure.ai.assistants.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + + # Upload file and create vector store + # [START upload_file_create_vector_store_and_assistant_with_file_search_tool] + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") + + vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating assistant + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + for annotation in event_data.file_citation_annotations: + print( + f"Citation {annotation.text} from file ID: {annotation.file_citation.file_id}, start index: {annotation.start_index}, end index: {annotation.end_index}" + ) + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py new file mode 100644 index 000000000000..fcd5300a27f0 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py @@ -0,0 +1,96 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with toolset and iteration in streaming from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_iteration_with_toolset.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
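+
+    The sample imports `user_functions` from a local user_functions.py helper that is not part of
+    this diff. A minimal sketch of such a module, with hypothetical function names chosen here for
+    illustration, could look like:
+
+        # user_functions.py (hypothetical sketch; the samples ship their own version)
+        import json
+        from datetime import datetime
+        from typing import Any, Callable, Set
+
+        def fetch_current_datetime() -> str:
+            # Return the current time as a JSON string; FunctionTool builds the tool
+            # definition from the function signature, so keep parameters simple.
+            return json.dumps({"current_time": datetime.now().isoformat()})
+
+        # The set of callables that FunctionTool(user_functions) exposes as tools.
+        user_functions: Set[Callable[..., Any]] = {fetch_current_datetime}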
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import AssistantStreamEvent, RunStepDeltaChunk +from azure.ai.assistants.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, +) +from azure.ai.assistants.models import FunctionTool, ToolSet +from azure.identity import DefaultAzureCredential +from user_functions import user_functions + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +functions = FunctionTool(user_functions) +toolset = ToolSet() +toolset.add(functions) + +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, what's the time?") + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py new file mode 100644 index 000000000000..cda531f4cf2a --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py @@ -0,0 +1,102 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to override the base event handler and parse the events and iterate through them + In your use case, you might not want to write the iteration code similar to sample_assistants_stream_iteration_async.py. + If you have multiple places to call create_stream, you might find the iteration code cumbersome. + This example shows how to override the base event handler, parse the events, and iterate through them, which can be reused in multiple create_stream calls to help keep the code clean. 
+
+USAGE:
+    python sample_assistants_stream_with_base_override_eventhandler.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import json
+import os
+from typing import Generator, Optional
+
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import (
+    AssistantStreamEvent,
+    BaseAssistantEventHandler,
+    MessageDeltaChunk,
+    MessageDeltaTextContent,
+)
+from azure.identity import DefaultAzureCredential
+
+
+# Our goal is to parse the event data string and return the text chunk for each iteration.
+# Because we want each iteration to yield a string, we define Optional[str] as the generic type for
+# BaseAssistantEventHandler and override the _process_event method to return a string.
+# The get_stream_chunks method yields the chunks as strings, because each iteration is a string.
+class MyEventHandler(BaseAssistantEventHandler[Optional[str]]):
+
+    def _process_event(self, event_data_str: str) -> Optional[str]:  # type: ignore[return]
+        event_lines = event_data_str.strip().split("\n")
+        event_type: Optional[str] = None
+        event_data = ""
+        for line in event_lines:
+            if line.startswith("event:"):
+                event_type = line.split(":", 1)[1].strip()
+            elif line.startswith("data:"):
+                event_data = line.split(":", 1)[1].strip()
+
+        if not event_type:
+            raise ValueError("Event type not specified in the event data.")
+
+        if event_type == AssistantStreamEvent.THREAD_MESSAGE_DELTA.value:
+
+            event_obj: MessageDeltaChunk = MessageDeltaChunk(**json.loads(event_data))
+
+            for content_part in event_obj.delta.content:
+                if isinstance(content_part, MessageDeltaTextContent):
+                    if content_part.text is not None:
+                        return content_part.text.value
+        return None
+
+    def get_stream_chunks(self) -> Generator[str, None, None]:
+        for chunk in self:
+            if chunk:
+                yield chunk
+
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant"
+    )
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID {thread.id}")
+
+    message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+    print(f"Created message, message ID {message.id}")
+
+    with assistants_client.create_stream(
+        thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler()
+    ) as stream:
+        for chunk in stream.get_stream_chunks():
+            print(chunk)
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py
new file mode 100644
index 000000000000..6eaf3a02d496
--- /dev/null
+++
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py
@@ -0,0 +1,100 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to create a vector store from a list of enterprise data sources and
+    use it for file search.
+
+USAGE:
+    python sample_assistants_vector_store_batch_enterprise_file_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity azure-ai-ml
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) AZURE_BLOB_URI - The URI of the Azure blob asset used below as the vector store data source.
+"""
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType
+from azure.identity import DefaultAzureCredential
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+
+    # We will use an existing Azure blob asset, referenced by AZURE_BLOB_URI, as the data source for
+    # the vector store creation.
+    asset_uri = os.environ["AZURE_BLOB_URI"]
+
+    # [START attach_files_to_store]
+    # Create a vector store with no file and wait for it to be processed
+    vector_store = assistants_client.create_vector_store_and_poll(data_sources=[], name="sample_vector_store")
+    print(f"Created vector store, vector store ID: {vector_store.id}")
+
+    ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+    # Add the data source to the vector store; alternatively, supply data_sources at vector store creation
+    vector_store_file_batch = assistants_client.create_vector_store_file_batch_and_poll(
+        vector_store_id=vector_store.id, data_sources=[ds]
+    )
+    print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
+
+    # Create a file search tool
+    file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+    # [END attach_files_to_store]
+
+    # Note that FileSearchTool must be added as a tool, together with its tool_resources, or the
+    # assistant will be unable to search the file
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=file_search_tool.definitions,
+        tool_resources=file_search_tool.resources,
+    )
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = assistants_client.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+ ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + assistants_client.update_assistant( + assistant_id=assistant.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) + print(f"Updated assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py new file mode 100644 index 000000000000..97e206e1f5bf --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py @@ -0,0 +1,102 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations to add files to an existing vector store and perform search from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_vector_store_batch_file_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
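+
+    The sample asks the same question twice: first while the vector store is attached to the
+    assistant, and again after the vector store has been removed from the file search tool and the
+    assistant has been updated without it, so the two responses can be compared.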
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FileSearchTool, FilePurpose +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + + # Upload a file and wait for it to be processed + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.ASSISTANTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create a vector store with no file and wait for it to be processed + vector_store = assistants_client.create_vector_store_and_poll(data_sources=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Add the file to the vector store or you can supply file ids in the vector store creation + vector_store_file_batch = assistants_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, file_ids=[file.id] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # Create a file search tool + # [START create_assistant_with_tools_and_tool_resources] + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + # [END create_assistant_with_tools_and_tool_resources] + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + assistants_client.update_assistant( + assistant_id=assistant.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) + print(f"Updated assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Created run, run ID: {run.id}")
+
+    file_search_tool.remove_vector_store(vector_store.id)
+    print(f"Removed vector store from file search, vector store ID: {vector_store.id}")
+
+    assistants_client.update_assistant(
+        assistant_id=assistant.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources
+    )
+    print(f"Updated assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = assistants_client.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Created run, run ID: {run.id}")
+
+    assistants_client.delete_vector_store(vector_store.id)
+    print("Deleted vector store")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py
new file mode 100644
index 000000000000..bdbee38bc942
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py
@@ -0,0 +1,80 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to add files to the assistant during vector store creation.
+
+USAGE:
+    python sample_assistants_vector_store_file_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import FileSearchTool, FilePurpose
+from azure.identity import DefaultAzureCredential
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+
+    # Upload a file and wait for it to be processed
+    file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.ASSISTANTS)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    # Create a vector store with the uploaded file and wait for it to be processed
+    vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store")
+    print(f"Created vector store, vector store ID: {vector_store.id}")
+
+    # Create a file search tool
+    file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+    # Note that FileSearchTool must be added as a tool, together with its tool_resources, or the
+    # assistant will be unable to search the file
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=file_search_tool.definitions,
+        tool_resources=file_search_tool.resources,
+    )
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = assistants_client.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Created run, run ID: {run.id}")
+
+    assistants_client.delete_vector_store(vector_store.id)
+    print("Deleted vector store")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    messages = assistants_client.list_messages(thread_id=thread.id)
+
+    for message in reversed(messages.data):
+        # Encode and then decode the message to strip characters that print may not handle correctly.
+        clean_message = "\n".join(
+            text_msg.text.value.encode("ascii", "ignore").decode("utf-8") for text_msg in message.text_messages
+        )
+        print(f"Role: {message.role} Message: {clean_message}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py
new file mode 100644
index 000000000000..3262175d194a
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py
@@ -0,0 +1,105 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations with the code interpreter through a file
+    attachment from the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_with_code_interpreter_file_attachment.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
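+
+    The sample reads nifty_500_quarterly_results.csv from the current working directory, and any
+    chart image produced by the assistant is saved to the current working directory via save_file.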
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import CodeInterpreterTool, MessageAttachment +from azure.ai.assistants.models import FilePurpose, MessageRole +from azure.identity import DefaultAzureCredential +from pathlib import Path + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + + # Upload a file and wait for it to be processed + file = assistants_client.upload_file_and_poll( + file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS + ) + print(f"Uploaded file, file ID: {file.id}") + + # [START create_assistant_and_message_with_code_interpreter_file_attachment] + # Notice that CodeInterpreter must be enabled in the assistant creation, + # otherwise the assistant will not be able to see the file attachment for code interpretation + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=CodeInterpreterTool().definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create an attachment + attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) + + # Create a message + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + attachments=[attachment], + ) + # [END create_assistant_and_message_with_code_interpreter_file_attachment] + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + assistants_client.delete_file(file.id) + print("Deleted file") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for image_content in messages.image_contents: + print(f"Image File ID: {image_content.image_file.file_id}") + file_name = f"{image_content.image_file.file_id}_image_file.png" + assistants_client.save_file(file_id=image_content.image_file.file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + for file_path_annotation in messages.file_path_annotations: + print(f"File Paths:") + print(f"Type: {file_path_annotation.type}") + print(f"Text: {file_path_annotation.text}") + print(f"File ID: {file_path_annotation.file_path.file_id}") + print(f"Start Index: {file_path_annotation.start_index}") + print(f"End Index: {file_path_annotation.end_index}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py new file mode 100644 index 000000000000..08207bdb0975 --- /dev/null +++ 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py
@@ -0,0 +1,71 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations to create messages with file search
+    attachments from the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_with_enterprise_search_attachment.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import FilePurpose, FileSearchTool, MessageAttachment
+from azure.identity import DefaultAzureCredential
+
+assistants_client = AssistantsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with assistants_client:
+
+    # Upload a file and wait for it to be processed
+    file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.ASSISTANTS)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    # Create assistant
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+    )
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    # Create a message with the file search attachment
+    # Notice that a vector store is created temporarily when using attachments, with a default
+    # expiration policy of seven days.
+    # [START create_message_with_attachment]
+    attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
+    message = assistants_client.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]
+    )
+    # [END create_message_with_attachment]
+    print(f"Created message, message ID: {message.id}")
+
+    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+    print(f"Created run, run ID: {run.id}")
+
+    assistants_client.delete_file(file.id)
+    print("Deleted file")
+
+    assistants_client.delete_assistant(assistant.id)
+    print("Deleted assistant")
+
+    messages = assistants_client.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py
new file mode 100644
index 000000000000..08207bdb0975
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py
@@ -0,0 +1,71 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations to create messages with file search attachments from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_with_file_search_attachment.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FilePurpose, FileSearchTool, MessageAttachment +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + + # Upload a file and wait for it to be processed + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.ASSISTANTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create assistant + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. + # [START create_message_with_attachment] + attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] + ) + # [END create_message_with_attachment] + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + assistants_client.delete_file(file.id) + print("Deleted file") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py new file mode 100644 index 000000000000..e1afcedff945 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py @@ -0,0 +1,91 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with file searching from + the Azure Assistants service using a synchronous client. The file is attached to thread. + +USAGE: + python sample_assistants_with_resources_in_thread.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
+ 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FileSearchTool +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with assistants_client: + + # Upload file and create vector store + # [START create_assistant_and_thread_for_file_search] + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") + + vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating assistant + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + ) + + print(f"Created assistant, ID: {assistant.id}") + + # Create thread with file resources. + # If the assistant has multiple threads, only this thread can search this file. + thread = assistants_client.create_thread(tool_resources=file_search.resources) + # [END create_assistant_and_thread_for_file_search] + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" 
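+        # The answer is expected to come from product_info_1.md; per the thread's tool_resources,
+        # only this thread can search that file.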
+ ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + # [START teardown] + # Delete the file when done + assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + assistants_client.delete_file(file_id=file.id) + print("Deleted file") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + # [END teardown] + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json b/sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json new file mode 100644 index 000000000000..d7e495f7a061 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json @@ -0,0 +1,1606 @@ +{ + "openapi": "3.0.1", + "servers": [ + { + "url": "https://api.content.tripadvisor.com/api" + } + ], + "info": { + "version": "1.0.0", + "title": "Content API - TripAdvisor(Knowledge)", + "description": "SSP includes Locations Details, Locations Photos, Locations Reviews, Location Search" + }, + "paths": { + "/v1/location/{locationId}/details": { + "get": { + "summary": "Location Details", + "description": "A Location Details request returns comprehensive information about a location (hotel, restaurant, or an attraction) such as name, address, rating, and URLs for the listing on Tripadvisor.", + "operationId": "getLocationDetails", + "tags": [ + "Location Details" + ], + "parameters": [ + { + "name": "locationId", + "in": "path", + "description": "A unique identifier for a location on Tripadvisor. The location ID can be obtained using the Location Search.", + "required": true, + "schema": { + "type": "integer", + "format": "int32" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. 
\"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + }, + { + "name": "currency", + "in": "query", + "description": "The currency code to use for request and response (should follow ISO 4217).", + "required": false, + "schema": { + "type": "string", + "default": "USD" + } + } + ], + "responses": { + "200": { + "description": "Details for the location", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "location_id": { + "description": "Unique Tripadvisor location ID of the destination or POI", + "type": "integer", + "format": "int32" + }, + "name": { + "description": "Name of the POI as listed on Tripadvisor", + "type": "string" + }, + "description": { + "description": "Description of the POI as listed on Tripadvisor", + "type": "string" + }, + "web_url": { + "description": "Link to the POI detail page on Tripadvisor. Link is localized to the correct domain if a language other than English is requested.", + "type": "string" + }, + "address_obj": { + "description": "Object containing address data for this location", + "type": "object", + "properties": { + "street1": { + "type": "string", + "description": "The street name" + }, + "street2": { + "type": "string", + "description": "The street name continuation" + }, + "city": { + "type": "string", + "description": "The city name" + }, + "state": { + "type": "string", + "description": "The state" + }, + "country": { + "type": "string", + "description": "The country" + }, + "postalcode": { + "type": "string", + "description": "The address postal code" + }, + "address_string": { + "type": "string", + "description": "The address in one single sentence" + } + } + }, + "ancestors": { + "description": "Ancestors describe where the POI or destination lives within the Tripadvisor destination or geo hierarchy.From this, you can derive the city where a POI is located, as well as state/province/region and country.", + "type": "array", + "items": { + "type": "object", + "properties": { + "abbrv": { + "description": "The ancestor location abbreviation", + "type": "string" + }, + "level": { + "description": "The ancestor location level in relation to the location", + "type": "string" + }, + "name": { + "description": "The ancestor location name", + "type": "string" + }, + "location_id": { + "description": "The ancestor location location identifier", + "type": "integer", + "format": "int32" + } + } + } + }, + "latitude": { + "description": "The latitude of this location in degrees, if available", + "type": "number" + }, + "longitude": { + "description": "The longitude of this location in degrees, if available", + "type": "number" + }, + "timezone": { + "description": "The timezone of the location", + "type": "string" + }, + "email": { + "description": "The email of the location, if available", + "type": "string" + }, + "phone": { + "description": "The phone number of the location, if available", + "type": "string" + }, + "website": { + "description": "The website of 
the location, if available", + "type": "string" + }, + "write_review": { + "description": "Link to the review form for this specific POI on Tripadvisor. Link is localized to the correct domain if a language other than English is requested.", + "type": "string" + }, + "ranking_data": { + "description": "Describes a POI's Popularity Index ranking on Tripadvisor, which compares places of interest (accomodations, restaurants, and attractions) within the same destination based on their popularity.This is measured by the quality, quantity, and recency of their review content on Tripadvisor.", + "type": "object", + "properties": { + "geo_location_id": { + "description": "The destination id", + "type": "integer", + "format": "int32" + }, + "ranking_string": { + "description": "The description of the ranking", + "type": "string" + }, + "geo_location_name": { + "description": "The destination name", + "type": "string" + }, + "ranking_out_of": { + "description": "The total number of locations on the ranking score", + "type": "integer", + "format": "int32" + }, + "ranking": { + "description": "The location ranking", + "type": "integer", + "format": "int32" + } + } + }, + "rating": { + "description": "Overall rating for this POI. Not applicable to geographic locations. Rating levels are defined as follows:5: Excellent4: Very good3: Average2: Poor1: Terrible", + "type": "number" + }, + "rating_image_url": { + "description": "URL to the bubble rating image for this location. Overall Bubble Ratings must be displayed using the Tripadvisor bubble rating image with the owl icon.", + "type": "string" + }, + "num_reviews": { + "description": "Count of total reviews published for this location", + "type": "string" + }, + "review_rating_count": { + "description": "Count of reviews for this location at each traveler rating level (1,2,3,4,5)", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "subratings": { + "type": "object", + "additionalProperties": { + "allOf": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + { + "type": "object", + "properties": { + "rating_image_url": { + "type": "string" + }, + "value": { + "type": "number", + "format": "float" + } + } + } + ] + } + }, + "photo_count": { + "description": "The count of photos for this POI published on Tripadvisor", + "type": "integer", + "format": "int32" + }, + "see_all_photos": { + "description": "Link to open all photos posted for this POI in a photo viewer on Tripadvisor. Link is localized to the correct domain if a language other than English is requested.", + "type": "string" + }, + "price_level": { + "description": "The relative price level for the POI. Not available for all POIs. This string is localized to other currency symbols (e.g. 
££££ or €€€€) if a language other than English (en_US) is requested or if a specific currency is selected.", + "type": "string" + }, + "hours": { + "description": "Provides localized opening hours for Restaurants and Attractions, using ISO 8601 format", + "type": "object", + "properties": { + "periods": { + "type": "array", + "items": { + "type": "object", + "properties": { + "open": { + "description": "The day and times intervals in which the location is open", + "type": "object", + "properties": { + "day": { + "type": "integer", + "format": "int32" + }, + "time": { + "type": "string" + } + } + }, + "close": { + "description": "The day and times intervals in which the location is closed", + "type": "object", + "properties": { + "day": { + "type": "integer", + "format": "int32" + }, + "time": { + "type": "string" + } + } + } + } + } + }, + "weekday_text": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "amenities": { + "description": "The amenities provided by this hotel", + "type": "array", + "items": { + "type": "string" + } + }, + "features": { + "description": "The features provided by this restaurant", + "type": "array", + "items": { + "type": "string" + } + }, + "cuisine": { + "description": "The cuisines of this restaurant", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + } + }, + "parent_brand": { + "description": "The parent brand of this hotel", + "type": "string" + }, + "brand": { + "description": "The brand of this hotel", + "type": "string" + }, + "category": { + "description": "Each POI on Tripadvisor is classified under a \"category\" and \"subcategory\", which is included in the API response.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + "subcategory": { + "description": "Listings that are accommodations/hotels or restaurants are assigned a single subcategory.Deprecated as of February 2017 for Attractions. Refer to the \"groups\" object for the most up to date classifications.", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + } + }, + "groups": { + "description": "Hierarchical display of Attraction Groups and Categories. 
These fields are only applicable for location type \"attraction\".", + "type": "array", + "items": { + "allOf": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + { + "type": "object", + "properties": { + "categories": { + "description": "Attraction Categories", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + } + } + } + } + ] + } + }, + "styles": { + "description": "The styles of the hotel", + "type": "array", + "items": { + "type": "string" + } + }, + "neighborhood_info": { + "description": "List of neighborhoods close to the location", + "type": "array", + "items": { + "type": "object", + "properties": { + "location_id": { + "type": "string" + }, + "name": { + "type": "string" + } + } + } + }, + "trip_types": { + "description": "Each review submitted on Tripadvisor is tagged with a trip type, as designated by the reviewer.For each POI location, a breakdown of the total review count by trip type is included in the \"trip_types\" object.", + "type": "array", + "items": { + "allOf": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + { + "type": "object", + "properties": { + "value": { + "type": "string" + } + } + } + ] + } + }, + "awards": { + "description": "Returns a list of all of the awards for this location, which could include Certificate of Excellence, Travelers' Choice, and Green Leader.For each award, a small and large image will be returned as well.", + "type": "array", + "items": { + "type": "object", + "properties": { + "award_type": { + "description": "Award type name", + "type": "string" + }, + "year": { + "description": "The year in which the award was awarded", + "type": "integer", + "format": "int32" + }, + "images": { + "description": "The award image in its different sizes", + "type": "object", + "properties": { + "tiny": { + "type": "string" + }, + "small": { + "type": "string" + }, + "large": { + "type": "string" + } + } + }, + "categories": { + "description": "The categories in which the award was awarded", + "type": "array", + "items": { + "type": "string" + } + }, + "display_name": { + "type": "string" + } + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + }, + "/v1/location/{locationId}/photos": { + "get": { + "summary": "Location Photos", + "description": "The Location Photos request returns up to 5 high-quality photos for a specific location. Please note that the limits are different for the beta subscribers. 
You need to upgrade to get the higher limits mentioned here.The photos are ordered by recency.Sizes (height x width) for each photo type are as follows:Thumbnail: Fixed 50x50px, cropped, resized, and optimized by TripadvisorSmall: Fixed 150x150px, cropped, resized, and optimized by TripadvisorMedium: Max dimension 250px (can be height or width, depending on photo orientation), the other dimension is resized to maintain the aspect ratioLarge: Max dimension 550px (same rules as Medium, resized to maintain aspect ratio)Original: This is the photo in its original resolution and aspect ratio as provided by the user who submitted it.", + "operationId": "getLocationPhotos", + "tags": [ + "Location Photos" + ], + "parameters": [ + { + "name": "locationId", + "in": "path", + "description": "A unique identifier for a location on Tripadvisor. The location ID can be obtained using the Location Search.", + "required": true, + "schema": { + "type": "integer", + "format": "int32" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. \"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + }, + { + "name": "limit", + "in": "query", + "description": "The number of results to return", + "required": false, + "schema": { + "type": "number", + "format": "int32" + } + }, + { + "name": "offset", + "in": "query", + "description": "The index of the first result", + "required": false, + "schema": { + "type": "number", + "format": "int32" + } + }, + { + "name": "source", + "in": "query", + "description": "A comma-separated list of allowed photo sources. Allowed values are 'Expert', 'Management', 'Traveler'. If not specified, allow photos from all sources.", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Details for the location", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "description": "A unique ID for this photo", + "type": "integer", + "format": "int32" + }, + "is_blessed": { + "description": "Boolean whether or not this photo is blessed, i.e. 
reviewed at Tripadvisor as being a photo of exceptional quality", + "type": "boolean" + }, + "album": { + "description": "Name of the album the photo is featured in", + "type": "string" + }, + "caption": { + "description": "Caption of the photo", + "type": "string" + }, + "published_date": { + "description": "Date when this photo was published to Tripadvisor", + "type": "string" + }, + "images": { + "description": "Links to the photo in various sizes, along with the dimensions in pixels of each size", + "type": "object", + "additionalProperties": { + "type": "object", + "properties": { + "width": { + "type": "number" + }, + "url": { + "type": "string" + }, + "height": { + "type": "number" + } + } + } + }, + "source": { + "description": "Origin of the photo (Traveler, Expert, Management)", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + "user": { + "type": "object", + "properties": { + "username": { + "description": "The username that appears on the Tripadvisor website for the user", + "type": "string" + }, + "user_location": { + "type": "object", + "properties": { + "name": { + "description": "The name of the user's location", + "type": "string" + }, + "id": { + "description": "The location ID of the user's location", + "type": "string" + } + } + }, + "review_count": { + "description": "The Review Count that appears on the Tripadvisor website for the user", + "type": "integer", + "format": "int32" + }, + "reviewer_badge": { + "description": "The Reviewer Badge that appears on the Tripadvisor website for the user", + "type": "string" + }, + "avatar": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + } + }, + "paging": { + "type": "object", + "properties": { + "next": { + "type": "string" + }, + "previous": { + "type": "string" + }, + "results": { + "type": "integer", + "format": "int32" + }, + "total_results": { + "type": "integer", + "format": "int32" + }, + "skipped": { + "type": "integer", + "format": "int32" + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + }, + "/v1/location/{locationId}/reviews": { + "get": { + "summary": "Location Reviews", + "description": "The Location Reviews request returns up to 5 of the most recent reviews for a specific location. Please note that the limits are different for the beta subscribers. You need to upgrade to get the higher limits mentioned here.", + "operationId": "getLocationReviews", + "tags": [ + "Location Reviews" + ], + "parameters": [ + { + "name": "locationId", + "in": "path", + "description": "A unique identifier for a location on Tripadvisor. The location ID can be obtained using the Location Search.", + "required": true, + "schema": { + "type": "integer", + "format": "int32" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. 
\"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + }, + { + "name": "limit", + "in": "query", + "description": "The number of results to return", + "required": false, + "schema": { + "type": "number", + "format": "int32" + } + }, + { + "name": "offset", + "in": "query", + "description": "The index of the first result", + "required": false, + "schema": { + "type": "number", + "format": "int32" + } + } + ], + "responses": { + "200": { + "description": "Details for the location", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "description": "The Tripadvisor ID for the review.", + "type": "integer", + "format": "int32" + }, + "lang": { + "description": "The language of the review.", + "type": "string" + }, + "location_id": { + "description": "Unique Tripadvisor location ID of the destination or POI.", + "type": "integer", + "format": "int32" + }, + "published_date": { + "description": "The date the review was published to Tripadvisor.", + "type": "string" + }, + "rating": { + "description": "Overall rating for this POI. Not applicable to geographic locations. Rating levels are defined as follows:5: Excellent4: Very good3: Average2: Poor1: Terrible", + "type": "integer", + "format": "int32" + }, + "helpful_votes": { + "description": "The number of helpful votes", + "type": "integer", + "format": "int32" + }, + "rating_image_url": { + "description": "The URL to the bubble rating image for this location.", + "type": "string" + }, + "url": { + "description": "The URL to the review", + "type": "string" + }, + "trip_type": { + "description": "The Trip type of the review (Business, Couples, Family, Friends, Solo).", + "type": "string" + }, + "travel_date": { + "description": "The travel date of the review", + "type": "string" + }, + "text": { + "description": "The full text of the review.", + "type": "string" + }, + "title": { + "description": "The title of this review.", + "type": "string" + }, + "owner_response": { + "description": "The Management Response to this review, if one exists.", + "type": "object", + "properties": { + "id": { + "description": "The Tripadvisor ID for the owner respose.", + "type": "integer", + "format": "int32" + }, + "lang": { + "description": "The language of the review.", + "type": "string" + }, + "text": { + "description": "The full text of the review.", + "type": "string" + }, + "title": { + "description": "The title of this review.", + "type": "string" + }, + "author": { + "description": "The owners name.", + "type": "string" + }, + "published_date": { + "description": "The date the review response was published to Tripadvisor.", + "type": "string" + } + } + }, + "is_machine_translated": { + "description": "True or false depending on whether this is a machine-translated review. 
(Outputs only if partner configured for inclusion of machine-translated reviews)", + "type": "boolean" + }, + "user": { + "type": "object", + "properties": { + "username": { + "description": "The username that appears on the Tripadvisor website for the user", + "type": "string" + }, + "user_location": { + "type": "object", + "properties": { + "name": { + "description": "The name of the user's location", + "type": "string" + }, + "id": { + "description": "The location ID of the user's location", + "type": "string" + } + } + }, + "review_count": { + "description": "The Review Count that appears on the Tripadvisor website for the user", + "type": "integer", + "format": "int32" + }, + "reviewer_badge": { + "description": "The Reviewer Badge that appears on the Tripadvisor website for the user", + "type": "string" + }, + "avatar": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "subratings": { + "type": "object", + "additionalProperties": { + "allOf": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + { + "type": "object", + "properties": { + "rating_image_url": { + "type": "string" + }, + "value": { + "type": "number", + "format": "float" + } + } + } + ] + } + } + } + } + }, + "paging": { + "type": "object", + "properties": { + "next": { + "type": "string" + }, + "previous": { + "type": "string" + }, + "results": { + "type": "integer", + "format": "int32" + }, + "total_results": { + "type": "integer", + "format": "int32" + }, + "skipped": { + "type": "integer", + "format": "int32" + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + }, + "/v1/location/search": { + "get": { + "summary": "Find Search", + "description": "The Location Search request returns up to 10 locations found by the given search query.You can use category (\"hotels\", \"attractions\", \"restaurants\", \"geos\"), phone number, address, and latitude/longtitude to search with more accuracy.", + "operationId": "searchForLocations", + "tags": [ + "Location Search" + ], + "parameters": [ + { + "name": "searchQuery", + "in": "query", + "description": "Text to use for searching based on the name of the location", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "category", + "in": "query", + "description": "Filters result set based on property type. Valid options are \"hotels\", \"attractions\", \"restaurants\", and \"geos\"", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "phone", + "in": "query", + "description": "Phone number to filter the search results by (this can be in any format with spaces and dashes but without the \"+\" sign at the beginning)", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "address", + "in": "query", + "description": "Address to filter the search results by", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "latLong", + "in": "query", + "description": "Latitude/Longitude pair to scope down the search around a specifc point - eg. 
\"42.3455,-71.10767\"", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "radius", + "in": "query", + "description": "Length of the radius from the provided latitude/longitude pair to filter results.", + "required": false, + "schema": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + } + }, + { + "name": "radiusUnit", + "in": "query", + "description": "Unit for length of the radius. Valid options are \"km\", \"mi\", \"m\" (km=kilometers, mi=miles, m=meters)", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. \"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + } + ], + "responses": { + "200": { + "description": "Location Search Results", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "location_id": { + "description": "A unique identifier for a location on Tripadvisor. This is to be used in the other endpoints that require a location ID.", + "type": "integer", + "format": "int32" + }, + "name": { + "description": "Name of the location", + "type": "string" + }, + "distance": { + "description": "Distance, in miles, this location is from the passed in LatLong parameters", + "type": "string" + }, + "bearing": { + "description": "Direction this location is from the passed in LatLong parameters", + "type": "string" + }, + "address_obj": { + "description": "Object consisting of various address data", + "type": "object", + "properties": { + "street1": { + "type": "string", + "description": "The street name" + }, + "street2": { + "type": "string", + "description": "The street name continuation" + }, + "city": { + "type": "string", + "description": "The city name" + }, + "state": { + "type": "string", + "description": "The state" + }, + "country": { + "type": "string", + "description": "The country" + }, + "postalcode": { + "type": "string", + "description": "The address postal code" + }, + "address_string": { + "type": "string", + "description": "The address in one single sentence" + } + } + } + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + }, + "/v1/location/nearby_search": { + "get": { + "summary": "Nearby Search", + "description": "The Nearby Location Search request returns up to 10 locations found near the given latitude/longtitude.You can use category (\"hotels\", \"attractions\", \"restaurants\", \"geos\"), phone number, address to search with more accuracy.", + "operationId": "searchForNearbyLocations", + "tags": [ + "Nearby Location Search" + ], + "parameters": [ + { + "name": "latLong", + "in": "query", 
+ "description": "Latitude/Longitude pair to scope down the search around a specifc point - eg. \"42.3455,-71.10767\"", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "category", + "in": "query", + "description": "Filters result set based on property type. Valid options are \"hotels\", \"attractions\", \"restaurants\", and \"geos\"", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "phone", + "in": "query", + "description": "Phone number to filter the search results by (this can be in any format with spaces and dashes but without the \"+\" sign at the beginning)", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "address", + "in": "query", + "description": "Address to filter the search results by", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "radius", + "in": "query", + "description": "Length of the radius from the provided latitude/longitude pair to filter results.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "radiusUnit", + "in": "query", + "description": "Unit for length of the radius. Valid options are \"km\", \"mi\", \"m\" (km=kilometers, mi=miles, m=meters)", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. \"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + } + ], + "responses": { + "200": { + "description": "Nearby Location Search Results", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "location_id": { + "description": "A unique identifier for a location on Tripadvisor. 
This is to be used in the other endpoints that require a location ID.", + "type": "integer", + "format": "int32" + }, + "name": { + "description": "Name of the location", + "type": "string" + }, + "distance": { + "description": "Distance, in miles, this location is from the passed in LatLong parameters", + "type": "string" + }, + "bearing": { + "description": "Direction this location is from the passed in LatLong parameters", + "type": "string" + }, + "address_obj": { + "description": "Object consisting of various address data", + "type": "object", + "properties": { + "street1": { + "type": "string", + "description": "The street name" + }, + "street2": { + "type": "string", + "description": "The street name continuation" + }, + "city": { + "type": "string", + "description": "The city name" + }, + "state": { + "type": "string", + "description": "The state" + }, + "country": { + "type": "string", + "description": "The country" + }, + "postalcode": { + "type": "string", + "description": "The address postal code" + }, + "address_string": { + "type": "string", + "description": "The address in one single sentence" + } + } + } + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + } + }, + "components": { + "securitySchemes": { + "cosoLocationApiLambdaAuthorizer": { + "type": "apiKey", + "name": "key", + "in": "query" + } + } + } + } \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/user_functions.py b/sdk/ai/azure-ai-assistants/samples/user_functions.py new file mode 100644 index 000000000000..cb1e3d9cf43d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/user_functions.py @@ -0,0 +1,248 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime +from typing import Any, Callable, Set, Dict, List, Optional + +# These are the user-defined functions that can be called by the agent. + + +def fetch_current_datetime(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. + :rtype: str + """ + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. 
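+    # The JSON string returned here is what the assistant receives as the tool's
+    # output, so keep it compact and machine-parseable.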
+    mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"}
+    weather = mock_weather_data.get(location, "Weather data not available for this location.")
+    weather_json = json.dumps({"weather": weather})
+    return weather_json
+
+
+def send_email(recipient: str, subject: str, body: str) -> str:
+    """
+    Sends an email with the specified subject and body to the recipient.
+
+    :param recipient (str): Email address of the recipient.
+    :param subject (str): Subject of the email.
+    :param body (str): Body content of the email.
+    :return: Confirmation message.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd use an SMTP server or an email service API.
+    # Here, we'll mock the email sending.
+    print(f"Sending email to {recipient}...")
+    print(f"Subject: {subject}")
+    print(f"Body:\n{body}")
+
+    message_json = json.dumps({"message": f"Email successfully sent to {recipient}."})
+    return message_json
+
+
+def send_email_using_recipient_name(recipient: str, subject: str, body: str) -> str:
+    """
+    Sends an email with the specified subject and body to the recipient.
+
+    :param recipient (str): Name of the recipient.
+    :param subject (str): Subject of the email.
+    :param body (str): Body content of the email.
+    :return: Confirmation message.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd use an SMTP server or an email service API.
+    # Here, we'll mock the email sending.
+    print(f"Sending email to {recipient}...")
+    print(f"Subject: {subject}")
+    print(f"Body:\n{body}")
+
+    message_json = json.dumps({"message": f"Email successfully sent to {recipient}."})
+    return message_json
+
+
+def calculate_sum(a: int, b: int) -> str:
+    """Calculates the sum of two integers.
+
+    :param a (int): First integer.
+    :param b (int): Second integer.
+
+    :return: The sum of the two integers, as a JSON string.
+    :rtype: str
+    """
+    result = a + b
+    return json.dumps({"result": result})
+
+
+def convert_temperature(celsius: float) -> str:
+    """Converts temperature from Celsius to Fahrenheit.
+
+    :param celsius (float): Temperature in Celsius.
+
+    :return: Temperature in Fahrenheit, as a JSON string.
+    :rtype: str
+    """
+    fahrenheit = (celsius * 9 / 5) + 32
+    return json.dumps({"fahrenheit": fahrenheit})
+
+
+def toggle_flag(flag: bool) -> str:
+    """Toggles a boolean flag.
+
+    :param flag (bool): The flag to toggle.
+
+    :return: The toggled flag, as a JSON string.
+    :rtype: str
+    """
+    toggled = not flag
+    return json.dumps({"toggled_flag": toggled})
+
+
+def merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> str:
+    """Merges two dictionaries.
+
+    :param dict1 (Dict[str, Any]): First dictionary.
+    :param dict2 (Dict[str, Any]): Second dictionary.
+
+    :return: The merged dictionary, as a JSON string.
+    :rtype: str
+    """
+    merged = dict1.copy()
+    merged.update(dict2)
+    return json.dumps({"merged_dict": merged})
+
+
+def get_user_info(user_id: int) -> str:
+    """Retrieves user information based on user ID.
+
+    :param user_id (int): ID of the user.
+
+    :return: User information as a JSON string.
+    :rtype: str
+    """
+    mock_users = {
+        1: {"name": "Alice", "email": "alice@example.com"},
+        2: {"name": "Bob", "email": "bob@example.com"},
+        3: {"name": "Charlie", "email": "charlie@example.com"},
+    }
+    user_info = mock_users.get(user_id, {"error": "User not found."})
+    return json.dumps({"user_info": user_info})
+
+
+def longest_word_in_sentences(sentences: List[str]) -> str:
+    """Finds the longest word in each sentence.
+
+    :param sentences (List[str]): A list of sentences.
+    :return: A JSON string mapping each sentence to its longest word.
+    :rtype: str
+    """
+    if not sentences:
+        return json.dumps({"error": "The list of sentences is empty"})
+
+    longest_words = {}
+    for sentence in sentences:
+        # Split sentence into words
+        words = sentence.split()
+        if words:
+            # Find the longest word (ties go to the first occurrence)
+            longest_word = max(words, key=len)
+            longest_words[sentence] = longest_word
+        else:
+            longest_words[sentence] = ""
+
+    return json.dumps({"longest_words": longest_words})
+
+
+def process_records(records: List[Dict[str, int]]) -> str:
+    """
+    Process a list of records, where each record is a dictionary with string keys and integer values.
+
+    :param records: A list containing dictionaries that map strings to integers.
+    :return: A JSON string containing the list of sums of the integer values in each record.
+    :rtype: str
+    """
+    sums = []
+    for record in records:
+        # Sum up all the values in each dictionary and append the result to the sums list
+        total = sum(record.values())
+        sums.append(total)
+    return json.dumps({"sums": sums})
+
+
+# Example User Input for Each Function
+# 1. Fetch Current DateTime
+# User Input: "What is the current date and time?"
+# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?"
+
+# 2. Fetch Weather
+# User Input: "Can you provide the weather information for New York?"
+
+# 3. Send Email
+# User Input: "Send an email to john.doe@example.com with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'"
+
+# 4. Calculate Sum
+# User Input: "What is the sum of 45 and 55?"
+
+# 5. Convert Temperature
+# User Input: "Convert 25 degrees Celsius to Fahrenheit."
+
+# 6. Toggle Flag
+# User Input: "Toggle the flag True."
+
+# 7. Merge Dictionaries
+# User Input: "Merge these two dictionaries: {'name': 'Alice'} and {'age': 30}."
+
+# 8. Get User Info
+# User Input: "Retrieve user information for user ID 1."
+
+# 9. Longest Word in Sentences
+# User Input: "Find the longest word in each of these sentences: ['The quick brown fox jumps over the lazy dog', 'Python is an amazing programming language', 'Azure AI capabilities are impressive']."
+
+# 10. Process Records
+# User Input: "Process the following records: [{'a': 10, 'b': 20}, {'x': 5, 'y': 15, 'z': 25}, {'m': 30}]."
+
+# Statically defined user functions for fast reference
+user_functions: Set[Callable[..., Any]] = {
+    fetch_current_datetime,
+    fetch_weather,
+    send_email,
+    calculate_sum,
+    convert_temperature,
+    toggle_flag,
+    merge_dicts,
+    get_user_info,
+    longest_word_in_sentences,
+    process_records,
+}
diff --git a/sdk/ai/azure-ai-assistants/samples/user_logic_apps.py b/sdk/ai/azure-ai-assistants/samples/user_logic_apps.py
new file mode 100644
index 000000000000..979fd5eca143
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/samples/user_logic_apps.py
@@ -0,0 +1,80 @@
+import json
+import requests
+from typing import Dict, Any, Callable
+
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.logic import LogicManagementClient
+
+
+class AzureLogicAppTool:
+    """
+    A service that manages multiple Logic Apps by retrieving and storing their callback URLs,
+    and then invoking them with an appropriate payload.
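+
+    Example (illustrative only; the Logic App name, trigger name, and payload
+    below are placeholders, not values shipped with this sample):
+
+        tool = AzureLogicAppTool(subscription_id, resource_group)
+        tool.register_logic_app("SendEmailApp", "When_a_HTTP_request_is_received")
+        result = tool.invoke_logic_app("SendEmailApp", {"to": "user@example.com"})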
+ """ + + def __init__(self, subscription_id: str, resource_group: str, credential=None): + if credential is None: + credential = DefaultAzureCredential() + self.subscription_id = subscription_id + self.resource_group = resource_group + self.logic_client = LogicManagementClient(credential, subscription_id) + + self.callback_urls: Dict[str, str] = {} + + def register_logic_app(self, logic_app_name: str, trigger_name: str) -> None: + """ + Retrieves and stores a callback URL for a specific Logic App + trigger. + Raises a ValueError if the callback URL is missing. + """ + callback = self.logic_client.workflow_triggers.list_callback_url( + resource_group_name=self.resource_group, + workflow_name=logic_app_name, + trigger_name=trigger_name, + ) + + if callback.value is None: + raise ValueError(f"No callback URL returned for Logic App '{logic_app_name}'.") + + self.callback_urls[logic_app_name] = callback.value + + def invoke_logic_app(self, logic_app_name: str, payload: Dict[str, Any]) -> Dict[str, Any]: + """ + Invokes the registered Logic App (by name) with the given JSON payload. + Returns a dictionary summarizing success/failure. + """ + if logic_app_name not in self.callback_urls: + raise ValueError(f"Logic App '{logic_app_name}' has not been registered.") + + url = self.callback_urls[logic_app_name] + response = requests.post(url=url, json=payload) + + if response.ok: + return {"result": f"Successfully invoked {logic_app_name}."} + else: + return {"error": (f"Error invoking {logic_app_name} " f"({response.status_code}): {response.text}")} + + +def create_send_email_function(service: AzureLogicAppTool, logic_app_name: str) -> Callable[[str, str, str], str]: + """ + Returns a function that sends an email by invoking the specified Logic App in LogicAppService. + This keeps the LogicAppService instance out of global scope by capturing it in a closure. + """ + + def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: + """ + Sends an email by invoking the specified Logic App with the given recipient, subject, and body. + + :param recipient: The email address of the recipient. + :param subject: The subject of the email. + :param body: The body of the email. + :return: A JSON string summarizing the result of the operation. 
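+
+        Note: the payload keys built below ("to", "subject", "body") must match
+        the schema that the target Logic App's HTTP trigger expects.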
+ """ + payload = { + "to": recipient, + "subject": subject, + "body": body, + } + result = service.invoke_logic_app(logic_app_name, payload) + return json.dumps(result) + + return send_email_via_logic_app diff --git a/sdk/ai/azure-ai-assistants/samples/weather_openapi.json b/sdk/ai/azure-ai-assistants/samples/weather_openapi.json new file mode 100644 index 000000000000..df0192590adb --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/weather_openapi.json @@ -0,0 +1,62 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "get weather data", + "description": "Retrieves current weather data for a location based on wttr.in.", + "version": "v1.0.0" + }, + "servers": [ + { + "url": "https://wttr.in" + } + ], + "auth": [], + "paths": { + "/{location}": { + "get": { + "description": "Get weather information for a specific location", + "operationId": "GetCurrentWeather", + "parameters": [ + { + "name": "location", + "in": "path", + "description": "City or location to retrieve the weather for", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "format", + "in": "query", + "description": "Always use j1 value for this parameter", + "required": true, + "schema": { + "type": "string", + "default": "j1" + } + } + ], + "responses": { + "200": { + "description": "Successful response", + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + }, + "404": { + "description": "Location not found" + } + }, + "deprecated": false + } + } + }, + "components": { + "schemes": {} + } +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/setup.py b/sdk/ai/azure-ai-assistants/setup.py new file mode 100644 index 000000000000..2752c3ba025c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/setup.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+# coding: utf-8
+
+import os
+import re
+from setuptools import setup, find_packages
+
+
+PACKAGE_NAME = "azure-ai-assistants"
+PACKAGE_PPRINT_NAME = "Azure AI Assistants"
+
+# a-b-c => a/b/c
+package_folder_path = PACKAGE_NAME.replace("-", "/")
+
+# Version extraction inspired from 'requests'
+with open(os.path.join(package_folder_path, "_version.py"), "r") as fd:
+    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)
+
+if not version:
+    raise RuntimeError("Cannot find version information")
+
+
+setup(
+    name=PACKAGE_NAME,
+    version=version,
+    description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
+    long_description=open("README.md", "r").read(),
+    long_description_content_type="text/markdown",
+    license="MIT License",
+    author="Microsoft Corporation",
+    author_email="azpysdkhelp@microsoft.com",
+    url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk",
+    keywords="azure, azure sdk",
+    classifiers=[
+        "Development Status :: 4 - Beta",
+        "Programming Language :: Python",
+        "Programming Language :: Python :: 3 :: Only",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
+        "Programming Language :: Python :: 3.12",
+        "License :: OSI Approved :: MIT License",
+    ],
+    zip_safe=False,
+    packages=find_packages(
+        exclude=[
+            "tests",
+            # Exclude packages that will be covered by PEP420 or nspkg
+            "azure",
+            "azure.ai",
+        ]
+    ),
+    include_package_data=True,
+    package_data={
+        "azure.ai.assistants": ["py.typed"],
+    },
+    install_requires=[
+        "isodate>=0.6.1",
+        "azure-core>=1.30.0",
+        "typing-extensions>=4.6.0",
+    ],
+    python_requires=">=3.8",
+)
diff --git a/sdk/ai/azure-ai-assistants/tests/README.md b/sdk/ai/azure-ai-assistants/tests/README.md
new file mode 100644
index 000000000000..c49ab7e61a82
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/README.md
@@ -0,0 +1,54 @@
+# Azure AI Assistants client library tests for Python
+
+The instructions below are for running tests locally, on a Windows machine, against the live service using a local build of the client library.
+
+## Build and install the client library
+
+- Clone or download this repository.
+- Open a command prompt window in the folder `sdk\ai\azure-ai-assistants`.
+- Install development dependencies:
+  ```bash
+  pip install -r dev_requirements.txt
+  ```
+- Build the package:
+  ```bash
+  pip install wheel
+  python setup.py bdist_wheel
+  ```
+- Install the resulting wheel (update version `1.0.0b1` to the current one):
+  ```bash
+  pip install dist\azure_ai_assistants-1.0.0b1-py3-none-any.whl --user --force-reinstall
+  ```
+
+## Log in to Azure
+
+```bash
+az login
+```
+
+## Set up environment variables
+
+Edit the file `azure_ai_assistants_tests.env` located in the folder above. Follow the instructions there on how to set up Azure AI Foundry projects to be used for testing, and enter appropriate values for the environment variables used for the tests you want to run.
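+
+If you run the tests from an IDE rather than from a shell where these variables are already set, the same file can be loaded programmatically. The snippet below is a minimal sketch, assuming the `python-dotenv` package is installed (it may not be pulled in by `dev_requirements.txt`):
+
+```python
+# Illustrative only: load the test environment variables before invoking pytest.
+# Adjust the relative path to wherever azure_ai_assistants_tests.env actually lives.
+from dotenv import load_dotenv
+
+load_dotenv(dotenv_path="../azure_ai_assistants_tests.env", override=True)
+```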
+ +## Configure test proxy + +Configure the test proxy to run live service tests without recordings: + +```bash +set AZURE_TEST_RUN_LIVE=true +set AZURE_SKIP_LIVE_RECORDING=true +set PROXY_URL=http://localhost:5000 +set AZURE_TEST_USE_CLI_AUTH=true +``` + +## Run tests + +To run all tests, type: + +```bash +pytest +``` + +## Additional information + +See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. diff --git a/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt b/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt new file mode 100644 index 000000000000..f5a1bb4c3ac7 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt @@ -0,0 +1,255 @@ +event: thread.run.step.completed +data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1735945046,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"{}","output":"{\"current_time\": \"2025-01-03 14:57:24\"}"}},{"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"{\"location\": \"New York\"}","output":"{\"weather\": \"Sunny, 25\\u00b0C\"}"}}]},"usage":{"prompt_tokens":648,"completion_tokens":71,"total_tokens":719}} + +event: thread.run.queued +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of 
the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.in_progress +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to 
toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.step.created +data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + +event: thread.run.step.in_progress +data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_03","type":"function","function":{"name":"send_email","arguments":"","output":null}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"recipient"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"user"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"@example"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":".com"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\",\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"subject"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"Current"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" New"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" York"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Weather"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" and"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" DateTime"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Information"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\",\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"body"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"Hello"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":",\\"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"Here"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" are"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" the"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" details"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" you"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" requested"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":\\"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"-"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Date"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" and"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Time"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"202"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"5"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"-"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"01"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"-"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"03"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"14"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"57"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":"}}]}}} + 
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"24"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"-"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Weather"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" in"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" New"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" York"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Sunny"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":","}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"25"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"°C"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"Best"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" regards"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":".\"\n"}}]}}}
+
+event: thread.run.step.delta
+data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"}"}}]}}}
+
+event: thread.run.requires_action
+data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"requires_action","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_03","type":"function","function":{"name":"send_email","arguments":"{\n \"recipient\": \"user@example.com\",\n \"subject\": \"Current New York Weather and DateTime Information\",\n \"body\": \"Hello,\\n\\nHere are the details you requested:\\n\\n- Date and Time: 2025-01-03 14:57:24\\n- Weather in New York: Sunny, 25°C\\n\\nBest regards.\"\n}"}}]}},"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First
dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: done +data: [DONE] + diff --git a/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt b/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt new file mode 100644 index 000000000000..de654aec55ee --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt @@ -0,0 +1,45 @@ +event: thread.run.created +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":null,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the 
email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.queued +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":null,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to 
toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.in_progress +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified 
location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.step.created +data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + +event: thread.run.step.in_progress +data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"","output":null}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{}"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"","output":null}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":"{\"location"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":"\": \"N"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":"ew Y"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":"ork\"}"}}]}}} + +event: thread.run.requires_action +data: 
{"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"requires_action","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"{}"}},{"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"{\"location\": \"New York\"}"}}]}},"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the 
current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}
+
+event: done
+data: [DONE]
+
diff --git a/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt b/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt
new file mode 100644
index 000000000000..c8bd94f9005f
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt
@@ -0,0 +1,213 @@
+event: thread.run.step.completed
+data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1735945059,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_03","type":"function","function":{"name":"send_email","arguments":"{\n \"recipient\": \"user@example.com\",\n \"subject\": \"Current New York Weather and DateTime Information\",\n \"body\": \"Hello,\\n\\nHere are the details you requested:\\n\\n- Date and Time: 2025-01-03 14:57:24\\n- Weather in New York: Sunny, 25°C\\n\\nBest regards.\"\n}","output":"{\"message\": \"Email successfully sent to user@example.com.\"}"}}]},"usage":{"prompt_tokens":735,"completion_tokens":87,"total_tokens":822}}
+
+event: thread.run.queued
+data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the
recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.in_progress +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945059,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean 
flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.step.created +data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":null} + +event: thread.run.step.in_progress +data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":null} + +event: thread.message.created +data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"assistant_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"attachments":[],"metadata":{}} + +event: thread.message.in_progress +data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"assistant_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"attachments":[],"metadata":{}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" email"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" has"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" been"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" successfully"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" sent"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" to"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" the"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" recipient"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" with"}}]}} + +event: 
thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" the"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" following"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" details"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":\n\n"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"-"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" Date"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" and"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" Time"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" "}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"202"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"5"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"-"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"01"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"-"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"03"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" "}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"14"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"57"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"24"}}]}} + +event: thread.message.delta +data: 
{"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"\n"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"-"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" Weather"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" in"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" New"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" York"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" Sunny"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":","}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" "}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"25"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"�C"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"\n\n"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"If"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" you"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" need"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" any"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" further"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" assistance"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" or"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" information"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":","}}]}} + +event: thread.message.delta +data: 
{"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" please"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" feel"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" free"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" to"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" ask"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} + +event: thread.message.completed +data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"assistant_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1735945061,"role":"assistant","content":[{"type":"text","text":{"value":"The email has been successfully sent to the recipient with the following details:\n\n- Date and Time: 2025-01-03 14:57:24\n- Weather in New York: Sunny, 25�C\n\nIf you need any further assistance or information, please feel free to ask.","annotations":[]}}],"attachments":[],"metadata":{}} + +event: thread.run.step.completed +data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1735945061,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":{"prompt_tokens":834,"completion_tokens":62,"total_tokens":896}} + +event: thread.run.completed +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"completed","started_at":1735945059,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1735945061,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to 
toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":2217,"completion_tokens":220,"total_tokens":2437},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: done +data: [DONE] + diff --git a/sdk/ai/azure-ai-assistants/tests/check_sample_name.sh b/sdk/ai/azure-ai-assistants/tests/check_sample_name.sh new file mode 100644 index 000000000000..5c7cbfa53363 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/check_sample_name.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# This is simple helper script to chreck the name of a file +# the name should appear at least once as: +# +# python $fname +# +# If the file contain its name less times, we print its name. + +SAMPLES_SYNC="`dirname ${0}`/../samples/agents" +SAMPLES_ASYNC="`dirname ${0}`/../samples/agents/async_samples" + +for sample_dir in "$SAMPLES_SYNC" "$SAMPLES_ASYNC"; do + for fname in `ls "$sample_dir" | grep \^sample_ | grep \[.\]py\$`; do + cnt=`grep -c "${fname}" "${sample_dir}/${fname}"` + if [ $cnt -lt 1 ]; then + echo "${sample_dir}/${fname} name encountered ${cnt} times." 
+    fi
+  done
+done
+exit 0
diff --git a/sdk/ai/azure-ai-assistants/tests/conftest.py b/sdk/ai/azure-ai-assistants/tests/conftest.py
new file mode 100644
index 000000000000..e1f9eaa3a08b
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/conftest.py
@@ -0,0 +1,151 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import pytest
+from devtools_testutils import (
+    add_general_regex_sanitizer,
+    add_body_key_sanitizer,
+    remove_batch_sanitizers,
+    get_credential,
+    test_proxy,
+)
+from azure.ai.assistants import AssistantsClient
+from dotenv import load_dotenv, find_dotenv
+
+if not load_dotenv(find_dotenv(filename="azure_ai_assistants_tests.env"), override=True):
+    print("Failed to apply environment variables for azure-ai-assistants tests.")
+
+
+class SanitizedValues:
+    SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000"
+    RESOURCE_GROUP_NAME = "00000"
+    WORKSPACE_NAME = "00000"
+    DATASET_NAME = "00000"
+    TENANT_ID = "00000000-0000-0000-0000-000000000000"
+    USER_OBJECT_ID = "00000000-0000-0000-0000-000000000000"
+    API_KEY = "00000000000000000000000000000000000000000000000000000000000000000000"
+    VECTOR_STORE_NAME = "vs_000000000000000000000000"
+    # cSpell:disable-next-line
+    FILE_BATCH = "vsfb_00000000000000000000000000000000"
+
+
+@pytest.fixture(scope="session")
+def mock_project_scope():
+    return {
+        "subscription_id": f"{SanitizedValues.SUBSCRIPTION_ID}",
+        "resource_group_name": f"{SanitizedValues.RESOURCE_GROUP_NAME}",
+        "project_name": f"{SanitizedValues.WORKSPACE_NAME}",
+    }
+
+
+@pytest.fixture(scope="session")
+def mock_dataset_name():
+    return {
+        "dataset_name": f"{SanitizedValues.DATASET_NAME}",
+    }
+
+
+@pytest.fixture(scope="session")
+def mock_vector_store_name():
+    return {
+        "vector_store_name": f"{SanitizedValues.VECTOR_STORE_NAME}",
+        "file_batches": f"{SanitizedValues.FILE_BATCH}",
+    }
+
+
+# autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method
+@pytest.fixture(scope="session", autouse=True)
+def start_proxy(test_proxy):
+    return
+
+
+@pytest.fixture(scope="session", autouse=True)
+def add_sanitizers(test_proxy, mock_project_scope, mock_dataset_name, mock_vector_store_name):
+
+    def azure_workspace_triad_sanitizer():
+        """Sanitize subscription, resource group, and workspace."""
+
+        add_general_regex_sanitizer(
+            regex=r"/subscriptions/([-\w\._\(\)]+)",
+            value=mock_project_scope["subscription_id"],
+            group_for_replace="1",
+        )
+
+        add_general_regex_sanitizer(
+            regex=r"/resource[gG]roups/([-\w\._\(\)]+)",
+            value=mock_project_scope["resource_group_name"],
+            group_for_replace="1",
+        )
+
+        add_general_regex_sanitizer(
+            regex=r"/workspaces/([-\w\._\(\)]+)", value=mock_project_scope["project_name"], group_for_replace="1"
+        )
+
+        # TODO (Darren): Check why this is needed in addition to the above
+        add_general_regex_sanitizer(
+            regex=r"%2Fsubscriptions%2F([-\w\._\(\)]+)",
+            value=mock_project_scope["subscription_id"],
+            group_for_replace="1",
+        )
+
+        # TODO (Darren): Check why this is needed in addition to the above
+        add_general_regex_sanitizer(
+            regex=r"%2Fresource[gG]roups%2F([-\w\._\(\)]+)",
+            value=mock_project_scope["resource_group_name"],
+            group_for_replace="1",
+        )
+
+    azure_workspace_triad_sanitizer()
+
+    add_general_regex_sanitizer(regex=r"/runs/([-\w\._\(\)]+)", value="Sanitized", group_for_replace="1")
+
+    add_general_regex_sanitizer(
regex=r"/data/([-\w\._\(\)]+)", value=mock_dataset_name["dataset_name"], group_for_replace="1" + ) + + add_general_regex_sanitizer( + regex=r"/vector_stores/([-\w\._\(\)]+)", + value=mock_vector_store_name["vector_store_name"], + group_for_replace="1", + ) + + add_general_regex_sanitizer( + regex=r"/file_batches/([-\w\._\(\)]+)/", value=mock_vector_store_name["file_batches"], group_for_replace="1" + ) + + # Sanitize Application Insights connection string from service response (/tests/telemetry) + add_body_key_sanitizer( + json_path="properties.ConnectionString", + value="InstrumentationKey=00000000-0000-0000-0000-000000000000;IngestionEndpoint=https://region.applicationinsights.azure.com/;LiveEndpoint=https://region.livediagnostics.monitor.azure.com/;ApplicationId=00000000-0000-0000-0000-000000000000", + ) + + add_body_key_sanitizer( + json_path="data_sources[*].uri", + value="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/00000000000/product_info_1.md", + ) + + add_body_key_sanitizer( + json_path="configuration.data_sources[*].uri", + value="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/00000000000/product_info_1.md", + ) + + add_body_key_sanitizer( + json_path="data_source.uri", + value="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/00000000000/product_info_1.md", + ) + + add_body_key_sanitizer( + json_path="tool_resources.azure_ai_search.indexes[*].index_connection_id", + value="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/someindex" + ) + + # Sanitize API key from service response (/tests/connections) + add_body_key_sanitizer(json_path="properties.credentials.key", value="Sanitized") + + # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive: + # - AZSDK3493: $..name + # - AZSDK3430: $..id + remove_batch_sanitizers(["AZSDK3493"]) + remove_batch_sanitizers(["AZSDK3430"]) diff --git a/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py new file mode 100644 index 000000000000..6a927c646779 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py @@ -0,0 +1,184 @@ +# pylint: disable=line-too-long,useless-suppression +import io +import json +import unittest +from typing import Any, Dict, IO, Union +from unittest.mock import Mock, MagicMock, AsyncMock +from requests.structures import CaseInsensitiveDict +import inspect +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.aio import AssistantsClient as AsyncAssistantsClient +from azure.ai.assistants._model_base import SdkJSONEncoder + + +def dict_to_io_bytes(input: Dict[str, Any]) -> io.BytesIO: + input_string = json.dumps(input, cls=SdkJSONEncoder, exclude_readonly=True) + return io.BytesIO(input_string.encode("utf-8")) + + +class OverloadAssertion: + def __init__(self, mock: Mock, async_mock: AsyncMock, **args): + self.mock = mock + self.async_mock = async_mock + + def _to_dict(self, input: Union[None, str, IO[bytes]]) -> Dict[str, Any]: + json_string = "" + if isinstance(input, io.BytesIO): + json_string = input.getvalue().decode("utf-8") + elif isinstance(input, str): + json_string = input + 
else:
+            json_string = "{}"
+        return json.loads(json_string)
+
+    def assert_deep_equal_header_except_content_length(
+        self, header1: CaseInsensitiveDict, header2: CaseInsensitiveDict, msg: str
+    ):
+        """
+        Compare two HTTP headers for deep equality, except for the Content-Length header,
+        which the HttpRequest class appears to add automatically when the body type is bytes.
+        """
+        header1 = header1.copy()
+        header2 = header2.copy()
+        header1.pop("Content-Length", None)
+        header2.pop("Content-Length", None)
+        unittest.TestCase().assertDictEqual(dict(header1), dict(header2), msg)
+
+    def _assert_same_http_request(self, call1: Any, call2: Any, index1: int, index2: int):
+        """
+        Compare two HTTP request objects for deep equality.
+        """
+
+        # Compare method, URL, headers, body, and other relevant attributes
+        req1 = call1.args[0]
+        req2 = call2.args[0]
+        req1_body = self._to_dict(req1.body)
+        req2_body = self._to_dict(req2.body)
+        unittest.TestCase().assertEqual(
+            req1.method,
+            req2.method,
+            f"call[{index1}] method is {req1.method}, but call[{index2}] method is {req2.method}",
+        )
+        unittest.TestCase().assertEqual(
+            req1.url, req2.url, f"call[{index1}] url is {req1.url}, but call[{index2}] url is {req2.url}"
+        )
+        unittest.TestCase().assertDictEqual(
+            req1_body,
+            req2_body,
+            f"call[{index1}] body is {json.dumps(req1_body, sort_keys=True)}, but call[{index2}] body is {json.dumps(req2_body, sort_keys=True)}",
+        )
+        self.assert_deep_equal_header_except_content_length(
+            req1.headers,
+            req2.headers,
+            f"call[{index1}] headers are {req1.headers}, but call[{index2}] headers are {req2.headers}",
+        )
+        unittest.TestCase().assertDictEqual(
+            call1.kwargs,
+            call2.kwargs,
+            f"call[{index1}] kwargs are {call1.kwargs}, but call[{index2}] kwargs are {call2.kwargs}",
+        )
+
+    def same_http_requests_from(self, *, operation_count: int, api_per_operation_count: int):
+        all_calls = self.mock.call_args_list + self.async_mock.call_args_list
+        assert len(all_calls) == operation_count * api_per_operation_count
+
+        # Compare the first, then the second, then the third call (and so on) of each operation,
+        # and assert that they issued the same HTTP request.
+        template = all_calls[:api_per_operation_count]
+        for j in range(api_per_operation_count, len(all_calls), api_per_operation_count):
+            for i, (api_one, api_other) in enumerate(zip(template, all_calls[j : j + api_per_operation_count])):
+                self._assert_same_http_request(api_one, api_other, i, i + j)
+
+
+def assert_same_http_requests(test_func):
+    """
+    Decorator to mock pipeline responses and call the test function with the mock clients and assertion.
+
+    :param test_func: The test function to be decorated.
+    :return: The wrapper function.
+ """ + + def _get_mock_client() -> AssistantsClient: + """Return the fake project client""" + client = AssistantsClient( + endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", + credential=MagicMock(), + ) + client.submit_tool_outputs_to_run = MagicMock() + client.submit_tool_outputs_to_stream = MagicMock() + return client + + def _get_async_mock_client() -> AsyncAssistantsClient: + """Return the fake project client""" + client = AsyncAssistantsClient( + endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", + subscription_id="00000000-0000-0000-0000-000000000000", + resource_group_name="non-existing-rg", + project_name="non-existing-project", + credential=AsyncMock(), + ) + client.submit_tool_outputs_to_run = AsyncMock() + client.submit_tool_outputs_to_stream = AsyncMock() + return client + + async def wrapper(self, *args, **kwargs): + """ + Wrapper function to set up mocks and call the test function. + + :param self: The test class instance. + :param args: Positional arguments to pass to the test function. + :param kwargs: Keyword arguments to pass to the test function. + """ + if not test_func: + return + + # Mock the pipeline response + pipeline_response_mock_return = Mock() + http_response = Mock() + http_response_json = Mock() + iter_bytes = Mock() + + # Set up the mock HTTP response + http_response_json.return_value = {} + http_response.status_code = 200 + http_response.json = http_response_json + http_response.iter_bytes = iter_bytes + + # Set up the pipeline response mock + pipeline_response_mock = Mock() + pipeline_response_mock_async = AsyncMock() + pipeline_response_mock.return_value = pipeline_response_mock_return + pipeline_response_mock_async.return_value = pipeline_response_mock_return + pipeline_response_mock_return.http_response = http_response + + # Get the mock clients + client = _get_mock_client() + async_client = _get_async_mock_client() + + async with async_client: + with client: + # Assign the pipeline mock to the client + client._client._pipeline.run = pipeline_response_mock + async_client._client._pipeline.run = pipeline_response_mock_async + + # Create an assertion object with the call arguments list + assertion = OverloadAssertion(pipeline_response_mock, pipeline_response_mock_async) + + # Call the test function with the mock clients and assertion + await test_func(self, client, async_client, assertion, *args, **kwargs) + + return wrapper + + +def get_mock_fn(fn, return_val): + def mock_func(*args, **kwargs): + fn(*args, **kwargs) + return return_val + + async def mock_func_async(*args, **kwargs): + await fn(*args, **kwargs) + return return_val + + if inspect.iscoroutinefunction(fn): + return mock_func_async + return mock_func diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py new file mode 100644 index 000000000000..09cf9eef56f4 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py @@ -0,0 +1,141 @@ +from unittest.mock import patch + +import pytest +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.aio import AssistantsClient as AsyncAssistantsClient +from typing import List, Dict, MutableMapping, Any + +from overload_assert_utils import ( + assert_same_http_requests, + OverloadAssertion, + dict_to_io_bytes, + get_mock_fn, +) + +from azure.ai.assistants.models import ThreadMessageOptions, ToolResources, VectorStore + + +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object + + +class 
TestSignatures: + + @pytest.mark.asyncio + @assert_same_http_requests + async def test_create_assistant( + self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion + ): + model = "gpt-4-1106-preview" + name = "first" + instructions = "You are a helpful assistant" + body = {"model": model, "name": name, "instructions": instructions} + + assistant.create_assistant(model=model, name=name, instructions=instructions) + assistant.create_assistant(body=body) + assistant.create_assistant(body=dict_to_io_bytes(body)) + + await async_assistant.create_assistant(model=model, name=name, instructions=instructions) + await async_assistant.create_assistant(body=body) + await async_assistant.create_assistant(body=dict_to_io_bytes(body)) + + assertion.same_http_requests_from(operation_count=6, api_per_operation_count=1) + + @pytest.mark.asyncio + @assert_same_http_requests + async def test_create_vector_store_and_poll( + self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion + ): + file_ids = ["file_id"] + body = {"file_ids": file_ids} + + with patch( + "azure.ai.assistants._operations.AssistantsClientOperationsMixin.create_vector_store", + wraps=get_mock_fn( + assistant.create_vector_store, return_val=VectorStore({"id": "store_1", "status": "in_progress"}) + ), + ), patch( + "azure.ai.assistants._operations.AssistantsClientOperationsMixin.get_vector_store", + wraps=get_mock_fn( + assistant.get_vector_store, return_val=VectorStore({"id": "store_1", "status": "completed"}) + ), + ): + + assistant.create_vector_store_and_poll(file_ids=file_ids, sleep_interval=0) + assistant.create_vector_store_and_poll(body=body, sleep_interval=0) + assistant.create_vector_store_and_poll(body=dict_to_io_bytes(body), sleep_interval=0) + + with patch( + "azure.ai.assistants.aio._operations.AssistantsClientOperationsMixin.create_vector_store", + wraps=get_mock_fn( + async_assistant.create_vector_store, return_val=VectorStore({"id": "store_1", "status": "in_progress"}) + ), + ), patch( + "azure.ai.assistants.aio._operations.AssistantsClientOperationsMixin.get_vector_store", + wraps=get_mock_fn( + async_assistant.get_vector_store, return_val=VectorStore({"id": "store_1", "status": "completed"}) + ), + ): + await async_assistant.create_vector_store_and_poll(file_ids=file_ids, sleep_interval=0) + await async_assistant.create_vector_store_and_poll(body=body, sleep_interval=0) + await async_assistant.create_vector_store_and_poll(body=dict_to_io_bytes(body), sleep_interval=0) + assertion.same_http_requests_from(operation_count=6, api_per_operation_count=2) + + @pytest.mark.asyncio + @assert_same_http_requests + async def test_create_thread( + self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion + ): + messages: List[ThreadMessageOptions] = [] + tool_resources: ToolResources = ToolResources() + metadata: Dict[str, str] = {} + body = {"messages": messages, "tool_resources": tool_resources, "metadata": metadata} + + assistant.create_thread(messages=messages, tool_resources=tool_resources, metadata=metadata) + assistant.create_thread(body=body) + assistant.create_thread(body=dict_to_io_bytes(body)) + + await async_assistant.create_thread(messages=messages, tool_resources=tool_resources, metadata=metadata) + await async_assistant.create_thread(body=body) + await async_assistant.create_thread(body=dict_to_io_bytes(body)) + + assertion.same_http_requests_from(operation_count=6, 
api_per_operation_count=1)
+
+    @pytest.mark.asyncio
+    @pytest.mark.skip("Defect: when the body is passed as JSON or IO[bytes], the backend is not called with stream=False")
+    @assert_same_http_requests
+    async def test_create_run(
+        self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion
+    ):
+        thread_id = "thread_id"
+        assistant_id = "assistant_id"
+        body = {"assistant_id": assistant_id}
+
+        assistant.create_run(thread_id, assistant_id=assistant_id)
+        assistant.create_run(thread_id, body=body)
+        assistant.create_run(thread_id, body=dict_to_io_bytes(body))
+
+        await async_assistant.create_run(thread_id, assistant_id=assistant_id)
+        await async_assistant.create_run(thread_id, body=body)
+        await async_assistant.create_run(thread_id, body=dict_to_io_bytes(body))
+
+        assertion.same_http_requests_from(operation_count=6, api_per_operation_count=1)
+
+    @pytest.mark.asyncio
+    @pytest.mark.skip("Defect: when the body is passed as JSON or IO[bytes], the backend is not called with stream=True")
+    @assert_same_http_requests
+    async def test_create_stream(
+        self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion
+    ):
+        thread_id = "thread_id"
+        assistant_id = "assistant_id"
+        body = {"assistant_id": assistant_id}
+
+        assistant.create_stream(thread_id, assistant_id=assistant_id)
+        assistant.create_stream(thread_id, body=body)
+        assistant.create_stream(thread_id, body=dict_to_io_bytes(body))
+
+        await async_assistant.create_stream(thread_id, assistant_id=assistant_id)
+        await async_assistant.create_stream(thread_id, body=body)
+        await async_assistant.create_stream(thread_id, body=dict_to_io_bytes(body))
+
+        assertion.same_http_requests_from(operation_count=6, api_per_operation_count=1)
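Every test in `test_assistant_mock_overloads.py` above follows the same pattern: invoke one operation through its three overloads (keyword arguments, a JSON `body`, and an `IO[bytes]` body) and assert that the captured pipeline requests are identical. A hedged sketch of the three call shapes against a live client; the endpoint is a placeholder and `DefaultAzureCredential` is an assumption, since the tests themselves use mocked clients:

```python
# Illustrative only: the three equivalent overloads the tests compare.
import io
import json

from azure.ai.assistants import AssistantsClient
from azure.identity import DefaultAzureCredential

client = AssistantsClient(endpoint="https://<your-endpoint>", credential=DefaultAzureCredential())

payload = {"model": "gpt-4-1106-preview", "name": "first", "instructions": "You are a helpful assistant"}

client.create_assistant(**payload)     # keyword-argument overload
client.create_assistant(body=payload)  # JSON body overload
client.create_assistant(body=io.BytesIO(json.dumps(payload).encode("utf-8")))  # IO[bytes] overload
```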
diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py
new file mode 100644
index 000000000000..eee0b4629d64
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py
@@ -0,0 +1,273 @@
+# pylint: disable=line-too-long,useless-suppression
+from typing import Iterator, List
+from unittest.mock import Mock, patch
+import pytest
+import os
+from azure.ai.assistants.models import (
+    AssistantEventHandler,
+    BaseAssistantEventHandler,
+    SubmitToolOutputsAction,
+    ThreadRun,
+)
+from azure.ai.assistants.models._patch import _parse_event
+from azure.ai.assistants.models import AssistantStreamEvent
+from azure.ai.assistants.models import ThreadRun, RunStep, ThreadMessage, MessageDeltaChunk, RunStepDeltaChunk
+
+
+def read_file(file_name: str) -> str:
+    with open(os.path.join(os.path.dirname(__file__), "assets", f"{file_name}.txt"), "r") as file:
+        return file.read()
+
+
+main_stream_response = read_file("main_stream_response")
+fetch_current_datetime_and_weather_stream_response = read_file("fetch_current_datetime_and_weather_stream_response")
+
+
+def convert_to_byte_iterator(input: str) -> Iterator[bytes]:
+    yield input.encode()
+
+
+class TestBaseAssistantEventHandler:
+    class MyAssistantEventhHandler(BaseAssistantEventHandler[str]):
+        def _process_event(self, event_data_str: str) -> str:
+            return event_data_str
+
+    def break_main_stream_response(self, indices: List[int], response: str):
+        previous_index = 0
+        for index in indices:
+            yield response[previous_index:index].encode()
+            previous_index = index
+        yield response[previous_index:].encode()
+
+    def mock_callable(self, _: ThreadRun, __: BaseAssistantEventHandler[str]) -> None:
+        pass
+
+    def test_event_handler_process_response_when_break_around_event_separators(self):
+        # Events are split into multiple chunks.
+        # Each chunk might contain more than one response, or an incomplete one.
+        # Test chunks that are broken around the event separators, which are "\n\n".
+        handler = self.MyAssistantEventhHandler()
+        new_line_indices = [i for i in range(len(main_stream_response)) if main_stream_response.startswith("\n\n", i)]
+
+        indices_around_new_lines = [i + offset for i, offset in zip(new_line_indices, [0, -1, 1, 2, 3, 4, 5])]
+        handler.initialize(
+            self.break_main_stream_response(indices_around_new_lines, main_stream_response), self.mock_callable
+        )
+        count = 0
+        all_event_str: List[str] = []
+        for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+        assert count == main_stream_response.count("event:")
+        assert all_event_str[-1].startswith("event: done")
+
+    def test_event_handler_process_response_when_break_at_the_start(self):
+        handler = self.MyAssistantEventhHandler()
+
+        handler.initialize(
+            # indices around the newline characters, in the middle of an event, or at the end
+            self.break_main_stream_response([2], main_stream_response),
+            self.mock_callable,
+        )
+        count = 0
+        all_event_str: List[str] = []
+        for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+        assert count == main_stream_response.count("event:")
+        assert all_event_str[-1].startswith("event: done")
+
+    def test_event_handler_process_response_when_break_at_the_end(self):
+        handler = self.MyAssistantEventhHandler()
+
+        response_len = len(main_stream_response)
+        indices_around_new_lines = list(range(response_len - 5, response_len + 1))
+
+        handler.initialize(
+            # indices around the newline characters, in the middle of an event, or at the end
+            self.break_main_stream_response(indices_around_new_lines, main_stream_response),
+            self.mock_callable,
+        )
+        count = 0
+        all_event_str: List[str] = []
+        for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+        assert count == main_stream_response.count("event:")
+        assert all_event_str[-1].startswith("event: done")
+
+    def test_event_handler_chain_responses(self):
+        # Test that the event handler can process a second stream chained after the first one.
+        handler = self.MyAssistantEventhHandler()
+        handler.initialize(convert_to_byte_iterator(main_stream_response), self.mock_callable)
+        handler.initialize(
+            convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response), self.mock_callable
+        )
+        count = 0
+        all_event_str: List[str] = []
+        for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+
+        assert count == main_stream_response.count("event:") + fetch_current_datetime_and_weather_stream_response.count(
+            "event:"
+        )
+        assert all_event_str[-1].startswith("event: done")
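The splitting tests above all exercise one buffering rule: events are delimited by `\n\n`, and a network chunk may end anywhere, so bytes must accumulate until a complete delimiter arrives. A minimal standalone sketch of that reassembly, assuming nothing about the SDK's actual `BaseAssistantEventHandler` internals:

```python
# Minimal sketch of "\n\n"-delimited SSE reassembly: accumulate bytes and
# emit only complete events, so chunks may split an event anywhere.
from typing import Iterator


def iter_events(chunks: Iterator[bytes]) -> Iterator[str]:
    buffer = b""
    for chunk in chunks:
        buffer += chunk
        while b"\n\n" in buffer:
            event_bytes, buffer = buffer.split(b"\n\n", 1)
            yield event_bytes.decode("utf-8")


# A chunk boundary in the middle of an event is reassembled transparently.
stream = iter([b"event: done\nda", b"ta: [DONE]\n\n"])
assert list(iter_events(stream)) == ["event: done\ndata: [DONE]"]
```

Decoding only complete events is also what makes the split-multi-byte-character case below safe: the partial UTF-8 bytes sit in the buffer until the rest of the event arrives.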
+    def test_event_handler_reusable(self):
+        # Test if the event handler can be reused after a stream is done.
+        handler = self.MyAssistantEventhHandler()
+        handler.initialize(convert_to_byte_iterator(main_stream_response), self.mock_callable)
+        count = 0
+        all_event_str: List[str] = []
+        for event_str in handler:
+            assert event_str.startswith("event:")
+
+        handler.initialize(
+            convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response), self.mock_callable
+        )
+
+        for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+
+        assert count == fetch_current_datetime_and_weather_stream_response.count("event:")
+        assert all_event_str[-1].startswith("event: done")
+
+    def test_event_handler_with_split_chinese_char(self):
+        response_bytes_split_chinese_char: List[bytes] = [
+            b'event: thread.message.delta\ndata: data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"\xe5',
+            b"\xa4",
+            b'\xa9"}}]}}\n\n',
+            b'event: thread.message.delta\ndata: data: {"id":"msg_02","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}}}\n\nevent: done\ndata: [DONE]\n\n',
+        ]
+
+        handler = self.MyAssistantEventhHandler()
+
+        handler.initialize(
+            # chunks deliberately split in the middle of a multi-byte character
+            iter(response_bytes_split_chinese_char),
+            self.mock_callable,
+        )
+        count = 0
+        all_event_str: List[str] = []
+        for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+        assert count == 3
+        assert all_event_str[-1].startswith("event: done")
+
+
+class TestAssistantEventHandler:
+
+    deserializable_events = [
+        AssistantStreamEvent.THREAD_CREATED.value,
+        AssistantStreamEvent.ERROR.value,
+        AssistantStreamEvent.DONE.value,
+    ]
+
+    class MyAssistantEventHandler(AssistantEventHandler[None]):
+        pass
+
+    @patch("azure.ai.assistants.models._patch._parse_event")
+    def test_tool_calls(self, mock_parse_event: Mock):
+        # Test that when the event type and status are met, the function calls are submitted.
+        submit_tool_outputs = Mock()
+        handler = self.MyAssistantEventHandler()
+
+        handler.initialize(convert_to_byte_iterator("event\n\n"), submit_tool_outputs)
+
+        event_obj = ThreadRun({})
+        event_obj.status = "requires_action"
+        event_obj.required_action = SubmitToolOutputsAction({})
+        mock_parse_event.return_value = ("", event_obj)
+
+        for _ in handler:
+            handler.until_done()
+
+        assert mock_parse_event.call_count == 1
+        assert mock_parse_event.call_args[0][0] == "event"
+        assert submit_tool_outputs.call_count == 1
+        assert submit_tool_outputs.call_args[0] == (event_obj, handler)
+
+    @patch("azure.ai.assistants.models._patch.AssistantEventHandler.on_unhandled_event")
+    @pytest.mark.parametrize("event_type", [e.value for e in AssistantStreamEvent])
+    def test_parse_event(self, mock_on_unhandled_event: Mock, event_type: str):
+        # Make sure all the event types defined in AssistantStreamEvent are deserializable except Created, Done, and Error,
+        # and ensure that handling the event never raises.
+
+        handler = self.MyAssistantEventHandler()
+        event_data_str = f"event: {event_type}\ndata: {{}}"
+        _, event_obj, _ = handler._process_event(event_data_str)
+
+        if event_type in self.deserializable_events:
+            assert isinstance(event_obj, str)
+        else:
+            assert not isinstance(event_obj, str)
+
+        # The only event we are not handling today is CREATED, which is never sent by the backend.
+ if event_type == AssistantStreamEvent.THREAD_CREATED.value: + assert mock_on_unhandled_event.call_count == 1 + else: + assert mock_on_unhandled_event.call_count == 0 + + +class TestParseEvent: + + def test_parse_event_thread_run_created(self): + event_data_str = 'event: thread.run.created\ndata: {"id": "123"}' + event_type, event_obj = _parse_event(event_data_str) + assert event_type == AssistantStreamEvent.THREAD_RUN_CREATED.value + assert isinstance(event_obj, ThreadRun) + assert event_obj.id == "123" + + def test_parse_event_thread_run_step_created(self): + event_data_str = 'event: thread.run.step.created\ndata: {"id": "456"}' + event_type, event_obj = _parse_event(event_data_str) + assert event_type == AssistantStreamEvent.THREAD_RUN_STEP_CREATED.value + assert isinstance(event_obj, RunStep) + assert event_obj.id == "456" + + def test_parse_event_thread_message_created(self): + event_data_str = 'event: thread.message.created\ndata: {"id": "789"}' + event_type, event_obj = _parse_event(event_data_str) + assert event_type == AssistantStreamEvent.THREAD_MESSAGE_CREATED.value + assert isinstance(event_obj, ThreadMessage) + assert event_obj.id == "789" + + def test_parse_event_thread_message_delta(self): + event_data_str = 'event: thread.message.delta\ndata: {"id": "101"}' + event_type, event_obj = _parse_event(event_data_str) + assert event_type == AssistantStreamEvent.THREAD_MESSAGE_DELTA.value + assert isinstance(event_obj, MessageDeltaChunk) + assert event_obj.id == "101" + + def test_parse_event_thread_run_step_delta(self): + event_data_str = 'event: thread.run.step.delta\ndata: {"id": "202"}' + event_type, event_obj = _parse_event(event_data_str) + assert event_type == AssistantStreamEvent.THREAD_RUN_STEP_DELTA.value + assert isinstance(event_obj, RunStepDeltaChunk) + assert event_obj.id == "202" + + def test_parse_event_invalid_event_type(self): + event_data_str = 'event: invalid.event\ndata: {"id": "303"}' + event_type, event_obj = _parse_event(event_data_str) + assert event_type == "invalid.event" + assert event_obj == "{'id': '303'}" + + def test_parse_event_no_event_type(self): + event_data_str = 'data: {"id": "404"}' + with pytest.raises(ValueError): + _parse_event(event_data_str) + + def test_parse_event_invalid_json(self): + event_data_str = "event: thread.run.created\ndata: invalid_json" + event_type, event_obj = _parse_event(event_data_str) + assert event_type == AssistantStreamEvent.THREAD_RUN_CREATED.value + assert event_obj == "invalid_json" diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py new file mode 100644 index 000000000000..432af973680c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py @@ -0,0 +1,230 @@ +# pylint: disable=line-too-long,useless-suppression +from typing import AsyncIterator, List +from unittest.mock import AsyncMock, patch +import pytest +import os +from azure.ai.assistants.models import ( + AsyncAssistantEventHandler, + BaseAsyncAssistantEventHandler, + SubmitToolOutputsAction, + ThreadRun, +) +from azure.ai.assistants.models import AssistantStreamEvent + + +def read_file(file_name: str) -> str: + with open(os.path.join(os.path.dirname(__file__), "assets", f"{file_name}.txt"), "r") as file: + return file.read() + + +main_stream_response = read_file("main_stream_response") +fetch_current_datetime_and_weather_stream_response = read_file("fetch_current_datetime_and_weather_stream_response") +send_email_stream_response = 
read_file("send_email_stream_response")
+
+
+async def convert_to_byte_iterator(input: str) -> AsyncIterator[bytes]:
+    yield input.encode()
+
+
+async def async_bytes_iter(iterable: List[bytes]) -> AsyncIterator[bytes]:
+    for item in iterable:
+        yield item
+
+
+class TestBaseAsyncAssistantEventHandler:
+    class MyAssistantEventhHandler(BaseAsyncAssistantEventHandler[str]):
+        async def _process_event(self, event_data_str: str) -> str:
+            return event_data_str
+
+    async def break_main_stream_response(self, indices: List[int], response: str):
+        previous_index = 0
+        for index in indices:
+            yield response[previous_index:index].encode()
+            previous_index = index
+        yield response[previous_index:].encode()
+
+    async def mock_callable(self, _: ThreadRun, __: BaseAsyncAssistantEventHandler[str]) -> None:
+        pass
+
+    @pytest.mark.asyncio
+    async def test_event_handler_process_response_when_break_around_event_separators(self):
+        # Events are split into multiple chunks.
+        # Each chunk might contain more than one response, or an incomplete one.
+        # Test chunks that are broken around the event separators, which are "\n\n".
+        handler = self.MyAssistantEventhHandler()
+        new_line_indices = [i for i in range(len(main_stream_response)) if main_stream_response.startswith("\n\n", i)]
+
+        indices_around_new_lines = [i + offset for i, offset in zip(new_line_indices, [0, -1, 1, 2, 3, 4, 5])]
+        handler.initialize(
+            self.break_main_stream_response(indices_around_new_lines, main_stream_response), self.mock_callable
+        )
+        count = 0
+        all_event_str: List[str] = []
+        async for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+        assert count == main_stream_response.count("event:")
+        assert all_event_str[-1].startswith("event: done")
+
+    @pytest.mark.asyncio
+    async def test_event_handler_process_response_when_break_at_the_start(self):
+        handler = self.MyAssistantEventhHandler()
+
+        handler.initialize(
+            # indices around the newline characters, in the middle of an event, or at the end
+            self.break_main_stream_response([2], main_stream_response),
+            self.mock_callable,
+        )
+        count = 0
+        all_event_str: List[str] = []
+        async for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+        assert count == main_stream_response.count("event:")
+        assert all_event_str[-1].startswith("event: done")
+
+    @pytest.mark.asyncio
+    async def test_event_handler_process_response_when_break_at_the_end(self):
+        handler = self.MyAssistantEventhHandler()
+
+        response_len = len(main_stream_response)
+        indices_around_new_lines = list(range(response_len - 5, response_len + 1))
+
+        handler.initialize(
+            # indices around the newline characters, in the middle of an event, or at the end
+            self.break_main_stream_response(indices_around_new_lines, main_stream_response),
+            self.mock_callable,
+        )
+        count = 0
+        all_event_str: List[str] = []
+        async for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+        assert count == main_stream_response.count("event:")
+        assert all_event_str[-1].startswith("event: done")
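The async tests mirror the sync ones; the structural difference is that chunks arrive through an async iterator and are consumed with `async for`. A hedged async counterpart to the reassembly sketch shown for the sync tests, again not the SDK's `BaseAsyncAssistantEventHandler` implementation:

```python
# Async variant of the "\n\n"-delimited reassembly sketch; illustrative only.
import asyncio
from typing import AsyncIterator


async def aiter_events(chunks: AsyncIterator[bytes]) -> AsyncIterator[str]:
    buffer = b""
    async for chunk in chunks:
        buffer += chunk
        while b"\n\n" in buffer:
            event_bytes, buffer = buffer.split(b"\n\n", 1)
            yield event_bytes.decode("utf-8")


async def main() -> None:
    async def chunks() -> AsyncIterator[bytes]:
        # "\u5929" is UTF-8 bytes e5 a4 a9; the split below lands mid-character,
        # which is safe because decoding happens per complete event.
        for piece in (b"event: a\ndata: \xe5", b"\xa4\xa9\n\nevent: done\ndata: [DONE]\n\n"):
            yield piece

    events = [e async for e in aiter_events(chunks())]
    assert events == ["event: a\ndata: \u5929", "event: done\ndata: [DONE]"]


asyncio.run(main())
```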
+    @pytest.mark.asyncio
+    async def test_event_handler_chain_responses(self):
+        # Test that the event handler can process a second stream chained after the first one.
+        handler = self.MyAssistantEventhHandler()
+        handler.initialize(convert_to_byte_iterator(main_stream_response), self.mock_callable)
+        handler.initialize(
+            convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response), self.mock_callable
+        )
+        count = 0
+        all_event_str: List[str] = []
+        async for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+
+        assert count == main_stream_response.count("event:") + fetch_current_datetime_and_weather_stream_response.count(
+            "event:"
+        )
+        assert all_event_str[-1].startswith("event: done")
+
+    @pytest.mark.asyncio
+    async def test_event_handler_reusable(self):
+        # Test if the event handler can be reused after a stream is done.
+        handler = self.MyAssistantEventhHandler()
+        handler.initialize(convert_to_byte_iterator(main_stream_response), self.mock_callable)
+        count = 0
+        all_event_str: List[str] = []
+        async for event_str in handler:
+            assert event_str.startswith("event:")
+
+        handler.initialize(
+            convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response), self.mock_callable
+        )
+
+        async for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+
+        assert count == fetch_current_datetime_and_weather_stream_response.count("event:")
+        assert all_event_str[-1].startswith("event: done")
+
+    @pytest.mark.asyncio
+    async def test_event_handler_with_split_chinese_char(self):
+        response_bytes_split_chinese_char: List[bytes] = [
+            b'event: thread.message.delta\ndata: data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"\xe5',
+            b"\xa4",
+            b'\xa9"}}]}}\n\n',
+            b'event: thread.message.delta\ndata: data: {"id":"msg_02","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}}}\n\nevent: done\ndata: [DONE]\n\n',
+        ]
+
+        handler = self.MyAssistantEventhHandler()
+
+        handler.initialize(
+            # chunks deliberately split in the middle of a multi-byte character
+            async_bytes_iter(response_bytes_split_chinese_char),
+            self.mock_callable,
+        )
+        count = 0
+        all_event_str: List[str] = []
+        async for event_str in handler:
+            assert event_str.startswith("event:")
+            all_event_str.append(event_str)
+            count += 1
+        assert count == 3
+        assert all_event_str[-1].startswith("event: done")
+
+
+class TestAsyncAssistantEventHandler:
+
+    deserializable_events = [
+        AssistantStreamEvent.THREAD_CREATED.value,
+        AssistantStreamEvent.ERROR.value,
+        AssistantStreamEvent.DONE.value,
+    ]
+
+    class MyAssistantEventHandler(AsyncAssistantEventHandler[None]):
+        pass
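`test_tool_calls` below mocks `_parse_event` and checks the dispatch rule: a `ThreadRun` event with status `requires_action` carrying a `SubmitToolOutputsAction` must be handed to the submit callback. A condensed sketch of that rule, using the same model types as the test doubles; this is not the SDK's dispatch code:

```python
# Sketch of the dispatch rule under test: only a ThreadRun that requires
# action with a SubmitToolOutputsAction triggers the submit callback.
from unittest.mock import Mock

from azure.ai.assistants.models import SubmitToolOutputsAction, ThreadRun


def maybe_submit_tool_outputs(event_obj, handler, submit) -> None:
    if (
        isinstance(event_obj, ThreadRun)
        and event_obj.status == "requires_action"
        and isinstance(event_obj.required_action, SubmitToolOutputsAction)
    ):
        submit(event_obj, handler)


run = ThreadRun({})
run.status = "requires_action"
run.required_action = SubmitToolOutputsAction({})

submit = Mock()
maybe_submit_tool_outputs(run, handler=None, submit=submit)
assert submit.call_count == 1
```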
+    @pytest.mark.asyncio
+    @patch("azure.ai.assistants.models._patch._parse_event")
+    async def test_tool_calls(self, mock_parse_event: AsyncMock):
+        # Test that when the event type and status are met, the function calls are submitted.
+        submit_tool_outputs = AsyncMock()
+        handler = self.MyAssistantEventHandler()
+
+        handler.initialize(convert_to_byte_iterator("event\n\n"), submit_tool_outputs)
+
+        event_obj = ThreadRun({})
+        event_obj.status = "requires_action"
+        event_obj.required_action = SubmitToolOutputsAction({})
+        mock_parse_event.return_value = ("", event_obj)
+
+        async for _ in handler:
+            await handler.until_done()
+
+        assert mock_parse_event.call_count == 1
+        assert mock_parse_event.call_args[0][0] == "event"
+        assert submit_tool_outputs.call_count == 1
+        assert submit_tool_outputs.call_args[0] == (event_obj, handler)
+
+    @pytest.mark.asyncio
+    @patch("azure.ai.assistants.models._patch.AsyncAssistantEventHandler.on_unhandled_event")
+    @pytest.mark.parametrize("event_type", [e.value for e in AssistantStreamEvent])
+    async def test_parse_event(self, mock_on_unhandled_event: AsyncMock, event_type: str):
+        # Make sure all the event types defined in AssistantStreamEvent are deserializable except Created, Done, and Error,
+        # and ensure that handling the event never raises.
+
+        handler = self.MyAssistantEventHandler()
+        event_data_str = f"event: {event_type}\ndata: {{}}"
+        _, event_obj, _ = await handler._process_event(event_data_str)
+
+        if event_type in self.deserializable_events:
+            assert isinstance(event_obj, str)
+        else:
+            assert not isinstance(event_obj, str)
+
+        # The only event we are not handling today is CREATED, which is never sent by the backend.
+        if event_type == AssistantStreamEvent.THREAD_CREATED.value:
+            assert mock_on_unhandled_event.call_count == 1
+        else:
+            assert mock_on_unhandled_event.call_count == 0
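The recorded client tests that follow repeat one happy path many times: build an `AssistantsClient` from the project endpoint, create an assistant and a thread, post a message, and clean up. A condensed sketch of that flow; the environment variable name and `DefaultAzureCredential` are assumptions standing in for the `devtools_testutils` preparer and credential helpers the tests actually use:

```python
# Condensed happy path mirrored by the tests below.
import os

from azure.ai.assistants import AssistantsClient
from azure.identity import DefaultAzureCredential

with AssistantsClient(
    endpoint=os.environ["AZURE_AI_ASSISTANTS_TESTS_PROJECT_ENDPOINT"],  # assumed env var name
    credential=DefaultAzureCredential(),
) as client:
    assistant = client.create_assistant(
        model="gpt-4o", name="my-assistant", instructions="You are helpful assistant"
    )
    thread = client.create_thread()
    message = client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
    print("Created message, message ID", message.id)

    client.delete_thread(thread.id)
    client.delete_assistant(assistant.id)
```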
diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py
new file mode 100644
index 000000000000..4b7d0bd340a5
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py
@@ -0,0 +1,3295 @@
+# pylint: disable=too-many-lines,line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+# cSpell:disable
+from typing import Any, Optional
+
+import os
+import datetime
+import json
+import logging
+import tempfile
+import sys
+import time
+import pytest
+import functools
+import io
+import user_functions
+
+from azure.ai.assistants import AssistantsClient
+from azure.core.exceptions import HttpResponseError
+from devtools_testutils import (
+    AzureRecordedTestCase,
+    EnvironmentVariableLoader,
+    recorded_by_proxy,
+)
+from azure.ai.assistants.models import (
+    AssistantEventHandler,
+    AssistantStreamEvent,
+    AssistantThread,
+    AzureAISearchTool,
+    AzureFunctionStorageQueue,
+    AzureFunctionTool,
+    CodeInterpreterTool,
+    CodeInterpreterToolResource,
+    FilePurpose,
+    FileSearchTool,
+    FileSearchToolCallContent,
+    FileSearchToolResource,
+    FunctionTool,
+    MessageAttachment,
+    MessageDeltaChunk,
+    MessageTextContent,
+    MessageRole,
+    OpenAIFile,
+    ResponseFormatJsonSchema,
+    ResponseFormatJsonSchemaType,
+    RunAdditionalFieldList,
+    RunStepDeltaChunk,
+    RunStepDeltaToolCallObject,
+    RunStepFileSearchToolCall,
+    RunStepFileSearchToolCallResult,
+    RunStepFileSearchToolCallResults,
+    RunStatus,
+    RunStep,
+    ThreadMessage,
+    ThreadMessageOptions,
+    ThreadRun,
+    ToolResources,
+    ToolSet,
+    VectorStore,
+    VectorStoreConfigurations,
+    VectorStoreConfiguration,
+    VectorStoreDataSource,
+    VectorStoreDataSourceAssetType,
+)
+
+
+# Set to True to enable SDK logging
+LOGGING_ENABLED = True
+
+if LOGGING_ENABLED:
+    # Create a logger for the 'azure' SDK
+    # See https://docs.python.org/3/library/logging.html
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
+
+    # Configure a console output
+    handler = logging.StreamHandler(stream=sys.stdout)
+    logger.addHandler(handler)
+
+
+assistantClientPreparer = functools.partial(
+    EnvironmentVariableLoader,
+    "azure_ai_assistants",
+    # TODO: uncomment this endpoint when re-running with 1DP
+    #azure_ai_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name",
+    # TODO: remove this endpoint when re-running with 1DP
+    azure_ai_assistants_tests_project_endpoint="https://Sanitized.api.azureml.ms/agents/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/",
+    azure_ai_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md",
+    azure_ai_assistants_tests_storage_queue="https://foobar.queue.core.windows.net",
+    azure_ai_assistants_tests_search_index_name="sample_index",
+    azure_ai_assistants_tests_search_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/someindex",
+)
+
+
+# create tool for assistant use
+def fetch_current_datetime_live():
+    """
+    Get the current time as a JSON string.
+
+    :return: The current time as a JSON string.
+    :rtype: str
+    """
+    current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    time_json = json.dumps({"current_time": current_datetime})
+    return time_json
+
+
+# create tool for assistant use
+def fetch_current_datetime_recordings():
+    """
+    Get the current time as a JSON string.
+
+    :return: Static time string so that test recordings work.
+ :rtype: str + """ + time_json = json.dumps({"current_time": "2024-10-10 12:30:19"}) + return time_json + + +# Statically defined user functions for fast reference +user_functions_recording = {fetch_current_datetime_recordings} +user_functions_live = {fetch_current_datetime_live} + + +# The test class name needs to start with "Test" to get collected by pytest +class TestAssistantClient(AzureRecordedTestCase): + + # helper function: create client using environment variables + def create_client(self, **kwargs): + # fetch environment variables + endpoint = kwargs.pop("azure_ai_assistants_tests_project_endpoint") + credential = self.get_credential(AssistantsClient, is_async=False) + + # create and return client + client = AssistantsClient( + endpoint=endpoint, + credential=credential, + ) + + return client + + def _get_data_file(self) -> str: + """Return the test file name.""" + return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") + + # ********************************************************************************** + # + # HAPPY PATH SERVICE TESTS - assistant APIs + # + # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_client(self, **kwargs): + """test client creation""" + + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + + # close client + client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_delete_assistant(self, **kwargs): + """test assistant creation and deletion""" + # create client + # client = self.create_client(**kwargs) + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + print("Created client") + self._do_test_create_assistant(client=client, body=None, functions=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_body(self, **kwargs): + """test assistant creation with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + print("Created client") + + # create body for assistant and call helper function + body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} + self._do_test_create_assistant(client=client, body=body, functions=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_iobytes(self, **kwargs): + """test assistant creation with body: IO[bytes]""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + print("Created client") + + # create body for assistant and call helper function + body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} + binary_body = json.dumps(body).encode("utf-8") + self._do_test_create_assistant(client=client, body=io.BytesIO(binary_body), functions=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_tools(self, **kwargs): + """test assistant creation with tools""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # initialize assistant functions + functions = FunctionTool(functions=user_functions_recording) + self._do_test_create_assistant(client=client, body=None, functions=functions) + + @assistantClientPreparer() + @recorded_by_proxy + def 
test_create_assistant_with_tools_and_resources(self, **kwargs): + """test assistant creation with tools and resources""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # initialize assistant functions + functions = FunctionTool(functions=user_functions_recording) + self._do_test_create_assistant(client=client, body=None, functions=functions) + + def _do_test_create_assistant(self, client, body, functions): + """helper function for creating assistant with different body inputs""" + + # create assistant + if body: + assistant = client.create_assistant(body=body) + elif functions: + assistant = client.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=functions.definitions, + ) + assert assistant.tools + assert assistant.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + else: + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + assert assistant.name == "my-assistant" + assert assistant.model == "gpt-4o" + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_assistant(self, **kwargs): + """test assistant update without body""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_assistant(client=client, use_body=False, use_io=False) + + @assistantClientPreparer() + @pytest.mark.skip("Update assistant with body is failing") + @recorded_by_proxy + def test_update_assistant_with_body(self, **kwargs): + """test assistant update with body: JSON""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_assistant(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @pytest.mark.skip("Update assistant with body is failing") + @recorded_by_proxy + def test_update_assistant_with_iobytes(self, **kwargs): + """test assistant update with body: IO[bytes]""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_assistant(client=client, use_body=True, use_io=True) + + def _do_test_update_assistant(self, client, use_body, use_io): + """helper function for updating assistant with different body inputs""" + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + + # update assistant + if use_body: + body = {"assistant_id": assistant.id, "name": "my-assistant2"} + if use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + assistant = client.update_assistant(assistant_id=assistant.id, body=body) + else: + assistant = client.update_assistant(assistant_id=assistant.id, name="my-assistant2") + assert assistant.name + assert assistant.name == "my-assistant2" + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @pytest.mark.skip("Does not perform consistently on a shared resource") + @recorded_by_proxy + def 
test_assistant_list(self, **kwargs): + """test list assistants""" + # create client and ensure there are no previous assistants + with self.create_client(**kwargs) as client: + list_length = client.list_assistants().data.__len__() + + # create assistant and check that it appears in the list + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant.id + + # create second assistant and check that it appears in the list + assistant2 = client.create_assistant( + model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant" + ) + assert client.list_assistants().data.__len__() == list_length + 2 + assert ( + client.list_assistants().data[0].id == assistant.id + or client.list_assistants().data[1].id == assistant.id + ) + + # delete assistants and check list + client.delete_assistant(assistant.id) + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant2.id + + client.delete_assistant(assistant2.id) + assert client.list_assistants().data.__len__() == list_length + print("Deleted assistants") + + # ********************************************************************************** + # + # HAPPY PATH SERVICE TESTS - Thread APIs + # + # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread(self, **kwargs): + """test creating thread""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_with_metadata(self, **kwargs): + """test creating thread with no body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + self._do_test_create_thread(client=client, body=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_with_body(self, **kwargs): + """test creating thread with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for thread and call helper function + body = { + "metadata": {"key1": "value1", "key2": "value2"}, + } + self._do_test_create_thread(client=client, body=body) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_with_iobytes(self, **kwargs): + """test creating thread with body: IO[bytes]""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for thread and call helper function + body = { + "metadata": {"key1": "value1", "key2": "value2"}, + } + binary_body = json.dumps(body).encode("utf-8") + self._do_test_create_thread(client=client, body=io.BytesIO(binary_body)) + + def _do_test_create_thread(self, 
client, body): + """helper function for creating thread with different body inputs""" + # create thread + if body: + thread = client.create_thread(body=body) + else: + thread = client.create_thread(metadata={"key1": "value1", "key2": "value2"}) + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + @assistantClientPreparer() + @recorded_by_proxy + def test_get_thread(self, **kwargs): + """test getting thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # get thread + thread2 = client.get_thread(thread.id) + assert thread2.id + assert thread.id == thread2.id + print("Got thread, thread ID", thread2.id) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_thread(self, **kwargs): + """test updating thread without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # update thread + thread = client.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_thread_with_metadata(self, **kwargs): + """test updating thread without body""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # set metadata + metadata = {"key1": "value1", "key2": "value2"} + + # create thread + thread = client.create_thread(metadata=metadata) + assert thread.id + print("Created thread, thread ID", thread.id) + + # set metadata + metadata2 = {"key1": "value1", "key2": "newvalue2"} + + # update thread + thread = client.update_thread(thread.id, metadata=metadata2) + assert thread.metadata == {"key1": "value1", "key2": "newvalue2"} + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_thread_with_body(self, **kwargs): + """test updating thread with body: JSON""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # set body and run test + body = {"metadata": {"key1": "value1", "key2": "value2"}} + self._do_test_update_thread(client=client, body=body) + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_thread_with_iobytes(self, **kwargs): + """test updating thread with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # set body and run test + body = 
{"metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + io_body = io.BytesIO(binary_body) + self._do_test_update_thread(client=client, body=io_body) + + def _do_test_update_thread(self, client, body): + """helper function for updating thread with different body inputs""" + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # update thread + if body: + thread = client.update_thread(thread.id, body=body) + else: + metadata = {"key1": "value1", "key2": "value2"} + thread = client.update_thread(thread.id, metadata=metadata) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + @assistantClientPreparer() + @recorded_by_proxy + def test_delete_thread(self, **kwargs): + """test deleting thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete thread + deletion_status = client.delete_thread(thread.id) + assert deletion_status.id == thread.id + assert deletion_status.deleted == True + print("Deleted thread, thread ID", deletion_status.id) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Message APIs + # # + # # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_message(self, **kwargs): + """test creating message in a thread without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_message(client=client, body=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_message_with_body(self, **kwargs): + """test creating message in a thread with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for message and call helper function + body = {"role": "user", "content": "Hello, tell me a joke"} + self._do_test_create_message(client=client, body=body) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_message_with_iobytes(self, **kwargs): + """test creating message in a thread with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for message and call helper function + body = {"role": "user", "content": "Hello, tell me a joke"} + binary_body = json.dumps(body).encode("utf-8") + self._do_test_create_message(client=client, body=io.BytesIO(binary_body)) + + def _do_test_create_message(self, client, body): + """helper function for creating message with different body inputs""" + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + if body: + message = client.create_message(thread_id=thread.id, body=body) + else: + message = 
client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_multiple_messages(self, **kwargs): + """test creating multiple messages in a thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create messages + message = client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + message2 = client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) + assert message2.id + print("Created message, message ID", message2.id) + message3 = client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) + assert message3.id + print("Created message, message ID", message3.id) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_list_messages(self, **kwargs): + """test listing messages in a thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check that initial message list is empty + messages0 = client.list_messages(thread_id=thread.id) + print(messages0.data) + assert messages0.data.__len__() == 0 + + # create messages and check message list for each one + message1 = client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message1.id + print("Created message, message ID", message1.id) + messages1 = client.list_messages(thread_id=thread.id) + assert messages1.data.__len__() == 1 + assert messages1.data[0].id == message1.id + + message2 = client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) + assert message2.id + print("Created message, message ID", message2.id) + messages2 = client.list_messages(thread_id=thread.id) + assert messages2.data.__len__() == 2 + assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + + message3 = client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) + assert message3.id + print("Created message, message ID", message3.id) + messages3 = client.list_messages(thread_id=thread.id) + assert messages3.data.__len__() == 3 + assert ( + messages3.data[0].id == message3.id + or messages3.data[1].id == message2.id + or messages3.data[2].id == message2.id + ) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def 
test_get_message(self, **kwargs): + """test getting message in a thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # get message + message2 = client.get_message(thread_id=thread.id, message_id=message.id) + assert message2.id + assert message.id == message2.id + print("Got message, message ID", message.id) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_message(self, **kwargs): + """test updating message in a thread without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_message(client=client, body=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_message_with_body(self, **kwargs): + """test updating message in a thread with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for message and call helper function + body = {"metadata": {"key1": "value1", "key2": "value2"}} + self._do_test_update_message(client=client, body=body) + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_message_with_iobytes(self, **kwargs): + """test updating message in a thread with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for message and call helper function + body = {"metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + self._do_test_update_message(client=client, body=io.BytesIO(binary_body)) + + def _do_test_update_message(self, client, body): + """helper function for updating message with different body inputs""" + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # update message + if body: + message = client.update_message(thread_id=thread.id, message_id=message.id, body=body) + else: + message = client.update_message( + thread_id=thread.id, message_id=message.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert message.metadata == {"key1": "value1", "key2": "value2"} + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Run APIs + # # + # # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_run(self, **kwargs): + """test creating run""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # 
create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_run_with_metadata(self, **kwargs): + """test creating run without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_run(client=client, use_body=False, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_run_with_body(self, **kwargs): + """test creating run with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_run(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_run_with_iobytes(self, **kwargs): + """test creating run with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_run(client=client, use_body=True, use_io=True) + + def _do_test_create_run(self, client, use_body, use_io=False): + """helper function for creating run with different body inputs""" + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + if use_body: + body = {"assistant_id": assistant.id, "metadata": {"key1": "value1", "key2": "value2"}} + if use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + run = client.create_run(thread_id=thread.id, body=body) + else: + run = client.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_get_run(self, **kwargs): + """test getting run""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # get run + run2 = client.get_run(thread_id=thread.id, run_id=run.id) + assert run2.id + assert run.id == run2.id + print("Got run, run 
ID", run2.id) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_run_status(self, **kwargs): + """test run status""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.get_run(thread_id=thread.id, run_id=run.id) + print("Run status:", run.status) + + assert run.status in ["cancelled", "failed", "completed", "expired"] + print("Run completed with status:", run.status) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_run(self, **kwargs): + """test updating run without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # update run + while run.status in ["queued", "in_progress"]: + # wait for a second + time.sleep(1) + run = client.get_run(thread_id=thread.id, run_id=run.id) + run = client.update_run( + thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.metadata == {"key1": "value1", "key2": "value2"} + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_run_with_metadata(self, **kwargs): + """test updating run without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_run(client=client, body=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_run_with_body(self, **kwargs): + """test updating run with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for run and call helper function + body = {"metadata": {"key1": "value1", "key2": "newvalue2"}} + self._do_test_update_run(client=client, body=body) + 
+ @assistantClientPreparer() + @recorded_by_proxy + def test_update_run_with_iobytes(self, **kwargs): + """test updating run with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for run and call helper function + body = {"metadata": {"key1": "value1", "key2": "newvalue2"}} + binary_body = json.dumps(body).encode("utf-8") + self._do_test_update_run(client=client, body=io.BytesIO(binary_body)) + + def _do_test_update_run(self, client, body): + """helper function for updating run with different body inputs""" + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # update run + while run.status in ["queued", "in_progress"]: + time.sleep(5) + run = client.get_run(thread_id=thread.id, run_id=run.id) + if body: + run = client.update_run(thread_id=thread.id, run_id=run.id, body=body) + else: + run = client.update_run( + thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "newvalue2"} + ) + assert run.metadata == {"key1": "value1", "key2": "newvalue2"} + + # delete assistant + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_run(self, **kwargs): + """test submitting tool outputs to run without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_run(client=client, use_body=False, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_run_with_body(self, **kwargs): + """test submitting tool outputs to run with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_run(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): + """test submitting tool outputs to run with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_run(client=client, use_body=True, use_io=True) + + def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): + """helper function for submitting tool outputs to run with different body inputs""" + + # Initialize assistant tools + functions = FunctionTool(user_functions_recording) + # code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + # toolset.add(code_interpreter) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created 
thread, thread ID", thread.id) + + # create message + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = client.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + if use_body: + body = {"tool_outputs": tool_outputs} + if use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + client.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, body=body) + else: + client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + print("Messages: ") + messages = client.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + # if user_functions_live is used, the time will be the current time + # since user_functions_recording is used, the time will be 12:30 + assert "12:30" in tool_message + print("Used tool_outputs") + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_create_parallel_tool_thread_true(self, **kwargs): + """Test creation of parallel runs.""" + self._do_test_create_parallel_thread_runs(True, True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_create_parallel_tool_thread_false(self, **kwargs): + """Test creation of parallel runs.""" + self._do_test_create_parallel_thread_runs(False, True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_create_parallel_tool_run_true(self, **kwargs): + """Test creation of parallel runs.""" + self._do_test_create_parallel_thread_runs(True, False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_create_parallel_tool_run_false(self, **kwargs): + """Test creation of parallel runs.""" + self._do_test_create_parallel_thread_runs(False, False, **kwargs) + + def _wait_for_run(self, client, run, timeout=1): + """Wait while run will get to 
terminal state.""" + while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS, RunStatus.REQUIRES_ACTION]: + time.sleep(timeout) + run = client.get_run(thread_id=run.thread_id, run_id=run.id) + return run + + def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_run, **kwargs): + """Test creation of parallel runs.""" + + # create client + client = self.create_client( + **kwargs, + ) + assert isinstance(client, AssistantsClient) + + # Initialize assistant tools + functions = FunctionTool(functions=user_functions_recording) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + assistant = client.create_assistant( + model="gpt-4", + name="my-assistant", + instructions="You are helpful assistant", + toolset=toolset, + ) + assert assistant.id + + message = ThreadMessageOptions( + role="user", + content="Hello, what time is it?", + ) + + if create_thread_run: + run = client.create_thread_and_run( + assistant_id=assistant.id, + parallel_tool_calls=use_parallel_runs, + ) + run = self._wait_for_run(client, run) + else: + thread = client.create_thread(messages=[message]) + assert thread.id + + run = client.create_and_process_run( + thread_id=thread.id, + assistant_id=assistant.id, + parallel_tool_calls=use_parallel_runs, + ) + assert run.id + assert run.status == RunStatus.COMPLETED, run.last_error.message + assert run.parallel_tool_calls == use_parallel_runs + + assert client.delete_assistant(assistant.id).deleted, "The assistant was not deleted" + messages = client.list_messages(thread_id=run.thread_id) + assert len(messages.data), "The data from the assistant was not received." + + """ + # DISABLED: rewrite to ensure run is not complete when cancel_run is called + @assistantClientPreparer() + @recorded_by_proxy + def test_cancel_run(self, **kwargs): + '''test cancelling run''' + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check status and cancel + assert run.status in ["queued", "in_progress", "requires_action"] + client.cancel_run(thread_id=thread.id, run_id=run.id) + + while run.status in ["queued", "cancelling"]: + time.sleep(1) + run = client.get_run(thread_id=thread.id, run_id=run.id) + print("Current run status:", run.status) + assert run.status == "cancelled" + print("Run cancelled") + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + client.close() + """ + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_and_run(self, **kwargs): + """Test creating thread and run""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_thread_and_run(client=client, use_body=False, use_io=False) + + 
@assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_and_run_with_body(self, **kwargs): + """Test creating thread and run with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_thread_and_run(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_and_run_with_iobytes(self, **kwargs): + """Test creating thread and run with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_thread_and_run(client=client, use_body=True, use_io=True) + + def _do_test_create_thread_and_run(self, client, use_body, use_io): + """helper function for creating thread and run with different body inputs""" + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create run + if use_body: + body = { + "assistant_id": assistant.id, + "metadata": {"key1": "value1", "key2": "value2"}, + } + if use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + run = client.create_thread_and_run(body=body) + assert run.metadata == {"key1": "value1", "key2": "value2"} + else: + run = client.create_thread_and_run(assistant_id=assistant.id) + + # create thread and run + assert run.id + assert run.thread_id + print("Created run, run ID", run.id) + + # get thread + thread = client.get_thread(run.thread_id) + assert thread.id + print("Created thread, thread ID", thread.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.get_run(thread_id=thread.id, run_id=run.id) + # assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @pytest.mark.skip("Working on recordings") + @recorded_by_proxy + def test_list_run_step(self, **kwargs): + """Test listing run steps.""" + + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + steps = client.list_run_steps(thread_id=thread.id, run_id=run.id) + # commenting assertion out below, do we know exactly when run starts? 
+ # assert steps['data'].__len__() == 0 + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.get_run(thread_id=thread.id, run_id=run.id) + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] + print("Run status:", run.status) + if run.status != "queued": + steps = client.list_run_steps(thread_id=thread.id, run_id=run.id) + print("Steps:", steps) + assert steps["data"].__len__() > 0 + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_get_run_step(self, **kwargs): + """Test getting run step.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" + ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + if run.status == "failed": + assert run.last_error + print(run.last_error) + print("FAILED HERE") + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.get_run(thread_id=thread.id, run_id=run.id) + if run.status == "failed": + assert run.last_error + print(run.last_error) + print("FAILED HERE") + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] + print("Run status:", run.status) + + # list steps, check that get_run_step works with first step_id + steps = client.list_run_steps(thread_id=thread.id, run_id=run.id) + assert steps["data"].__len__() > 0 + step = steps["data"][0] + get_step = client.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + assert step == get_step + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Streaming APIs + # # + # # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_stream(self, **kwargs): + """Test creating stream.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", 
thread.id) + + # create message + message = client.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" + ) + assert message.id + print("Created message, message ID", message.id) + + # create stream + with client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + for event_type, event_data, _ in stream: + assert ( + isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) + or event_type == AssistantStreamEvent.DONE + ) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + # TODO create_stream doesn't work with body -- fails on for event_type, event_data : TypeError: 'ThreadRun' object is not an iterator + @assistantClientPreparer() + @recorded_by_proxy + def test_create_stream_with_body(self, **kwargs): + """Test creating stream with body.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" + ) + assert message.id + print("Created message, message ID", message.id) + + # create body for stream + body = {"assistant_id": assistant.id, "stream": True} + + # create stream + with client.create_stream(thread_id=thread.id, body=body, stream=True) as stream: + + for event_type, event_data, _ in stream: + print("event type: event data") + print(event_type, event_data) + assert ( + isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) + or event_type == AssistantStreamEvent.DONE + ) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_stream_with_iobytes(self, **kwargs): + """Test creating stream with body: IO[bytes].""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create body for stream + body = {"assistant_id": assistant.id, "stream": True} + binary_body = json.dumps(body).encode("utf-8") + + # create stream + with client.create_stream( + thread_id=thread.id, body=io.BytesIO(binary_body), stream=True + ) as stream: + for event_type, event_data, _ in stream: + assert ( + isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) + or event_type == AssistantStreamEvent.DONE + ) + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_stream(self, **kwargs): + """Test submitting tool outputs to stream.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_stream(client=client, use_body=False, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_stream_with_body(self, **kwargs): + """Test submitting tool outputs to stream with body: JSON.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_stream(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_stream_with_iobytes(self, **kwargs): + """Test submitting tool outputs to stream with body: IO[bytes].""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_stream(client=client, use_body=True, use_io=True) + + def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): + """helper function for submitting tool outputs to stream with different body inputs""" + + # Initialize assistant tools + functions = FunctionTool(functions=user_functions_recording) + + toolset = ToolSet() + toolset.add(functions) + # toolset.add(code_interpreter) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=functions.definitions, + tool_resources=functions.resources, + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create stream + with client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + for event_type, event_data, _ in stream: + + # Check if tools are needed + if ( + event_type == AssistantStreamEvent.THREAD_RUN_REQUIRES_ACTION + and event_data.required_action.submit_tool_outputs + ): + print("Requires action: submit tool outputs") + tool_calls = event_data.required_action.submit_tool_outputs.tool_calls + + if not tool_calls: + print("No tool calls provided - cancelling run") + client.cancel_run(thread_id=thread.id, run_id=event_data.id) + break + + # submit tool outputs to stream + tool_outputs = toolset.execute_tool_calls(tool_calls) + + tool_event_handler = AssistantEventHandler() + if tool_outputs: + if use_body: + body = {"tool_outputs": tool_outputs, "stream": True} + if 
use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + client.submit_tool_outputs_to_stream( + thread_id=thread.id, + run_id=event_data.id, + body=body, + event_handler=tool_event_handler, + stream=True, + ) + else: + client.submit_tool_outputs_to_stream( + thread_id=thread.id, + run_id=event_data.id, + tool_outputs=tool_outputs, + event_handler=tool_event_handler, + ) + for tool_event_type, tool_event_data, _ in tool_event_handler: + assert ( + isinstance(tool_event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) + or tool_event_type == AssistantStreamEvent.DONE + ) + + print("Submitted tool outputs to stream") + + print("Stream processing completed") + + # check that messages used the tool + messages = client.list_messages(thread_id=thread.id) + print("Messages: ", messages) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + # TODO if testing live, uncomment these + # hour12 = time.strftime("%H") + # hour24 = time.strftime("%I") + # minute = time.strftime("%M") + # hour12string = str(hour12)+":"+str(minute) + # hour24string = str(hour24)+":"+str(minute) + # assert hour12string in tool_message or hour24string in tool_message + recorded_time = "12:30" + assert recorded_time in tool_message + print("Used tool_outputs") + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + # client.close() + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - User function APIs + # # + # # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_string_input(self, **kwargs): + """Test submitting tool outputs to run with function input being a single string.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.fetch_weather}, + content="Hello, what is the weather in New York?", + expected_values=["sunny", "25"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_multiple_strings(self, **kwargs): + """Test submitting tool outputs to run with function input being multiple strings.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.send_email}, + content="Hello, can you send an email to my manager (manager@microsoft.com) with the subject 'thanksgiving' asking when he is OOF?", + possible_values=["email has been sent", "email has been successfully sent"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_integers(self, **kwargs): + """Test submitting tool outputs to run with function input being multiple integers.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.calculate_sum}, + content="Hello, what is 293 + 243?", + expected_values=["536"], + ) + + @assistantClientPreparer() + 
@recorded_by_proxy
+    def test_tools_with_integer(self, **kwargs):
+        """Test submitting tool outputs to run with function input being a single integer."""
+        # create client
+        with self.create_client(**kwargs) as client:
+            assert isinstance(client, AssistantsClient)
+
+            # run test with function input, content, and expected/possible values
+            self._test_tools_with_different_functions(
+                client=client,
+                function={user_functions.convert_temperature},
+                content="Hello, what is 32 degrees Celsius in Fahrenheit?",
+                expected_values=["89.6"],
+            )
+
+    @assistantClientPreparer()
+    @recorded_by_proxy
+    def test_tools_with_multiple_dicts(self, **kwargs):
+        """Test submitting tool outputs to run with function input being multiple dictionaries."""
+        # create client
+        with self.create_client(**kwargs) as client:
+            assert isinstance(client, AssistantsClient)
+
+            # run test with function input, content, and expected/possible values
+            self._test_tools_with_different_functions(
+                client=client,
+                function={user_functions.merge_dicts},
+                content="If I have a dictionary with the key 'name' and value 'John' and another dictionary with the key 'age' and value '25', what is the merged dictionary?",
+                possible_values=[
+                    "{'name': 'john', 'age': '25'}",
+                    "{'age': '25', 'name': 'john'}",
+                    '{"name": "john", "age": "25"}',
+                    '{"age": "25", "name": "john"}',
+                    "{'name': 'john', 'age': 25}",
+                    "{'age': 25, 'name': 'john'}",
+                    '"name": "john",\n "age": 25',
+                    '"name": "john",\n "age": "25"',
+                ],
+            )
+
+    @assistantClientPreparer()
+    @recorded_by_proxy
+    def test_tools_with_input_string_output_dict(self, **kwargs):
+        """Test submitting tool outputs to run with function input being one string and output being a dictionary."""
+        # create client
+        with self.create_client(**kwargs) as client:
+            assert isinstance(client, AssistantsClient)
+
+            # run test with function input, content, and expected/possible values
+            self._test_tools_with_different_functions(
+                client=client,
+                function={user_functions.get_user_info},
+                content="What is the name and email of the first user in our database?",
+                expected_values=["alice", "alice@example.com"],
+            )
+
+    @assistantClientPreparer()
+    @recorded_by_proxy
+    def test_tools_with_list(self, **kwargs):
+        """Test submitting tool outputs to run with function input being a list."""
+        # create client
+        with self.create_client(**kwargs) as client:
+            assert isinstance(client, AssistantsClient)
+
+            # run test with function input, content, and expected/possible values
+            self._test_tools_with_different_functions(
+                client=client,
+                function={user_functions.longest_word_in_sentences},
+                content="Hello, please give me the longest word in the following sentences: 'Hello, how are you?'
and 'I am good.'", + expected_values=["hello", "good"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_multiple_dicts2(self, **kwargs): + """Test submitting tool outputs to run with function input being multiple dictionaries.""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.process_records}, + content="Hello, please process the following records: [{'a': 10, 'b': 20}, {'x': 5, 'y': 15, 'z': 25}, {'m': 35}]", + expected_values=["30", "45", "35"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def _test_tools_with_different_functions( + self, client, function, content, expected_values=None, possible_values=None + ): + """Helper function to test submitting tool outputs to run with different function inputs.""" + # Initialize assistant tools + functions = FunctionTool(functions=function) + toolset = ToolSet() + toolset.add(functions) + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + toolset=toolset, + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message(thread_id=thread.id, role="user", content=content) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = client.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = client.list_messages(thread_id=thread.id, run_id=run.id) + print("Messages: ", messages) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + if expected_values: + for value in expected_values: + assert value in tool_message.lower() + if possible_values: + value_used = False + for value in possible_values: + if value in tool_message.lower(): + value_used = True + assert value_used + # assert expected_value in 
tool_message + print("Used tool_outputs") + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + + # # ********************************************************************************** + # # + # # NEGATIVE TESTS + # # + # # ********************************************************************************** + + ''' + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_invalid_code_interpreter_tool_resource(self, **kwargs): + """test assistant creation with invalid code interpreter tool resource.""" + # create client + with self.create_client(**kwargs) as client: + + # initialize resources + tool_resources = ToolResources() + tool_resources.code_interpreter = CodeInterpreterToolResource() + + exception_message = "" + try: + client.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=[], + tool_resources=tool_resources, + ) + except: + print("exception here") + # except ValueError as e: + # exception_message = e.args[0] + else: + print("no exception") + + assert ( + exception_message + == "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" + ) + + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_invalid_file_search_tool_resource(self, **kwargs): + """test assistant creation with invalid file search tool resource.""" + # create client + with self.create_client(**kwargs) as client: + + # initialize resources + tool_resources = ToolResources() + tool_resources.file_search = FileSearchToolResource() + + exception_message = "" + try: + client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", tools=[], tool_resources=tool_resources + ) + except: + print("exception here") + # except ValueError as e: + # exception_message = e.args[0] + else: + print("no exception") + + assert exception_message == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" + ''' + + @assistantClientPreparer() + @pytest.mark.skip("PASSES LIVE ONLY: recordings don't capture DNS lookup errors") + @recorded_by_proxy + def test_create_assistant_with_invalid_file_search_tool_resource(self, **kwargs): + """test assistant creation with invalid file search tool resource.""" + # create client + with self.create_client(**kwargs) as client: + + # initialize resources + tool_resources = ToolResources() + tool_resources.file_search = FileSearchToolResource() + + exception_message = "" + try: + client.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=[], + tool_resources=tool_resources, + ) + except ValueError as e: + exception_message = e.args[0] + + assert ( + exception_message + == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" + ) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_file_search_add_vector_store(self, **kwargs): + """Test the assistant with file search and vector store creation.""" + + # Create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + print("Created client") + + # Create file search tool + file_search = FileSearchTool() + + # Adjust the file path to be relative to the test file location + file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", 
"product_info_1.md") + openai_file = client.upload_file_and_poll(file_path=file_path, purpose="assistants") + print(f"Uploaded file, file ID: {openai_file.id}") + + openai_vectorstore = client.create_vector_store_and_poll( + file_ids=[openai_file.id], name="my_vectorstore" + ) + print(f"Created vector store, vector store ID: {openai_vectorstore.id}") + + file_search.add_vector_store(openai_vectorstore.id) + + toolset = ToolSet() + toolset.add(file_search) + print("Created toolset and added file search") + + # create assistant + assistant = client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # check assistant tools and vector store resources + assert assistant.tools + assert assistant.tools[0]["type"] == "file_search" + assert assistant.tool_resources + assert assistant.tool_resources["file_search"]["vector_store_ids"][0] == openai_vectorstore.id + + # delete assistant and close client + client.delete_assistant(assistant.id) + print("Deleted assistant") + client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store_and_poll(self, **kwargs): + """test create vector store and poll""" + # Create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + print("Created client") + + # Create vector store + body = {"name": "test_vector_store", "metadata": {"key1": "value1", "key2": "value2"}} + try: + vector_store = client.create_vector_store_and_poll(body=body, sleep_interval=2) + # check correct creation + assert isinstance(vector_store, VectorStore) + assert vector_store.name == "test_vector_store" + assert vector_store.id + assert vector_store.metadata == {"key1": "value1", "key2": "value2"} + assert vector_store.status == "completed" + print(f"Vector store created and polled successfully: {vector_store.id}") + + # throw error if failed to create and poll vector store + except HttpResponseError as e: + print(f"Failed to create and poll vector store: {e}") + raise + + # close client + client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store(self, **kwargs): + """Test the assistant with vector store creation.""" + # Create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + print("Created client") + + # Create vector store + body = {"name": "test_vector_store", "metadata": {"key1": "value1", "key2": "value2"}} + try: + vector_store = client.create_vector_store(body=body) + print("here") + print(vector_store) + # check correct creation + assert isinstance(vector_store, VectorStore) + assert vector_store.name == "test_vector_store" + assert vector_store.id + assert vector_store.metadata == {"key1": "value1", "key2": "value2"} + assert vector_store.status == "completed" + print(f"Vector store created and polled successfully: {vector_store.id}") + + # throw error if failed to create and poll vector store + except HttpResponseError as e: + print(f"Failed to create and poll vector store: {e}") + raise + + # close client + client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store_azure(self, **kwargs): + """Test the assistant with vector store creation.""" + self._do_test_create_vector_store(streaming=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def 
test_create_vector_store_file_id(self, **kwargs):
+        """Test vector store creation from an uploaded file ID."""
+        self._do_test_create_vector_store(file_path=self._get_data_file(), streaming=False, **kwargs)
+
+    @assistantClientPreparer()
+    @recorded_by_proxy
+    def test_create_vector_store_azure_streaming(self, **kwargs):
+        """Test vector store creation from an Azure asset, with streaming."""
+        self._do_test_create_vector_store(streaming=True, **kwargs)
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("File ID issues with sanitization.")
+    @recorded_by_proxy
+    def test_create_vector_store_file_id_streaming(self, **kwargs):
+        """Test vector store creation from an uploaded file ID, with streaming."""
+        self._do_test_create_vector_store(file_path=self._get_data_file(), streaming=True, **kwargs)
+
+    def _do_test_create_vector_store(self, streaming, **kwargs):
+        """Helper for the vector store creation tests."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AssistantsClient)
+
+        file_id = self._get_file_id_maybe(ai_client, **kwargs)
+        file_ids = [file_id] if file_id else None
+        if file_ids:
+            ds = None
+        else:
+            ds = [
+                VectorStoreDataSource(
+                    asset_identifier=kwargs["azure_ai_assistants_tests_data_path"],
+                    asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
+                )
+            ]
+        vector_store = ai_client.create_vector_store_and_poll(
+            file_ids=file_ids, data_sources=ds, name="my_vectorstore"
+        )
+        assert vector_store.id
+        self._test_file_search(ai_client, vector_store, file_id, streaming)
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("Not deployed in all regions.")
+    @recorded_by_proxy
+    def test_vector_store_threads_file_search_azure(self, **kwargs):
+        """Test file search when azure asset IDs are supplied during thread creation."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AssistantsClient)
+
+        ds = [
+            VectorStoreDataSource(
+                asset_identifier=kwargs["azure_ai_assistants_tests_data_path"],
+                asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
+            )
+        ]
+        fs = FileSearchToolResource(
+            vector_stores=[
+                VectorStoreConfigurations(
+                    store_name="my_vector_store",
+                    store_configuration=VectorStoreConfiguration(data_sources=ds),
+                )
+            ]
+        )
+        file_search = FileSearchTool()
+        assistant = ai_client.create_assistant(
+            model="gpt-4o",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+            tools=file_search.definitions,
+            tool_resources=file_search.resources,
+        )
+        assert assistant.id
+
+        thread = ai_client.create_thread(tool_resources=ToolResources(file_search=fs))
+        assert thread.id
+        # create message
+        message = ai_client.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?"
+        )
+        assert message.id, "The message was not created."
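+
+        # NOTE (editorial, not executed by the recorded tests): create_and_process_run,
+        # used below, creates the run and then polls it to a terminal state for us.
+        # It is roughly equivalent to the manual loop used elsewhere in this file:
+        #
+        #     run = ai_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
+        #     while run.status in ["queued", "in_progress", "requires_action"]:
+        #         time.sleep(1)
+        #         run = ai_client.get_run(thread_id=thread.id, run_id=run.id)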
+
+        run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        messages = ai_client.list_messages(thread.id)
+        assert len(messages)
+        ai_client.delete_assistant(assistant.id)
+        ai_client.close()
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("File ID issues with sanitization.")
+    @recorded_by_proxy
+    def test_create_vector_store_add_file_file_id(self, **kwargs):
+        """Test adding a single file to a vector store with a file ID."""
+        self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), streaming=False, **kwargs)
+
+    @assistantClientPreparer()
+    @recorded_by_proxy
+    def test_create_vector_store_add_file_azure(self, **kwargs):
+        """Test adding a single file to a vector store with an azure asset ID."""
+        self._do_test_create_vector_store_add_file(streaming=False, **kwargs)
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("File ID issues with sanitization.")
+    @recorded_by_proxy
+    def test_create_vector_store_add_file_file_id_streaming(self, **kwargs):
+        """Test adding a single file to a vector store with a file ID."""
+        self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), streaming=True, **kwargs)
+
+    @assistantClientPreparer()
+    @recorded_by_proxy
+    def test_create_vector_store_add_file_azure_streaming(self, **kwargs):
+        """Test adding a single file to a vector store with an azure asset ID."""
+        self._do_test_create_vector_store_add_file(streaming=True, **kwargs)
+
+    def _do_test_create_vector_store_add_file(self, streaming, **kwargs):
+        """Test adding a single file to a vector store."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AssistantsClient)
+
+        file_id = self._get_file_id_maybe(ai_client, **kwargs)
+        if file_id:
+            ds = None
+        else:
+            ds = VectorStoreDataSource(
+                asset_identifier=kwargs["azure_ai_assistants_tests_data_path"],
+                asset_type="uri_asset",
+            )
+        vector_store = ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
+        assert vector_store.id
+        vector_store_file = ai_client.create_vector_store_file(
+            vector_store_id=vector_store.id, data_source=ds, file_id=file_id
+        )
+        assert vector_store_file.id
+        self._test_file_search(ai_client, vector_store, file_id, streaming)
+        ai_client.close()
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("File ID issues with sanitization.")
+    @recorded_by_proxy
+    def test_create_vector_store_batch_file_ids(self, **kwargs):
+        """Test adding multiple files to a vector store with file IDs."""
+        self._do_test_create_vector_store_batch(streaming=False, file_path=self._get_data_file(), **kwargs)
+
+    @assistantClientPreparer()
+    @recorded_by_proxy
+    def test_create_vector_store_batch_azure(self, **kwargs):
+        """Test adding multiple files to a vector store with azure asset IDs."""
+        self._do_test_create_vector_store_batch(streaming=False, **kwargs)
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("File ID issues with sanitization.")
+    @recorded_by_proxy
+    def test_create_vector_store_batch_file_ids_streaming(self, **kwargs):
+        """Test adding multiple files to a vector store with file IDs."""
+        self._do_test_create_vector_store_batch(streaming=True, file_path=self._get_data_file(), **kwargs)
+
+    @assistantClientPreparer()
+    @recorded_by_proxy
+    def test_create_vector_store_batch_azure_streaming(self, **kwargs):
+        """Test adding multiple files to a vector store with azure asset IDs."""
+        self._do_test_create_vector_store_batch(streaming=True, **kwargs)
+
+    def
_do_test_create_vector_store_batch(self, streaming, **kwargs): + """Test adding multiple files to the vector store in a batch.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = self._get_file_id_maybe(ai_client, **kwargs) + if file_id: + file_ids = [file_id] + ds = None + else: + file_ids = None + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + assert vector_store.id + vector_store_file_batch = ai_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids + ) + assert vector_store_file_batch.id + self._test_file_search(ai_client, vector_store, file_id, streaming) + ai_client.close() + + def _test_file_search( + self, ai_client: AssistantsClient, vector_store: VectorStore, file_id: Optional[str], streaming: bool + ) -> None: + """Test the file search.""" + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + assistant = ai_client.create_assistant( + model="gpt-4", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + + thread = ai_client.create_thread() + assert thread.id + + # create message + message = ai_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + if streaming: + thread_run = None + with ai_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + for _, event_data, _ in stream: + if isinstance(event_data, ThreadRun): + thread_run = event_data + elif ( + isinstance(event_data, RunStepDeltaChunk) + and isinstance(event_data.delta.step_details, RunStepDeltaToolCallObject) + and event_data.delta.step_details.tool_calls + ): + assert isinstance( + event_data.delta.step_details.tool_calls[0].file_search, RunStepFileSearchToolCallResults + ) + assert thread_run is not None + run = ai_client.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) + assert run is not None + else: + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + + ai_client.delete_vector_store(vector_store.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = ai_client.list_messages(thread.id) + assert len(messages) + ai_client.delete_assistant(assistant.id) + self._remove_file_maybe(file_id, ai_client) + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_message_attachment_azure(self, **kwargs): + """Test message attachment with azure ID.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + self._do_test_message_attachment(data_source=ds, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_message_attachment_file_ids(self, **kwargs): + """Test message attachment with file ID.""" + self._do_test_message_attachment(file_path=self._get_data_file(), **kwargs) + + def _do_test_message_attachment(self, **kwargs): + """Test assistant with the
message attachment.""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = self._get_file_id_maybe(ai_client, **kwargs) + + # Create assistant with file search tool + assistant = ai_client.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + ) + assert assistant.id, "Assistant was not created" + + thread = ai_client.create_thread() + assert thread.id, "The thread was not created." + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. + attachment = MessageAttachment( + file_id=file_id, + data_source=kwargs.get("data_source"), + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], + ) + message = ai_client.create_message( + thread_id=thread.id, + role="user", + content="What does the attachment say?", + attachments=[attachment], + ) + assert message.id, "The message was not created." + + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." + self._remove_file_maybe(file_id, ai_client) + ai_client.delete_assistant(assistant.id) + + messages = ai_client.list_messages(thread_id=thread.id) + assert len(messages), "No messages were created" + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy + def test_create_assistant_with_interpreter_azure(self, **kwargs): + """Test Create assistant with code interpreter with azure asset ids.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_assistant_with_interpreter_file_ids(self, **kwargs): + """Test Create assistant with code interpreter with file IDs.""" + self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs) + + def _do_test_create_assistant_with_interpreter(self, **kwargs): + """Test create assistant with code interpreter and project asset id""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = ai_client.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) + assert file.id, "The file was not uploaded." + file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), + ) + tr = ToolResources(code_interpreter=cdr) + # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = ai_client.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=tr, + ) + assert assistant.id, "Assistant was not created" + + thread = ai_client.create_thread() + assert thread.id, "The thread was not created." 
+ + message = ai_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." + self._remove_file_maybe(file_id, ai_client) + assert run.status == "completed", f"Error in run: {run.last_error}" + ai_client.delete_assistant(assistant.id) + assert len(ai_client.list_messages(thread_id=thread.id)), "No messages were created" + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy + def test_create_thread_with_interpreter_azure(self, **kwargs): + """Test creating a thread with the code interpreter using azure asset IDs.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_thread_with_interpreter_file_ids(self, **kwargs): + """Test creating a thread with the code interpreter using file IDs.""" + self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs) + + def _do_test_create_thread_with_interpreter(self, **kwargs): + """Test creating a thread with the code interpreter and a project asset ID.""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = ai_client.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) + assert file.id, "The file was not uploaded." + file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), + ) + tr = ToolResources(code_interpreter=cdr) + # Note that the code interpreter must be enabled when the assistant is created; otherwise the assistant will not be able to see the file attachment. + assistant = ai_client.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + assert assistant.id, "Assistant was not created" + + thread = ai_client.create_thread(tool_resources=tr) + assert thread.id, "The thread was not created." + + message = ai_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created."
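+ # Note the two placements of the code interpreter resources exercised by these
+ # tests (sketches of the calls used above):
+ #     create_assistant(..., tools=ci.definitions, tool_resources=tr)  # assistant-level
+ #     create_thread(tool_resources=tr)                                # thread-level
+ # In both cases the tool definitions themselves are registered on the assistant.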
+ self._remove_file_maybe(file_id, ai_client) + assert run.status == "completed", f"Error in run: {run.last_error}" + ai_client.delete_assistant(assistant.id) + messages = ai_client.list_messages(thread.id) + assert len(messages) + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("Not deployed in all regions.") + @recorded_by_proxy + def test_create_assistant_with_inline_vs_azure(self, **kwargs): + """Test creation of assistant with vector store inline.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + fs = FileSearchToolResource( + vector_stores=[ + VectorStoreConfigurations( + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), + ) + ] + ) + file_search = FileSearchTool() + assistant = ai_client.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=ToolResources(file_search=fs), + ) + assert assistant.id + + thread = ai_client.create_thread() + assert thread.id + # create message + message = ai_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = ai_client.list_messages(thread.id) + assert len(messages) + ai_client.delete_assistant(assistant.id) + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy + def test_create_attachment_in_thread_azure(self, **kwargs): + """Create thread with message attachment inline with azure asset IDs.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + self._do_test_create_attachment_in_thread_azure(data_source=ds, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_attachment_in_thread_file_ids(self, **kwargs): + """Create thread with message attachment inline with file IDs.""" + self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs) + + def _do_test_create_attachment_in_thread_azure(self, **kwargs): + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = self._get_file_id_maybe(ai_client, **kwargs) + + file_search = FileSearchTool() + assistant = ai_client.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + ) + assert assistant.id + + # create message + attachment = MessageAttachment( + file_id=file_id, + data_source=kwargs.get("data_source"), + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], + ) + message = ThreadMessageOptions( + role="user", + content="What does the attachment say?", + attachments=[attachment], + ) + thread = ai_client.create_thread(messages=[message]) + assert thread.id + +
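+ # Seeding the thread via create_thread(messages=[...]) with ThreadMessageOptions is
+ # equivalent in effect to the two-step flow used elsewhere in this file (a sketch):
+ #     thread = ai_client.create_thread()
+ #     ai_client.create_message(thread_id=thread.id, role="user",
+ #                              content="What does the attachment say?",
+ #                              attachments=[attachment])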
run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = ai_client.list_messages(thread.id) + assert len(messages) + ai_client.delete_assistant(assistant.id) + ai_client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_azure_ai_search_tool(self, **kwargs): + """Test using the AzureAISearchTool with an assistant.""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # Create AzureAISearchTool + conn_id = kwargs.pop( + "azure_ai_assistants_tests_search_connection_id", "my-search-connection-ID" + ) + index_name = kwargs.pop("azure_ai_assistants_tests_search_index_name", "my-search-index") + + azure_search_tool = AzureAISearchTool( + index_connection_id=conn_id, + index_name=index_name, + ) + + # Create assistant with the search tool + assistant = client.create_assistant( + model="gpt-4o", + name="search-assistant", + instructions="You are a helpful assistant that can search for information using Azure AI Search.", + tools=azure_search_tool.definitions, + tool_resources=azure_search_tool.resources, + ) + assert assistant.id + print(f"Created assistant with ID: {assistant.id}") + + # Create thread + thread = client.create_thread() + assert thread.id + print(f"Created thread with ID: {thread.id}") + + # Create message + message = client.create_message( + thread_id=thread.id, role="user", content="Search for information about iPhone prices." + ) + assert message.id + print(f"Created message with ID: {message.id}") + + # Create and process run + run = client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == RunStatus.COMPLETED, run.last_error.message + + # List messages to verify tool was used + messages = client.list_messages(thread_id=thread.id) + assert len(messages.data) > 0 + + # Clean up + client.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_include_file_search_results_no_stream(self, **kwargs): + """Test using include_file_search.""" + self._do_test_include_file_search_results(use_stream=False, include_content=True, **kwargs) + self._do_test_include_file_search_results(use_stream=False, include_content=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_include_file_search_results_stream(self, **kwargs): + """Test using include_file_search with streaming.""" + self._do_test_include_file_search_results(use_stream=True, include_content=True, **kwargs) + self._do_test_include_file_search_results(use_stream=True, include_content=False, **kwargs) + + def _do_test_include_file_search_results(self, use_stream, include_content, **kwargs): + """Run the test with file search results.""" + with self.create_client(**kwargs) as ai_client: + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = ai_client.create_vector_store_and_poll( + file_ids=[], data_sources=ds, name="my_vectorstore" + ) + # vector_store = await ai_client.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') + assert vector_store.id + + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + assistant = ai_client.create_assistant( + model="gpt-4o", + 
name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + thread = ai_client.create_thread() + assert thread.id + # create message + message = ai_client.create_message( + thread_id=thread.id, + role="user", + # content="What does the attachment say?" + content="What Contoso Galaxy Innovations produces?", + ) + assert message.id, "The message was not created." + include = [RunAdditionalFieldList.FILE_SEARCH_CONTENTS] if include_content else None + + if use_stream: + run = None + with ai_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, include=include + ) as stream: + for event_type, event_data, _ in stream: + if isinstance(event_data, ThreadRun): + run = event_data + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + break + else: + run = ai_client.create_and_process_run( + thread_id=thread.id, assistant_id=assistant.id, include=include + ) + assert run is not None + assert run.status == RunStatus.COMPLETED + steps = ai_client.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) + # The 1st (not 0th) step is a tool call. + step_id = steps.data[1].id + one_step = ai_client.get_run_step( + thread_id=thread.id, run_id=run.id, step_id=step_id, include=include + ) + self._assert_file_search_valid(one_step.step_details.tool_calls[0], include_content) + self._assert_file_search_valid(steps.data[1].step_details.tool_calls[0], include_content) + + messages = ai_client.list_messages(thread_id=thread.id) + assert len(messages) + + ai_client.delete_vector_store(vector_store.id) + # delete assistant and close client + ai_client.delete_assistant(assistant.id) + print("Deleted assistant") + ai_client.close() + + def _assert_file_search_valid(self, tool_call: Any, include_content: bool) -> None: + """Test that file search result is properly populated.""" + assert isinstance(tool_call, RunStepFileSearchToolCall), f"Wrong type of tool call: {type(tool_call)}." + assert isinstance( + tool_call.file_search, RunStepFileSearchToolCallResults + ), f"Wrong type of search results: {type(tool_call.file_search)}." + assert tool_call.file_search.results + assert isinstance( + tool_call.file_search.results[0], RunStepFileSearchToolCallResult + ), f"Wrong type of search result: {type(tool_call.file_search.results[0])}." + if include_content: + assert tool_call.file_search.results[0].content + assert isinstance(tool_call.file_search.results[0].content[0], FileSearchToolCallContent) + assert tool_call.file_search.results[0].content[0].type == "text" + assert tool_call.file_search.results[0].content[0].text + else: + assert tool_call.file_search.results[0].content is None + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_assistants_with_json_schema(self, **kwargs): + """Test structured output from the assistant.""" + with self.create_client(**kwargs) as ai_client: + assistant = ai_client.create_assistant( + # Note only gpt-4o-mini-2024-07-18 and + # gpt-4o-2024-08-06 and later support structured output.
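+ # The hand-built JSON schema below corresponds roughly to this Pydantic model
+ # (a sketch for illustration only; the test itself does not depend on pydantic):
+ #     class Planets(str, enum.Enum):
+ #         EARTH = "Earth"
+ #         MARS = "Mars"
+ #         JUPITER = "Jupiter"
+ #     class Planet(pydantic.BaseModel):
+ #         planet: Planets
+ #         mass: float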
+ model="gpt-4o-mini", + name="my-assistant", + instructions="Extract the information about planets.", + headers={"x-ms-enable-preview": "true"}, + response_format=ResponseFormatJsonSchemaType( + json_schema=ResponseFormatJsonSchema( + name="planet_mass", + description="Extract planet mass.", + schema={ + "$defs": { + "Planets": {"enum": ["Earth", "Mars", "Jupiter"], "title": "Planets", "type": "string"} + }, + "properties": { + "planet": {"$ref": "#/$defs/Planets"}, + "mass": {"title": "Mass", "type": "number"}, + }, + "required": ["planet", "mass"], + "title": "Planet", + "type": "object", + }, + ) + ), + ) + assert assistant.id + + thread = ai_client.create_thread() + assert thread.id + + message = ai_client.create_message( + thread_id=thread.id, + role="user", + content=("The mass of the Mars is 6.4171E23 kg"), + ) + assert message.id + + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + + assert run.status == RunStatus.COMPLETED, run.last_error.message + + del_assistant = ai_client.delete_assistant(assistant.id) + assert del_assistant.deleted + + messages = ai_client.list_messages(thread_id=thread.id) + + planet_info = [] + # The messages are returned in reverse (most recent first) order; + # iterate them and collect only the text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + # We will only list assistant responses here. + if isinstance(last_message_content, MessageTextContent) and data_point.role == MessageRole.ASSISTANT: + planet_info.append(json.loads(last_message_content.text.value)) + assert len(planet_info) == 1 + assert len(planet_info[0]) == 2 + assert planet_info[0].get("mass") == pytest.approx(6.4171e23, 1e22) + assert planet_info[0].get("planet") == "Mars" + + def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> Optional[str]: + """Return the file ID if kwargs has a file path.""" + if "file_path" in kwargs: + file = ai_client.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) + assert file.id, "The file was not uploaded."
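+ # upload_file_and_poll polls the service until the uploaded file has been processed;
+ # FilePurpose.ASSISTANTS marks the upload for use with assistant tools (file search,
+ # code interpreter) as opposed to, e.g., fine-tuning.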
+ return file.id + return None + + def _remove_file_maybe(self, file_id: Optional[str], ai_client: AssistantsClient) -> None: + """Remove file if we have file ID.""" + if file_id: + ai_client.delete_file(file_id) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_code_interpreter_and_save_file(self, **kwargs): + output_file_exist = False + + # create client + with self.create_client(**kwargs) as client: + + with tempfile.TemporaryDirectory() as temp_dir: + + # create a temporary input file for upload + test_file_path = os.path.join(temp_dir, "input.txt") + + with open(test_file_path, "w") as f: + f.write("This is a test file") + + file: OpenAIFile = client.upload_file_and_poll( + file_path=test_file_path, purpose=FilePurpose.ASSISTANTS + ) + + # create assistant + code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + assistant = client.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # create a message + message = client.create_message( + thread_id=thread.id, + role="user", + content="Create an image file same as the text file and give me file id?", + ) + print(f"Created message, message ID: {message.id}") + + # create run + run = client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + # delete file + client.delete_file(file.id) + print("Deleted file") + + # get messages + messages = client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for file_path_annotation in messages.file_path_annotations: + file_id = file_path_annotation.file_path.file_id + print(f"Image File ID: {file_path_annotation.file_path.file_id}") + temp_file_path = os.path.join(temp_dir, "output.png") + client.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) + output_file_exist = os.path.exists(temp_file_path) + + assert output_file_exist + + @assistantClientPreparer() + @recorded_by_proxy + def test_azure_function_call(self, **kwargs): + """Test calling Azure functions.""" + # Note: this test was recorded in the westus region because, as of + # 2025-02-05, the feature is not supported in the test region (East US 2). + # create client + storage_queue = kwargs["azure_ai_assistants_tests_storage_queue"] + with self.create_client(**kwargs) as client: + azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_queue, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_queue, + ), + ) + assistant = client.create_assistant( + model="gpt-4", + name="azure-function-assistant-foo", + instructions=( + "You are a helpful support assistant. 
Use the provided function any " + "time the prompt contains the string 'What would foo say?'. When " + "you invoke the function, ALWAYS specify the output queue uri parameter as " + f"'{storage_queue}/azure-function-tool-output'" + '. Always responds with "Foo says" and then the response from the tool.' + ), + headers={"x-ms-enable-preview": "true"}, + tools=azure_function_tool.definitions, + ) + assert assistant.id, "The assistant was not created" + + # Create a thread + thread = client.create_thread() + assert thread.id, "The thread was not created." + + # Create a message + message = client.create_message( + thread_id=thread.id, + role="user", + content="What is the most prevalent element in the universe? What would foo say?", + ) + assert message.id, "The message was not created." + + run = client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state." + + # Get messages from the thread + messages = client.list_messages(thread_id=thread.id) + assert len(messages.text_messages) > 1, "No messages were received from assistant." + + # Check that we have function response in at least one message. + assert any("bar" in msg.text.value.lower() for msg in messages.text_messages) + + # Delete the assistant once done + result = client.delete_assistant(assistant.id) + assert result.deleted, "The assistant was not deleted." + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented.") + @recorded_by_proxy + def test_client_with_thread_messages(self, **kwargs): + """Test assistant with thread messages.""" + with self.create_client(**kwargs) as client: + + # [START create_assistant] + assistant = client.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a personal electronics tutor. Write and run code to answer questions.", + ) + assert assistant.id, "The assistant was not created." + thread = client.create_thread() + assert thread.id, "Thread was not created" + + message = client.create_message( + thread_id=thread.id, role="user", content="What is the equation of light energy?" + ) + assert message.id, "The message was not created." + + additional_messages = [ + ThreadMessageOptions(role=MessageRole.ASSISTANT, content="E=mc^2"), + ThreadMessageOptions(role=MessageRole.USER, content="What is the impedance formula?"), + ] + run = client.create_run( + thread_id=thread.id, assistant_id=assistant.id, additional_messages=additional_messages + ) + + # poll the run as long as run status is queued or in progress + while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS]: + # wait for a second + time.sleep(1) + run = client.get_run( + thread_id=thread.id, + run_id=run.id, + ) + assert run.status == RunStatus.COMPLETED + + assert client.delete_assistant(assistant.id).deleted, "The assistant was not deleted" + messages = client.list_messages(thread_id=thread.id) + assert len(messages.data), "The data from the assistant was not received." diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py new file mode 100644 index 000000000000..9bb7f478b14c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py @@ -0,0 +1,3104 @@ +# pylint: disable=too-many-lines,line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License.
+# ------------------------------------ +# cSpell:disable +from typing import Any + +import datetime +import functools +import json +import logging +import os +import pytest +import sys +import io +import time + +from azure.ai.assistants.aio import AssistantsClient +from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader +from devtools_testutils.aio import recorded_by_proxy_async +from azure.ai.assistants.models import ( + AzureFunctionTool, + AzureFunctionStorageQueue, + AssistantStreamEvent, + AssistantThread, + CodeInterpreterTool, + CodeInterpreterToolResource, + FilePurpose, + FileSearchTool, + FileSearchToolCallContent, + FileSearchToolResource, + FunctionTool, + MessageAttachment, + MessageRole, + MessageTextContent, + ResponseFormatJsonSchema, + ResponseFormatJsonSchemaType, + RunAdditionalFieldList, + RunStepDeltaChunk, + RunStepDeltaToolCallObject, + RunStepFileSearchToolCall, + RunStepFileSearchToolCallResult, + RunStepFileSearchToolCallResults, + RunStatus, + ThreadMessageOptions, + ThreadRun, + ToolResources, + ToolSet, + VectorStore, + VectorStoreConfigurations, + VectorStoreConfiguration, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) + +# TODO clean this up / get rid of anything not in use + +""" +issues I've noticed with the code: + delete_thread(thread.id) fails + cancel_thread(thread.id) expires/times out occasionally + added time.sleep() to the beginning of my last few tests to avoid limits + when using the endpoint from Howie, delete_assistant(assistant.id) did not work but would not cause an error +""" + +# Set to True to enable SDK logging +LOGGING_ENABLED = True + +if LOGGING_ENABLED: + # Create a logger for the 'azure' SDK + # See https://docs.python.org/3/library/logging.html + logger = logging.getLogger("azure") + logger.setLevel(logging.DEBUG) # INFO or DEBUG + + # Configure a console output + handler = logging.StreamHandler(stream=sys.stdout) + logger.addHandler(handler) + + +assistantClientPreparer = functools.partial( + EnvironmentVariableLoader, + "azure_ai_assistants", + # TODO: uncomment this endpoint when re-running with 1DP + #azure_ai_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name", + # TODO: remove this endpoint when re-running with 1DP + azure_ai_assistants_tests_project_endpoint="https://Sanitized.api.azureml.ms/agents/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/", + azure_ai_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", + azure_ai_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", + azure_ai_assistants_tests_search_index_name="sample_index", + azure_ai_assistants_tests_search_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/someindex", +) + + +# create tool for assistant use +def fetch_current_datetime_live(): + """ + Get the current time as a JSON string. + + :return: The current time as a JSON string.
+ :rtype: str + """ + current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + time_json = json.dumps({"current_time": current_datetime}) + return time_json + + +# create tool for assistant use +def fetch_current_datetime_recordings(): + """ + Get the current time as a JSON string. + + :return: Static time string so that test recordings work. + :rtype: str + """ + time_json = json.dumps({"current_time": "2024-10-10 12:30:19"}) + return time_json + + +# Statically defined user functions for fast reference +user_functions_recording = {fetch_current_datetime_recordings} +user_functions_live = {fetch_current_datetime_live} + + +# The test class name needs to start with "Test" to get collected by pytest +class TestAssistantClientAsync(AzureRecordedTestCase): + + # helper function: create client using environment variables + def create_client(self, **kwargs): + # fetch environment variables + endpoint = kwargs.pop("azure_ai_assistants_tests_project_endpoint") + credential = self.get_credential(AssistantsClient, is_async=True) + + # create and return client + client = AssistantsClient( + endpoint=endpoint, + credential=credential, + ) + + return client + + def _get_data_file(self) -> str: + """Return the test file name.""" + return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") + + # for debugging purposes: if a test fails and its assistant has not been deleted, it will continue to show up in the assistants list + """ + # NOTE: this test should not be run against a shared resource, as it will delete all assistants + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_clear_client(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # clear assistant list + assistants = await client.list_assistants().data + for assistant in assistants: + await client.delete_assistant(assistant.id) + assert client.list_assistants().data.__len__() == 0 + + # close client + await client.close() + """ + + # ********************************************************************************** + # + # UNIT TESTS + # + # ********************************************************************************** + + # ********************************************************************************** + # + # HAPPY PATH SERVICE TESTS - assistant APIs + # + # ********************************************************************************** + + # test client creation + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_client(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + print("Created client") + + # close client + await client.close() + + # test assistant creation and deletion + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_delete_assistant(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + + # test assistant creation with tools + @assistantClientPreparer() + @recorded_by_proxy_async + async def 
test_create_assistant_with_tools(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + print("Created client") + + # initialize assistant functions + functions = FunctionTool(functions=user_functions_recording) + + # create assistant with tools + assistant = await client.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=functions.definitions, + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + assert assistant.tools + assert assistant.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + + # test update assistant without body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_assistant(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + print("Created client") + + # create body for assistant + body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} + + # create assistant + assistant = await client.create_assistant(body=body) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # update assistant and confirm changes went through + assistant = await client.update_assistant(assistant.id, name="my-assistant2") + assert assistant.name + assert assistant.name == "my-assistant2" + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test update assistant with body: JSON + @assistantClientPreparer() + @pytest.mark.skip("Overload performs inconsistently.") + @recorded_by_proxy_async + async def test_update_assistant_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create body for assistant + body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} + + # create assistant + assistant = await client.create_assistant(body=body) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create body for assistant + body2 = {"name": "my-assistant2", "instructions": "You are helpful assistant"} + + # update assistant and confirm changes went through + assistant = await client.update_assistant(assistant.id, body=body2) + assert assistant.name + assert assistant.name == "my-assistant2" + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # NOTE update_assistant with overloads isn't working + # test update assistant with body: IO[bytes] + @assistantClientPreparer() + @pytest.mark.skip("Overload performs inconsistently.") + @recorded_by_proxy_async + async def test_update_assistant_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + + # create body for assistant + body = {"name": "my-assistant2", "instructions": "You are helpful assistant"} + binary_body = 
json.dumps(body).encode("utf-8") + + # update assistant and confirm changes went through + assistant = await client.update_assistant(assistant.id, body=io.BytesIO(binary_body)) + assert assistant.name + assert assistant.name == "my-assistant2" + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + """ + DISABLED: can't perform consistently on shared resource + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_assistant_list(self, **kwargs): + # create client and ensure there are no previous assistants + client = self.create_client(**kwargs) + list_length = await client.list_assistants().data.__len__() + + # create assistant and check that it appears in the list + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant.id + + # create second assistant and check that it appears in the list + assistant2 = await client.create_assistant(model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant") + assert client.list_assistants().data.__len__() == list_length + 2 + assert client.list_assistants().data[0].id == assistant.id or client.list_assistants().data[1].id == assistant.id + + # delete assistants and check list + await client.delete_assistant(assistant.id) + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant2.id + + client.delete_assistant(assistant2.id) + assert client.list_assistants().data.__len__() == list_length + print("Deleted assistants") + + # close client + await client.close() + """ + + # ********************************************************************************** + # + # HAPPY PATH SERVICE TESTS - Thread APIs + # + # ********************************************************************************** + + # test creating thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_thread(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + + # test creating thread with no body + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_with_metadata(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create metadata for thread + metadata = {"key1": "value1", "key2": "value2"} + + # create thread + thread = await client.create_thread(metadata=metadata) + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # close client + print("Deleted assistant") + await client.close() + + # test creating thread with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + 
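+ # The *_with_body tests exercise the overload that accepts a raw JSON mapping in
+ # place of keyword arguments; the *_with_iobytes variants below pass the same
+ # payload serialized as IO[bytes], e.g. (sketch):
+ #     await client.create_thread(body=io.BytesIO(json.dumps(body).encode("utf-8")))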
async def test_create_thread_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create body for thread + body = { + "metadata": {"key1": "value1", "key2": "value2"}, + } + + # create thread + thread = await client.create_thread(body=body) + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # close client + print("Deleted assistant") + await client.close() + + # test creating thread with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create body for thread + body = { + "metadata": {"key1": "value1", "key2": "value2"}, + } + binary_body = json.dumps(body).encode("utf-8") + + # create thread + thread = await client.create_thread(body=io.BytesIO(binary_body)) + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # close client + print("Deleted assistant") + await client.close() + + # test getting thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_get_thread(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # get thread + thread2 = await client.get_thread(thread.id) + assert thread2.id + assert thread.id == thread2.id + print("Got thread, thread ID", thread2.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + + # test updating thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_thread(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # update thread + thread = await client.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test updating thread without body + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_thread_with_metadata(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # set metadata + metadata = {"key1": "value1", "key2": "value2"} + + # create thread + thread = await client.create_thread(metadata=metadata) + assert thread.id + print("Created thread, thread ID", thread.id) + + # set 
metadata + metadata2 = {"key1": "value1", "key2": "newvalue2"} + + # update thread + thread = await client.update_thread(thread.id, metadata=metadata2) + assert thread.metadata == {"key1": "value1", "key2": "newvalue2"} + + # close client + await client.close() + + # test updating thread with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_thread_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # set metadata + body = {"metadata": {"key1": "value1", "key2": "value2"}} + + # update thread + thread = await client.update_thread(thread.id, body=body) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # close client + await client.close() + + # test updating thread with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_thread_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # set metadata + body = {"metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + + # update thread + thread = await client.update_thread(thread.id, body=io.BytesIO(binary_body)) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # close client + await client.close() + + # test deleting thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_delete_thread(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + # assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete thread + deletion_status = await client.delete_thread(thread.id) + assert deletion_status.id == thread.id + assert deletion_status.deleted == True + print("Deleted thread, thread ID", deletion_status.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Message APIs + # # + # # ********************************************************************************** + + # test creating message in a thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_message(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, 
role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test creating message in a thread with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_message_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create body for message + body = {"role": "user", "content": "Hello, tell me a joke"} + + # create message + message = await client.create_message(thread_id=thread.id, body=body) + assert message.id + print("Created message, message ID", message.id) + + # close client + await client.close() + + # test creating message in a thread with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_message_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create body for message + body = {"role": "user", "content": "Hello, tell me a joke"} + binary_body = json.dumps(body).encode("utf-8") + + # create message + message = await client.create_message(thread_id=thread.id, body=io.BytesIO(binary_body)) + assert message.id + print("Created message, message ID", message.id) + + # close client + await client.close() + + # test creating multiple messages in a thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_multiple_messages(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create messages + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + message2 = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) + assert message2.id + print("Created message, message ID", message2.id) + message3 = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) + assert message3.id + print("Created message, message ID", message3.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test listing messages in a thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_list_messages(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create 
thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check that initial message list is empty + messages0 = await client.list_messages(thread_id=thread.id) + print(messages0.data) + assert messages0.data.__len__() == 0 + + # create messages and check message list for each one + message1 = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message1.id + print("Created message, message ID", message1.id) + messages1 = await client.list_messages(thread_id=thread.id) + assert messages1.data.__len__() == 1 + assert messages1.data[0].id == message1.id + + message2 = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) + assert message2.id + print("Created message, message ID", message2.id) + messages2 = await client.list_messages(thread_id=thread.id) + assert messages2.data.__len__() == 2 + assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + + message3 = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) + assert message3.id + print("Created message, message ID", message3.id) + messages3 = await client.list_messages(thread_id=thread.id) + assert messages3.data.__len__() == 3 + assert ( + messages3.data[0].id == message3.id + or messages3.data[1].id == message3.id + or messages3.data[2].id == message3.id + ) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test getting message in a thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_get_message(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # get message + message2 = await client.get_message(thread_id=thread.id, message_id=message.id) + assert message2.id + assert message.id == message2.id + print("Got message, message ID", message.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + + # test updating message in a thread without body + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_message(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # update message + message = await client.update_message( + thread_id=thread.id, message_id=message.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert message.metadata == {"key1": "value1", "key2":
"value2"} + + # close client + await client.close() + + # test updating message in a thread with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_message_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # create body for message + body = {"metadata": {"key1": "value1", "key2": "value2"}} + + # update message + message = await client.update_message(thread_id=thread.id, message_id=message.id, body=body) + assert message.metadata == {"key1": "value1", "key2": "value2"} + + # close client + await client.close() + + # test updating message in a thread with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_message_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # create body for message + body = {"metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + + # update message + message = await client.update_message( + thread_id=thread.id, message_id=message.id, body=io.BytesIO(binary_body) + ) + assert message.metadata == {"key1": "value1", "key2": "value2"} + + # close client + await client.close() + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Run APIs + # # + # # ********************************************************************************** + + # test creating run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test creating run without body + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_run_with_metadata(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # 
create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test creating run with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_run_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create body for run + body = {"assistant_id": assistant.id, "metadata": {"key1": "value1", "key2": "value2"}} + + # create run + run = await client.create_run(thread_id=thread.id, body=body) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test creating run with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_run_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create body for run + body = {"assistant_id": assistant.id, "metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + + # create run + run = await client.create_run(thread_id=thread.id, body=io.BytesIO(binary_body)) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test getting run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_get_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # get run + run2 = await client.get_run(thread_id=thread.id, run_id=run.id) + assert run2.id + assert 
run.id == run2.id + print("Got run, run ID", run2.id) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test successful run status + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_run_status(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + print("Run status:", run.status) + + assert run.status in ["cancelled", "failed", "completed", "expired"] + print("Run completed with status:", run.status) + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + """ + # TODO another, but check that the number of runs decreases after cancelling runs + # TODO can each thread only support one run? 
+ # test listing runs + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_list_runs(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check list for current runs + runs0 = await client.list_runs(thread_id=thread.id) + assert runs0.data.__len__() == 0 + + # create run and check list + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + runs1 = await client.list_runs(thread_id=thread.id) + assert runs1.data.__len__() == 1 + assert runs1.data[0].id == run.id + + # create second run + run2 = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run2.id + print("Created run, run ID", run2.id) + runs2 = await client.list_runs(thread_id=thread.id) + assert runs2.data.__len__() == 2 + assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + """ + + # test updating run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # update run + while run.status in ["queued", "in_progress"]: + time.sleep(5) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run( + thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.metadata == {"key1": "value1", "key2": "value2"} + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test updating run without body + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_run_with_metadata(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # update run + while 
run.status in ["queued", "in_progress"]: + time.sleep(5) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run( + thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "newvalue2"} + ) + assert run.metadata == {"key1": "value1", "key2": "newvalue2"} + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test updating run with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_run_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # create body for run + body = {"metadata": {"key1": "value1", "key2": "newvalue2"}} + + # update run + while run.status in ["queued", "in_progress"]: + time.sleep(5) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run(thread_id=thread.id, run_id=run.id, body=body) + assert run.metadata == {"key1": "value1", "key2": "newvalue2"} + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test updating run with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_run_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # create body for run + body = {"metadata": {"key1": "value1", "key2": "newvalue2"}} + binary_body = json.dumps(body).encode("utf-8") + + # update run + while run.status in ["queued", "in_progress"]: + time.sleep(5) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run(thread_id=thread.id, run_id=run.id, body=io.BytesIO(binary_body)) + assert run.metadata == {"key1": "value1", "key2": "newvalue2"} + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test submitting tool outputs to run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_submit_tool_outputs_to_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created 
client") + + # Initialize assistant tools + functions = FunctionTool(user_functions_recording) + # TODO add files for code interpreter tool + # code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + # toolset.add(code_interpreter) + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" + ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + await client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + await client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + hour12 = time.strftime("%H") + hour24 = time.strftime("%I") + minute = time.strftime("%M") + assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute + print("Used tool_outputs") + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + + # test submitting tool outputs to run with body: JSON + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # Initialize assistant tools + functions = FunctionTool(user_functions_recording) + toolset = ToolSet() + toolset.add(functions) + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + 
assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" + ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + await client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + body = {"tool_outputs": tool_outputs} + await client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, body=body + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + # hour12 = time.strftime("%H") + # hour24 = time.strftime("%I") + # minute = time.strftime("%M") + # assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute + recorded_time = "12:30" + assert recorded_time in tool_message + print("Used tool_outputs") + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + + # test submitting tool outputs to run with body: IO[bytes] + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # Initialize assistant tools + functions = FunctionTool(user_functions_recording) + toolset = ToolSet() + toolset.add(functions) + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + await client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + body = {"tool_outputs": tool_outputs} + binary_body = json.dumps(body).encode("utf-8") + await client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, body=io.BytesIO(binary_body) + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + # hour12 = time.strftime("%H") + # hour24 = time.strftime("%I") + # minute = time.strftime("%M") + # assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute + recorded_time = "12:30" + assert recorded_time in tool_message + print("Used tool_outputs") + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + + """ + # DISABLED: rewrite to ensure run is not complete when cancel_run is called + # test cancelling run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_cancel_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", 
+ "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print( + "No tool calls provided - cancelling run" + ) # TODO how can i make sure that it wants tools? should i have some kind of error message? + await client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here + print("Tool outputs:", tool_outputs) + if tool_outputs: + await client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + hour12 = time.strftime("%H") + hour24 = time.strftime("%I") + minute = time.strftime("%M") + assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute + print("Used tool_outputs") + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + """ + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_create_parallel_tool_thread_true(self, **kwargs): + """Test creation of parallel runs.""" + await self._do_test_create_parallel_thread_runs(True, True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_create_parallel_tool_thread_false(self, **kwargs): + """Test creation of parallel runs.""" + await self._do_test_create_parallel_thread_runs(False, True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_create_parallel_tool_run_true(self, **kwargs): + """Test creation of parallel runs.""" + await self._do_test_create_parallel_thread_runs(True, False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_create_parallel_tool_run_false(self, **kwargs): + """Test creation of parallel runs.""" + await self._do_test_create_parallel_thread_runs(False, False, **kwargs) + + async def _wait_for_run(self, client, run, timeout=1): + """Wait while run will get to terminal state.""" + while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS, RunStatus.REQUIRES_ACTION]: + time.sleep(timeout) + run = await client.get_run(thread_id=run.thread_id, run_id=run.id) + return run + + async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_run, **kwargs): + """Test creation of parallel runs.""" + + # create client + client = self.create_client( + **kwargs, + ) + assert isinstance(client, AssistantsClient) + + # Initialize assistant tools + functions = FunctionTool(functions=user_functions_recording) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + assistant = await client.create_assistant( + model="gpt-4", + 
name="my-assistant", + instructions="You are helpful assistant", + toolset=toolset, + ) + assert assistant.id + + message = ThreadMessageOptions( + role="user", + content="Hello, what time is it?", + ) + + if create_thread_run: + run = await client.create_thread_and_run( + assistant_id=assistant.id, + parallel_tool_calls=use_parallel_runs, + ) + run = await self._wait_for_run(client, run) + else: + thread = await client.create_thread(messages=[message]) + assert thread.id + + run = await client.create_and_process_run( + thread_id=thread.id, + assistant_id=assistant.id, + parallel_tool_calls=use_parallel_runs, + ) + assert run.id + assert run.status == RunStatus.COMPLETED, run.last_error.message + assert run.parallel_tool_calls == use_parallel_runs + + assert (await client.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" + messages = await client.list_messages(thread_id=run.thread_id) + assert len(messages.data), "The data from the assistant was not received." + + """ + # DISABLED: rewrite to ensure run is not complete when cancel_run is called + # test cancelling run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_cancel_run(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check status and cancel + assert run.status in ["queued", "in_progress", "requires_action"] + client.cancel_run(thread_id=thread.id, run_id=run.id) + + while run.status in ["queued", "cancelling"]: + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + print("Current run status:", run.status) + assert run.status == "cancelled" + print("Run cancelled") + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + """ + + # test create thread and run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_and_run(self, **kwargs): + time.sleep(26) + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread and run + run = await client.create_thread_and_run(assistant_id=assistant.id) + assert run.id + assert run.thread_id + print("Created run, run ID", run.id) + + # get thread + thread = await client.get_thread(run.thread_id) + assert thread.id + print("Created thread, thread ID", thread.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", 
"requires_action"]: + # wait for a second + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + # assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + + # test create thread and run with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_and_run_with_body(self, **kwargs): + # time.sleep(26) + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create body for thread + body = { + "assistant_id": assistant.id, + "metadata": {"key1": "value1", "key2": "value2"}, + } + + # create thread and run + run = await client.create_thread_and_run(body=body) + assert run.id + assert run.thread_id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # get thread + thread = await client.get_thread(run.thread_id) + assert thread.id + print("Created thread, thread ID", thread.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + # assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test create thread and run with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_and_run_with_iobytes(self, **kwargs): + # time.sleep(26) + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create body for thread + body = { + "assistant_id": assistant.id, + "metadata": {"key1": "value1", "key2": "value2"}, + } + binary_body = json.dumps(body).encode("utf-8") + + # create thread and run + run = await client.create_thread_and_run(body=io.BytesIO(binary_body)) + assert run.id + assert run.thread_id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # get thread + thread = await client.get_thread(run.thread_id) + assert thread.id + print("Created thread, thread ID", thread.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + # assert run.status in ["queued", "in_progress", 
"requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + """ + # test listing run steps + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_list_run_step(self, **kwargs): + + time.sleep(50) + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" + ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + steps = await client.list_run_steps(thread_id=thread.id, run_id=run.id) + # commenting assertion out below, do we know exactly when run starts? + # assert steps['data'].__len__() == 0 + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] + print("Run status:", run.status) + steps = await client.list_run_steps( + thread_id=thread.id, run_id=run.id + ) + assert steps["data"].__len__() > 0 + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + """ + + # test getting run step + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_get_run_step(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + if run.status == "failed": + assert run.last_error + print(run.last_error) + print("FAILED HERE") + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + if run.status == "failed": + assert run.last_error + print(run.last_error) + print("FAILED HERE") + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] + print("Run status:", run.status) + + # list steps, check that get_run_step works with first step_id + steps = await client.list_run_steps(thread_id=thread.id, run_id=run.id) + assert len(steps["data"]) > 0 + step = steps["data"][0] + get_step = await client.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + assert step == get_step + + # delete assistant and close client + await client.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_azure(self, **kwargs): + """Test the assistant with vector store creation.""" + await self._do_test_create_vector_store(streaming=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_file_id(self, **kwargs): + """Test the assistant with vector store creation.""" + await self._do_test_create_vector_store(streaming=False, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_azure_streaming(self, **kwargs): + """Test the assistant with vector store creation.""" + await self._do_test_create_vector_store(streaming=True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_file_id_streaming(self, **kwargs): + """Test the assistant with vector store creation.""" + await self._do_test_create_vector_store(streaming=True, file_path=self._get_data_file(), **kwargs) + + async def _do_test_create_vector_store(self, streaming, **kwargs): + """Test the assistant with vector store creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = await self._get_file_id_maybe(ai_client, **kwargs) + file_ids = [file_id] if file_id else None + if file_ids: + ds = None + else: + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = await ai_client.create_vector_store_and_poll( + file_ids=file_ids, data_sources=ds, name="my_vectorstore" + ) + assert vector_store.id + await self._test_file_search(ai_client, vector_store, file_id, streaming) + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_add_file_file_id(self, **kwargs): + """Test adding single file to vector store with file ID.""" + await 
self._do_test_create_vector_store_add_file(streaming=False, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_add_file_azure(self, **kwargs): + """Test adding single file to vector store with azure asset ID.""" + await self._do_test_create_vector_store_add_file(streaming=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_add_file_file_id_streaming(self, **kwargs): + """Test adding single file to vector store with file ID.""" + await self._do_test_create_vector_store_add_file(streaming=True, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_add_file_azure_streaming(self, **kwargs): + """Test adding single file to vector store with azure asset ID.""" + await self._do_test_create_vector_store_add_file(streaming=True, **kwargs) + + async def _do_test_create_vector_store_add_file(self, streaming, **kwargs): + """Test adding single file to vector store.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = await self._get_file_id_maybe(ai_client, **kwargs) + if file_id: + ds = None + else: + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + vector_store = await ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + assert vector_store.id + vector_store_file = await ai_client.create_vector_store_file( + vector_store_id=vector_store.id, data_source=ds, file_id=file_id + ) + assert vector_store_file.id + await self._test_file_search(ai_client, vector_store, file_id, streaming) + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_batch_file_ids(self, **kwargs): + """Test adding multiple files to vector store with file IDs.""" + await self._do_test_create_vector_store_batch(streaming=False, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_batch_azure(self, **kwargs): + """Test adding multiple files to vector store with azure asset IDs.""" + await self._do_test_create_vector_store_batch(streaming=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_batch_file_ids_streaming(self, **kwargs): + """Test adding multiple files to vector store with file IDs.""" + await self._do_test_create_vector_store_batch(streaming=True, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_batch_azure_streaming(self, **kwargs): + """Test adding multiple files to vector store with azure asset IDs.""" + await self._do_test_create_vector_store_batch(streaming=True, **kwargs) + + async def _do_test_create_vector_store_batch(self, streaming, **kwargs): + """Test the assistant with vector store creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = await self._get_file_id_maybe(ai_client, **kwargs) + if file_id: + file_ids = [file_id] + ds = None +
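# when no file was uploaded, the else branch below falls back to an Azure URI asset as the vector store data source +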
else: + file_ids = None + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = await ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + assert vector_store.id + vector_store_file_batch = await ai_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids + ) + assert vector_store_file_batch.id + await self._test_file_search(ai_client, vector_store, file_id, streaming) + + async def _test_file_search( + self, ai_client: AssistantsClient, vector_store: VectorStore, file_id: str, streaming: bool + ) -> None: + """Test the file search""" + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + assistant = await ai_client.create_assistant( + model="gpt-4", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + thread = await ai_client.create_thread() + assert thread.id + # create message + message = await ai_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + if streaming: + thread_run = None + async with await ai_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id + ) as stream: + async for _, event_data, _ in stream: + if isinstance(event_data, ThreadRun): + thread_run = event_data + elif ( + isinstance(event_data, RunStepDeltaChunk) + and isinstance(event_data.delta.step_details, RunStepDeltaToolCallObject) + and event_data.delta.step_details.tool_calls + ): + assert isinstance( + event_data.delta.step_details.tool_calls[0].file_search, RunStepFileSearchToolCallResults + ) + assert thread_run is not None + run = await ai_client.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) + assert run is not None + else: + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + await ai_client.delete_vector_store(vector_store.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = await ai_client.list_messages(thread_id=thread.id) + assert len(messages) + await self._remove_file_maybe(file_id, ai_client) + # delete assistant and close client + await ai_client.delete_assistant(assistant.id) + print("Deleted assistant") + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_message_attachement_azure(self, **kwargs): + """Test message attachment with azure ID.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + await self._do_test_message_attachment(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_message_attachement_file_ids(self, **kwargs): + """Test message attachment with file ID.""" + await self._do_test_message_attachment(file_path=self._get_data_file(), **kwargs) + + async def _do_test_message_attachment(self, **kwargs): + """Test assistant with the message attachment.""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = await 
self._get_file_id_maybe(ai_client, **kwargs) + + # Create assistant with file search tool + assistant = await ai_client.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + ) + assert assistant.id, "Assistant was not created" + + thread = await ai_client.create_thread() + assert thread.id, "The thread was not created." + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. + attachment = MessageAttachment( + file_id=file_id, + data_sources=kwargs.get("data_sources"), + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], + ) + message = await ai_client.create_message( + thread_id=thread.id, + role="user", + content="What does the attachment say?", + attachments=[attachment], + ) + assert message.id, "The message was not created." + + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." + await self._remove_file_maybe(file_id, ai_client) + await ai_client.delete_assistant(assistant.id) + + messages = await ai_client.list_messages(thread_id=thread.id) + assert len(messages), "No messages were created" + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("Failing with Http Response Errors.") + @recorded_by_proxy_async + async def test_vector_store_threads_file_search_azure(self, **kwargs): + """Test file search when azure asset ids are supplied during thread creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + fs = FileSearchToolResource( + vector_stores=[ + VectorStoreConfigurations( + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), + ) + ] + ) + file_search = FileSearchTool() + assistant = await ai_client.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + + thread = await ai_client.create_thread(tool_resources=ToolResources(file_search=fs)) + assert thread.id + # create message + message = await ai_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." 
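 + # For context (sketch, assuming a run with no tool calls): create_and_process_run used below is the convenience form of the manual polling loop exercised in the Run API tests above, roughly: + # run = await ai_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + # while run.status in ("queued", "in_progress", "requires_action"): + #     time.sleep(1) + #     run = await ai_client.get_run(thread_id=thread.id, run_id=run.id)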
+ + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = await ai_client.list_messages(thread.id) + assert len(messages) + await ai_client.delete_assistant(assistant.id) + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy_async + async def test_create_assistant_with_interpreter_azure(self, **kwargs): + """Test Create assistant with code interpreter with azure asset ids.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + await self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_assistant_with_interpreter_file_ids(self, **kwargs): + """Test Create assistant with code interpreter with file IDs.""" + await self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs) + + async def _do_test_create_assistant_with_interpreter(self, **kwargs): + """Test create assistant with code interpreter and project asset id""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = await ai_client.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) + assert file.id, "The file was not uploaded." + file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), + ) + tr = ToolResources(code_interpreter=cdr) + # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = await ai_client.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=code_interpreter.definitions, + tool_resources=tr, + ) + assert assistant.id, "Assistant was not created" + + thread = await ai_client.create_thread() + assert thread.id, "The thread was not created." + + message = await ai_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." 
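 + # Remove the uploaded file (if any) before the status assertion below, so the file is cleaned up even when the run did not complete.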
+ await self._remove_file_maybe(file_id, ai_client) + assert run.status == "completed", f"Error in run: {run.last_error}" + await ai_client.delete_assistant(assistant.id) + messages = await ai_client.list_messages(thread_id=thread.id) + assert len(messages), "No messages were created" + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy_async + async def test_create_thread_with_interpreter_azure(self, **kwargs): + """Test Create assistant with code interpreter with azure asset ids.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + await self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_thread_with_interpreter_file_ids(self, **kwargs): + """Test Create assistant with code interpreter with file IDs.""" + await self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs) + + async def _do_test_create_thread_with_interpreter(self, **kwargs): + """Test create assistant with code interpreter and project asset id""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = await ai_client.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) + assert file.id, "The file was not uploaded." + file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), + ) + tr = ToolResources(code_interpreter=cdr) + # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = await ai_client.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + assert assistant.id, "Assistant was not created" + + thread = await ai_client.create_thread(tool_resources=tr) + assert thread.id, "The thread was not created." + + message = await ai_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." 
+        await self._remove_file_maybe(file_id, ai_client)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        await ai_client.delete_assistant(assistant.id)
+        messages = await ai_client.list_messages(thread.id)
+        assert len(messages)
+        await ai_client.close()
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("Failing with Http Response Errors.")
+    @recorded_by_proxy_async
+    async def test_create_assistant_with_inline_vs_azure(self, **kwargs):
+        """Test creation of an assistant with an inline vector store."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AssistantsClient)
+
+        ds = [
+            VectorStoreDataSource(
+                asset_identifier=kwargs["azure_ai_assistants_tests_data_path"],
+                asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
+            )
+        ]
+        fs = FileSearchToolResource(
+            vector_stores=[
+                VectorStoreConfigurations(
+                    store_name="my_vector_store",
+                    store_configuration=VectorStoreConfiguration(data_sources=ds),
+                )
+            ]
+        )
+        file_search = FileSearchTool()
+        assistant = await ai_client.create_assistant(
+            model="gpt-4o",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+            tools=file_search.definitions,
+            tool_resources=ToolResources(file_search=fs),
+        )
+        assert assistant.id
+
+        thread = await ai_client.create_thread()
+        assert thread.id
+        # create message
+        message = await ai_client.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?"
+        )
+        assert message.id, "The message was not created."
+
+        run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        messages = await ai_client.list_messages(thread.id)
+        assert len(messages)
+        await ai_client.delete_assistant(assistant.id)
+        await ai_client.close()
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("The API is not supported yet.")
+    @recorded_by_proxy_async
+    async def test_create_attachment_in_thread_azure(self, **kwargs):
+        """Create thread with message attachment inline with azure asset IDs."""
+        ds = VectorStoreDataSource(
+            asset_identifier=kwargs["azure_ai_assistants_tests_data_path"],
+            asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
+        )
+        await self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs)
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("File ID issues with sanitization.")
+    @recorded_by_proxy_async
+    async def test_create_attachment_in_thread_file_ids(self, **kwargs):
+        """Create thread with message attachment inline with file IDs."""
+        await self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs)
+
+    async def _do_test_create_attachment_in_thread_azure(self, **kwargs):
+        """Create a thread with an inline message attachment and run the assistant."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AssistantsClient)
+
+        file_id = await self._get_file_id_maybe(ai_client, **kwargs)
+
+        file_search = FileSearchTool()
+        assistant = await ai_client.create_assistant(
+            model="gpt-4o",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+            tools=file_search.definitions,
+        )
+        assert assistant.id
+
+        # create message
+        attachment = MessageAttachment(
+            file_id=file_id,
+            data_sources=kwargs.get("data_sources"),
+            tools=[
+                FileSearchTool().definitions[0],
+                CodeInterpreterTool().definitions[0],
+            ],
+        )
+        message = ThreadMessageOptions(
+            role="user",
+            content="What does
 the attachment say?",
+            attachments=[attachment],
+        )
+        thread = await ai_client.create_thread(messages=[message])
+        assert thread.id
+
+        run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        messages = await ai_client.list_messages(thread.id)
+        assert len(messages)
+        await ai_client.delete_assistant(assistant.id)
+        await ai_client.close()
+
+    @assistantClientPreparer()
+    @recorded_by_proxy_async
+    async def test_azure_function_call(self, **kwargs):
+        """Test calling Azure functions."""
+        # Note: This test was recorded in the westus region because, as of
+        # 2025-02-05, it is not supported in the test region (East US 2).
+        # create client
+        storage_queue = kwargs["azure_ai_assistants_tests_storage_queue"]
+        async with self.create_client(**kwargs) as client:
+            azure_function_tool = AzureFunctionTool(
+                name="foo",
+                description="Get answers from the foo bot.",
+                parameters={
+                    "type": "object",
+                    "properties": {
+                        "query": {"type": "string", "description": "The question to ask."},
+                        "outputqueueuri": {"type": "string", "description": "The full output queue uri."},
+                    },
+                },
+                input_queue=AzureFunctionStorageQueue(
+                    queue_name="azure-function-foo-input",
+                    storage_service_endpoint=storage_queue,
+                ),
+                output_queue=AzureFunctionStorageQueue(
+                    queue_name="azure-function-tool-output",
+                    storage_service_endpoint=storage_queue,
+                ),
+            )
+            assistant = await client.create_assistant(
+                model="gpt-4",
+                name="azure-function-assistant-foo",
+                instructions=(
+                    "You are a helpful support assistant. Use the provided function any "
+                    "time the prompt contains the string 'What would foo say?'. When "
+                    "you invoke the function, ALWAYS specify the output queue uri parameter as "
+                    f"'{storage_queue}/azure-function-tool-output'"
+                    '. Always responds with "Foo says" and then the response from the tool.'
+                ),
+                headers={"x-ms-enable-preview": "true"},
+                tools=azure_function_tool.definitions,
+            )
+            assert assistant.id, "The assistant was not created"
+
+            # Create a thread
+            thread = await client.create_thread()
+            assert thread.id, "The thread was not created."
+
+            # Create a message
+            message = await client.create_message(
+                thread_id=thread.id,
+                role="user",
+                content="What is the most prevalent element in the universe? What would foo say?",
+            )
+            assert message.id, "The message was not created."
+
+            run = await client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+            assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state."
+
+            # Get messages from the thread
+            messages = await client.list_messages(thread_id=thread.id)
+            assert len(messages.text_messages) > 1, "No messages were received from assistant."
+
+            # Check that the function response appears in at least one message.
+            assert any("bar" in msg.text.value.lower() for msg in messages.text_messages)
+
+            # Delete the assistant once done
+            result = await client.delete_assistant(assistant.id)
+            assert result.deleted, "The assistant was not deleted."
+
+    @assistantClientPreparer()
+    @recorded_by_proxy_async
+    async def test_client_with_thread_messages(self, **kwargs):
+        """Test assistant with thread messages."""
+        async with self.create_client(**kwargs) as client:
+
+            # [START create_assistant]
+            assistant = await client.create_assistant(
+                model="gpt-4",
+                name="my-assistant",
+                instructions="You are helpful assistant",
+            )
+            assert assistant.id, "The assistant was not created."
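+            # Seed the thread with one user message, then supply the extra turns to
+            # the run via additional_messages below.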
+            thread = await client.create_thread()
+            assert thread.id, "Thread was not created"
+
+            message = await client.create_message(
+                thread_id=thread.id, role="user", content="What is the equation of light energy?"
+            )
+            assert message.id, "The message was not created."
+
+            additional_messages = [
+                ThreadMessageOptions(role=MessageRole.ASSISTANT, content="E=mc^2"),
+                ThreadMessageOptions(role=MessageRole.USER, content="What is the impedance formula?"),
+            ]
+            run = await client.create_run(
+                thread_id=thread.id, assistant_id=assistant.id, additional_messages=additional_messages
+            )
+
+            # poll the run as long as run status is queued or in progress
+            while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS]:
+                # wait for a second
+                time.sleep(1)
+                run = await client.get_run(
+                    thread_id=thread.id,
+                    run_id=run.id,
+                )
+            assert run.status == RunStatus.COMPLETED, run.last_error
+
+            assert (await client.delete_assistant(assistant.id)).deleted, "The assistant was not deleted"
+            messages = await client.list_messages(thread_id=thread.id)
+            assert len(messages.data), "The data from the assistant was not received."
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("Recordings not yet implemented")
+    @recorded_by_proxy_async
+    async def test_include_file_search_results_no_stream(self, **kwargs):
+        """Test using include_file_search."""
+        await self._do_test_include_file_search_results(use_stream=False, include_content=True, **kwargs)
+        await self._do_test_include_file_search_results(use_stream=False, include_content=False, **kwargs)
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("Recordings not yet implemented")
+    @recorded_by_proxy_async
+    async def test_include_file_search_results_stream(self, **kwargs):
+        """Test using include_file_search with streaming."""
+        await self._do_test_include_file_search_results(use_stream=True, include_content=True, **kwargs)
+        await self._do_test_include_file_search_results(use_stream=True, include_content=False, **kwargs)
+
+    async def _do_test_include_file_search_results(self, use_stream, include_content, **kwargs):
+        """Run the test with file search results."""
+        async with self.create_client(**kwargs) as ai_client:
+            ds = [
+                VectorStoreDataSource(
+                    asset_identifier=kwargs["azure_ai_assistants_tests_data_path"],
+                    asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
+                )
+            ]
+            vector_store = await ai_client.create_vector_store_and_poll(
+                file_ids=[], data_sources=ds, name="my_vectorstore"
+            )
+            assert vector_store.id
+
+            file_search = FileSearchTool(vector_store_ids=[vector_store.id])
+            assistant = await ai_client.create_assistant(
+                model="gpt-4o",
+                name="my-assistant",
+                instructions="Hello, you are helpful assistant and can search information from uploaded files",
+                tools=file_search.definitions,
+                tool_resources=file_search.resources,
+            )
+            assert assistant.id
+            thread = await ai_client.create_thread()
+            assert thread.id
+            # create message
+            message = await ai_client.create_message(
+                thread_id=thread.id,
+                role="user",
+                content="What does Contoso Galaxy Innovations produce?",
+            )
+            assert message.id, "The message was not created."
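+            # When FILE_SEARCH_CONTENTS is requested via include, the run steps carry
+            # the matched file content; otherwise the content field is omitted.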
+            include = [RunAdditionalFieldList.FILE_SEARCH_CONTENTS] if include_content else None
+
+            if use_stream:
+                run = None
+                async with await ai_client.create_stream(
+                    thread_id=thread.id, assistant_id=assistant.id, include=include
+                ) as stream:
+                    async for event_type, event_data, _ in stream:
+                        if isinstance(event_data, ThreadRun):
+                            run = event_data
+                        elif event_type == AssistantStreamEvent.DONE:
+                            print("Stream completed.")
+                            break
+            else:
+                run = await ai_client.create_and_process_run(
+                    thread_id=thread.id, assistant_id=assistant.id, include=include
+                )
+            assert run is not None
+            assert run.status == RunStatus.COMPLETED
+            steps = await ai_client.list_run_steps(thread_id=thread.id, run_id=run.id, include=include)
+            # The 1st (not 0th) step is a tool call.
+            step_id = steps.data[1].id
+            one_step = await ai_client.get_run_step(
+                thread_id=thread.id, run_id=run.id, step_id=step_id, include=include
+            )
+            self._assert_file_search_valid(one_step.step_details.tool_calls[0], include_content)
+            self._assert_file_search_valid(steps.data[1].step_details.tool_calls[0], include_content)
+
+            messages = await ai_client.list_messages(thread_id=thread.id)
+            assert len(messages)
+
+            await ai_client.delete_vector_store(vector_store.id)
+            # delete assistant and close client
+            await ai_client.delete_assistant(assistant.id)
+            print("Deleted assistant")
+            await ai_client.close()
+
+    def _assert_file_search_valid(self, tool_call: Any, include_content: bool) -> None:
+        """Test that file search result is properly populated."""
+        assert isinstance(tool_call, RunStepFileSearchToolCall), f"Wrong type of tool call: {type(tool_call)}."
+        assert isinstance(
+            tool_call.file_search, RunStepFileSearchToolCallResults
+        ), f"Wrong type of search results: {type(tool_call.file_search)}."
+        assert tool_call.file_search.results
+        assert isinstance(
+            tool_call.file_search.results[0], RunStepFileSearchToolCallResult
+        ), f"Wrong type of search result: {type(tool_call.file_search.results[0])}."
+        if include_content:
+            assert tool_call.file_search.results[0].content
+            assert isinstance(tool_call.file_search.results[0].content[0], FileSearchToolCallContent)
+            assert tool_call.file_search.results[0].content[0].type == "text"
+            assert tool_call.file_search.results[0].content[0].text
+        else:
+            assert tool_call.file_search.results[0].content is None
+
+    @assistantClientPreparer()
+    @pytest.mark.skip("Recordings not yet implemented")
+    @recorded_by_proxy_async
+    async def test_assistants_with_json_schema(self, **kwargs):
+        """Test structured output from the assistant."""
+        async with self.create_client(**kwargs) as ai_client:
+            assistant = await ai_client.create_assistant(
+                # Note: only gpt-4o-mini-2024-07-18 and
+                # gpt-4o-2024-08-06 and later support structured output.
+                model="gpt-4o-mini",
+                name="my-assistant",
+                instructions="Extract the information about planets.",
+                headers={"x-ms-enable-preview": "true"},
+                response_format=ResponseFormatJsonSchemaType(
+                    json_schema=ResponseFormatJsonSchema(
+                        name="planet_mass",
+                        description="Extract planet mass.",
+                        schema={
+                            "$defs": {
+                                "Planets": {"enum": ["Earth", "Mars", "Jupyter"], "title": "Planets", "type": "string"}
+                            },
+                            "properties": {
+                                "planet": {"$ref": "#/$defs/Planets"},
+                                "mass": {"title": "Mass", "type": "number"},
+                            },
+                            "required": ["planet", "mass"],
+                            "title": "Planet",
+                            "type": "object",
+                        },
+                    )
+                ),
+            )
+            assert assistant.id
+
+            thread = await ai_client.create_thread()
+            assert thread.id
+
+            message = await ai_client.create_message(
+                thread_id=thread.id,
+                role="user",
+                content=("The mass of the Mars is 6.4171E23 kg"),
+            )
+            assert message.id
+
+            run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
+
+            assert run.status == RunStatus.COMPLETED, run.last_error.message
+
+            del_assistant = await ai_client.delete_assistant(assistant.id)
+            assert del_assistant.deleted
+
+            messages = await ai_client.list_messages(thread_id=thread.id)
+
+            planet_info = []
+            # The messages are returned in reverse order; iterate them and
+            # keep only the text contents.
+            for data_point in reversed(messages.data):
+                last_message_content = data_point.content[-1]
+                # We will only list assistant responses here.
+                if isinstance(last_message_content, MessageTextContent) and data_point.role == MessageRole.ASSISTANT:
+                    planet_info.append(json.loads(last_message_content.text.value))
+            assert len(planet_info) == 1
+            assert len(planet_info[0]) == 2
+            # Use an absolute tolerance; as a positional argument, 1e22 would be
+            # interpreted as a (meaninglessly large) relative tolerance.
+            assert planet_info[0].get("mass") == pytest.approx(6.4171e23, abs=1e22)
+            assert planet_info[0].get("planet") == "Mars"
+
+    async def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str:
+        """Return the file id if kwargs contains a file path, otherwise None."""
+        if "file_path" in kwargs:
+            file = await ai_client.upload_file_and_poll(
+                file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS
+            )
+            assert file.id, "The file was not uploaded."
+            return file.id
+        return None
+
+    async def _remove_file_maybe(self, file_id: str, ai_client: AssistantsClient) -> None:
+        """Remove file if we have file ID."""
+        if file_id:
+            await ai_client.delete_file(file_id)
+
+    # # **********************************************************************************
+    # #
+    # # HAPPY PATH SERVICE TESTS - Streaming APIs
+    # #
+    # # **********************************************************************************
+
+    # TODO
+
+    # # **********************************************************************************
+    # #
+    # # NEGATIVE TESTS
+    # #
+    # # **********************************************************************************
+
+    """
+    # DISABLED, PASSES LIVE ONLY: recordings don't capture DNS lookup errors
+    # test assistant creation and deletion
+    @assistantClientPreparer()
+    @recorded_by_proxy_async
+    async def test_negative_create_delete_assistant(self, **kwargs):
+        # create client using bad endpoint
+        bad_connection_string = "https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm"
+
+        credential = self.get_credential(AssistantsClient, is_async=False)
+        client = AssistantsClient.from_connection_string(
+            credential=credential,
+            connection=bad_connection_string,
+        )
+
+        # attempt to create assistant with bad client
+        exception_caught = False
+        try:
+            assistant = await client.create_assistant(
+                model="gpt-4o", name="my-assistant", instructions="You are helpful assistant"
+            )
+        # check for error (will not have a status code since it failed on request -- no response was received)
+        except (ServiceRequestError, HttpResponseError) as e:
+            exception_caught = True
+            if isinstance(e, ServiceRequestError):
+                assert e.message
+                assert "failed to resolve 'foo.bar.some-domain.ms'" in e.message.lower()
+            else:
+                assert "No such host is known" in str(e) and "foo.bar.some-domain.ms" in str(e)
+
+        # close client and confirm an exception was caught
+        await client.close()
+        assert exception_caught
+    """
diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py
new file mode 100644
index 000000000000..5bae3fbe46f1
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py
@@ -0,0 +1,555 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+from typing import Any, Callable, Iterator, List, MutableMapping, Optional, Dict
+
+import json
+import os
+import pytest
+from unittest.mock import MagicMock, Mock, patch
+
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import (
+    CodeInterpreterTool,
+    FunctionTool,
+    RequiredFunctionToolCall,
+    RequiredFunctionToolCallDetails,
+    RequiredToolCall,
+    RunStatus,
+    SubmitToolOutputsAction,
+    SubmitToolOutputsDetails,
+    ToolSet,
+    ToolOutput,
+)
+
+from user_functions import user_functions
+
+
+JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
+
+
+def read_file(file_name: str) -> str:
+    with open(os.path.join(os.path.dirname(__file__), "assets", f"{file_name}.txt"), "r") as file:
+        return file.read()
+
+
+main_stream_response = read_file("main_stream_response")
+fetch_current_datetime_and_weather_stream_response = read_file("fetch_current_datetime_and_weather_stream_response")
+send_email_stream_response = read_file("send_email_stream_response")
+
+
+def convert_to_byte_iterator(response: str) -> Iterator[bytes]:
+    yield response.encode()
+
+
+def function1():
+    return "output from the first assistant"
+
+
+def function2():
+    return "output from the second assistant"
+
+
+class TestAssistantsMock:
+    """Tests for assistant operations"""
+
+    LOCAL_FN = {function1.__name__: function1, function2.__name__: function2}
+
+    def get_mock_client(self) -> AssistantsClient:
+        """Return a fake assistants client."""
+        client = AssistantsClient(
+            endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com",
+            credential=MagicMock(),
+        )
+        client.submit_tool_outputs_to_run = MagicMock()
+        client.submit_tool_outputs_to_stream = MagicMock()
+        return client
+
+    def get_toolset(self, file_id: Optional[str], function: Optional[Callable]) -> Optional[ToolSet]:
+        """Get the tool set with given file id and function"""
+        if file_id is None or function is None:
+            return None
+        functions = FunctionTool({function})
+        code_interpreter = CodeInterpreterTool(file_ids=[file_id])
+        toolset = ToolSet()
+        toolset.add(functions)
+        toolset.add(code_interpreter)
+        return toolset
+
+    def _assert_pipeline_and_reset(self, mock_pipeline_run: MagicMock, tool_set: Optional[ToolSet]) -> None:
+        """Check that the pipeline has correct values of tools."""
+        mock_pipeline_run.assert_called_once()
+        data = json.loads(mock_pipeline_run.call_args_list[0].args[0].body)
+        assert isinstance(data, dict), f"Wrong body JSON type: expected dict, found {type(data)}"
+        if tool_set is not None:
+            assert "tool_resources" in data, "tool_resources must be in data"
+            assert "tools" in data, "tools must be in data"
+            expected_file_id = tool_set.resources.code_interpreter.file_ids[0]
+            expected_function_name = tool_set.definitions[0].function.name
+            # Check code interpreter file id.
+            assert data["tool_resources"], "Tool resources are empty."
+            assert "code_interpreter" in data["tool_resources"]
+            assert data["tool_resources"]["code_interpreter"], "Code interpreter section is empty."
+            assert "file_ids" in data["tool_resources"]["code_interpreter"]
+            assert (
+                data["tool_resources"]["code_interpreter"]["file_ids"][0] == expected_file_id
+            ), f"{expected_file_id=}, but found {data['tool_resources']['code_interpreter']['file_ids']}"
+            # Check tools.
+            assert data["tools"], "Tools must not be empty"
+            assert "function" in data["tools"][0]
+            assert "name" in data["tools"][0]["function"]
+            assert (
+                data["tools"][0]["function"]["name"] == expected_function_name
+            ), f"{expected_function_name=}, but encountered {data['tools'][0]['function']['name']}"
+        else:
+            assert "tool_resources" not in data, "tool_resources must not be in data"
+            assert "tools" not in data, "tools must not be in data"
+        mock_pipeline_run.reset_mock()
+
+    def _get_assistant_json(self, name: str, assistant_id: str, tool_set: Optional[ToolSet]) -> Dict[str, Any]:
+        """Read in the assistant JSON, so that we can assume the service returned it."""
+        with open(
+            os.path.join(os.path.dirname(__file__), "test_data", "assistant.json"),
+            "r",
+        ) as fp:
+            assistant_dict: Dict[str, Any] = json.load(fp)
+        assert isinstance(assistant_dict, dict)
+        assistant_dict["name"] = name
+        assistant_dict["id"] = assistant_id
+        if tool_set is not None:
+            assistant_dict["tool_resources"] = tool_set.resources.as_dict()
+            assistant_dict["tools"] = tool_set.definitions
+        return assistant_dict
+
+    def _get_run(
+        self, run_id: str, tool_set: Optional[ToolSet], add_azure_fn: bool = False, is_complete: bool = False
+    ) -> Dict[str, Any]:
+        """Return JSON as if we have created the run."""
+        with open(
+            os.path.join(
+                os.path.dirname(__file__),
+                "test_data",
+                "thread_run.json",
+            ),
+            "r",
+        ) as fp:
+            run_dict: Dict[str, Any] = json.load(fp)
+        assert isinstance(run_dict, dict)
+        run_dict["id"] = run_id
+        # The fake assistant id is derived from the run id ("run123" -> "123").
+        run_dict["assistant_id"] = run_id[3:]
+        if is_complete:
+            run_dict["status"] = RunStatus.COMPLETED
+        tool_calls = []
+        definitions = []
+        if add_azure_fn:
+            tool_calls.append(RequiredToolCall(id="1", type="azure_function"))
+            definitions.append(
+                {
+                    "type": "azure_function",
+                    "azure_function": {
+                        "function": {
+                            "name": "foo",
+                            "description": "Get answers from the foo bot.",
+                            "parameters": {
+                                "type": "object",
+                                "properties": {
+                                    "query": {"type": "string", "description": "The question to ask."},
+                                    "outputqueueuri": {"type": "string", "description": "The full output queue uri."},
+                                },
+                                "required": ["query"],
+                            },
+                        },
+                        "input_binding": {
+                            "type": "storage_queue",
+                            "storage_queue": {
+                                "queue_service_uri": "https://example.windows.net",
+                                "queue_name": "azure-function-foo-input",
+                            },
+                        },
+                        "output_binding": {
+                            "type": "storage_queue",
+                            "storage_queue": {
+                                "queue_service_uri": "https://example.queue.core.windows.net",
+                                "queue_name": "azure-function-tool-output",
+                            },
+                        },
+                    },
+                }
+            )
+        if tool_set is not None:
+            tool_calls.append(
+                RequiredFunctionToolCall(
+                    id="0",
+                    function=RequiredFunctionToolCallDetails(
+                        name=tool_set.definitions[0].function.name,
+                        arguments="{}",
+                    ),
+                )
+            )
+            definitions.extend(tool_set.definitions)
+            run_dict["tool_resources"] = tool_set.resources.as_dict()
+        if tool_calls:
+            sb = SubmitToolOutputsAction(submit_tool_outputs=SubmitToolOutputsDetails(tool_calls=tool_calls))
+            run_dict["required_action"] = sb.as_dict()
+        run_dict["tools"] = definitions
+        return run_dict
+
+    def _assert_tool_call(self, submit_tool_mock: MagicMock, run_id: str, tool_set: Optional[ToolSet]) -> None:
+        """Check that submit_tool_outputs_to_run was called with the correct parameters or was not called."""
+        if tool_set is not None:
+            expected_out = TestAssistantsMock.LOCAL_FN[tool_set.definitions[0].function.name]()
+            submit_tool_mock.assert_called_once()
+            submit_tool_mock.assert_called_with(
+                thread_id="some_thread_id",
+                run_id=run_id,
+                tool_outputs=[{"tool_call_id":
"0", "output": expected_out}], + ) + submit_tool_mock.reset_mock() + else: + submit_tool_mock.assert_not_called() + + def _assert_toolset_dict(self, assistants_client: AssistantsClient, assistant_id: str, toolset: Optional[ToolSet]): + """Check that the tool set dictionary state is as expected.""" + if toolset is None: + assert assistant_id not in assistants_client._toolset + else: + assert assistants_client._toolset.get(assistant_id) is not None + + @patch("azure.ai.assistants._client.PipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,file_assistant_2", + [ + ("file_for_assistant1", "file_for_assistant2"), + (None, "file_for_assistant2"), + ("file_for_assistant1", None), + (None, None), + ], + ) + def test_multiple_assistants_create( + self, + mock_pipeline_client_gen: MagicMock, + file_assistant_1: Optional[str], + file_assistant_2: Optional[str], + ) -> None: + """Test assistants can get correct toolset.""" + toolset1 = self.get_toolset(file_assistant_1, function1) + toolset2 = self.get_toolset(file_assistant_2, function2) + + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset1), + self._get_assistant_json("second", "456", toolset2), + self._get_run("run123", toolset1), # create_run + self._get_run("run123", toolset1), # get_run + self._get_run("run123", toolset1, is_complete=True), # get_run after resubmitting with tool results + self._get_run("run456", toolset2), # create_run + self._get_run("run456", toolset2), # get_run + self._get_run("run456", toolset2, is_complete=True), # get_run after resubmitting with tool results + "{}", # delete assistant 1 + "{}", # delete assistant 2 + ] + mock_pipeline_response = MagicMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = MagicMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + with assistants_client: + # Check that pipelines are created as expected. + assistant1 = assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset1, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset1) + + assistant2 = assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="second", + instructions="You are a helpful assistant", + toolset=toolset2, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset2) + # Check that the new assistants are called with correct tool sets. + assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset1) + + assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant2.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run456", toolset2) + # Check the contents of a toolset + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + self._assert_toolset_dict(assistants_client, assistant2.id, toolset2) + # Check that we cleanup tools after deleting assistant. 
+            assistants_client.delete_assistant(assistant1.id)
+            self._assert_toolset_dict(assistants_client, assistant1.id, None)
+            self._assert_toolset_dict(assistants_client, assistant2.id, toolset2)
+            assistants_client.delete_assistant(assistant2.id)
+            self._assert_toolset_dict(assistants_client, assistant1.id, None)
+            self._assert_toolset_dict(assistants_client, assistant2.id, None)
+
+    @patch("azure.ai.assistants._client.PipelineClient")
+    @pytest.mark.parametrize(
+        "file_assistant_1,file_assistant_2",
+        [
+            ("file_for_assistant1", "file_for_assistant2"),
+            (None, "file_for_assistant2"),
+            ("file_for_assistant1", None),
+            (None, None),
+        ],
+    )
+    def test_update_assistant_tools(
+        self,
+        mock_pipeline_client_gen: MagicMock,
+        file_assistant_1: Optional[str],
+        file_assistant_2: Optional[str],
+    ) -> None:
+        """Test that tools are properly updated."""
+        toolset1 = self.get_toolset(file_assistant_1, function1)
+        toolset2 = self.get_toolset(file_assistant_2, function2)
+        mock_response = MagicMock()
+        mock_response.status_code = 200
+        mock_response.json.side_effect = [
+            self._get_assistant_json("first", "123", toolset1),
+            self._get_assistant_json("first", "123", toolset2),
+        ]
+        mock_pipeline_response = MagicMock()
+        mock_pipeline_response.http_response = mock_response
+        mock_pipeline = MagicMock()
+        mock_pipeline._pipeline.run.return_value = mock_pipeline_response
+        mock_pipeline_client_gen.return_value = mock_pipeline
+        assistants_client = self.get_mock_client()
+        with assistants_client:
+            # Check that pipelines are created as expected.
+            assistant1 = assistants_client.create_assistant(
+                model="gpt-4-1106-preview",
+                name="first",
+                instructions="You are a helpful assistant",
+                toolset=toolset1,
+            )
+            self._assert_toolset_dict(assistants_client, assistant1.id, toolset1)
+            assistants_client.update_assistant(assistant1.id, toolset=toolset2)
+            if toolset2 is None:
+                self._assert_toolset_dict(assistants_client, assistant1.id, toolset1)
+            else:
+                self._assert_toolset_dict(assistants_client, assistant1.id, toolset2)
+
+    @patch("azure.ai.assistants._client.PipelineClient")
+    @pytest.mark.parametrize(
+        "file_assistant_1,file_assistant_2",
+        [
+            ("file_for_assistant1", "file_for_assistant2"),
+            (None, "file_for_assistant2"),
+            ("file_for_assistant1", None),
+            (None, None),
+        ],
+    )
+    def test_create_run_tools_override(
+        self,
+        mock_pipeline_client_gen: MagicMock,
+        file_assistant_1: Optional[str],
+        file_assistant_2: Optional[str],
+    ) -> None:
+        """Test that the tool set passed to create_and_process_run is the one used."""
+        toolset1 = self.get_toolset(file_assistant_1, function1)
+        toolset2 = self.get_toolset(file_assistant_2, function2)
+        mock_response = MagicMock()
+        mock_response.status_code = 200
+        side_effect = [self._get_assistant_json("first", "123", toolset1)]
+        if toolset1 is not None or toolset2 is not None:
+            toolset = toolset2 if toolset2 is not None else toolset1
+            side_effect.append(self._get_run("run123", toolset))  # create_run
+            side_effect.append(self._get_run("run123", toolset))  # get_run
+            side_effect.append(
+                self._get_run("run123", toolset, is_complete=True)
+            )  # get_run after resubmitting with tool results
+        else:
+            side_effect.append(
+                self._get_run("run123", None, is_complete=True)
+            )  # Run must be marked as completed in this case.
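+        # mock_response.json() returns the queued payloads in request order,
+        # emulating the sequence of service responses for this scenario.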
+        mock_response.json.side_effect = side_effect
+        mock_pipeline_response = MagicMock()
+        mock_pipeline_response.http_response = mock_response
+        mock_pipeline = MagicMock()
+        mock_pipeline._pipeline.run.return_value = mock_pipeline_response
+        mock_pipeline_client_gen.return_value = mock_pipeline
+        assistants_client = self.get_mock_client()
+        with assistants_client:
+            # Check that pipelines are created as expected.
+            assistant1 = assistants_client.create_assistant(
+                model="gpt-4-1106-preview",
+                name="first",
+                instructions="You are a helpful assistant",
+                toolset=toolset1,
+            )
+            self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset1)
+            self._assert_toolset_dict(assistants_client, assistant1.id, toolset1)
+
+            # Create run with a new tool set, which may also be None.
+            assistants_client.create_and_process_run(
+                thread_id="some_thread_id", assistant_id=assistant1.id, toolset=toolset2
+            )
+            if toolset2 is not None:
+                self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset2)
+            else:
+                self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset1)
+            self._assert_toolset_dict(assistants_client, assistant1.id, toolset1)
+
+    @patch("azure.ai.assistants._client.PipelineClient")
+    @pytest.mark.parametrize(
+        "file_assistant_1,add_azure_fn",
+        [
+            ("file_for_assistant1", True),
+            (None, True),
+            ("file_for_assistant1", False),
+            (None, False),
+        ],
+    )
+    def test_with_azure_function(
+        self,
+        mock_pipeline_client_gen: MagicMock,
+        file_assistant_1: Optional[str],
+        add_azure_fn: bool,
+    ) -> None:
+        """Test azure function with toolset."""
+        toolset = self.get_toolset(file_assistant_1, function1)
+        mock_response = MagicMock()
+        mock_response.status_code = 200
+        mock_response.json.side_effect = [
+            self._get_assistant_json("first", "123", toolset),
+            self._get_run("run123", toolset, add_azure_fn=add_azure_fn),  # create_run
+            self._get_run("run123", toolset, add_azure_fn=add_azure_fn),  # get_run
+            self._get_run(
+                "run123", toolset, add_azure_fn=add_azure_fn, is_complete=True
+            ),  # get_run after resubmitting with tool results
+        ]
+        mock_pipeline_response = MagicMock()
+        mock_pipeline_response.http_response = mock_response
+        mock_pipeline = MagicMock()
+        mock_pipeline._pipeline.run.return_value = mock_pipeline_response
+        mock_pipeline_client_gen.return_value = mock_pipeline
+        assistants_client = self.get_mock_client()
+        with assistants_client:
+            # Check that pipelines are created as expected.
+            assistant1 = assistants_client.create_assistant(
+                model="gpt-4-1106-preview",
+                name="first",
+                instructions="You are a helpful assistant",
+                toolset=toolset,
+            )
+            # Create the run without a toolset override.
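+            # Azure function tool calls are executed service-side, so only the local
+            # function tool is expected to trigger submit_tool_outputs_to_run.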
+ assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset) + + def _assert_stream_call(self, submit_tool_mock: MagicMock, run_id: str, tool_set: Optional[ToolSet]) -> None: + """Assert that stream has received the correct values.""" + if tool_set is not None: + expected_out = TestAssistantsMock.LOCAL_FN[tool_set.definitions[0].function.name]() + submit_tool_mock.assert_called_once() + submit_tool_mock.assert_called_with( + thread_id="some_thread_id", + run_id=run_id, + tool_outputs=[{"tool_call_id": "0", "output": expected_out}], + event_handler=None, + ) + submit_tool_mock.reset_mock() + else: + submit_tool_mock.assert_not_called() + + @patch("azure.ai.assistants._client.PipelineClient") + @pytest.mark.skip("Recordings not yet available") + @pytest.mark.parametrize( + "file_assistant_1,add_azure_fn", + [ + ("file_for_assistant1", True), + (None, True), + ("file_for_assistant1", False), + (None, False), + ], + ) + def test_handle_submit_tool_outputs( + self, + mock_pipeline_client_gen: MagicMock, + file_assistant_1: Optional[str], + add_azure_fn: bool, + ) -> None: + """Test handling of stream tools response.""" + toolset = self.get_toolset(file_assistant_1, function1) + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset), + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # create_run + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # get_run + self._get_run( + "run123", toolset, add_azure_fn=add_azure_fn, is_complete=True + ), # get_run after resubmitting with tool results + ] + mock_pipeline_response = MagicMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = MagicMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + with assistants_client: + # Check that pipelines are created as expected. + assistant1 = assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # Create run with new tool set, which also can be none. 
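+            # The run returned below carries the requires_action payload captured from
+            # the mocked service; _handle_submit_tool_outputs should replay it through
+            # the streaming submit API.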
+ run = assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset) + assistants_client._handle_submit_tool_outputs(run) + self._assert_stream_call(assistants_client.submit_tool_outputs_to_stream, "run123", toolset) + + +class TestIntegrationAssistantsMock: + + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, *, tool_outputs: List[ToolOutput], stream_parameter: bool, stream: bool + ) -> Iterator[bytes]: + assert thread_id == "thread_01" + assert run_id == "run_01" + assert stream_parameter == True + assert stream == True + if ( + len(tool_outputs) == 2 + and tool_outputs[0]["tool_call_id"] == "call_01" + and tool_outputs[1]["tool_call_id"] == "call_02" + ): + return convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response) + elif len(tool_outputs) == 1 and tool_outputs[0]["tool_call_id"] == "call_03": + return convert_to_byte_iterator(send_email_stream_response) + raise ValueError("Unexpected tool outputs") + + @patch( + "azure.ai.assistants._operations.AssistantsClientOperationsMixin.create_run", + return_value=convert_to_byte_iterator(main_stream_response), + ) + @patch("azure.ai.assistants.AssistantsClient.__init__", return_value=None) + @patch( + "azure.ai.assistants._operations.AssistantsClientOperationsMixin.submit_tool_outputs_to_run", + ) + def test_create_stream_with_tool_calls(self, mock_submit_tool_outputs_to_run: Mock, *args): + mock_submit_tool_outputs_to_run.side_effect = self.submit_tool_outputs_to_run + functions = FunctionTool(user_functions) + toolset = ToolSet() + toolset.add(functions) + + operation = AssistantsClient() + operation._toolset = {"asst_01": toolset} + count = 0 + + with operation.create_stream(thread_id="thread_id", assistant_id="asst_01") as stream: + for _ in stream: + count += 1 + assert count == ( + main_stream_response.count("event:") + + fetch_current_datetime_and_weather_stream_response.count("event:") + + send_email_stream_response.count("event:") + ) diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py new file mode 100644 index 000000000000..278b3ad963f1 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py @@ -0,0 +1,565 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+from typing import Any, Callable, MutableMapping, Optional, Dict, List, AsyncIterator
+
+import json
+import os
+import pytest
+from unittest.mock import AsyncMock, MagicMock, Mock, patch
+
+from azure.ai.assistants.aio import AssistantsClient
+from azure.ai.assistants.models import (
+    AsyncFunctionTool,
+    AsyncToolSet,
+    CodeInterpreterTool,
+    RequiredFunctionToolCall,
+    RequiredFunctionToolCallDetails,
+    RequiredToolCall,
+    RunStatus,
+    SubmitToolOutputsAction,
+    SubmitToolOutputsDetails,
+    ToolOutput,
+)
+
+from user_functions import user_functions
+
+
+JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
+
+
+def read_file(file_name: str) -> str:
+    with open(os.path.join(os.path.dirname(__file__), "assets", f"{file_name}.txt"), "r") as file:
+        return file.read()
+
+
+main_stream_response = read_file("main_stream_response")
+fetch_current_datetime_and_weather_stream_response = read_file("fetch_current_datetime_and_weather_stream_response")
+send_email_stream_response = read_file("send_email_stream_response")
+
+
+async def convert_to_byte_iterator(response: str) -> AsyncIterator[bytes]:
+    yield response.encode()
+
+
+def function1():
+    return "output from the first assistant"
+
+
+def function2():
+    return "output from the second assistant"
+
+
+class TestAssistantsMock:
+    """Tests for assistant operations"""
+
+    LOCAL_FN = {function1.__name__: function1, function2.__name__: function2}
+
+    def get_mock_client(self) -> AssistantsClient:
+        """Return a fake assistants client."""
+        client = AssistantsClient(
+            endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com",
+            credential=AsyncMock(),
+        )
+        client.submit_tool_outputs_to_run = AsyncMock()
+        client.submit_tool_outputs_to_stream = AsyncMock()
+        # Use a synchronous mock for format_url to avoid an unawaited-coroutine warning.
+        client._client.format_url = MagicMock()
+        return client
+
+    def get_toolset(self, file_id: Optional[str], function: Optional[Callable]) -> Optional[AsyncToolSet]:
+        """Get the tool set with given file id and function"""
+        if file_id is None or function is None:
+            return None
+        functions = AsyncFunctionTool({function})
+        code_interpreter = CodeInterpreterTool(file_ids=[file_id])
+        toolset = AsyncToolSet()
+        toolset.add(functions)
+        toolset.add(code_interpreter)
+        return toolset
+
+    def _assert_pipeline_and_reset(self, mock_pipeline_run: AsyncMock, tool_set: Optional[AsyncToolSet]) -> None:
+        """Check that the pipeline has correct values of tools."""
+        mock_pipeline_run.assert_called_once()
+        data = json.loads(mock_pipeline_run.call_args_list[0].args[0].body)
+        assert isinstance(data, dict), f"Wrong body JSON type: expected dict, found {type(data)}"
+        if tool_set is not None:
+            assert "tool_resources" in data, "tool_resources must be in data"
+            assert "tools" in data, "tools must be in data"
+            expected_file_id = tool_set.resources.code_interpreter.file_ids[0]
+            expected_function_name = tool_set.definitions[0].function.name
+            # Check code interpreter file id.
+            assert data["tool_resources"], "Tool resources are empty."
+            assert "code_interpreter" in data["tool_resources"]
+            assert data["tool_resources"]["code_interpreter"], "Code interpreter section is empty."
+            assert "file_ids" in data["tool_resources"]["code_interpreter"]
+            assert (
+                data["tool_resources"]["code_interpreter"]["file_ids"][0] == expected_file_id
+            ), f"{expected_file_id=}, but found {data['tool_resources']['code_interpreter']['file_ids']}"
+            # Check tools.
+            assert data["tools"], "Tools must not be empty"
+            assert "function" in data["tools"][0]
+            assert "name" in data["tools"][0]["function"]
+            assert (
+                data["tools"][0]["function"]["name"] == expected_function_name
+            ), f"{expected_function_name=}, but encountered {data['tools'][0]['function']['name']}"
+        else:
+            assert "tool_resources" not in data, "tool_resources must not be in data"
+            assert "tools" not in data, "tools must not be in data"
+        mock_pipeline_run.reset_mock()
+
+    def _get_assistant_json(self, name: str, assistant_id: str, tool_set: Optional[AsyncToolSet]) -> Dict[str, Any]:
+        """Read in the assistant JSON, so that we can assume the service returned it."""
+        with open(
+            os.path.join(os.path.dirname(__file__), "test_data", "assistant.json"),
+            "r",
+        ) as fp:
+            assistant_dict: Dict[str, Any] = json.load(fp)
+        assert isinstance(assistant_dict, dict)
+        assistant_dict["name"] = name
+        assistant_dict["id"] = assistant_id
+        if tool_set is not None:
+            assistant_dict["tool_resources"] = tool_set.resources.as_dict()
+            assistant_dict["tools"] = tool_set.definitions
+        return assistant_dict
+
+    def _get_run(
+        self, run_id: str, tool_set: Optional[AsyncToolSet], add_azure_fn: bool = False, is_complete: bool = False
+    ) -> Dict[str, Any]:
+        """Return JSON as if we have created the run."""
+        with open(
+            os.path.join(
+                os.path.dirname(__file__),
+                "test_data",
+                "thread_run.json",
+            ),
+            "r",
+        ) as fp:
+            run_dict: Dict[str, Any] = json.load(fp)
+        assert isinstance(run_dict, dict)
+        run_dict["id"] = run_id
+        # The fake assistant id is derived from the run id ("run123" -> "123").
+        run_dict["assistant_id"] = run_id[3:]
+        if is_complete:
+            run_dict["status"] = RunStatus.COMPLETED
+        tool_calls = []
+        definitions = []
+        if add_azure_fn:
+            tool_calls.append(RequiredToolCall(id="1", type="azure_function"))
+            definitions.append(
+                {
+                    "type": "azure_function",
+                    "azure_function": {
+                        "function": {
+                            "name": "foo",
+                            "description": "Get answers from the foo bot.",
+                            "parameters": {
+                                "type": "object",
+                                "properties": {
+                                    "query": {"type": "string", "description": "The question to ask."},
+                                    "outputqueueuri": {"type": "string", "description": "The full output queue uri."},
+                                },
+                                "required": ["query"],
+                            },
+                        },
+                        "input_binding": {
+                            "type": "storage_queue",
+                            "storage_queue": {
+                                "queue_service_uri": "https://example.windows.net",
+                                "queue_name": "azure-function-foo-input",
+                            },
+                        },
+                        "output_binding": {
+                            "type": "storage_queue",
+                            "storage_queue": {
+                                "queue_service_uri": "https://example.queue.core.windows.net",
+                                "queue_name": "azure-function-tool-output",
+                            },
+                        },
+                    },
+                }
+            )
+        if tool_set is not None:
+            tool_calls.append(
+                RequiredFunctionToolCall(
+                    id="0",
+                    function=RequiredFunctionToolCallDetails(
+                        name=tool_set.definitions[0].function.name,
+                        arguments="{}",
+                    ),
+                )
+            )
+            definitions.extend(tool_set.definitions)
+            run_dict["tool_resources"] = tool_set.resources.as_dict()
+        if tool_calls:
+            sb = SubmitToolOutputsAction(submit_tool_outputs=SubmitToolOutputsDetails(tool_calls=tool_calls))
+            run_dict["required_action"] = sb.as_dict()
+        run_dict["tools"] = definitions
+        return run_dict
+
+    def _assert_tool_call(self, submit_tool_mock: AsyncMock, run_id: str, tool_set: Optional[AsyncToolSet]) -> None:
+        """Check that submit_tool_outputs_to_run was called with the correct parameters or was not called."""
+        if tool_set is not None:
+            expected_out = TestAssistantsMock.LOCAL_FN[tool_set.definitions[0].function.name]()
+            submit_tool_mock.assert_called_once()
+            submit_tool_mock.assert_called_with(
+                thread_id="some_thread_id",
+                run_id=run_id,
tool_outputs=[{"tool_call_id": "0", "output": expected_out}], + ) + submit_tool_mock.reset_mock() + else: + submit_tool_mock.assert_not_called() + + def _assert_toolset_dict( + self, assistants_client: AssistantsClient, assistant_id: str, toolset: Optional[AsyncToolSet] + ): + """Check that the tool set dictionary state is as expected.""" + if toolset is None: + assert assistant_id not in assistants_client._toolset + else: + assert assistants_client._toolset.get(assistant_id) is not None + + @pytest.mark.asyncio + @patch("azure.ai.assistants.aio._client.AsyncPipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,file_assistant_2", + [ + # ("file_for_assistant1", "file_for_assistant2"), + (None, "file_for_assistant2"), + # ("file_for_assistant1", None), + # (None, None), + ], + ) + async def test_multiple_assistants_create( + self, + mock_pipeline_client_gen: AsyncMock, + file_assistant_1: Optional[str], + file_assistant_2: Optional[str], + ) -> None: + """Test assistants can get correct toolset.""" + toolset1 = self.get_toolset(file_assistant_1, function1) + toolset2 = self.get_toolset(file_assistant_2, function2) + + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset1), + self._get_assistant_json("second", "456", toolset2), + self._get_run("run123", toolset1), # create_run + self._get_run("run123", toolset1), # get_run + self._get_run("run123", toolset1, is_complete=True), # get_run after resubmitting with tool results + self._get_run("run456", toolset2), # create_run + self._get_run("run456", toolset2), # get_run + self._get_run("run456", toolset2, is_complete=True), # get_run after resubmitting with tool results + "{}", # delete assistant 1 + "{}", # delete assistant 2 + ] + mock_pipeline_response = AsyncMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = AsyncMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + async with assistants_client: + # Check that pipelines are created as expected. + assistant1 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset1, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset1) + + assistant2 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="second", + instructions="You are a helpful assistant", + toolset=toolset2, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset2) + # Check that the new assistants are called with correct tool sets. + await assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset1) + + await assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant2.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run456", toolset2) + # Check the contents of a toolset + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + self._assert_toolset_dict(assistants_client, assistant2.id, toolset2) + # Check that we cleanup tools after deleting assistant. 
+            await assistants_client.delete_assistant(assistant1.id)
+            self._assert_toolset_dict(assistants_client, assistant1.id, None)
+            self._assert_toolset_dict(assistants_client, assistant2.id, toolset2)
+            await assistants_client.delete_assistant(assistant2.id)
+            self._assert_toolset_dict(assistants_client, assistant1.id, None)
+            self._assert_toolset_dict(assistants_client, assistant2.id, None)
+
+    @pytest.mark.asyncio
+    @patch("azure.ai.assistants.aio._client.AsyncPipelineClient")
+    @pytest.mark.parametrize(
+        "file_assistant_1,file_assistant_2",
+        [
+            ("file_for_assistant1", "file_for_assistant2"),
+            (None, "file_for_assistant2"),
+            ("file_for_assistant1", None),
+            (None, None),
+        ],
+    )
+    async def test_update_assistant_tools(
+        self,
+        mock_pipeline_client_gen: AsyncMock,
+        file_assistant_1: Optional[str],
+        file_assistant_2: Optional[str],
+    ) -> None:
+        """Test that tools are properly updated."""
+        toolset1 = self.get_toolset(file_assistant_1, function1)
+        toolset2 = self.get_toolset(file_assistant_2, function2)
+        mock_response = MagicMock()
+        mock_response.status_code = 200
+        mock_response.json.side_effect = [
+            self._get_assistant_json("first", "123", toolset1),
+            self._get_assistant_json("first", "123", toolset2),
+        ]
+        mock_pipeline_response = AsyncMock()
+        mock_pipeline_response.http_response = mock_response
+        mock_pipeline = AsyncMock()
+        mock_pipeline._pipeline.run.return_value = mock_pipeline_response
+        mock_pipeline_client_gen.return_value = mock_pipeline
+        assistants_client = self.get_mock_client()
+        async with assistants_client:
+            # Check that pipelines are created as expected.
+            assistant1 = await assistants_client.create_assistant(
+                model="gpt-4-1106-preview",
+                name="first",
+                instructions="You are a helpful assistant",
+                toolset=toolset1,
+            )
+            self._assert_toolset_dict(assistants_client, assistant1.id, toolset1)
+            await assistants_client.update_assistant(assistant1.id, toolset=toolset2)
+            if toolset2 is None:
+                self._assert_toolset_dict(assistants_client, assistant1.id, toolset1)
+            else:
+                self._assert_toolset_dict(assistants_client, assistant1.id, toolset2)
+
+    @pytest.mark.asyncio
+    @patch("azure.ai.assistants.aio._client.AsyncPipelineClient")
+    @pytest.mark.parametrize(
+        "file_assistant_1,file_assistant_2",
+        [
+            ("file_for_assistant1", "file_for_assistant2"),
+            (None, "file_for_assistant2"),
+            ("file_for_assistant1", None),
+            (None, None),
+        ],
+    )
+    async def test_create_run_tools_override(
+        self,
+        mock_pipeline_client_gen: AsyncMock,
+        file_assistant_1: Optional[str],
+        file_assistant_2: Optional[str],
+    ) -> None:
+        """Test that the tool set passed to create_and_process_run is the one used."""
+        toolset1 = self.get_toolset(file_assistant_1, function1)
+        toolset2 = self.get_toolset(file_assistant_2, function2)
+        mock_response = MagicMock()
+        mock_response.status_code = 200
+        side_effect = [self._get_assistant_json("first", "123", toolset1)]
+        if toolset1 is not None or toolset2 is not None:
+            toolset = toolset2 if toolset2 is not None else toolset1
+            side_effect.append(self._get_run("run123", toolset))  # create_run
+            side_effect.append(self._get_run("run123", toolset))  # get_run
+            side_effect.append(
+                self._get_run("run123", toolset, is_complete=True)
+            )  # get_run after resubmitting with tool results
+        else:
+            side_effect.append(
+                self._get_run("run123", None, is_complete=True)
+            )  # Run must be marked as completed in this case.
+ mock_response.json.side_effect = side_effect + mock_pipeline_response = AsyncMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = AsyncMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + async with assistants_client: + # Check that pipelines are created as expected. + assistant1 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset1, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset1) + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + + # Create run with new tool set, which also can be none. + await assistants_client.create_and_process_run( + thread_id="some_thread_id", assistant_id=assistant1.id, toolset=toolset2 + ) + if toolset2 is not None: + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset2) + else: + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset1) + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + + @pytest.mark.asyncio + @patch("azure.ai.assistants.aio._client.AsyncPipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,add_azure_fn", + [ + ("file_for_assistant1", True), + (None, True), + ("file_for_assistant1", False), + (None, False), + ], + ) + async def test_with_azure_function( + self, + mock_pipeline_client_gen: AsyncMock, + file_assistant_1: Optional[str], + add_azure_fn: bool, + ) -> None: + """Test azure function with toolset.""" + toolset = self.get_toolset(file_assistant_1, function1) + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset), + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # create_run + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # get_run + self._get_run( + "run123", toolset, add_azure_fn=add_azure_fn, is_complete=True + ), # get_run after resubmitting with tool results + ] + mock_pipeline_response = AsyncMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = AsyncMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + async with assistants_client: + # Check that pipelines are created as expected. + assistant1 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # Create run with new tool set, which also can be none. 
+ await assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset) + + def _assert_stream_call(self, submit_tool_mock: AsyncMock, run_id: str, tool_set: Optional[AsyncToolSet]) -> None: + """Assert that stream has received the correct values.""" + if tool_set is not None: + expected_out = TestAssistantsMock.LOCAL_FN[tool_set.definitions[0].function.name]() + submit_tool_mock.assert_called_once() + submit_tool_mock.assert_called_with( + thread_id="some_thread_id", + run_id=run_id, + tool_outputs=[{"tool_call_id": "0", "output": expected_out}], + event_handler=None, + ) + submit_tool_mock.reset_mock() + else: + submit_tool_mock.assert_not_called() + + @pytest.mark.asyncio + @pytest.mark.skip("Recordings not yet available") + @patch("azure.ai.assistants.aio._client.AsyncPipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,add_azure_fn", + [ + ("file_for_assistant1", True), + (None, True), + ("file_for_assistant1", False), + (None, False), + ], + ) + async def test_handle_submit_tool_outputs( + self, + mock_pipeline_client_gen: AsyncMock, + file_assistant_1: Optional[str], + add_azure_fn: bool, + ) -> None: + """Test handling of stream tools response.""" + toolset = self.get_toolset(file_assistant_1, function1) + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset), + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # create_run + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # get_run + self._get_run( + "run123", toolset, add_azure_fn=add_azure_fn, is_complete=True + ), # get_run after resubmitting with tool results + ] + mock_pipeline_response = AsyncMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = AsyncMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + async with assistants_client: + # Check that pipelines are created as expected. + assistant1 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # Create run with new tool set, which also can be none. 
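+            # As in the sync test, _handle_submit_tool_outputs should forward the
+            # captured tool calls to the streaming submit API.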
+            run = await assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id)
+            self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset)
+            await assistants_client._handle_submit_tool_outputs(run)
+            self._assert_stream_call(assistants_client.submit_tool_outputs_to_stream, "run123", toolset)
+
+
+class TestIntegrationAssistantsClient:
+
+    def submit_tool_outputs_to_run(
+        self, thread_id: str, run_id: str, *, tool_outputs: List[ToolOutput], stream_parameter: bool, stream: bool
+    ) -> AsyncIterator[bytes]:
+        assert thread_id == "thread_01"
+        assert run_id == "run_01"
+        assert stream_parameter is True
+        assert stream is True
+        if (
+            len(tool_outputs) == 2
+            and tool_outputs[0]["tool_call_id"] == "call_01"
+            and tool_outputs[1]["tool_call_id"] == "call_02"
+        ):
+            return convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response)
+        elif len(tool_outputs) == 1 and tool_outputs[0]["tool_call_id"] == "call_03":
+            return convert_to_byte_iterator(send_email_stream_response)
+        raise ValueError("Unexpected tool outputs")
+
+    @pytest.mark.asyncio
+    @patch(
+        "azure.ai.assistants.aio._operations.AssistantsClientOperationsMixin.create_run",
+        return_value=convert_to_byte_iterator(main_stream_response),
+    )
+    @patch("azure.ai.assistants.aio.AssistantsClient.__init__", return_value=None)
+    @patch(
+        "azure.ai.assistants.aio._operations.AssistantsClientOperationsMixin.submit_tool_outputs_to_run",
+    )
+    async def test_create_stream_with_tool_calls(self, mock_submit_tool_outputs_to_run: Mock, *args):
+        mock_submit_tool_outputs_to_run.side_effect = self.submit_tool_outputs_to_run
+        functions = AsyncFunctionTool(user_functions)
+        toolset = AsyncToolSet()
+        toolset.add(functions)
+
+        operation = AssistantsClient()
+        operation._toolset = {"asst_01": toolset}
+        count = 0
+
+        async with await operation.create_stream(thread_id="thread_id", assistant_id="asst_01") as stream:
+            async for _ in stream:
+                count += 1
+        # Every event from the main stream and from each tool-output stream should be yielded exactly once.
+        assert count == (
+            main_stream_response.count("event:")
+            + fetch_current_datetime_and_weather_stream_response.count("event:")
+            + send_email_stream_response.count("event:")
+        )
diff --git a/sdk/ai/azure-ai-assistants/tests/test_data/assistant.json b/sdk/ai/azure-ai-assistants/tests/test_data/assistant.json
new file mode 100644
index 000000000000..7221baa942bc
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/test_data/assistant.json
@@ -0,0 +1,11 @@
+{
+  "id": "{agent_id_placeholder}",
+  "object": "assistant",
+  "created_at": 1731539287,
+  "name": "{agent_name_placeholder}",
+  "description": null,
+  "model": "gpt-4-1106-preview",
+  "instructions": "You are a helpful assistant",
+  "metadata": {},
+  "response_format": "auto"
+}
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-assistants/tests/test_data/product_info_1.md b/sdk/ai/azure-ai-assistants/tests/test_data/product_info_1.md
new file mode 100644
index 000000000000..041155831d53
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/test_data/product_info_1.md
@@ -0,0 +1,51 @@
+# Information about product item_number: 1
+
+## Brand
+Contoso Galaxy Innovations
+
+## Category
+Smart Eyewear
+
+## Features
+- Augmented Reality interface
+- Voice-controlled AI assistant
+- HD video recording with 3D audio
+- UV protection and blue light filtering
+- Wireless charging with extended battery life
+
+## User Guide
+
+### 1. Introduction
+Introduction to your new SmartView Glasses
+
+### 2. Product Overview
+Overview of features and controls
+
+### 3. Sizing and Fit
+Finding your perfect fit and style adjustments
+
+### 4. Proper Care and Maintenance
+Cleaning and caring for your SmartView Glasses
+
+### 5. Break-in Period
+Adjusting to the augmented reality experience
+
+### 6. Safety Tips
+Safety guidelines for public and private spaces
+
+### 7. Troubleshooting
+Quick fixes for common issues
+
+## Warranty Information
+Two-year limited warranty on all electronic components
+
+## Contact Information
+Customer Support at support@contoso-galaxy-innovations.com
+
+## Return Policy
+30-day return policy with no questions asked
+
+## FAQ
+- How to sync your SmartView Glasses with your devices
+- Troubleshooting connection issues
+- Customizing your augmented reality environment
diff --git a/sdk/ai/azure-ai-assistants/tests/test_data/thread_run.json b/sdk/ai/azure-ai-assistants/tests/test_data/thread_run.json
new file mode 100644
index 000000000000..37c431735d97
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/test_data/thread_run.json
@@ -0,0 +1,31 @@
+{
+  "id": "thread_id_placeholder",
+  "object": "thread.run",
+  "created_at": 1731542794,
+  "assistant_id": "some_id",
+  "thread_id": "some_thread_id",
+  "status": "requires_action",
+  "started_at": null,
+  "expires_at": 1731543394,
+  "cancelled_at": null,
+  "failed_at": null,
+  "completed_at": null,
+  "required_action": null,
+  "last_error": null,
+  "model": "gpt-4-1106-preview",
+  "instructions": "You are a helpful assistant",
+  "metadata": {},
+  "temperature": 1.0,
+  "top_p": 1.0,
+  "max_completion_tokens": null,
+  "max_prompt_tokens": null,
+  "truncation_strategy": {
+    "type": "auto",
+    "last_messages": null
+  },
+  "incomplete_details": null,
+  "usage": null,
+  "response_format": "auto",
+  "tool_choice": "auto",
+  "parallel_tool_calls": true
+}
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-assistants/tests/test_deserialization.py b/sdk/ai/azure-ai-assistants/tests/test_deserialization.py
new file mode 100644
index 000000000000..9c22e5e9c24d
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/test_deserialization.py
@@ -0,0 +1,93 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import copy
+import datetime
+import pytest
+
+from azure.ai.assistants.models._models import ThreadRun, RunStep, ThreadMessage
+from azure.ai.assistants.models._patch import _safe_instantiate, _filter_parameters
+
+
+class TestDeserialization:
+    """Tests for deserialization of SSE responses."""
+
+    @pytest.mark.parametrize(
+        "valid_params,model_cls",
+        [
+            (
+                {
+                    "id": "12345",
+                    "object": "thread.run",
+                    "thread_id": "6789",
+                    "assistant_id": "101112",
+                    "status": "in_progress",
+                    "required_action": "test",
+                    "last_error": "none",
+                    "model": "gpt-4",
+                    "instructions": "Test instruction",
+                    "tools": "Test function",
+                    "created_at": datetime.datetime(2024, 11, 14),
+                    "expires_at": datetime.datetime(2024, 11, 17),
+                    "started_at": datetime.datetime(2024, 11, 15),
+                    "completed_at": datetime.datetime(2024, 11, 16),
+                    "cancelled_at": datetime.datetime(2024, 11, 16),
+                    "failed_at": datetime.datetime(2024, 11, 16),
+                    "incomplete_details": "max_completion_tokens",
+                    "usage": "in_progress",
+                    "temperature": 1.0,
+                    "top_p": 1.0,
+                    "max_completion_tokens": 1000,
+                    "truncation_strategy": "test",
+                    "tool_choice": "tool name",
+                    "response_format": "json",
+                    "metadata": {"foo": "bar"},
+                    "tool_resources": "test",
+                    "parallel_tool_calls": True,
+                },
+                ThreadRun,
+            ),
+            (
+                {
+                    "id": "1233",
+                    "object": "thread.message",
+                    "created_at": datetime.datetime(2024, 11, 14),
+                    "thread_id": "5678",
+                    "status": "incomplete",
+                    "incomplete_details": "test",
+                    "completed_at": datetime.datetime(2024, 11, 16),
+                    "incomplete_at": datetime.datetime(2024, 11, 16),
+                    "role": "assistant",
+                    "content": "Test",
+                    "assistant_id": "9911",
+                    "run_id": "11",
+                    "attachments": ["4", "8", "15", "16", "23", "42"],
+                    "metadata": {"foo": "bar"},
+                },
+                ThreadMessage,
+            ),
+        ],
+    )
+    def test_correct_thread_params(self, valid_params, model_cls):
+        """Test that extra parameters returned by the service in an SSE response do not cause issues."""
+
+        bad_params = {"foo": "bar"}
+        params = copy.deepcopy(valid_params)
+        params.update(bad_params)
+        # We should not be able to create a ThreadRun with bad parameters.
+        with pytest.raises(TypeError):
+            model_cls(**params)
+        filtered_params = _filter_parameters(model_cls, params)
+        for k in valid_params:
+            assert k in filtered_params, f"{k} not in {list(filtered_params.keys())}"
+        for k in bad_params:
+            assert k not in filtered_params
+        # Implicitly check that we can create the object with the filtered parameters.
+        model_cls(**filtered_params)
+        # Check safe initialization.
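+        # _safe_instantiate should tolerate the extra keys; e.g. (hypothetical values)
+        # _safe_instantiate(ThreadRun, {"id": "1", "foo": "bar"}) should drop the unknown "foo" key.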
+        assert isinstance(_safe_instantiate(model_cls, params), model_cls)
+
+    def test_safe_instantiate_non_dict(self):
+        """Test that _safe_instantiate returns the value unchanged when given a non-dictionary."""
+        assert _safe_instantiate(RunStep, 42) == 42
diff --git a/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py b/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py
new file mode 100644
index 000000000000..38240159be7f
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py
@@ -0,0 +1,28 @@
+# pylint: disable=line-too-long,useless-suppression
+import unittest
+import pytest
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.aio import AssistantsClient as AsyncAssistantsOperations
+from overload_assert_utils import OverloadAssertion, assert_same_http_requests
+
+
+class TestDeclarator(unittest.TestCase):
+
+    @pytest.mark.asyncio
+    @assert_same_http_requests
+    async def test_assert_errors(
+        self, assistant: AssistantsClient, _: AsyncAssistantsOperations, assertion: OverloadAssertion
+    ):
+        # This test verifies that the decorator flags the name field as present in one call but not in the other.
+        model = "gpt-4-1106-preview"
+        name = "first"
+        instructions = "You are a helpful assistant"
+        body = {"model": model, "name": name, "instructions": instructions}
+
+        assistant.create_assistant(model=model, instructions=instructions)
+        assistant.create_assistant(body=body)
+
+        # Expect a failure because the name field is missing from the first call but present in the second.
+        # If no AssertionError is raised, the decorator is not working and the test fails here.
+        with pytest.raises(AssertionError):
+            assertion.same_http_requests_from(operation_count=2, api_per_operation_count=1)
diff --git a/sdk/ai/azure-ai-assistants/tests/test_vector_store.py b/sdk/ai/azure-ai-assistants/tests/test_vector_store.py
new file mode 100644
index 000000000000..1c741bfca2bd
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/test_vector_store.py
@@ -0,0 +1,43 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+# cSpell:disable
+import unittest
+from azure.ai.assistants._model_base import _deserialize
+from azure.ai.assistants.models import _models
+
+
+class Test(unittest.TestCase):
+
+    def testName(self):
+        val = {
+            "id": "vs_OQpX6y9YM368EBZ5GmF45kRO",
+            "object": "vector_store",
+            "name": "TV Support FAQ",
+            "status": "completed",
+            "usage_bytes": 0,
+            "created_at": 1729730726,
+            "file_counts": {"in_progress": 0, "completed": 0, "failed": 0, "cancelled": 0, "total": 0},
+            "metadata": {"source": "Assistant API Tests"},
+            "expires_after": None,
+            "expires_at": None,
+            "last_active_at": 1729730726,
+            "configuration": {
+                "data_sources": [
+                    {
+                        "type": "uri_asset",
+                        "uri": "azureml://subscriptions/10e1de13-9717-4242-acf5-3e241940d326/resourcegroups/rg-sawidderai/workspaces/sawidder-0278/datastores/workspaceblobstore/paths/UI/2024-10-01_001042_UTC/unit-test.txt",
+                    }
+                ]
+            },
+            "configuration1": {},
+        }
+        vct = _deserialize(_models.VectorStore, val)
+        # Verify that the payload deserializes into the model despite the unknown "configuration1" key.
+        self.assertEqual(vct.name, "TV Support FAQ")
+        self.assertEqual(vct.status, "completed")
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/sdk/ai/azure-ai-assistants/tests/user_functions.py b/sdk/ai/azure-ai-assistants/tests/user_functions.py
new file mode 100644
index 000000000000..883fd2fa8e32
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tests/user_functions.py
@@ -0,0 +1,228 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+import json
+import datetime
+from typing import Any, Callable, Set, Dict, List, Optional
+
+# These are the user-defined functions that can be called by the assistant.
+
+
+def fetch_current_datetime(format: Optional[str] = None) -> str:
+    """
+    Get the current time as a JSON string, optionally formatted.
+
+    :param format: The format in which to return the current time. Defaults to None, which uses a standard format.
+    :type format: Optional[str]
+    :return: The current time in JSON format.
+    :rtype: str
+    """
+    current_time = datetime.datetime.now()
+
+    # Use the provided format if available, else use a default format.
+    if format:
+        time_format = format
+    else:
+        time_format = "%Y-%m-%d %H:%M:%S"
+
+    time_json = json.dumps({"current_time": current_time.strftime(time_format)})
+    return time_json
+
+
+def fetch_weather(location: str) -> str:
+    """
+    Fetches the weather information for the specified location.
+
+    :param location: The location to fetch weather for.
+    :type location: str
+    :return: Weather information as a JSON string.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd integrate with a weather API.
+    # Here, we'll mock the response.
+    mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"}
+    weather = mock_weather_data.get(location, "Weather data not available for this location.")
+    weather_json = json.dumps({"weather": weather})
+    return weather_json
+
+
+def send_email(recipient: str, subject: str, body: str) -> str:
+    """
+    Sends an email with the specified subject and body to the recipient.
+
+    :param recipient: Email address of the recipient.
+    :type recipient: str
+    :param subject: Subject of the email.
+    :type subject: str
+    :param body: Body content of the email.
+    :type body: str
+    :return: Confirmation message.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd use an SMTP server or an email service API.
+    # Here, we'll mock the email sending.
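+    # The mock simply prints the message and returns a JSON confirmation payload.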
+    print(f"Sending email to {recipient}...")
+    print(f"Subject: {subject}")
+    print(f"Body:\n{body}")
+
+    message_json = json.dumps({"message": f"Email successfully sent to {recipient}."})
+    return message_json
+
+
+def calculate_sum(a: int, b: int) -> str:
+    """Calculates the sum of two integers.
+
+    :param a: First integer.
+    :type a: int
+    :param b: Second integer.
+    :type b: int
+    :return: The sum of the two integers, as a JSON string.
+    :rtype: str
+    """
+    result = a + b
+    return json.dumps({"result": result})
+
+
+def convert_temperature(celsius: float) -> str:
+    """Converts temperature from Celsius to Fahrenheit.
+
+    :param celsius: Temperature in Celsius.
+    :type celsius: float
+    :return: Temperature in Fahrenheit, as a JSON string.
+    :rtype: str
+    """
+    fahrenheit = (celsius * 9 / 5) + 32
+    return json.dumps({"fahrenheit": fahrenheit})
+
+
+def toggle_flag(flag: bool) -> str:
+    """Toggles a boolean flag.
+
+    :param flag: The flag to toggle.
+    :type flag: bool
+    :return: The toggled flag, as a JSON string.
+    :rtype: str
+    """
+    toggled = not flag
+    return json.dumps({"toggled_flag": toggled})
+
+
+def merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> str:
+    """Merges two dictionaries.
+
+    :param dict1: First dictionary.
+    :type dict1: Dict[str, Any]
+    :param dict2: Second dictionary.
+    :type dict2: Dict[str, Any]
+    :return: The merged dictionary, as a JSON string.
+    :rtype: str
+    """
+    merged = dict1.copy()
+    merged.update(dict2)
+    return json.dumps({"merged_dict": merged})
+
+
+def get_user_info(user_id: int) -> str:
+    """Retrieves user information based on user ID.
+
+    :param user_id: ID of the user.
+    :type user_id: int
+    :return: User information as a JSON string.
+    :rtype: str
+    """
+    mock_users = {
+        1: {"name": "Alice", "email": "alice@example.com"},
+        2: {"name": "Bob", "email": "bob@example.com"},
+        3: {"name": "Charlie", "email": "charlie@example.com"},
+    }
+    user_info = mock_users.get(user_id, {"error": "User not found."})
+    return json.dumps({"user_info": user_info})
+
+
+def longest_word_in_sentences(sentences: List[str]) -> str:
+    """Finds the longest word in each sentence.
+
+    :param sentences: A list of sentences.
+    :type sentences: List[str]
+    :return: A JSON string mapping each sentence to its longest word.
+    :rtype: str
+    """
+    if not sentences:
+        return json.dumps({"error": "The list of sentences is empty"})
+
+    longest_words = {}
+    for sentence in sentences:
+        # Split the sentence into words.
+        words = sentence.split()
+        if words:
+            # Find the longest word.
+            longest_word = max(words, key=len)
+            longest_words[sentence] = longest_word
+        else:
+            longest_words[sentence] = ""
+
+    return json.dumps({"longest_words": longest_words})
+
+
+def process_records(records: List[Dict[str, int]]) -> str:
+    """
+    Process a list of records, where each record is a dictionary with string keys and integer values.
+
+    :param records: A list containing dictionaries that map strings to integers.
+    :type records: List[Dict[str, int]]
+    :return: A JSON string containing the sums of the integer values in each record.
+    :rtype: str
+    """
+    sums = []
+    for record in records:
+        # Sum up all the values in each dictionary and append the result to the sums list.
+        total = sum(record.values())
+        sums.append(total)
+    return json.dumps({"sums": sums})
+
+
+# Example User Input for Each Function
+# 1. Fetch Current DateTime
+# User Input: "What is the current date and time?"
+# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?"
+
+# 2. Fetch Weather
+# User Input: "Can you provide the weather information for New York?"
+
+# 3. Send Email
+# User Input: "Send an email to john.doe@example.com with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'"
+
+# 4. Calculate Sum
+# User Input: "What is the sum of 45 and 55?"
+
+# 5. Convert Temperature
+# User Input: "Convert 25 degrees Celsius to Fahrenheit."
+
+# 6. Toggle Flag
+# User Input: "Toggle the flag True."
+
+# 7. Merge Dictionaries
+# User Input: "Merge these two dictionaries: {'name': 'Alice'} and {'age': 30}."
+
+# 8. Get User Info
+# User Input: "Retrieve user information for user ID 1."
+
+# 9. Longest Word in Sentences
+# User Input: "Find the longest word in each of these sentences: ['The quick brown fox jumps over the lazy dog', 'Python is an amazing programming language', 'Azure AI capabilities are impressive']."
+
+# 10. Process Records
+# User Input: "Process the following records: [{'a': 10, 'b': 20}, {'x': 5, 'y': 15, 'z': 25}, {'m': 30}]."
+
+# Statically defined user functions for fast reference
+user_functions: Set[Callable[..., Any]] = {
+    fetch_current_datetime,
+    fetch_weather,
+    send_email,
+    calculate_sum,
+    convert_temperature,
+    toggle_flag,
+    merge_dicts,
+    get_user_info,
+    longest_word_in_sentences,
+    process_records,
+}
diff --git a/sdk/ai/azure-ai-assistants/tsp-location.yaml b/sdk/ai/azure-ai-assistants/tsp-location.yaml
new file mode 100644
index 000000000000..524b693d4faf
--- /dev/null
+++ b/sdk/ai/azure-ai-assistants/tsp-location.yaml
@@ -0,0 +1,4 @@
+directory: specification/ai/Azure.AI.Assistants
+commit: 4b4d290aa11e0d562c7e53276effe9a60fcec1fa
+repo: Azure/azure-rest-api-specs
+additionalDirectories:
diff --git a/sdk/ai/ci.yml b/sdk/ai/ci.yml
index 117b9c6c785d..633ae26cc11e 100644
--- a/sdk/ai/ci.yml
+++ b/sdk/ai/ci.yml
@@ -50,6 +50,8 @@ extends:
     #   Selection: sparse
     #   GenerateVMJobs: true
     Artifacts:
+    - name: azure-ai-assistants
+      safeName: azureaiassistants
     - name: azure-ai-projects
       safeName: azureaiprojects
     - name: azure-ai-inference