From 6954c2b3568f0b7de297b5d69af723e1bbb70f8b Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Mon, 31 Mar 2025 20:09:34 -0700 Subject: [PATCH 1/7] Initial code generation --- sdk/ai/azure-ai-assistants/CHANGELOG.md | 5 + sdk/ai/azure-ai-assistants/LICENSE | 21 + sdk/ai/azure-ai-assistants/MANIFEST.in | 7 + sdk/ai/azure-ai-assistants/README.md | 43 + .../apiview-properties.json | 238 + sdk/ai/azure-ai-assistants/azure/__init__.py | 1 + .../azure-ai-assistants/azure/ai/__init__.py | 1 + .../azure/ai/assistants/__init__.py | 32 + .../azure/ai/assistants/_client.py | 101 + .../azure/ai/assistants/_configuration.py | 72 + .../azure/ai/assistants/_model_base.py | 1235 +++ .../ai/assistants/_operations/__init__.py | 25 + .../ai/assistants/_operations/_operations.py | 5890 +++++++++++++++ .../azure/ai/assistants/_operations/_patch.py | 20 + .../azure/ai/assistants/_patch.py | 20 + .../azure/ai/assistants/_serialization.py | 2050 +++++ .../azure/ai/assistants/_types.py | 21 + .../azure/ai/assistants/_vendor.py | 66 + .../azure/ai/assistants/_version.py | 9 + .../azure/ai/assistants/aio/__init__.py | 29 + .../azure/ai/assistants/aio/_client.py | 105 + .../azure/ai/assistants/aio/_configuration.py | 74 + .../ai/assistants/aio/_operations/__init__.py | 25 + .../assistants/aio/_operations/_operations.py | 4788 ++++++++++++ .../ai/assistants/aio/_operations/_patch.py | 20 + .../azure/ai/assistants/aio/_patch.py | 20 + .../azure/ai/assistants/aio/_vendor.py | 25 + .../azure/ai/assistants/models/__init__.py | 416 + .../azure/ai/assistants/models/_enums.py | 520 ++ .../azure/ai/assistants/models/_models.py | 6686 +++++++++++++++++ .../azure/ai/assistants/models/_patch.py | 20 + .../azure/ai/assistants/py.typed | 1 + .../azure-ai-assistants/dev_requirements.txt | 3 + sdk/ai/azure-ai-assistants/setup.py | 71 + sdk/ai/azure-ai-assistants/tsp-location.yaml | 4 + 35 files changed, 22664 insertions(+) create mode 100644 
sdk/ai/azure-ai-assistants/CHANGELOG.md create mode 100644 sdk/ai/azure-ai-assistants/LICENSE create mode 100644 sdk/ai/azure-ai-assistants/MANIFEST.in create mode 100644 sdk/ai/azure-ai-assistants/README.md create mode 100644 sdk/ai/azure-ai-assistants/apiview-properties.json create mode 100644 sdk/ai/azure-ai-assistants/azure/__init__.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/__init__.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/__init__.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_model_base.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_patch.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_serialization.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_vendor.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/_version.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/__init__.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py create mode 100644 
sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/py.typed create mode 100644 sdk/ai/azure-ai-assistants/dev_requirements.txt create mode 100644 sdk/ai/azure-ai-assistants/setup.py create mode 100644 sdk/ai/azure-ai-assistants/tsp-location.yaml diff --git a/sdk/ai/azure-ai-assistants/CHANGELOG.md b/sdk/ai/azure-ai-assistants/CHANGELOG.md new file mode 100644 index 000000000000..628743d283a9 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/CHANGELOG.md @@ -0,0 +1,5 @@ +# Release History + +## 1.0.0b1 (1970-01-01) + +- Initial version diff --git a/sdk/ai/azure-ai-assistants/LICENSE b/sdk/ai/azure-ai-assistants/LICENSE new file mode 100644 index 000000000000..63447fd8bbbf --- /dev/null +++ b/sdk/ai/azure-ai-assistants/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) Microsoft Corporation. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/MANIFEST.in b/sdk/ai/azure-ai-assistants/MANIFEST.in new file mode 100644 index 000000000000..c50d503e6ce9 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/MANIFEST.in @@ -0,0 +1,7 @@ +include *.md +include LICENSE +include azure/ai/assistants/py.typed +recursive-include tests *.py +recursive-include samples *.py *.md +include azure/__init__.py +include azure/ai/__init__.py diff --git a/sdk/ai/azure-ai-assistants/README.md b/sdk/ai/azure-ai-assistants/README.md new file mode 100644 index 000000000000..30d4deef34eb --- /dev/null +++ b/sdk/ai/azure-ai-assistants/README.md @@ -0,0 +1,43 @@ +# Azure AI Assistants client library for Python + + +## Getting started + +### Install the package + +```bash +python -m pip install azure-ai-assistants +``` + +#### Prerequisites + +- Python 3.8 or later is required to use this package. +- You need an [Azure subscription][azure_sub] to use this package. +- An existing Azure AI Assistants instance. + + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). 
Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments. + + +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials +[azure_identity_pip]: https://pypi.org/project/azure-identity/ +[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential +[pip]: https://pypi.org/project/pip/ +[azure_sub]: https://azure.microsoft.com/free/ diff --git a/sdk/ai/azure-ai-assistants/apiview-properties.json b/sdk/ai/azure-ai-assistants/apiview-properties.json new file mode 100644 index 000000000000..840508d6d84d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/apiview-properties.json @@ -0,0 +1,238 @@ +{ + "CrossLanguagePackageId": "Azure.AI.Assistants", + "CrossLanguageDefinitionId": { + "azure.ai.assistants.models.Agent": "Azure.AI.Assistants.Agent", + "azure.ai.assistants.models.AgentDeletionStatus": "Azure.AI.Assistants.AgentDeletionStatus", + "azure.ai.assistants.models.AgentsApiResponseFormat": "Azure.AI.Assistants.AgentsApiResponseFormat", + "azure.ai.assistants.models.AgentsNamedToolChoice": "Azure.AI.Assistants.AgentsNamedToolChoice", + "azure.ai.assistants.models.AgentThread": "Azure.AI.Assistants.AgentThread", + "azure.ai.assistants.models.AgentThreadCreationOptions": "Azure.AI.Assistants.AgentThreadCreationOptions", + "azure.ai.assistants.models.AISearchIndexResource": "Azure.AI.Assistants.AISearchIndexResource", + 
"azure.ai.assistants.models.AzureAISearchResource": "Azure.AI.Assistants.AzureAISearchResource", + "azure.ai.assistants.models.ToolDefinition": "Azure.AI.Assistants.ToolDefinition", + "azure.ai.assistants.models.AzureAISearchToolDefinition": "Azure.AI.Assistants.AzureAISearchToolDefinition", + "azure.ai.assistants.models.AzureFunctionBinding": "Azure.AI.Assistants.AzureFunctionBinding", + "azure.ai.assistants.models.AzureFunctionDefinition": "Azure.AI.Assistants.AzureFunctionDefinition", + "azure.ai.assistants.models.AzureFunctionStorageQueue": "Azure.AI.Assistants.AzureFunctionStorageQueue", + "azure.ai.assistants.models.AzureFunctionToolDefinition": "Azure.AI.Assistants.AzureFunctionToolDefinition", + "azure.ai.assistants.models.BingCustomSearchToolDefinition": "Azure.AI.Assistants.BingCustomSearchToolDefinition", + "azure.ai.assistants.models.BingGroundingToolDefinition": "Azure.AI.Assistants.BingGroundingToolDefinition", + "azure.ai.assistants.models.CodeInterpreterToolDefinition": "Azure.AI.Assistants.CodeInterpreterToolDefinition", + "azure.ai.assistants.models.CodeInterpreterToolResource": "Azure.AI.Assistants.CodeInterpreterToolResource", + "azure.ai.assistants.models.ErrorResponse": "Azure.ResourceManager.CommonTypes.ErrorResponse", + "azure.ai.assistants.models.FileDeletionStatus": "Azure.AI.Assistants.FileDeletionStatus", + "azure.ai.assistants.models.FileListResponse": "Azure.AI.Assistants.FileListResponse", + "azure.ai.assistants.models.FileSearchRankingOptions": "Azure.AI.Assistants.FileSearchRankingOptions", + "azure.ai.assistants.models.FileSearchToolCallContent": "Azure.AI.Assistants.FileSearchToolCallContent", + "azure.ai.assistants.models.FileSearchToolDefinition": "Azure.AI.Assistants.FileSearchToolDefinition", + "azure.ai.assistants.models.FileSearchToolDefinitionDetails": "Azure.AI.Assistants.FileSearchToolDefinitionDetails", + "azure.ai.assistants.models.FileSearchToolResource": "Azure.AI.Assistants.FileSearchToolResource", + 
"azure.ai.assistants.models.FunctionDefinition": "Azure.AI.Assistants.FunctionDefinition", + "azure.ai.assistants.models.FunctionName": "Azure.AI.Assistants.FunctionName", + "azure.ai.assistants.models.FunctionToolDefinition": "Azure.AI.Assistants.FunctionToolDefinition", + "azure.ai.assistants.models.IncompleteRunDetails": "Azure.AI.Assistants.IncompleteRunDetails", + "azure.ai.assistants.models.MessageAttachment": "Azure.AI.Assistants.MessageAttachment", + "azure.ai.assistants.models.MessageContent": "Azure.AI.Assistants.MessageContent", + "azure.ai.assistants.models.MessageDelta": "Azure.AI.Assistants.MessageDelta", + "azure.ai.assistants.models.MessageDeltaChunk": "Azure.AI.Assistants.MessageDeltaChunk", + "azure.ai.assistants.models.MessageDeltaContent": "Azure.AI.Assistants.MessageDeltaContent", + "azure.ai.assistants.models.MessageDeltaImageFileContent": "Azure.AI.Assistants.MessageDeltaImageFileContent", + "azure.ai.assistants.models.MessageDeltaImageFileContentObject": "Azure.AI.Assistants.MessageDeltaImageFileContentObject", + "azure.ai.assistants.models.MessageDeltaTextAnnotation": "Azure.AI.Assistants.MessageDeltaTextAnnotation", + "azure.ai.assistants.models.MessageDeltaTextContent": "Azure.AI.Assistants.MessageDeltaTextContent", + "azure.ai.assistants.models.MessageDeltaTextContentObject": "Azure.AI.Assistants.MessageDeltaTextContentObject", + "azure.ai.assistants.models.MessageDeltaTextFileCitationAnnotation": "Azure.AI.Assistants.MessageDeltaTextFileCitationAnnotation", + "azure.ai.assistants.models.MessageDeltaTextFileCitationAnnotationObject": "Azure.AI.Assistants.MessageDeltaTextFileCitationAnnotationObject", + "azure.ai.assistants.models.MessageDeltaTextFilePathAnnotation": "Azure.AI.Assistants.MessageDeltaTextFilePathAnnotation", + "azure.ai.assistants.models.MessageDeltaTextFilePathAnnotationObject": "Azure.AI.Assistants.MessageDeltaTextFilePathAnnotationObject", + "azure.ai.assistants.models.MessageDeltaTextUrlCitationAnnotation": 
"Azure.AI.Assistants.MessageDeltaTextUrlCitationAnnotation", + "azure.ai.assistants.models.MessageDeltaTextUrlCitationDetails": "Azure.AI.Assistants.MessageDeltaTextUrlCitationDetails", + "azure.ai.assistants.models.MessageImageFileContent": "Azure.AI.Assistants.MessageImageFileContent", + "azure.ai.assistants.models.MessageImageFileDetails": "Azure.AI.Assistants.MessageImageFileDetails", + "azure.ai.assistants.models.MessageIncompleteDetails": "Azure.AI.Assistants.MessageIncompleteDetails", + "azure.ai.assistants.models.MessageTextAnnotation": "Azure.AI.Assistants.MessageTextAnnotation", + "azure.ai.assistants.models.MessageTextContent": "Azure.AI.Assistants.MessageTextContent", + "azure.ai.assistants.models.MessageTextDetails": "Azure.AI.Assistants.MessageTextDetails", + "azure.ai.assistants.models.MessageTextFileCitationAnnotation": "Azure.AI.Assistants.MessageTextFileCitationAnnotation", + "azure.ai.assistants.models.MessageTextFileCitationDetails": "Azure.AI.Assistants.MessageTextFileCitationDetails", + "azure.ai.assistants.models.MessageTextFilePathAnnotation": "Azure.AI.Assistants.MessageTextFilePathAnnotation", + "azure.ai.assistants.models.MessageTextFilePathDetails": "Azure.AI.Assistants.MessageTextFilePathDetails", + "azure.ai.assistants.models.MessageTextUrlCitationAnnotation": "Azure.AI.Assistants.MessageTextUrlCitationAnnotation", + "azure.ai.assistants.models.MessageTextUrlCitationDetails": "Azure.AI.Assistants.MessageTextUrlCitationDetails", + "azure.ai.assistants.models.MicrosoftFabricToolDefinition": "Azure.AI.Assistants.MicrosoftFabricToolDefinition", + "azure.ai.assistants.models.OpenAIFile": "Azure.AI.Assistants.OpenAIFile", + "azure.ai.assistants.models.OpenAIPageableListOfAgent": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfRunStep": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfThreadMessage": "Azure.AI.Assistants.OpenAIPageableListOf", + 
"azure.ai.assistants.models.OpenAIPageableListOfThreadRun": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfVectorStore": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenApiAuthDetails": "Azure.AI.Assistants.OpenApiAuthDetails", + "azure.ai.assistants.models.OpenApiAnonymousAuthDetails": "Azure.AI.Assistants.OpenApiAnonymousAuthDetails", + "azure.ai.assistants.models.OpenApiConnectionAuthDetails": "Azure.AI.Assistants.OpenApiConnectionAuthDetails", + "azure.ai.assistants.models.OpenApiConnectionSecurityScheme": "Azure.AI.Assistants.OpenApiConnectionSecurityScheme", + "azure.ai.assistants.models.OpenApiFunctionDefinition": "Azure.AI.Assistants.OpenApiFunctionDefinition", + "azure.ai.assistants.models.OpenApiManagedAuthDetails": "Azure.AI.Assistants.OpenApiManagedAuthDetails", + "azure.ai.assistants.models.OpenApiManagedSecurityScheme": "Azure.AI.Assistants.OpenApiManagedSecurityScheme", + "azure.ai.assistants.models.OpenApiToolDefinition": "Azure.AI.Assistants.OpenApiToolDefinition", + "azure.ai.assistants.models.RequiredAction": "Azure.AI.Assistants.RequiredAction", + "azure.ai.assistants.models.RequiredToolCall": "Azure.AI.Assistants.RequiredToolCall", + "azure.ai.assistants.models.RequiredFunctionToolCall": "Azure.AI.Assistants.RequiredFunctionToolCall", + "azure.ai.assistants.models.RequiredFunctionToolCallDetails": "Azure.AI.Assistants.RequiredFunctionToolCallDetails", + "azure.ai.assistants.models.ResponseFormatJsonSchema": "Azure.AI.Assistants.ResponseFormatJsonSchema", + "azure.ai.assistants.models.ResponseFormatJsonSchemaType": "Azure.AI.Assistants.ResponseFormatJsonSchemaType", + "azure.ai.assistants.models.RunCompletionUsage": "Azure.AI.Assistants.RunCompletionUsage", + "azure.ai.assistants.models.RunError": "Azure.AI.Assistants.RunError", + 
"azure.ai.assistants.models.RunStep": "Azure.AI.Assistants.RunStep", + "azure.ai.assistants.models.RunStepToolCall": "Azure.AI.Assistants.RunStepToolCall", + "azure.ai.assistants.models.RunStepAzureAISearchToolCall": "Azure.AI.Assistants.RunStepAzureAISearchToolCall", + "azure.ai.assistants.models.RunStepBingGroundingToolCall": "Azure.AI.Assistants.RunStepBingGroundingToolCall", + "azure.ai.assistants.models.RunStepCodeInterpreterToolCallOutput": "Azure.AI.Assistants.RunStepCodeInterpreterToolCallOutput", + "azure.ai.assistants.models.RunStepCodeInterpreterImageOutput": "Azure.AI.Assistants.RunStepCodeInterpreterImageOutput", + "azure.ai.assistants.models.RunStepCodeInterpreterImageReference": "Azure.AI.Assistants.RunStepCodeInterpreterImageReference", + "azure.ai.assistants.models.RunStepCodeInterpreterLogOutput": "Azure.AI.Assistants.RunStepCodeInterpreterLogOutput", + "azure.ai.assistants.models.RunStepCodeInterpreterToolCall": "Azure.AI.Assistants.RunStepCodeInterpreterToolCall", + "azure.ai.assistants.models.RunStepCodeInterpreterToolCallDetails": "Azure.AI.Assistants.RunStepCodeInterpreterToolCallDetails", + "azure.ai.assistants.models.RunStepCompletionUsage": "Azure.AI.Assistants.RunStepCompletionUsage", + "azure.ai.assistants.models.RunStepCustomSearchToolCall": "Azure.AI.Assistants.RunStepCustomSearchToolCall", + "azure.ai.assistants.models.RunStepDelta": "Azure.AI.Assistants.RunStepDelta", + "azure.ai.assistants.models.RunStepDeltaChunk": "Azure.AI.Assistants.RunStepDeltaChunk", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterDetailItemObject": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterDetailItemObject", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterOutput": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterOutput", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterImageOutput": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterImageOutput", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterImageOutputObject": 
"Azure.AI.Assistants.RunStepDeltaCodeInterpreterImageOutputObject", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterLogOutput": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterLogOutput", + "azure.ai.assistants.models.RunStepDeltaToolCall": "Azure.AI.Assistants.RunStepDeltaToolCall", + "azure.ai.assistants.models.RunStepDeltaCodeInterpreterToolCall": "Azure.AI.Assistants.RunStepDeltaCodeInterpreterToolCall", + "azure.ai.assistants.models.RunStepDeltaDetail": "Azure.AI.Assistants.RunStepDeltaDetail", + "azure.ai.assistants.models.RunStepDeltaFileSearchToolCall": "Azure.AI.Assistants.RunStepDeltaFileSearchToolCall", + "azure.ai.assistants.models.RunStepDeltaFunction": "Azure.AI.Assistants.RunStepDeltaFunction", + "azure.ai.assistants.models.RunStepDeltaFunctionToolCall": "Azure.AI.Assistants.RunStepDeltaFunctionToolCall", + "azure.ai.assistants.models.RunStepDeltaMessageCreation": "Azure.AI.Assistants.RunStepDeltaMessageCreation", + "azure.ai.assistants.models.RunStepDeltaMessageCreationObject": "Azure.AI.Assistants.RunStepDeltaMessageCreationObject", + "azure.ai.assistants.models.RunStepDeltaToolCallObject": "Azure.AI.Assistants.RunStepDeltaToolCallObject", + "azure.ai.assistants.models.RunStepDetails": "Azure.AI.Assistants.RunStepDetails", + "azure.ai.assistants.models.RunStepError": "Azure.AI.Assistants.RunStepError", + "azure.ai.assistants.models.RunStepFileSearchToolCall": "Azure.AI.Assistants.RunStepFileSearchToolCall", + "azure.ai.assistants.models.RunStepFileSearchToolCallResult": "Azure.AI.Assistants.RunStepFileSearchToolCallResult", + "azure.ai.assistants.models.RunStepFileSearchToolCallResults": "Azure.AI.Assistants.RunStepFileSearchToolCallResults", + "azure.ai.assistants.models.RunStepFunctionToolCall": "Azure.AI.Assistants.RunStepFunctionToolCall", + "azure.ai.assistants.models.RunStepFunctionToolCallDetails": "Azure.AI.Assistants.RunStepFunctionToolCallDetails", + "azure.ai.assistants.models.RunStepMessageCreationDetails": 
"Azure.AI.Assistants.RunStepMessageCreationDetails", + "azure.ai.assistants.models.RunStepMessageCreationReference": "Azure.AI.Assistants.RunStepMessageCreationReference", + "azure.ai.assistants.models.RunStepMicrosoftFabricToolCall": "Azure.AI.Assistants.RunStepMicrosoftFabricToolCall", + "azure.ai.assistants.models.RunStepOpenAPIToolCall": "Azure.AI.Assistants.RunStepOpenAPIToolCall", + "azure.ai.assistants.models.RunStepSharepointToolCall": "Azure.AI.Assistants.RunStepSharepointToolCall", + "azure.ai.assistants.models.RunStepToolCallDetails": "Azure.AI.Assistants.RunStepToolCallDetails", + "azure.ai.assistants.models.SearchConfiguration": "Azure.AI.Assistants.SearchConfiguration", + "azure.ai.assistants.models.SearchConfigurationList": "Azure.AI.Assistants.SearchConfigurationList", + "azure.ai.assistants.models.SharepointToolDefinition": "Azure.AI.Assistants.SharepointToolDefinition", + "azure.ai.assistants.models.SubmitToolOutputsAction": "Azure.AI.Assistants.SubmitToolOutputsAction", + "azure.ai.assistants.models.SubmitToolOutputsDetails": "Azure.AI.Assistants.SubmitToolOutputsDetails", + "azure.ai.assistants.models.ThreadDeletionStatus": "Azure.AI.Assistants.ThreadDeletionStatus", + "azure.ai.assistants.models.ThreadMessage": "Azure.AI.Assistants.ThreadMessage", + "azure.ai.assistants.models.ThreadMessageOptions": "Azure.AI.Assistants.ThreadMessageOptions", + "azure.ai.assistants.models.ThreadRun": "Azure.AI.Assistants.ThreadRun", + "azure.ai.assistants.models.ToolConnection": "Azure.AI.Assistants.ToolConnection", + "azure.ai.assistants.models.ToolConnectionList": "Azure.AI.Assistants.ToolConnectionList", + "azure.ai.assistants.models.ToolOutput": "Azure.AI.Assistants.ToolOutput", + "azure.ai.assistants.models.ToolResources": "Azure.AI.Assistants.ToolResources", + "azure.ai.assistants.models.TruncationObject": "Azure.AI.Assistants.TruncationObject", + "azure.ai.assistants.models.UpdateCodeInterpreterToolResourceOptions": 
"Azure.AI.Assistants.UpdateCodeInterpreterToolResourceOptions", + "azure.ai.assistants.models.UpdateFileSearchToolResourceOptions": "Azure.AI.Assistants.UpdateFileSearchToolResourceOptions", + "azure.ai.assistants.models.UpdateToolResourcesOptions": "Azure.AI.Assistants.UpdateToolResourcesOptions", + "azure.ai.assistants.models.UploadFileRequest": "Azure.AI.Assistants.uploadFile.Request.anonymous", + "azure.ai.assistants.models.VectorStore": "Azure.AI.Assistants.VectorStore", + "azure.ai.assistants.models.VectorStoreChunkingStrategyRequest": "Azure.AI.Assistants.VectorStoreChunkingStrategyRequest", + "azure.ai.assistants.models.VectorStoreAutoChunkingStrategyRequest": "Azure.AI.Assistants.VectorStoreAutoChunkingStrategyRequest", + "azure.ai.assistants.models.VectorStoreChunkingStrategyResponse": "Azure.AI.Assistants.VectorStoreChunkingStrategyResponse", + "azure.ai.assistants.models.VectorStoreAutoChunkingStrategyResponse": "Azure.AI.Assistants.VectorStoreAutoChunkingStrategyResponse", + "azure.ai.assistants.models.VectorStoreConfiguration": "Azure.AI.Assistants.VectorStoreConfiguration", + "azure.ai.assistants.models.VectorStoreConfigurations": "Azure.AI.Assistants.VectorStoreConfigurations", + "azure.ai.assistants.models.VectorStoreDataSource": "Azure.AI.Assistants.VectorStoreDataSource", + "azure.ai.assistants.models.VectorStoreDeletionStatus": "Azure.AI.Assistants.VectorStoreDeletionStatus", + "azure.ai.assistants.models.VectorStoreExpirationPolicy": "Azure.AI.Assistants.VectorStoreExpirationPolicy", + "azure.ai.assistants.models.VectorStoreFile": "Azure.AI.Assistants.VectorStoreFile", + "azure.ai.assistants.models.VectorStoreFileBatch": "Azure.AI.Assistants.VectorStoreFileBatch", + "azure.ai.assistants.models.VectorStoreFileCount": "Azure.AI.Assistants.VectorStoreFileCount", + "azure.ai.assistants.models.VectorStoreFileDeletionStatus": "Azure.AI.Assistants.VectorStoreFileDeletionStatus", + "azure.ai.assistants.models.VectorStoreFileError": 
"Azure.AI.Assistants.VectorStoreFileError", + "azure.ai.assistants.models.VectorStoreStaticChunkingStrategyOptions": "Azure.AI.Assistants.VectorStoreStaticChunkingStrategyOptions", + "azure.ai.assistants.models.VectorStoreStaticChunkingStrategyRequest": "Azure.AI.Assistants.VectorStoreStaticChunkingStrategyRequest", + "azure.ai.assistants.models.VectorStoreStaticChunkingStrategyResponse": "Azure.AI.Assistants.VectorStoreStaticChunkingStrategyResponse", + "azure.ai.assistants.models.OpenApiAuthType": "Azure.AI.Assistants.OpenApiAuthType", + "azure.ai.assistants.models.VectorStoreDataSourceAssetType": "Azure.AI.Assistants.VectorStoreDataSourceAssetType", + "azure.ai.assistants.models.AzureAISearchQueryType": "Azure.AI.Assistants.AzureAISearchQueryType", + "azure.ai.assistants.models.AgentsApiResponseFormatMode": "Azure.AI.Assistants.AgentsApiResponseFormatMode", + "azure.ai.assistants.models.ResponseFormat": "Azure.AI.Assistants.ResponseFormat", + "azure.ai.assistants.models.ListSortOrder": "Azure.AI.Assistants.ListSortOrder", + "azure.ai.assistants.models.MessageRole": "Azure.AI.Assistants.MessageRole", + "azure.ai.assistants.models.MessageStatus": "Azure.AI.Assistants.MessageStatus", + "azure.ai.assistants.models.MessageIncompleteDetailsReason": "Azure.AI.Assistants.MessageIncompleteDetailsReason", + "azure.ai.assistants.models.RunStatus": "Azure.AI.Assistants.RunStatus", + "azure.ai.assistants.models.IncompleteDetailsReason": "Azure.AI.Assistants.IncompleteDetailsReason", + "azure.ai.assistants.models.TruncationStrategy": "Azure.AI.Assistants.TruncationStrategy", + "azure.ai.assistants.models.AgentsApiToolChoiceOptionMode": "Azure.AI.Assistants.AgentsApiToolChoiceOptionMode", + "azure.ai.assistants.models.AgentsNamedToolChoiceType": "Azure.AI.Assistants.AgentsNamedToolChoiceType", + "azure.ai.assistants.models.RunAdditionalFieldList": "Azure.AI.Assistants.RunAdditionalFieldList", + "azure.ai.assistants.models.RunStepType": "Azure.AI.Assistants.RunStepType", + 
"azure.ai.assistants.models.RunStepStatus": "Azure.AI.Assistants.RunStepStatus", + "azure.ai.assistants.models.RunStepErrorCode": "Azure.AI.Assistants.RunStepErrorCode", + "azure.ai.assistants.models.FilePurpose": "Azure.AI.Assistants.FilePurpose", + "azure.ai.assistants.models.FileState": "Azure.AI.Assistants.FileState", + "azure.ai.assistants.models.VectorStoreStatus": "Azure.AI.Assistants.VectorStoreStatus", + "azure.ai.assistants.models.VectorStoreExpirationPolicyAnchor": "Azure.AI.Assistants.VectorStoreExpirationPolicyAnchor", + "azure.ai.assistants.models.VectorStoreChunkingStrategyRequestType": "Azure.AI.Assistants.VectorStoreChunkingStrategyRequestType", + "azure.ai.assistants.models.VectorStoreFileStatus": "Azure.AI.Assistants.VectorStoreFileStatus", + "azure.ai.assistants.models.VectorStoreFileErrorCode": "Azure.AI.Assistants.VectorStoreFileErrorCode", + "azure.ai.assistants.models.VectorStoreChunkingStrategyResponseType": "Azure.AI.Assistants.VectorStoreChunkingStrategyResponseType", + "azure.ai.assistants.models.VectorStoreFileStatusFilter": "Azure.AI.Assistants.VectorStoreFileStatusFilter", + "azure.ai.assistants.models.VectorStoreFileBatchStatus": "Azure.AI.Assistants.VectorStoreFileBatchStatus", + "azure.ai.assistants.models.ThreadStreamEvent": "Azure.AI.Assistants.ThreadStreamEvent", + "azure.ai.assistants.models.RunStreamEvent": "Azure.AI.Assistants.RunStreamEvent", + "azure.ai.assistants.models.RunStepStreamEvent": "Azure.AI.Assistants.RunStepStreamEvent", + "azure.ai.assistants.models.MessageStreamEvent": "Azure.AI.Assistants.MessageStreamEvent", + "azure.ai.assistants.models.ErrorEvent": "Azure.AI.Assistants.ErrorEvent", + "azure.ai.assistants.models.DoneEvent": "Azure.AI.Assistants.DoneEvent", + "azure.ai.assistants.models.AgentStreamEvent": "Azure.AI.Assistants.AgentStreamEvent", + "azure.ai.assistants.AIAssistantClient.create_agent": "Azure.AI.Assistants.createAgent", + "azure.ai.assistants.AIAssistantClient.list_agents": 
"Azure.AI.Assistants.listAgents", + "azure.ai.assistants.AIAssistantClient.get_agent": "Azure.AI.Assistants.getAgent", + "azure.ai.assistants.AIAssistantClient.update_agent": "Azure.AI.Assistants.updateAgent", + "azure.ai.assistants.AIAssistantClient.delete_agent": "Azure.AI.Assistants.deleteAgent", + "azure.ai.assistants.AIAssistantClient.create_thread": "Azure.AI.Assistants.createThread", + "azure.ai.assistants.AIAssistantClient.get_thread": "Azure.AI.Assistants.getThread", + "azure.ai.assistants.AIAssistantClient.update_thread": "Azure.AI.Assistants.updateThread", + "azure.ai.assistants.AIAssistantClient.delete_thread": "Azure.AI.Assistants.deleteThread", + "azure.ai.assistants.AIAssistantClient.create_message": "Azure.AI.Assistants.createMessage", + "azure.ai.assistants.AIAssistantClient.list_messages": "Azure.AI.Assistants.listMessages", + "azure.ai.assistants.AIAssistantClient.get_message": "Azure.AI.Assistants.getMessage", + "azure.ai.assistants.AIAssistantClient.update_message": "Azure.AI.Assistants.updateMessage", + "azure.ai.assistants.AIAssistantClient.create_run": "Azure.AI.Assistants.createRun", + "azure.ai.assistants.AIAssistantClient.list_runs": "Azure.AI.Assistants.listRuns", + "azure.ai.assistants.AIAssistantClient.get_run": "Azure.AI.Assistants.getRun", + "azure.ai.assistants.AIAssistantClient.update_run": "Azure.AI.Assistants.updateRun", + "azure.ai.assistants.AIAssistantClient.submit_tool_outputs_to_run": "Azure.AI.Assistants.submitToolOutputsToRun", + "azure.ai.assistants.AIAssistantClient.cancel_run": "Azure.AI.Assistants.cancelRun", + "azure.ai.assistants.AIAssistantClient.create_thread_and_run": "Azure.AI.Assistants.createThreadAndRun", + "azure.ai.assistants.AIAssistantClient.get_run_step": "Azure.AI.Assistants.getRunStep", + "azure.ai.assistants.AIAssistantClient.list_run_steps": "Azure.AI.Assistants.listRunSteps", + "azure.ai.assistants.AIAssistantClient.list_files": "Azure.AI.Assistants.listFiles", + 
"azure.ai.assistants.AIAssistantClient.upload_file": "Azure.AI.Assistants.uploadFile", + "azure.ai.assistants.AIAssistantClient.delete_file": "Azure.AI.Assistants.deleteFile", + "azure.ai.assistants.AIAssistantClient.get_file": "Azure.AI.Assistants.getFile", + "azure.ai.assistants.AIAssistantClient.list_vector_stores": "Azure.AI.Assistants.listVectorStores", + "azure.ai.assistants.AIAssistantClient.create_vector_store": "Azure.AI.Assistants.createVectorStore", + "azure.ai.assistants.AIAssistantClient.get_vector_store": "Azure.AI.Assistants.getVectorStore", + "azure.ai.assistants.AIAssistantClient.modify_vector_store": "Azure.AI.Assistants.modifyVectorStore", + "azure.ai.assistants.AIAssistantClient.delete_vector_store": "Azure.AI.Assistants.deleteVectorStore", + "azure.ai.assistants.AIAssistantClient.list_vector_store_files": "Azure.AI.Assistants.listVectorStoreFiles", + "azure.ai.assistants.AIAssistantClient.create_vector_store_file": "Azure.AI.Assistants.createVectorStoreFile", + "azure.ai.assistants.AIAssistantClient.get_vector_store_file": "Azure.AI.Assistants.getVectorStoreFile", + "azure.ai.assistants.AIAssistantClient.delete_vector_store_file": "Azure.AI.Assistants.deleteVectorStoreFile", + "azure.ai.assistants.AIAssistantClient.create_vector_store_file_batch": "Azure.AI.Assistants.createVectorStoreFileBatch", + "azure.ai.assistants.AIAssistantClient.get_vector_store_file_batch": "Azure.AI.Assistants.getVectorStoreFileBatch", + "azure.ai.assistants.AIAssistantClient.cancel_vector_store_file_batch": "Azure.AI.Assistants.cancelVectorStoreFileBatch", + "azure.ai.assistants.AIAssistantClient.list_vector_store_file_batch_files": "Azure.AI.Assistants.listVectorStoreFileBatchFiles" + } +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/azure/__init__.py b/sdk/ai/azure-ai-assistants/azure/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/__init__.py @@ -0,0 +1 @@ +__path__ = 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# Package entry point for azure.ai.assistants.
#
# NOTE: import order below is deliberate (hence the wrong-import-position
# disable): the TYPE_CHECKING wildcard lets type checkers see customizations
# from ._patch before the generated client is re-exported.
# pylint: disable=wrong-import-position

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only for static analysis; no runtime cost.
    from ._patch import *  # pylint: disable=unused-wildcard-import

from ._client import AIAssistantClient  # type: ignore
from ._version import VERSION

__version__ = VERSION

# Hand-written customizations live in _patch.py; if it exports nothing,
# fall back to an empty patch list rather than failing the import.
try:
    from ._patch import __all__ as _patch_all
    from ._patch import *
except ImportError:
    _patch_all = []
from ._patch import patch_sdk as _patch_sdk

__all__ = [
    "AIAssistantClient",
]
# Merge any names exported by _patch.py, without duplicating existing entries.
__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore

# Give _patch.py a hook to mutate the generated surface at import time.
_patch_sdk()
class AIAssistantClient(AIAssistantClientOperationsMixin):
    """Synchronous client for the Azure AI Assistants service.

    :param endpoint: Project endpoint in the form of:
     https://<aiservices-id>.services.ai.azure.com/api/projects/<project-name>. Required.
    :type endpoint: str
    :param credential: Credential used to authenticate requests to the service. Is either a key
     credential type or a token credential type. Required.
    :type credential: ~azure.core.credentials.AzureKeyCredential or
     ~azure.core.credentials.TokenCredential
    :keyword api_version: The API version to use for this operation. Default value is "latest".
     Note that overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None:
        # The pipeline base URL is a template; the concrete endpoint is
        # substituted into it per request (see send_request below).
        _endpoint = "{endpoint}"
        self._config = AIAssistantClientConfiguration(endpoint=endpoint, credential=credential, **kwargs)

        # Callers may supply a complete custom policy chain via ``policies``;
        # otherwise assemble the standard azure-core pipeline in its usual order.
        _policies = kwargs.pop("policies", None)
        if _policies is None:
            cfg = self._config
            _policies = [
                policies.RequestIdPolicy(**kwargs),
                cfg.headers_policy,
                cfg.user_agent_policy,
                cfg.proxy_policy,
                policies.ContentDecodePolicy(**kwargs),
                cfg.redirect_policy,
                cfg.retry_policy,
                cfg.authentication_policy,
                cfg.custom_hook_policy,
                cfg.logging_policy,
                policies.DistributedTracingPolicy(**kwargs),
                policies.SensitiveHeaderCleanupPolicy(**kwargs) if cfg.redirect_policy else None,
                cfg.http_logging_policy,
            ]
        self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs)

        self._serialize = Serializer()
        self._deserialize = Deserializer()
        self._serialize.client_side_validation = False

    def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client.send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # Work on a copy so the caller's request object is never mutated.
        copied = deepcopy(request)
        url_args = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        copied.url = self._client.format_url(copied.url, **url_args)
        return self._client.send_request(copied, stream=stream, **kwargs)  # type: ignore

    def close(self) -> None:
        """Close the underlying pipeline client and release its resources."""
        self._client.close()

    def __enter__(self) -> Self:
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details: Any) -> None:
        self._client.__exit__(*exc_details)
class AIAssistantClientConfiguration:  # pylint: disable=too-many-instance-attributes
    """Configuration for AIAssistantClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param endpoint: Project endpoint in the form of:
     https://<aiservices-id>.services.ai.azure.com/api/projects/<project-name>. Required.
    :type endpoint: str
    :param credential: Credential used to authenticate requests to the service. Is either a key
     credential type or a token credential type. Required.
    :type credential: ~azure.core.credentials.AzureKeyCredential or
     ~azure.core.credentials.TokenCredential
    :keyword api_version: The API version to use for this operation. Default value is "latest".
     Note that overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None:
        api_version: str = kwargs.pop("api_version", "latest")

        # Both constructor parameters are mandatory.
        for name, value in (("endpoint", endpoint), ("credential", credential)):
            if value is None:
                raise ValueError(f"Parameter '{name}' must not be None.")

        self.endpoint = endpoint
        self.credential = credential
        self.api_version = api_version
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION))
        self.polling_interval = kwargs.get("polling_interval", 30)
        self._configure(**kwargs)

    def _infer_policy(self, **kwargs):
        # Key credentials are sent as "Authorization: Bearer <key>"; anything
        # exposing get_token() goes through the standard bearer-token policy.
        if isinstance(self.credential, AzureKeyCredential):
            return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs)
        if hasattr(self.credential, "get_token"):
            return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
        raise TypeError(f"Unsupported credential: {self.credential}")

    def _configure(self, **kwargs: Any) -> None:
        # Each pipeline policy may be overridden via kwargs; fall back to the
        # azure-core default implementation when it is not.
        supplied = kwargs.get
        self.user_agent_policy = supplied("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = supplied("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = supplied("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = supplied("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = supplied("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
        self.custom_hook_policy = supplied("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = supplied("redirect_policy") or policies.RedirectPolicy(**kwargs)
        self.retry_policy = supplied("retry_policy") or policies.RetryPolicy(**kwargs)
        self.authentication_policy = supplied("authentication_policy")
        if self.credential and not self.authentication_policy:
            self.authentication_policy = self._infer_policy(**kwargs)
TZ_UTC = timezone.utc
_T = typing.TypeVar("_T")


def _timedelta_as_isostr(td: timedelta) -> str:
    """Convert a datetime.timedelta into an ISO 8601 duration string, e.g. 'P4DT12H30M05S'.

    Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython

    :param timedelta td: The timedelta to convert
    :rtype: str
    :return: ISO8601 version of this timedelta
    """
    # Break total seconds into day/hour/minute/second components.
    total = td.total_seconds()
    mins, secs = divmod(total, 60)
    hrs, mins = divmod(mins, 60)
    days, hrs = divmod(hrs, 24)

    days, hrs, mins = int(days), int(hrs), int(mins)
    secs = round(secs, 6)

    date_part = f"{days}D" if days else ""

    if not (hrs or mins or secs):
        # Duration has no time component at all.
        return "P" + date_part

    time_part = "T"
    # A unit is zero-padded only when some larger unit is present.
    larger_present = bool(date_part or hrs)
    if larger_present:
        time_part += f"{hrs:02}H"
    larger_present = larger_present or bool(mins)
    if larger_present:
        time_part += f"{mins:02}M"
    try:
        if secs.is_integer():
            sec_text = f"{int(secs):02}"
        else:
            # 9 chars long w/ leading 0, 6 digits after decimal, trailing zeros trimmed.
            sec_text = f"{secs:09.6f}".rstrip("0")
    except AttributeError:  # int.is_integer() raises
        sec_text = f"{secs:02}"
    time_part += f"{sec_text}S"
    return "P" + date_part + time_part


def _serialize_bytes(o, format: typing.Optional[str] = None) -> str:
    """Base64-encode bytes; "base64url" uses the URL-safe alphabet with padding stripped."""
    text = base64.b64encode(o).decode()
    if format == "base64url":
        return text.strip("=").replace("+", "-").replace("/", "_")
    return text


def _serialize_datetime(o, format: typing.Optional[str] = None):
    """Serialize a date/time-like object to its wire representation.

    Objects with both ``year`` and ``hour`` are treated as datetimes; anything
    else (date, time) falls through to plain isoformat().
    """
    if hasattr(o, "year") and hasattr(o, "hour"):
        if format == "rfc7231":
            return email.utils.format_datetime(o, usegmt=True)
        if format == "unix-timestamp":
            return int(calendar.timegm(o.utctimetuple()))
        # Naive datetimes are assumed UTC; aware ones are converted to UTC.
        aware = o.astimezone(TZ_UTC) if o.tzinfo else o.replace(tzinfo=TZ_UTC)
        # RFC 3339 spelling: "Z" instead of "+00:00" (https://www.ietf.org/rfc/rfc3339.txt).
        return aware.isoformat().replace("+00:00", "Z")
    return o.isoformat()


def _is_readonly(p):
    """Return True when the rest field's visibility is exactly ["read"]."""
    return getattr(p, "_visibility", None) == ["read"]


class SdkJSONEncoder(JSONEncoder):
    """A JSON encoder that's capable of serializing datetime objects and bytes."""

    def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs):
        super().__init__(*args, **kwargs)
        self.exclude_readonly = exclude_readonly
        self.format = format

    def default(self, o):  # pylint: disable=too-many-return-statements
        if _is_model(o):
            if not self.exclude_readonly:
                return dict(o.items())
            hidden = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)]
            return {k: v for k, v in o.items() if k not in hidden}
        try:
            return super().default(o)
        except TypeError:
            if isinstance(o, _Null):
                return None
            if isinstance(o, decimal.Decimal):
                return float(o)
            if isinstance(o, (bytes, bytearray)):
                return _serialize_bytes(o, self.format)
            try:
                # First try datetime.datetime
                return _serialize_datetime(o, self.format)
            except AttributeError:
                pass
            try:
                # Last, try datetime.timedelta (AttributeError raised on .total_seconds)
                return _timedelta_as_isostr(o)
            except AttributeError:
                pass
            return super().default(o)


_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
_VALID_RFC7231 = re.compile(
    r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s"
    r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT"
)
def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime:
    """Deserialize ISO-8601 formatted string into Datetime object.

    :param str attr: response string to be deserialized.
    :rtype: ~datetime.datetime
    :returns: The datetime object from that input
    """
    if isinstance(attr, datetime):
        # Already a datetime; nothing to parse.
        return attr
    attr = attr.upper()
    if not _VALID_DATE.match(attr):
        raise ValueError("Invalid datetime string: " + attr)

    # isodate only accepts up to microsecond precision: trim the fractional
    # seconds to at most six digits before parsing.
    head, _, tail = attr.partition(".")
    if tail:
        frac = ""
        for ch in tail:
            if not ch.isdigit():
                break
            frac += ch
        if len(frac) > 6:
            attr = attr.replace(frac, frac[0:6])

    parsed = isodate.parse_datetime(attr)
    utc_tuple = parsed.utctimetuple()
    if utc_tuple.tm_year > 9999 or utc_tuple.tm_year < 1:
        raise OverflowError("Hit max or min date")
    return parsed


def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime:
    """Deserialize RFC7231 formatted string into Datetime object.

    :param str attr: response string to be deserialized.
    :rtype: ~datetime.datetime
    :returns: The datetime object from that input
    """
    if isinstance(attr, datetime):
        return attr
    if not _VALID_RFC7231.match(attr):
        raise ValueError("Invalid datetime string: " + attr)
    return email.utils.parsedate_to_datetime(attr)


def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime:
    """Deserialize unix timestamp into Datetime object.

    :param str attr: response string to be deserialized.
    :rtype: ~datetime.datetime
    :returns: The datetime object from that input
    """
    if isinstance(attr, datetime):
        return attr
    return datetime.fromtimestamp(attr, timezone.utc)


def _deserialize_date(attr: typing.Union[str, date]) -> date:
    """Deserialize ISO-8601 formatted string into Date object.

    :param str attr: response string to be deserialized.
    :rtype: date
    :returns: The date object from that input
    """
    if isinstance(attr, date):
        return attr
    # This must NOT use defaultmonth/defaultday. Using None ensures this raises on partial dates.
    return isodate.parse_date(attr, defaultmonth=None, defaultday=None)  # type: ignore


def _deserialize_time(attr: typing.Union[str, time]) -> time:
    """Deserialize ISO-8601 formatted string into time object.

    :param str attr: response string to be deserialized.
    :rtype: datetime.time
    :returns: The time object from that input
    """
    return attr if isinstance(attr, time) else isodate.parse_time(attr)


def _deserialize_bytes(attr):
    """Decode a standard base64 string to bytes; bytes pass through untouched."""
    if isinstance(attr, (bytes, bytearray)):
        return attr
    return bytes(base64.b64decode(attr))


def _deserialize_bytes_base64(attr):
    """Decode a base64url (URL-safe, unpadded) string to bytes."""
    if isinstance(attr, (bytes, bytearray)):
        return attr
    # Restore the stripped '=' padding, then map back to the standard alphabet.
    padding = "=" * (3 - (len(attr) + 3) % 4)  # type: ignore
    standard = (attr + padding).replace("-", "+").replace("_", "/")  # type: ignore
    return bytes(base64.b64decode(standard))


def _deserialize_duration(attr):
    """Parse an ISO-8601 duration string; timedeltas pass through untouched."""
    return attr if isinstance(attr, timedelta) else isodate.parse_duration(attr)


def _deserialize_decimal(attr):
    """Build a Decimal from the wire value; Decimals pass through untouched."""
    return attr if isinstance(attr, decimal.Decimal) else decimal.Decimal(str(attr))


def _deserialize_int_as_str(attr):
    """Parse an int sent as a string on the wire; ints pass through untouched."""
    return attr if isinstance(attr, int) else int(attr)


# Default converters keyed by the field's Python annotation.
_DESERIALIZE_MAPPING = {
    datetime: _deserialize_datetime,
    date: _deserialize_date,
    time: _deserialize_time,
    bytes: _deserialize_bytes,
    bytearray: _deserialize_bytes,
    timedelta: _deserialize_duration,
    typing.Any: lambda x: x,
    decimal.Decimal: _deserialize_decimal,
}

# Converters selected by an explicit wire format on the rest field.
_DESERIALIZE_MAPPING_WITHFORMAT = {
    "rfc3339": _deserialize_datetime,
    "rfc7231": _deserialize_datetime_rfc7231,
    "unix-timestamp": _deserialize_datetime_unix_timestamp,
    "base64": _deserialize_bytes,
    "base64url": _deserialize_bytes_base64,
}
def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None):
    """Select the deserializer callable for a field.

    :param any annotation: The Python type annotation of the field.
    :param rf: Optional rest-field metadata; its ``_format`` picks a
     wire-format-specific converter (rfc7231, base64url, ...).
    :return: A converter callable, or None when no conversion is needed.
    """
    # ints declared with format "str" arrive as strings on the wire.
    if annotation is int and rf and rf._format == "str":
        return _deserialize_int_as_str
    if rf and rf._format:
        return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format)
    return _DESERIALIZE_MAPPING.get(annotation)  # pyright: ignore


def _get_type_alias_type(module_name: str, alias_name: str):
    """Resolve a string type-alias name to the alias object defined in *module_name*.

    Returns the name unchanged when no matching alias is found.
    """
    types = {
        k: v
        for k, v in sys.modules[module_name].__dict__.items()
        if isinstance(v, typing._GenericAlias)  # type: ignore
    }
    if alias_name not in types:
        return alias_name
    return types[alias_name]


def _get_model(module_name: str, model_name: str):
    """Resolve a string model name to the class defined in *module_name* or its parent package.

    Returns the name unchanged when no matching class is found.
    """
    models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)}
    # Also search the parent package (e.g. "pkg" for "pkg.models").
    module_end = module_name.rsplit(".", 1)[0]
    models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)})
    if isinstance(model_name, str):
        # A dotted name refers to the class by its last component.
        model_name = model_name.split(".")[-1]
    if model_name not in models:
        return model_name
    return models[model_name]


# Sentinel distinguishing "no value supplied" from an explicit None.
_UNSET = object()


class _MyMutableMapping(MutableMapping[str, typing.Any]):  # pylint: disable=unsubscriptable-object
    """Mutable-mapping base that delegates every dict operation to the wrapped ``_data`` dict."""

    def __init__(self, data: typing.Dict[str, typing.Any]) -> None:
        # The wrapped dict holds the wire-format (rest-name-keyed) data.
        self._data = data

    def __contains__(self, key: typing.Any) -> bool:
        return key in self._data

    def __getitem__(self, key: str) -> typing.Any:
        return self._data.__getitem__(key)

    def __setitem__(self, key: str, value: typing.Any) -> None:
        self._data.__setitem__(key, value)

    def __delitem__(self, key: str) -> None:
        self._data.__delitem__(key)

    def __iter__(self) -> typing.Iterator[typing.Any]:
        return self._data.__iter__()

    def __len__(self) -> int:
        return self._data.__len__()

    def __ne__(self, other: typing.Any) -> bool:
        return not self.__eq__(other)

    def keys(self) -> typing.KeysView[str]:
        """
        :returns: a set-like object providing a view on D's keys
        :rtype: ~typing.KeysView
        """
        return self._data.keys()

    def values(self) -> typing.ValuesView[typing.Any]:
        """
        :returns: an object providing a view on D's values
        :rtype: ~typing.ValuesView
        """
        return self._data.values()

    def items(self) -> typing.ItemsView[str, typing.Any]:
        """
        :returns: set-like object providing a view on D's items
        :rtype: ~typing.ItemsView
        """
        return self._data.items()

    def get(self, key: str, default: typing.Any = None) -> typing.Any:
        """
        Get the value for key if key is in the dictionary, else default.
        :param str key: The key to look up.
        :param any default: The value to return if key is not in the dictionary. Defaults to None
        :returns: D[k] if k in D, else d.
        :rtype: any
        """
        try:
            return self[key]
        except KeyError:
            return default

    @typing.overload
    def pop(self, key: str) -> typing.Any: ...

    @typing.overload
    def pop(self, key: str, default: _T) -> _T: ...

    @typing.overload
    def pop(self, key: str, default: typing.Any) -> typing.Any: ...

    def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any:
        """
        Removes specified key and return the corresponding value.
        :param str key: The key to pop.
        :param any default: The value to return if key is not in the dictionary
        :returns: The value corresponding to the key.
        :rtype: any
        :raises KeyError: If key is not found and default is not given.
        """
        # _UNSET sentinel preserves dict.pop's raise-on-missing behavior.
        if default is _UNSET:
            return self._data.pop(key)
        return self._data.pop(key, default)

    def popitem(self) -> typing.Tuple[str, typing.Any]:
        """
        Removes and returns some (key, value) pair
        :returns: The (key, value) pair.
        :rtype: tuple
        :raises KeyError: if D is empty.
        """
        return self._data.popitem()

    def clear(self) -> None:
        """
        Remove all items from D.
        """
        self._data.clear()

    def update(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        """
        Updates D from mapping/iterable E and F.
        :param any args: Either a mapping object or an iterable of key-value pairs.
        """
        self._data.update(*args, **kwargs)

    @typing.overload
    def setdefault(self, key: str, default: None = None) -> None: ...

    @typing.overload
    def setdefault(self, key: str, default: typing.Any) -> typing.Any: ...

    def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any:
        """
        Same as calling D.get(k, d), and setting D[k]=d if k not found
        :param str key: The key to look up.
        :param any default: The value to set if key is not in the dictionary
        :returns: D[k] if k in D, else d.
        :rtype: any
        """
        if default is _UNSET:
            return self._data.setdefault(key)
        return self._data.setdefault(key, default)

    def __eq__(self, other: typing.Any) -> bool:
        # Equality is by wire data: coerce `other` through this class's
        # constructor, then compare the underlying dicts.
        try:
            other_model = self.__class__(other)
        except Exception:
            return False
        return self._data == other_model._data

    def __repr__(self) -> str:
        return str(self._data)


def _is_model(obj: typing.Any) -> bool:
    """True for instances of generated models (they carry the ``_is_model`` marker)."""
    return getattr(obj, "_is_model", False)


def _serialize(o, format: typing.Optional[str] = None):  # pylint: disable=too-many-return-statements
    """Recursively convert a Python value into its JSON-compatible wire form.

    Containers are walked recursively; bytes, Decimal, Enum, int (with "str"
    format), datetime-likes and timedeltas get dedicated handling. Anything
    else is returned unchanged.
    """
    if isinstance(o, list):
        return [_serialize(x, format) for x in o]
    if isinstance(o, dict):
        return {k: _serialize(v, format) for k, v in o.items()}
    if isinstance(o, set):
        return {_serialize(x, format) for x in o}
    if isinstance(o, tuple):
        return tuple(_serialize(x, format) for x in o)
    if isinstance(o, (bytes, bytearray)):
        return _serialize_bytes(o, format)
    if isinstance(o, decimal.Decimal):
        return float(o)
    if isinstance(o, enum.Enum):
        return o.value
    if isinstance(o, int):
        if format == "str":
            return str(o)
        return o
    try:
        # First try datetime.datetime
        return _serialize_datetime(o, format)
    except AttributeError:
        pass
    # Last, try datetime.timedelta
    try:
        return _timedelta_as_isostr(o)
    except AttributeError:
        # This will be raised when it hits value.total_seconds in the method above
        pass
    return o
def _get_rest_field(
    attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str
) -> typing.Optional["_RestField"]:
    """Find the rest-field metadata whose wire name matches *rest_name*, or None."""
    try:
        return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name)
    except StopIteration:
        return None


def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any:
    """Convert an incoming value according to its rest-field metadata.

    Model-typed fields are deserialized; multipart file inputs pass through
    unchanged; everything else is (re)serialized with the field's format.
    """
    if not rf:
        return _serialize(value, None)
    if rf._is_multipart_file_input:
        return value
    if rf._is_model:
        return _deserialize(rf._type, value)
    if isinstance(value, ET.Element):
        value = _deserialize(rf._type, value)
    return _serialize(value, rf._format)


class Model(_MyMutableMapping):
    """Base class for generated models: a mutable mapping over the wire-format dict,
    with attribute<->rest-name translation driven by ``_attr_to_rest_field``."""

    # Marker consumed by _is_model().
    _is_model = True
    # label whether current class's _attr_to_rest_field has been calculated
    # could not see _attr_to_rest_field directly because subclass inherits it from parent class
    _calculated: typing.Set[str] = set()

    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        class_name = self.__class__.__name__
        if len(args) > 1:
            raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given")
        # Start from the declared field defaults (keyed by wire name).
        dict_to_pass = {
            rest_field._rest_name: rest_field._default
            for rest_field in self._attr_to_rest_field.values()
            if rest_field._default is not _UNSET
        }
        if args:  # pylint: disable=too-many-nested-blocks
            if isinstance(args[0], ET.Element):
                # XML payload: walk every declared field and pull it out of the element.
                existed_attr_keys = []
                model_meta = getattr(self, "_xml", {})

                for rf in self._attr_to_rest_field.values():
                    prop_meta = getattr(rf, "_xml", {})
                    xml_name = prop_meta.get("name", rf._rest_name)
                    xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
                    if xml_ns:
                        xml_name = "{" + xml_ns + "}" + xml_name

                    # attribute
                    if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None:
                        existed_attr_keys.append(xml_name)
                        dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name))
                        continue

                    # unwrapped element is array
                    if prop_meta.get("unwrapped", False):
                        # unwrapped array could either use prop items meta/prop meta
                        if prop_meta.get("itemsName"):
                            xml_name = prop_meta.get("itemsName")
                            xml_ns = prop_meta.get("itemNs")
                            if xml_ns:
                                xml_name = "{" + xml_ns + "}" + xml_name
                        items = args[0].findall(xml_name)  # pyright: ignore
                        if len(items) > 0:
                            existed_attr_keys.append(xml_name)
                            dict_to_pass[rf._rest_name] = _deserialize(rf._type, items)
                        continue

                    # text element is primitive type
                    if prop_meta.get("text", False):
                        if args[0].text is not None:
                            dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text)
                        continue

                    # wrapped element could be normal property or array, it should only have one element
                    item = args[0].find(xml_name)
                    if item is not None:
                        existed_attr_keys.append(xml_name)
                        dict_to_pass[rf._rest_name] = _deserialize(rf._type, item)

                # rest thing is additional properties
                for e in args[0]:
                    if e.tag not in existed_attr_keys:
                        dict_to_pass[e.tag] = _convert_element(e)
            else:
                # Dict payload: convert each entry through its field metadata (if declared).
                dict_to_pass.update(
                    {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()}
                )
        else:
            non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field]
            if non_attr_kwargs:
                # actual type errors only throw the first wrong keyword arg they see, so following that.
                raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'")
            # Note: None-valued kwargs are dropped, so callers cannot explicitly null a field here.
            dict_to_pass.update(
                {
                    self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v)
                    for k, v in kwargs.items()
                    if v is not None
                }
            )
        super().__init__(dict_to_pass)

    def copy(self) -> "Model":
        # NOTE(review): this builds a base Model from self.__dict__ (the instance
        # attribute dict, i.e. {"_data": ...}) rather than from self._data, and does
        # not preserve the subclass — presumably upstream-generated behavior; verify
        # before relying on copy().
        return Model(self.__dict__)

    def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self:
        # Compute the attr-name -> rest-field table once per class, lazily on
        # first instantiation, and memoize via _calculated.
        if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated:
            # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping',
            # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object'
            mros = cls.__mro__[:-9][::-1]  # ignore parents, and reverse the mro order
            attr_to_rest_field: typing.Dict[str, _RestField] = {  # map attribute name to rest_field property
                k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type")
            }
            annotations = {
                k: v
                for mro_class in mros
                if hasattr(mro_class, "__annotations__")
                for k, v in mro_class.__annotations__.items()
            }
            for attr, rf in attr_to_rest_field.items():
                rf._module = cls.__module__
                if not rf._type:
                    # Field type defaults to the attribute's annotation.
                    rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None))
                if not rf._rest_name_input:
                    # Wire name defaults to the attribute name.
                    rf._rest_name_input = attr
            cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items())
            cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}")

        return super().__new__(cls)  # pylint: disable=no-value-for-parameter

    def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None:
        # Register each subclass in its polymorphic base's __mapping__, keyed by
        # its discriminator value (or class name when none is given).
        for base in cls.__bases__:
            if hasattr(base, "__mapping__"):
                base.__mapping__[discriminator or cls.__name__] = cls  # type: ignore

    @classmethod
    def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]:
        # Find the first discriminator field not already consumed by an outer level.
        for v in cls.__dict__.values():
            if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators:
                return v
        return None

    @classmethod
    def _deserialize(cls, data, exist_discriminators):
        # Non-polymorphic class: construct directly.
        if not hasattr(cls, "__mapping__"):
            return cls(data)
        discriminator = cls._get_discriminator(exist_discriminators)
        if discriminator is None:
            return cls(data)
        exist_discriminators.append(discriminator._rest_name)
        if isinstance(data, ET.Element):
            model_meta = getattr(cls, "_xml", {})
            prop_meta = getattr(discriminator, "_xml", {})
            xml_name = prop_meta.get("name", discriminator._rest_name)
            xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
            if xml_ns:
                xml_name = "{" + xml_ns + "}" + xml_name

            # The discriminator may be an XML attribute or a child element's text.
            if data.get(xml_name) is not None:
                discriminator_value = data.get(xml_name)
            else:
                discriminator_value = data.find(xml_name).text  # pyright: ignore
        else:
            discriminator_value = data.get(discriminator._rest_name)
        # Unknown discriminator values fall back to the base class itself.
        mapped_cls = cls.__mapping__.get(discriminator_value, cls)  # pyright: ignore
        return mapped_cls._deserialize(data, exist_discriminators)

    def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]:
        """Return a dict that can be turned into json using json.dump.

        :keyword bool exclude_readonly: Whether to remove the readonly properties.
        :returns: A dict JSON compatible object
        :rtype: dict
        """

        result = {}
        readonly_props = []
        if exclude_readonly:
            readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)]
        for k, v in self.items():
            if exclude_readonly and k in readonly_props:  # pyright: ignore
                continue
            # Multipart file inputs must be passed through untouched.
            is_multipart_file_input = False
            try:
                is_multipart_file_input = next(
                    rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k
                )._is_multipart_file_input
            except StopIteration:
                pass
            result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly)
        return result

    @staticmethod
    def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any:
        # Recursively flatten containers and nested models into plain dicts/lists.
        if v is None or isinstance(v, _Null):
            return None
        if isinstance(v, (list, tuple, set)):
            return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v)
        if isinstance(v, dict):
            return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()}
        return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v


def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj):
    """Deserialize *obj* into a model unless it already is one."""
    if _is_model(obj):
        return obj
    return _deserialize(model_deserializer, obj)


def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj):
    """Apply the inner deserializer only when *obj* is not None."""
    if obj is None:
        return obj
    return _deserialize_with_callable(if_obj_deserializer, obj)


def _deserialize_with_union(deserializers, obj):
    """Try each union-member deserializer in order; raise when none succeeds."""
    for deserializer in deserializers:
        try:
            return _deserialize(deserializer, obj)
        except DeserializationError:
            pass
    raise DeserializationError()
_deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? + if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) # type: ignore + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? 
+ try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? + if getattr(annotation, "__origin__", None) is typing.Union: + # initial ordering is we make `string` the last deserialization option, because it is often them most generic + deserializers = [ + _get_deserialize_callable_from_annotation(arg, module, rf) + for arg in _sorted_annotations(annotation.__args__) # pyright: ignore + ] + + return functools.partial(_deserialize_with_union, deserializers) + + try: + if annotation._name == "Dict": # pyright: ignore + value_deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[1], module, rf # pyright: ignore + ) + + return functools.partial( + _deserialize_dict, + value_deserializer, + module, + ) + except (AttributeError, IndexError): + pass + try: + if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore + if len(annotation.__args__) > 1: # pyright: ignore + entry_deserializers = [ + _get_deserialize_callable_from_annotation(dt, module, rf) + for dt in annotation.__args__ # pyright: ignore + ] + return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) + deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[0], module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_sequence, 
deserializer, module) + except (TypeError, IndexError, AttributeError, SyntaxError): + pass + + def _deserialize_default( + deserializer, + obj, + ): + if obj is None: + return obj + try: + return _deserialize_with_callable(deserializer, obj) + except Exception: + pass + return obj + + if get_deserializer(annotation, rf): + return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) + + return functools.partial(_deserialize_default, annotation) + + +def _deserialize_with_callable( + deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): # pylint: disable=too-many-return-statements + try: + if value is None or isinstance(value, _Null): + return None + if isinstance(value, ET.Element): + if deserializer is str: + return value.text or "" + if deserializer is int: + return int(value.text) if value.text else None + if deserializer is float: + return float(value.text) if value.text else None + if deserializer is bool: + return value.text == "true" if value.text else None + if deserializer is None: + return value + if deserializer in [int, float, bool]: + return deserializer(value) + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try: + return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + 
def _failsafe_deserialize(
    deserializer: typing.Any,
    value: typing.Any,
    module: typing.Optional[str] = None,
    rf: typing.Optional["_RestField"] = None,
    format: typing.Optional[str] = None,
) -> typing.Any:
    """Deserialize ``value``; on failure, log a warning and return None instead of raising."""
    try:
        return _deserialize(deserializer, value, module, rf, format)
    except DeserializationError:
        _LOGGER.warning(
            "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
        )
        return None


def _failsafe_deserialize_xml(
    deserializer: typing.Any,
    value: typing.Any,
) -> typing.Any:
    """XML variant of ``_failsafe_deserialize``: swallow deserialization errors, return None."""
    try:
        return _deserialize_xml(deserializer, value)
    except DeserializationError:
        _LOGGER.warning(
            "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
        )
        return None


class _RestField:
    """Descriptor mapping a model attribute to its REST wire name and (de)serialization rules."""

    def __init__(
        self,
        *,
        name: typing.Optional[str] = None,
        type: typing.Optional[typing.Callable] = None,  # pylint: disable=redefined-builtin
        is_discriminator: bool = False,
        visibility: typing.Optional[typing.List[str]] = None,
        default: typing.Any = _UNSET,
        format: typing.Optional[str] = None,
        is_multipart_file_input: bool = False,
        xml: typing.Optional[typing.Dict[str, typing.Any]] = None,
    ):
        self._type = type
        self._rest_name_input = name
        self._module: typing.Optional[str] = None
        self._is_discriminator = is_discriminator
        self._visibility = visibility
        self._is_model = False
        self._default = default
        self._format = format
        self._is_multipart_file_input = is_multipart_file_input
        self._xml = xml if xml is not None else {}

    @property
    def _class_type(self) -> typing.Any:
        # first generic argument of the field's type, if present
        return getattr(self._type, "args", [None])[0]

    @property
    def _rest_name(self) -> str:
        if self._rest_name_input is None:
            raise ValueError("Rest name was never set")
        return self._rest_name_input

    def __get__(self, obj: Model, type=None):  # pylint: disable=redefined-builtin
        # by this point, type and rest_name will have a value bc we default
        # them in __new__ of the Model class
        item = obj.get(self._rest_name)
        if item is None:
            return item
        if self._is_model:
            return item
        return _deserialize(self._type, _serialize(item, self._format), rf=self)

    def __set__(self, obj: Model, value) -> None:
        if value is None:
            # we want to wipe out entries if users set attr to None
            try:
                del obj[self._rest_name]
            except KeyError:
                pass
            return
        if self._is_model:
            if not _is_model(value):
                value = _deserialize(self._type, value)
            obj[self._rest_name] = value
            return
        obj[self._rest_name] = _serialize(value, self._format)

    def _get_deserialize_callable_from_annotation(
        self, annotation: typing.Any
    ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]:
        return _get_deserialize_callable_from_annotation(annotation, self._module, self)


def rest_field(
    *,
    name: typing.Optional[str] = None,
    type: typing.Optional[typing.Callable] = None,  # pylint: disable=redefined-builtin
    visibility: typing.Optional[typing.List[str]] = None,
    default: typing.Any = _UNSET,
    format: typing.Optional[str] = None,
    is_multipart_file_input: bool = False,
    xml: typing.Optional[typing.Dict[str, typing.Any]] = None,
) -> typing.Any:
    """Declare a regular REST field on a model class."""
    return _RestField(
        name=name,
        type=type,
        visibility=visibility,
        default=default,
        format=format,
        is_multipart_file_input=is_multipart_file_input,
        xml=xml,
    )


def rest_discriminator(
    *,
    name: typing.Optional[str] = None,
    type: typing.Optional[typing.Callable] = None,  # pylint: disable=redefined-builtin
    visibility: typing.Optional[typing.List[str]] = None,
    xml: typing.Optional[typing.Dict[str, typing.Any]] = None,
) -> typing.Any:
    """Declare a discriminator REST field used for polymorphic dispatch."""
    return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml)


def serialize_xml(model: Model, exclude_readonly: bool = False) -> str:
    """Serialize a model to XML.

    :param Model model: The model to serialize.
    :param bool exclude_readonly: Whether to exclude readonly properties.
    :returns: The XML representation of the model.
    :rtype: str
    """
    return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode")  # type: ignore
exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, typing.List[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + 
wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + "prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + exclude_readonly: bool, + meta: typing.Optional[typing.Dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + 
if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: typing.Dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: typing.List[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py new file mode 100644 index 000000000000..28950a5960c0 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py @@ -0,0 +1,25 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import AIAssistantClientOperationsMixin # type: ignore + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AIAssistantClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py new file mode 100644 index 000000000000..210901feac86 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py @@ -0,0 +1,5890 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, Iterator, List, Optional, TYPE_CHECKING, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import _model_base, models as _models +from .._model_base import SdkJSONEncoder, _deserialize +from .._serialization import Serializer +from .._vendor import AIAssistantClientMixinABC, prepare_multipart_form_data + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore + +if TYPE_CHECKING: + from .. 
import _types +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_ai_assistant_create_agent_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_list_agents_request( + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + 
def build_ai_assistant_get_agent_request(agent_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request that retrieves a single agent by id."""
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", query_map.pop("api-version", "latest"))
    accept = header_map.pop("Accept", "application/json")

    # interpolate the agent id into the path
    path_format_arguments = {"assistantId": _SERIALIZER.url("agent_id", agent_id, "str")}
    _url = "/assistants/{assistantId}".format(**path_format_arguments)

    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=query_map, headers=header_map, **kwargs)


def build_ai_assistant_update_agent_request(agent_id: str, **kwargs: Any) -> HttpRequest:
    """Build the POST request that updates an existing agent."""
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", header_map.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", query_map.pop("api-version", "latest"))
    accept = header_map.pop("Accept", "application/json")

    path_format_arguments = {"assistantId": _SERIALIZER.url("agent_id", agent_id, "str")}
    _url = "/assistants/{assistantId}".format(**path_format_arguments)

    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    if content_type is not None:
        header_map["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=query_map, headers=header_map, **kwargs)


def build_ai_assistant_delete_agent_request(agent_id: str, **kwargs: Any) -> HttpRequest:
    """Build the DELETE request that removes an agent."""
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", query_map.pop("api-version", "latest"))
    accept = header_map.pop("Accept", "application/json")

    path_format_arguments = {"assistantId": _SERIALIZER.url("agent_id", agent_id, "str")}
    _url = "/assistants/{assistantId}".format(**path_format_arguments)

    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=_url, params=query_map, headers=header_map, **kwargs)


def build_ai_assistant_create_thread_request(**kwargs: Any) -> HttpRequest:
    """Build the POST request that creates a new thread."""
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", header_map.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", query_map.pop("api-version", "latest"))
    accept = header_map.pop("Accept", "application/json")

    _url = "/threads"

    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    if content_type is not None:
        header_map["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=query_map, headers=header_map, **kwargs)
def build_ai_assistant_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request that retrieves a single thread by id."""
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", query_map.pop("api-version", "latest"))
    accept = header_map.pop("Accept", "application/json")

    path_format_arguments = {"threadId": _SERIALIZER.url("thread_id", thread_id, "str")}
    _url = "/threads/{threadId}".format(**path_format_arguments)

    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=query_map, headers=header_map, **kwargs)


def build_ai_assistant_update_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest:
    """Build the POST request that updates an existing thread."""
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", header_map.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", query_map.pop("api-version", "latest"))
    accept = header_map.pop("Accept", "application/json")

    path_format_arguments = {"threadId": _SERIALIZER.url("thread_id", thread_id, "str")}
    _url = "/threads/{threadId}".format(**path_format_arguments)

    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    if content_type is not None:
        header_map["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=query_map, headers=header_map, **kwargs)


def build_ai_assistant_delete_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest:
    """Build the DELETE request that removes a thread."""
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", query_map.pop("api-version", "latest"))
    accept = header_map.pop("Accept", "application/json")

    path_format_arguments = {"threadId": _SERIALIZER.url("thread_id", thread_id, "str")}
    _url = "/threads/{threadId}".format(**path_format_arguments)

    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=_url, params=query_map, headers=header_map, **kwargs)


def build_ai_assistant_create_message_request(  # pylint: disable=name-too-long
    thread_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the POST request that creates a message on a thread."""
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    content_type: Optional[str] = kwargs.pop("content_type", header_map.pop("Content-Type", None))
    api_version: str = kwargs.pop("api_version", query_map.pop("api-version", "latest"))
    accept = header_map.pop("Accept", "application/json")

    path_format_arguments = {"threadId": _SERIALIZER.url("thread_id", thread_id, "str")}
    _url = "/threads/{threadId}/messages".format(**path_format_arguments)

    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    if content_type is not None:
        header_map["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=_url, params=query_map, headers=header_map, **kwargs)
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_list_messages_request( + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if run_id is not None: + _params["runId"] = _SERIALIZER.query("run_id", run_id, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_get_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", 
"latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": _SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_update_message_request( # pylint: disable=name-too-long + thread_id: str, message_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": _SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_create_run_request( + thread_id: str, *, include: Optional[List[Union[str, 
_models.RunAdditionalFieldList]]] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if include is not None: + _params["include[]"] = _SERIALIZER.query("include", include, "[str]", div=",") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_list_runs_request( + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_get_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_update_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", 
"application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_submit_tool_outputs_to_run_request( # pylint: disable=name-too-long + thread_id: str, run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/submit_tool_outputs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_cancel_run_request(thread_id: 
str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/cancel" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_create_thread_and_run_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/runs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_get_run_step_request( + thread_id: str, + run_id: str, + step_id: str, + *, + include: Optional[List[Union[str, 
_models.RunAdditionalFieldList]]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps/{stepId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + "stepId": _SERIALIZER.url("step_id", step_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if include is not None: + _params["include[]"] = _SERIALIZER.query("include", include, "[str]", div=",") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_list_run_steps_request( # pylint: disable=name-too-long + thread_id: str, + run_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = 
_url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if include is not None: + _params["include[]"] = _SERIALIZER.query("include", include, "[str]", div=",") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_list_files_request( + *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if purpose is not None: + _params["purpose"] = _SERIALIZER.query("purpose", purpose, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_upload_file_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # 
Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_delete_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_get_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, 
params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_get_file_content_request( # pylint: disable=name-too-long + file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/octet-stream") + + # Construct URL + _url = "/files/{fileId}/content" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_list_vector_stores_request( # pylint: disable=name-too-long + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = 
_SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_create_vector_store_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_get_vector_store_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_modify_vector_store_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_delete_vector_store_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + 
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_list_vector_store_files_request( # pylint: disable=name-too-long + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_create_vector_store_file_request( # pylint: disable=name-too-long + 
vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_get_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_delete_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_create_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + 
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_get_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_cancel_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/cancel" + path_format_arguments = { 
+ "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_ai_assistant_list_vector_store_file_batch_files_request( # pylint: disable=name-too-long + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: 
+ _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class AIAssistantClientOperationsMixin(AIAssistantClientMixinABC): # pylint: disable=too-many-public-methods + + @overload + def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. 
+ :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. 
+ :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_agent_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + 
response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_agents( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfAgent: + """Gets a list of agents that were previously created. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfAgent. 
The OpenAIPageableListOfAgent is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfAgent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_agents_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: + """Retrieves an existing agent. + + :param agent_id: Identifier of the agent. Required. 
+ :type agent_id: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_agent_request( + agent_id=agent_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_agent( + self, + agent_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: 
Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. 
+ :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_agent( + self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_agent( + self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_agent( + self, + agent_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. 
+ :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
+ :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_update_agent_request( + agent_id=agent_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + 
path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + """Deletes an agent. + + :param agent_id: Identifier of the agent. Required. + :type agent_id: str + :return: AgentDeletionStatus. 
The AgentDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AgentDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + + _request = build_ai_assistant_delete_agent_request( + agent_id=agent_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread( + self, + *, + content_type: str = "application/json", + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + 
**kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. 
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_thread( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. 
+ :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_thread_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: 
disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.AgentThread, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread:
        """Gets information about an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # HTTP status -> azure-core exception mapping; callers may extend or override
        # individual entries by passing an "error_map" keyword argument.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        # Optional response hook: cls(pipeline_response, deserialized, response_headers).
        cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None)

        # Build the HTTP request, then expand the client endpoint into the URL template.
        _request = build_ai_assistant_get_thread_request(
            thread_id=thread_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # When streaming, drain the body so the connection is released before raising.
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        # Streaming callers receive the raw byte iterator; otherwise deserialize the JSON payload.
        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.AgentThread, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    def update_thread(
        self,
        thread_id: str,
        *,
        content_type: str = "application/json",
        tool_resources: Optional[_models.ToolResources] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.AgentThread:
        """Modifies an existing thread.

        :param thread_id: The ID of the thread to modify. Required.
        :type thread_id: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword tool_resources: A set of resources that are made available to the agent's tools in
         this thread. The resources are specific to the
         type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
         the ``file_search`` tool requires
         a list of vector store IDs. Default value is None.
        :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def update_thread(
        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.AgentThread:
        """Modifies an existing thread.

        :param thread_id: The ID of the thread to modify. Required.
        :type thread_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def update_thread(
        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.AgentThread:
        """Modifies an existing thread.

        :param thread_id: The ID of the thread to modify. Required.
        :type thread_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def update_thread(
        self,
        thread_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        tool_resources: Optional[_models.ToolResources] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.AgentThread:
        """Modifies an existing thread.

        :param thread_id: The ID of the thread to modify. Required.
        :type thread_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword tool_resources: A set of resources that are made available to the agent's tools in
         this thread. The resources are specific to the
         type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
         the ``file_search`` tool requires
         a list of vector store IDs. Default value is None.
        :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # HTTP status -> azure-core exception mapping; callers may extend or override
        # individual entries by passing an "error_map" keyword argument.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        # An explicit content_type kwarg wins over a Content-Type header.
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None)

        # No explicit body given: assemble the JSON payload from the keyword
        # arguments, dropping keys whose value is None.
        if body is _Unset:
            body = {"metadata": metadata, "tool_resources": tool_resources}
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        # File-like/bytes bodies are sent as-is; JSON bodies are serialized here.
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        # Build the HTTP request, then expand the client endpoint into the URL template.
        _request = build_ai_assistant_update_thread_request(
            thread_id=thread_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # When streaming, drain the body so the connection is released before raising.
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        # Streaming callers receive the raw byte iterator; otherwise deserialize the JSON payload.
        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.AgentThread, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus:
        """Deletes an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :return: ThreadDeletionStatus. The ThreadDeletionStatus is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadDeletionStatus
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # HTTP status -> azure-core exception mapping; callers may extend or override
        # individual entries by passing an "error_map" keyword argument.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None)

        # Build the HTTP request, then expand the client endpoint into the URL template.
        _request = build_ai_assistant_delete_thread_request(
            thread_id=thread_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # When streaming, drain the body so the connection is released before raising.
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        # Streaming callers receive the raw byte iterator; otherwise deserialize the JSON payload.
        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.ThreadDeletionStatus, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    def create_message(
        self,
        thread_id: str,
        *,
        role: Union[str, _models.MessageRole],
        content: str,
        content_type: str = "application/json",
        attachments: Optional[List[_models.MessageAttachment]] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.ThreadMessage:
        """Creates a new message on a specified thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :keyword role: The role of the entity that is creating the message. Allowed values include:

         * `user`: Indicates the message is sent by an actual user and should be used in most
           cases to represent user-generated messages.
         * `assistant`: Indicates the message is generated by the agent. Use this value to insert
           messages from the agent into the
           conversation. Known values are: "user" and "assistant". Required.
        :paramtype role: str or ~azure.ai.assistants.models.MessageRole
        :keyword content: The textual content of the initial message. Currently, robust input including
         images and annotated text may only be provided via
         a separate call to the create message API. Required.
        :paramtype content: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword attachments: A list of files attached to the message, and the tools they should be
         added to. Default value is None.
        :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment]
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create_message(
        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadMessage:
        """Creates a new message on a specified thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create_message(
        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadMessage:
        """Creates a new message on a specified thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def create_message(
        self,
        thread_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        role: Union[str, _models.MessageRole] = _Unset,
        content: str = _Unset,
        attachments: Optional[List[_models.MessageAttachment]] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.ThreadMessage:
        """Creates a new message on a specified thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword role: The role of the entity that is creating the message. Allowed values include:

         * `user`: Indicates the message is sent by an actual user and should be used in most
           cases to represent user-generated messages.
         * `assistant`: Indicates the message is generated by the agent. Use this value to insert
           messages from the agent into the
           conversation. Known values are: "user" and "assistant". Required.
        :paramtype role: str or ~azure.ai.assistants.models.MessageRole
        :keyword content: The textual content of the initial message. Currently, robust input including
         images and annotated text may only be provided via
         a separate call to the create message API. Required.
        :paramtype content: str
        :keyword attachments: A list of files attached to the message, and the tools they should be
         added to. Default value is None.
        :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment]
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # HTTP status -> azure-core exception mapping; callers may extend or override
        # individual entries by passing an "error_map" keyword argument.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        # An explicit content_type kwarg wins over a Content-Type header.
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None)

        # No explicit body given: role and content become required keywords, and the
        # JSON payload is assembled from the keyword arguments (None values dropped).
        if body is _Unset:
            if role is _Unset:
                raise TypeError("missing required argument: role")
            if content is _Unset:
                raise TypeError("missing required argument: content")
            body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role}
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        # File-like/bytes bodies are sent as-is; JSON bodies are serialized here.
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        # Build the HTTP request, then expand the client endpoint into the URL template.
        _request = build_ai_assistant_create_message_request(
            thread_id=thread_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # When streaming, drain the body so the connection is released before raising.
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        # Streaming callers receive the raw byte iterator; otherwise deserialize the JSON payload.
        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.ThreadMessage, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def list_messages(
        self,
        thread_id: str,
        *,
        run_id: Optional[str] = None,
        limit: Optional[int] = None,
        order: Optional[Union[str, _models.ListSortOrder]] = None,
        after: Optional[str] = None,
        before: Optional[str] = None,
        **kwargs: Any
    ) -> _models.OpenAIPageableListOfThreadMessage:
        """Gets a list of messages that exist on a thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :keyword run_id: Filter messages by the run ID that generated them. Default value is None.
        :paramtype run_id: str
        :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and
         100, and the default is 20. Default value is None.
        :paramtype limit: int
        :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order
         and desc for descending order. Known values are: "asc" and "desc". Default value is None.
        :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder
        :keyword after: A cursor for use in pagination. after is an object ID that defines your place
         in the list. For instance, if you make a list request and receive 100 objects, ending with
         obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the
         list. Default value is None.
        :paramtype after: str
        :keyword before: A cursor for use in pagination. before is an object ID that defines your place
         in the list. For instance, if you make a list request and receive 100 objects, ending with
         obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of
         the list. Default value is None.
        :paramtype before: str
        :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible
         with MutableMapping
        :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # HTTP status -> azure-core exception mapping; callers may extend or override
        # individual entries by passing an "error_map" keyword argument.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None)

        # Build the HTTP request (filters/pagination become query parameters), then
        # expand the client endpoint into the URL template.
        _request = build_ai_assistant_list_messages_request(
            thread_id=thread_id,
            run_id=run_id,
            limit=limit,
            order=order,
            after=after,
            before=before,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # When streaming, drain the body so the connection is released before raising.
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        # Streaming callers receive the raw byte iterator; otherwise deserialize the JSON payload.
        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage:
        """Gets an existing message from an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # HTTP status -> azure-core exception mapping; callers may extend or override
        # individual entries by passing an "error_map" keyword argument.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None)

        # Build the HTTP request, then expand the client endpoint into the URL template.
        _request = build_ai_assistant_get_message_request(
            thread_id=thread_id,
            message_id=message_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # When streaming, drain the body so the connection is released before raising.
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        # Streaming callers receive the raw byte iterator; otherwise deserialize the JSON payload.
        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.ThreadMessage, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    def update_message(
        self,
        thread_id: str,
        message_id: str,
        *,
        content_type: str = "application/json",
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.ThreadMessage:
        """Modifies an existing message on an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def update_message(
        self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadMessage:
        """Modifies an existing message on an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def update_message(
        self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadMessage:
        """Modifies an existing message on an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def update_message(
        self,
        thread_id: str,
        message_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.ThreadMessage:
        """Modifies an existing message on an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # HTTP status -> azure-core exception mapping; callers may extend or override
        # individual entries by passing an "error_map" keyword argument.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        # An explicit content_type kwarg wins over a Content-Type header.
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None)

        # No explicit body given: assemble the JSON payload from the keyword
        # arguments, dropping keys whose value is None.
        if body is _Unset:
            body = {"metadata": metadata}
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        # File-like/bytes bodies are sent as-is; JSON bodies are serialized here.
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        # Build the HTTP request, then expand the client endpoint into the URL template.
        _request = build_ai_assistant_update_message_request(
            thread_id=thread_id,
            message_id=message_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # When streaming, drain the body so the connection is released before raising.
            if _stream:
                try:
                    response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        # Streaming callers receive the raw byte iterator; otherwise deserialize the JSON payload.
        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.ThreadMessage, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    def create_run(
        self,
        thread_id: str,
        *,
        agent_id: str,
        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
        content_type: str = "application/json",
        model: Optional[str] = None,
        instructions: Optional[str] = None,
        additional_instructions: Optional[str] = None,
        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
        tools: Optional[List[_models.ToolDefinition]] = None,
        stream_parameter: Optional[bool] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_prompt_tokens: Optional[int] = None,
        max_completion_tokens: Optional[int] = None,
        truncation_strategy: Optional[_models.TruncationObject] = None,
        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
        parallel_tool_calls: Optional[bool] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.ThreadRun:
        """Creates a new run for an agent thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :keyword agent_id: The ID of the agent that should run the thread. Required.
        :paramtype agent_id: str
        :keyword include: A list of additional fields to include in the response.
         Currently the only supported value is
         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
         content. Default value is None.
        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
+ :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. 
+ :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.assistants.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
+ :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + body: JSON, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + body: IO[bytes], + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + agent_id: str = _Unset, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword agent_id: The ID of the agent that should run the thread. Required. + :paramtype agent_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. 
+ :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. 
The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.assistants.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None.
+ :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if agent_id is _Unset: + raise TypeError("missing required argument: agent_id") + body = { + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "assistant_id": agent_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "parallel_tool_calls": parallel_tool_calls, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = 
build_ai_assistant_create_run_request( + thread_id=thread_id, + include=include, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_runs( + self, + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadRun: + """Gets a list of runs for a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". 
Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + 
_request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = 
self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. 
+ + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format.
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_update_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, 
StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if tool_outputs is _Unset: + raise TypeError("missing required argument: tool_outputs") + body = {"stream": stream_parameter, "tool_outputs": tool_outputs} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_submit_tool_outputs_to_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Cancels a run of an in progress thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_ai_assistant_cancel_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except 
(StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread_and_run( + self, + *, + agent_id: str, + content_type: str = "application/json", + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :keyword agent_id: The ID of the agent for which the thread should be created. Required. + :paramtype agent_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. 
+ :paramtype thread: ~azure.ai.assistants.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.assistants.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. 
+ :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread_and_run( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_thread_and_run( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + agent_id: str = _Unset, + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword agent_id: The ID of the agent for which the thread should be created. Required. + :paramtype agent_id: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.assistants.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. 
+ Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. 
If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.assistants.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if agent_id is _Unset: + raise TypeError("missing required argument: agent_id") + body = { + "assistant_id": agent_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "parallel_tool_calls": parallel_tool_calls, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "thread": thread, + "tool_choice": tool_choice, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_thread_and_run_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", 
False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run_step( + self, + thread_id: str, + run_id: str, + step_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + **kwargs: Any + ) -> _models.RunStep: + """Gets a single run step from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param step_id: Identifier of the run step. Required. + :type step_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :return: RunStep. 
The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + include=include, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = 
None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfRunStep. 
The OpenAIPageableListOfRunStep is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfRunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_run_steps_request( + thread_id=thread_id, + run_id=run_id, + include=include, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_files( + self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any + ) 
-> _models.FileListResponse: + """Gets a list of previously uploaded files. + + :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is + None. + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose + :return: FileListResponse. The FileListResponse is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.FileListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_files_request( + purpose=purpose, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileListResponse, response.json()) + + if cls: + return 
cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def upload_file(self, body: _models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Multipart body. Required. + :type body: ~azure.ai.assistants.models.UploadFileRequest + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Multipart body. Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Multipart body. Is either a UploadFileRequest type or a JSON type. Required. + :type body: ~azure.ai.assistants.models.UploadFileRequest or JSON + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + _body = body.as_dict() if isinstance(body, _model_base.Model) else body + _file_fields: List[str] = ["file"] + _data_fields: List[str] = ["purpose", "filename"] + _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) + + _request = build_ai_assistant_upload_file_request( + api_version=self._config.api_version, + files=_files, + data=_data, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_file(self, 
file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + """Delete a previously uploaded file. + + :param file_id: The ID of the file to delete. Required. + :type file_id: str + :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.FileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_ai_assistant_delete_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_file(self, file_id: str, 
**kwargs: Any) -> _models.OpenAIFile: + """Returns information about a specific file. Does not retrieve file content. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def _get_file_content(self, file_id: str, **kwargs: Any) 
-> Iterator[bytes]: + """Retrieves the raw content of a specific file. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: Iterator[bytes] + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_file_content_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", True) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_stores( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStore: + 
"""Returns a list of vector stores. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStore. 
The OpenAIPageableListOfVectorStore is compatible + with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_vector_stores_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + 
store_configuration: Optional[_models.VectorStoreConfiguration] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword store_configuration: The vector store configuration, used when vector store is created + from Azure asset URIs. Default value is None. + :paramtype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + store_configuration: Optional[_models.VectorStoreConfiguration] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: A list of file IDs that the vector store should use. 
Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword store_configuration: The vector store configuration, used when vector store is created + from Azure asset URIs. Default value is None. + :paramtype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "chunking_strategy": chunking_strategy, + "configuration": store_configuration, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_vector_store_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + """Returns the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, 
StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def modify_vector_store( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """The ID of the vector store to modify. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def modify_vector_store( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """The ID of the vector store to modify. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def modify_vector_store( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """The ID of the vector store to modify. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def modify_vector_store( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """The ID of the vector store to modify. + + :param vector_store_id: Identifier of the vector store. Required. 
+ :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"expires_after": expires_after, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_modify_vector_store_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + 
params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: + """Deletes the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStoreDeletionStatus. 
The VectorStoreDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + + _request = build_ai_assistant_delete_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_store_files( + self, + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: 
Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.assistants.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. 
The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_vector_store_files_request( + vector_store_id=vector_store_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file( + self, + vector_store_id: str, + *, + 
content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. 
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"chunking_strategy": chunking_strategy, "data_source": data_source, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + 
except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_ai_assistant_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + 
data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file_batch( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. 
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in 
memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: 
disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_ai_assistant_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: 
Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.assistants.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. 
The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_serialization.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_serialization.py new file mode 100644 index 000000000000..7a0232de5ddc --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_serialization.py @@ -0,0 +1,2050 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +# -------------------------------------------------------------------------- + +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + MutableMapping, + List, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore +from typing_extensions import Self + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + :return: The deserialized data. + :rtype: object + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. 
+ data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) from err + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError as err: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attemp(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attemp(data) + if success: + return json_result + # If i'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. + _LOGGER.critical("Wasn't XML not JSON, failing") + raise DeserializationError("XML is invalid") from err + elif content_type.startswith("text/"): + return data_as_str + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. + Headers will tested for "content-type" + + :param bytes body_bytes: The body of the response. 
+ :param dict headers: The headers of the response. + :returns: The deserialized data. + :rtype: object + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + +TZ_UTC = datetime.timezone.utc + +_FLATTEN = re.compile(r"(? None: + self.additional_properties: Optional[Dict[str, Any]] = {} + for k in kwargs: # pylint: disable=consider-using-dict-items + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are equal + :rtype: bool + """ + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. 
+ + :param object other: The object to compare + :returns: True if objects are not equal + :rtype: bool + """ + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node. + + :returns: The XML node + :rtype: xml.etree.ElementTree.Element + """ + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to server from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, keep_readonly=keep_readonly, **kwargs + ) + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. 
Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: # pylint: disable=broad-exception-caught + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. 
+ :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls, + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> Self: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. 
+ + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 

    :param str key: A key string from the generated code
    :returns: The decoded key
    :rtype: str
    """
    return key.replace("\\.", ".")


class Serializer:  # pylint: disable=too-many-public-methods
    """Request object model serializer."""

    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}

    # XML booleans must be lowercase "true"/"false", unlike Python's str(bool).
    _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()}
    days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
    months = {
        1: "Jan",
        2: "Feb",
        3: "Mar",
        4: "Apr",
        5: "May",
        6: "Jun",
        7: "Jul",
        8: "Aug",
        9: "Sep",
        10: "Oct",
        11: "Nov",
        12: "Dec",
    }
    # Constraint checkers: each returns True when the value VIOLATES the rule.
    validation = {
        "min_length": lambda x, y: len(x) < y,
        "max_length": lambda x, y: len(x) > y,
        "minimum": lambda x, y: x < y,
        "maximum": lambda x, y: x > y,
        "minimum_ex": lambda x, y: x <= y,
        "maximum_ex": lambda x, y: x >= y,
        "min_items": lambda x, y: len(x) < y,
        "max_items": lambda x, y: len(x) > y,
        "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
        "unique": lambda x, y: len(x) != len(set(x)),
        "multiple": lambda x, y: x % y != 0,
    }

    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
        # Dispatch table from _attribute_map type names to serializer callables;
        # "[]" and "{}" handle list/dict container types.
        self.serialize_type = {
            "iso-8601": Serializer.serialize_iso,
            "rfc-1123": Serializer.serialize_rfc,
            "unix-time": Serializer.serialize_unix,
            "duration": Serializer.serialize_duration,
            "date": Serializer.serialize_date,
            "time": Serializer.serialize_time,
            "decimal": Serializer.serialize_decimal,
            "long": Serializer.serialize_long,
            "bytearray": Serializer.serialize_bytearray,
            "base64": Serializer.serialize_base64,
            "object": self.serialize_object,
            "[]": self.serialize_iter,
            "{}": self.serialize_dict,
        }
        # Known model classes by name, used to resolve "internal" data types.
        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
        self.key_transformer = full_restapi_key_transformer
        self.client_side_validation = True

    def _serialize(  # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals
        self, target_obj, data_type=None, **kwargs
    ):
        """Serialize data into a string according to type.

        :param object target_obj: The data to be serialized.
        :param str data_type: The type to be serialized from.
        :rtype: str, dict
        :raises SerializationError: if serialization fails.
        :returns: The serialized data.
        """
        key_transformer = kwargs.get("key_transformer", self.key_transformer)
        keep_readonly = kwargs.get("keep_readonly", False)
        if target_obj is None:
            return None

        attr_name = None
        class_name = target_obj.__class__.__name__

        if data_type:
            return self.serialize_data(target_obj, data_type, **kwargs)

        if not hasattr(target_obj, "_attribute_map"):
            # Not a generated model: serialize as a basic type if possible.
            data_type = type(target_obj).__name__
            if data_type in self.basic_types.values():
                return self.serialize_data(target_obj, data_type, **kwargs)

        # Force "is_xml" kwargs if we detect a XML model
        try:
            is_xml_model_serialization = kwargs["is_xml"]
        except KeyError:
            is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model())

        serialized = {}
        if is_xml_model_serialization:
            serialized = target_obj._create_xml_node()  # pylint: disable=protected-access
        try:
            attributes = target_obj._attribute_map  # pylint: disable=protected-access
            for attr, attr_desc in attributes.items():
                attr_name = attr
                if not keep_readonly and target_obj._validation.get(  # pylint: disable=protected-access
                    attr_name, {}
                ).get("readonly", False):
                    continue

                if attr_name == "additional_properties" and attr_desc["key"] == "":
                    if target_obj.additional_properties is not None:
                        serialized.update(target_obj.additional_properties)
                    continue
                try:

                    orig_attr = getattr(target_obj, attr)
                    if is_xml_model_serialization:
                        pass  # Don't provide "transformer" for XML for now. Keep "orig_attr"
                    else:  # JSON
                        keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr)
                        keys = keys if isinstance(keys, list) else [keys]

                    kwargs["serialization_ctxt"] = attr_desc
                    new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs)

                    if is_xml_model_serialization:
                        xml_desc = attr_desc.get("xml", {})
                        xml_name = xml_desc.get("name", attr_desc["key"])
                        xml_prefix = xml_desc.get("prefix", None)
                        xml_ns = xml_desc.get("ns", None)
                        if xml_desc.get("attr", False):
                            if xml_ns:
                                ET.register_namespace(xml_prefix, xml_ns)
                                xml_name = "{{{}}}{}".format(xml_ns, xml_name)
                            serialized.set(xml_name, new_attr)  # type: ignore
                            continue
                        if xml_desc.get("text", False):
                            serialized.text = new_attr  # type: ignore
                            continue
                        if isinstance(new_attr, list):
                            serialized.extend(new_attr)  # type: ignore
                        elif isinstance(new_attr, ET.Element):
                            # If the down XML has no XML/Name,
                            # we MUST replace the tag with the local tag. But keeping the namespaces.
                            if "name" not in getattr(orig_attr, "_xml_map", {}):
                                splitted_tag = new_attr.tag.split("}")
                                if len(splitted_tag) == 2:  # Namespace
                                    new_attr.tag = "}".join([splitted_tag[0], xml_name])
                                else:
                                    new_attr.tag = xml_name
                            serialized.append(new_attr)  # type: ignore
                        else:  # That's a basic type
                            # Integrate namespace if necessary
                            local_node = _create_xml_node(xml_name, xml_prefix, xml_ns)
                            local_node.text = str(new_attr)
                            serialized.append(local_node)  # type: ignore
                    else:  # JSON
                        # Wrap the value into nested dicts following the flattened
                        # key path, then graft it onto the output dict.
                        for k in reversed(keys):  # type: ignore
                            new_attr = {k: new_attr}

                        _new_attr = new_attr
                        _serialized = serialized
                        for k in keys:  # type: ignore
                            if k not in _serialized:
                                _serialized.update(_new_attr)  # type: ignore
                            _new_attr = _new_attr[k]  # type: ignore
                            _serialized = _serialized[k]
                except ValueError as err:
                    if isinstance(err, SerializationError):
                        raise

        except (AttributeError, KeyError, TypeError) as err:
            msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
            raise SerializationError(msg) from err
        return serialized

    def body(self, data, data_type, **kwargs):
        """Serialize data intended for a request body.

        :param object data: The data to be serialized.
        :param str data_type: The type to be serialized from.
        :rtype: dict
        :raises SerializationError: if serialization fails.
        :raises ValueError: if data is None
        :returns: The serialized request body
        """

        # Just in case this is a dict
        internal_data_type_str = data_type.strip("[]{}")
        internal_data_type = self.dependencies.get(internal_data_type_str, None)
        try:
            is_xml_model_serialization = kwargs["is_xml"]
        except KeyError:
            if internal_data_type and issubclass(internal_data_type, Model):
                is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model())
            else:
                is_xml_model_serialization = False
        if internal_data_type and not isinstance(internal_data_type, Enum):
            try:
                # Round-trip the input through a Deserializer so callers may pass
                # either a model instance or a plain dict as the body.
                deserializer = Deserializer(self.dependencies)
                # Since it's on serialization, it's almost sure that format is not JSON REST
                # We're not able to deal with additional properties for now.
                deserializer.additional_properties_detection = False
                if is_xml_model_serialization:
                    deserializer.key_extractors = [  # type: ignore
                        attribute_key_case_insensitive_extractor,
                    ]
                else:
                    deserializer.key_extractors = [
                        rest_key_case_insensitive_extractor,
                        attribute_key_case_insensitive_extractor,
                        last_rest_key_case_insensitive_extractor,
                    ]
                data = deserializer._deserialize(data_type, data)  # pylint: disable=protected-access
            except DeserializationError as err:
                raise SerializationError("Unable to build a model: " + str(err)) from err

        return self._serialize(data, data_type, **kwargs)

    def url(self, name, data, data_type, **kwargs):
        """Serialize data intended for a URL path.

        :param str name: The name of the URL path parameter.
        :param object data: The data to be serialized.
        :param str data_type: The type to be serialized from.
        :rtype: str
        :returns: The serialized URL path
        :raises TypeError: if serialization fails.
        :raises ValueError: if data is None
        """
        try:
            output = self.serialize_data(data, data_type, **kwargs)
            if data_type == "bool":
                # json.dumps yields lowercase "true"/"false" as required on the wire.
                output = json.dumps(output)

            if kwargs.get("skip_quote") is True:
                # Caller opted out of percent-encoding; still escape braces so the
                # value cannot be mistaken for an unexpanded URL template parameter.
                output = str(output)
                output = output.replace("{", quote("{")).replace("}", quote("}"))
            else:
                output = quote(str(output), safe="")
        except SerializationError as exc:
            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
        return output

    def query(self, name, data, data_type, **kwargs):
        """Serialize data intended for a URL query.

        :param str name: The name of the query parameter.
        :param object data: The data to be serialized.
        :param str data_type: The type to be serialized from.
        :rtype: str, list
        :raises TypeError: if serialization fails.
        :raises ValueError: if data is None
        :returns: The serialized query parameter
        """
        try:
            # Treat the list aside, since we don't want to encode the div separator
            if data_type.startswith("["):
                internal_data_type = data_type[1:-1]
                do_quote = not kwargs.get("skip_quote", False)
                return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs)

            # Not a list, regular serialization
            output = self.serialize_data(data, data_type, **kwargs)
            if data_type == "bool":
                output = json.dumps(output)
            if kwargs.get("skip_quote") is True:
                output = str(output)
            else:
                output = quote(str(output), safe="")
        except SerializationError as exc:
            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
        return str(output)

    def header(self, name, data, data_type, **kwargs):
        """Serialize data intended for a request header.

        :param str name: The name of the header.
        :param object data: The data to be serialized.
        :param str data_type: The type to be serialized from.
        :rtype: str
        :raises TypeError: if serialization fails.
        :raises ValueError: if data is None
        :returns: The serialized header
        """
        try:
            if data_type in ["[str]"]:
                # None elements become empty strings in a header list.
                data = ["" if d is None else d for d in data]

            output = self.serialize_data(data, data_type, **kwargs)
            if data_type == "bool":
                output = json.dumps(output)
        except SerializationError as exc:
            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
        return str(output)

    def serialize_data(self, data, data_type, **kwargs):
        """Serialize generic data according to supplied data type.

        :param object data: The data to be serialized.
        :param str data_type: The type to be serialized from.
        :raises AttributeError: if required data is None.
        :raises ValueError: if data is None
        :raises SerializationError: if serialization fails.
        :returns: The serialized data.
        :rtype: str, int, float, bool, dict, list
        """
        if data is None:
            raise ValueError("No value for given attribute")

        try:
            if data is CoreNull:
                return None
            if data_type in self.basic_types.values():
                return self.serialize_basic(data, data_type, **kwargs)

            if data_type in self.serialize_type:
                return self.serialize_type[data_type](data, **kwargs)

            # If dependencies is empty, try with current data class
            # It has to be a subclass of Enum anyway
            enum_type = self.dependencies.get(data_type, data.__class__)
            if issubclass(enum_type, Enum):
                return Serializer.serialize_enum(data, enum_obj=enum_type)

            # Container types: "[type]" -> "[]", "{type}" -> "{}" dispatch.
            iter_type = data_type[0] + data_type[-1]
            if iter_type in self.serialize_type:
                return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)

        except (ValueError, TypeError) as err:
            msg = "Unable to serialize value: {!r} as type: {!r}."
            raise SerializationError(msg.format(data, data_type)) from err
        return self._serialize(data, **kwargs)

    @classmethod
    def _get_custom_serializers(cls, data_type, **kwargs):  # pylint: disable=inconsistent-return-statements
        # Returns a caller-supplied serializer for this basic type, or the XML
        # override when serializing XML; None (implicitly) otherwise.
        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
        if custom_serializer:
            return custom_serializer
        if kwargs.get("is_xml", False):
            return cls._xml_basic_types_serializers.get(data_type)

    @classmethod
    def serialize_basic(cls, data, data_type, **kwargs):
        """Serialize basic builting data type.
        Serializes objects to str, int, float or bool.

        Possible kwargs:
        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
        - is_xml bool : If set, use xml_basic_types_serializers

        :param obj data: Object to be serialized.
        :param str data_type: Type of object in the iterable.
        :rtype: str, int, float, bool
        :return: serialized object
        """
        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
        if custom_serializer:
            return custom_serializer(data)
        if data_type == "str":
            return cls.serialize_unicode(data)
        # NOTE(review): internal callers only reach this with data_type drawn from
        # basic_types.values() ("int"/"float"/"bool"; "str" is handled above), so
        # the eval resolves a builtin — hence the nosec. A public caller could
        # still pass an arbitrary string here; confirm this stays internal-only.
        return eval(data_type)(data)  # nosec # pylint: disable=eval-used

    @classmethod
    def serialize_unicode(cls, data):
        """Special handling for serializing unicode strings in Py2.
        Encode to UTF-8 if unicode, otherwise handle as a str.

        :param str data: Object to be serialized.
        :rtype: str
        :return: serialized object
        """
        try:  # If I received an enum, return its value
            return data.value
        except AttributeError:
            pass

        try:
            # "unicode" only exists on Python 2; on Python 3 the NameError
            # branch below is taken.
            if isinstance(data, unicode):  # type: ignore
                # Don't change it, JSON and XML ElementTree are totally able
                # to serialize correctly u'' strings
                return data
        except NameError:
            return str(data)
        return str(data)

    def serialize_iter(self, data, iter_type, div=None, **kwargs):
        """Serialize iterable.

        Supported kwargs:
        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
          serialization_ctxt['type'] should be same as data_type.
        - is_xml bool : If set, serialize as XML

        :param list data: Object to be serialized.
        :param str iter_type: Type of object in the iterable.
        :param str div: If set, this str will be used to combine the elements
         in the iterable into a combined string. Default is 'None'.
         Defaults to False.
        :rtype: list, str
        :return: serialized iterable
        """
        if isinstance(data, str):
            # A str is iterable, but iterating it char-by-char is never intended.
            raise SerializationError("Refuse str type as a valid iter type.")

        serialization_ctxt = kwargs.get("serialization_ctxt", {})
        is_xml = kwargs.get("is_xml", False)

        serialized = []
        for d in data:
            try:
                serialized.append(self.serialize_data(d, iter_type, **kwargs))
            except ValueError as err:
                if isinstance(err, SerializationError):
                    raise
                # Non-serializable element becomes None rather than failing the list.
                serialized.append(None)

        if kwargs.get("do_quote", False):
            serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]

        if div:
            serialized = ["" if s is None else str(s) for s in serialized]
            serialized = div.join(serialized)

        if "xml" in serialization_ctxt or is_xml:
            # XML serialization is more complicated
            xml_desc = serialization_ctxt.get("xml", {})
            xml_name = xml_desc.get("name")
            if not xml_name:
                xml_name = serialization_ctxt["key"]

            # Create a wrap node if necessary (use the fact that Element and list have "append")
            is_wrapped = xml_desc.get("wrapped", False)
            node_name = xml_desc.get("itemsName", xml_name)
            if is_wrapped:
                final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
            else:
                final_result = []
            # All list elements to "local_node"
            for el in serialized:
                if isinstance(el, ET.Element):
                    el_node = el
                else:
                    el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
                    if el is not None:  # Otherwise it writes "None" :-p
                        el_node.text = str(el)
                final_result.append(el_node)
            return final_result
        return serialized

    def serialize_dict(self, attr, dict_type, **kwargs):
        """Serialize a dictionary of objects.

        :param dict attr: Object to be serialized.
        :param str dict_type: Type of object in the dictionary.
        :rtype: dict
        :return: serialized dictionary
        """
        serialization_ctxt = kwargs.get("serialization_ctxt", {})
        serialized = {}
        for key, value in attr.items():
            try:
                serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
            except ValueError as err:
                if isinstance(err, SerializationError):
                    raise
                serialized[self.serialize_unicode(key)] = None

        if "xml" in serialization_ctxt:
            # XML serialization is more complicated
            xml_desc = serialization_ctxt["xml"]
            xml_name = xml_desc["name"]

            final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
            for key, value in serialized.items():
                ET.SubElement(final_result, key).text = value
            return final_result

        return serialized

    def serialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
        """Serialize a generic object.
        This will be handled as a dictionary. If object passed in is not
        a basic type (str, int, float, dict, list) it will simply be
        cast to str.

        :param dict attr: Object to be serialized.
        :rtype: dict or str
        :return: serialized object
        """
        if attr is None:
            return None
        if isinstance(attr, ET.Element):
            # Already-built XML nodes pass through untouched.
            return attr
        obj_type = type(attr)
        if obj_type in self.basic_types:
            return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
        if obj_type is _long_type:
            return self.serialize_long(attr)
        if obj_type is str:
            return self.serialize_unicode(attr)
        if obj_type is datetime.datetime:
            return self.serialize_iso(attr)
        if obj_type is datetime.date:
            return self.serialize_date(attr)
        if obj_type is datetime.time:
            return self.serialize_time(attr)
        if obj_type is datetime.timedelta:
            return self.serialize_duration(attr)
        if obj_type is decimal.Decimal:
            return self.serialize_decimal(attr)

        # If it's a model or I know this dependency, serialize as a Model
        if obj_type in self.dependencies.values() or isinstance(attr, Model):
            return self._serialize(attr)

        if obj_type == dict:
            serialized = {}
            for key, value in attr.items():
                try:
                    serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs)
                except ValueError:
                    serialized[self.serialize_unicode(key)] = None
            return serialized

        if obj_type == list:
            serialized = []
            for obj in attr:
                try:
                    serialized.append(self.serialize_object(obj, **kwargs))
                except ValueError:
                    # Unlike the dict case, unserializable list items are dropped.
                    pass
            return serialized
        # Last resort: stringify unknown types.
        return str(attr)

    @staticmethod
    def serialize_enum(attr, enum_obj=None):
        # Accept either an Enum member (use its .value) or a raw value; validate
        # against enum_obj, falling back to a case-insensitive match on values.
        try:
            result = attr.value
        except AttributeError:
            result = attr
        try:
            enum_obj(result)  # type: ignore
            return result
        except ValueError as exc:
            for enum_value in enum_obj:  # type: ignore
                if enum_value.value.lower() == str(attr).lower():
                    return enum_value.value
            error = "{!r} is not valid value for enum {!r}"
            raise SerializationError(error.format(attr, enum_obj)) from exc

    @staticmethod
    def serialize_bytearray(attr, **kwargs):  # pylint: disable=unused-argument
        """Serialize bytearray into base-64 string.
+ + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument + """Serialize str into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Decimal object to float. + + :param decimal attr: Object to be serialized. + :rtype: float + :return: serialized decimal + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument + """Serialize long (Py2) or int (Py3). + + :param int attr: Object to be serialized. + :rtype: int/long + :return: serialized long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + :return: serialized date + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + :return: serialized time + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument + """Serialize TimeDelta object into ISO-8601 formatted string. 

        :param TimeDelta attr: Object to be serialized.
        :rtype: str
        :return: serialized duration
        """
        if isinstance(attr, str):
            attr = isodate.parse_duration(attr)
        return isodate.duration_isoformat(attr)

    @staticmethod
    def serialize_rfc(attr, **kwargs):  # pylint: disable=unused-argument
        """Serialize Datetime object into RFC-1123 formatted string.

        :param Datetime attr: Object to be serialized.
        :rtype: str
        :raises TypeError: if format invalid.
        :return: serialized rfc
        """
        try:
            if not attr.tzinfo:
                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
            utc = attr.utctimetuple()
        except AttributeError as exc:
            raise TypeError("RFC1123 object must be valid Datetime object.") from exc

        # Day/month names come from the class tables, keeping the output
        # locale-independent (strftime would follow the process locale).
        return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
            Serializer.days[utc.tm_wday],
            utc.tm_mday,
            Serializer.months[utc.tm_mon],
            utc.tm_year,
            utc.tm_hour,
            utc.tm_min,
            utc.tm_sec,
        )

    @staticmethod
    def serialize_iso(attr, **kwargs):  # pylint: disable=unused-argument
        """Serialize Datetime object into ISO-8601 formatted string.

        :param Datetime attr: Object to be serialized.
        :rtype: str
        :raises SerializationError: if format invalid.
        :return: serialized iso
        """
        if isinstance(attr, str):
            attr = isodate.parse_datetime(attr)
        try:
            if not attr.tzinfo:
                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
            utc = attr.utctimetuple()
            if utc.tm_year > 9999 or utc.tm_year < 1:
                raise OverflowError("Hit max or min date")

            # Pad to 6 digits, drop trailing zeros, keep at least millisecond
            # precision (3 digits) when a fraction is present.
            microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
            if microseconds:
                microseconds = "." + microseconds
            date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
                utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
            )
            return date + microseconds + "Z"
        except (ValueError, OverflowError) as err:
            msg = "Unable to serialize datetime object."
            raise SerializationError(msg) from err
        except AttributeError as err:
            msg = "ISO-8601 object must be valid Datetime object."
            raise TypeError(msg) from err

    @staticmethod
    def serialize_unix(attr, **kwargs):  # pylint: disable=unused-argument
        """Serialize Datetime object into IntTime format.
        This is represented as seconds.

        :param Datetime attr: Object to be serialized.
        :rtype: int
        :raises SerializationError: if format invalid
        :return: serialied unix
        """
        if isinstance(attr, int):
            return attr
        try:
            if not attr.tzinfo:
                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
            return int(calendar.timegm(attr.utctimetuple()))
        except AttributeError as exc:
            raise TypeError("Unix time object must be valid Datetime object.") from exc


def rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
    # Follow the (possibly dot-flattened) REST key path into nested dicts and
    # return the value found there, or None if any intermediate level is None.
    key = attr_desc["key"]
    working_data = data

    while "." in key:
        # Need the cast, as for some reasons "split" is typed as list[str | Any]
        dict_keys = cast(List[str], _FLATTEN.split(key))
        if len(dict_keys) == 1:
            key = _decode_attribute_map_key(dict_keys[0])
            break
        working_key = _decode_attribute_map_key(dict_keys[0])
        working_data = working_data.get(working_key, data)
        if working_data is None:
            # If at any point while following flatten JSON path see None, it means
            # that all properties under are None as well
            return None
        key = ".".join(dict_keys[1:])

    return working_data.get(key)


def rest_key_case_insensitive_extractor(  # pylint: disable=unused-argument, inconsistent-return-statements
    attr, attr_desc, data
):
    # Case-insensitive variant of rest_key_extractor: each path segment is
    # matched ignoring case. Returns None implicitly when nothing matches.
    key = attr_desc["key"]
    working_data = data

    while "." in key:
        dict_keys = _FLATTEN.split(key)
        if len(dict_keys) == 1:
            key = _decode_attribute_map_key(dict_keys[0])
            break
        working_key = _decode_attribute_map_key(dict_keys[0])
        working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
        if working_data is None:
            # If at any point while following flatten JSON path see None, it means
            # that all properties under are None as well
            return None
        key = ".".join(dict_keys[1:])

    if working_data:
        return attribute_key_case_insensitive_extractor(key, None, working_data)


def last_rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
    """Extract the attribute in "data" based on the last part of the JSON path key.

    :param str attr: The attribute to extract
    :param dict attr_desc: The attribute description
    :param dict data: The data to extract from
    :rtype: object
    :returns: The extracted attribute
    """
    key = attr_desc["key"]
    dict_keys = _FLATTEN.split(key)
    return attribute_key_extractor(dict_keys[-1], None, data)


def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
    """Extract the attribute in "data" based on the last part of the JSON path key.

    This is the case insensitive version of "last_rest_key_extractor"
    :param str attr: The attribute to extract
    :param dict attr_desc: The attribute description
    :param dict data: The data to extract from
    :rtype: object
    :returns: The extracted attribute
    """
    key = attr_desc["key"]
    dict_keys = _FLATTEN.split(key)
    return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data)


def attribute_key_extractor(attr, _, data):
    # Plain dict lookup by Python attribute name.
    return data.get(attr)


def attribute_key_case_insensitive_extractor(attr, _, data):
    # Case-insensitive dict lookup; first key that matches wins. If no key
    # matches, found_key stays None and data.get(None) yields None.
    found_key = None
    lower_attr = attr.lower()
    for key in data:
        if lower_attr == key.lower():
            found_key = key
            break

    return data.get(found_key)


def _extract_name_from_internal_type(internal_type):
    """Given an internal type XML description, extract correct XML name with namespace.

    :param dict internal_type: An model type
    :rtype: tuple
    :returns: A tuple XML name + namespace dict
    """
    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
    xml_name = internal_type_xml_map.get("name", internal_type.__name__)
    xml_ns = internal_type_xml_map.get("ns", None)
    if xml_ns:
        # Clark notation: "{namespace}localname", as used by ElementTree.
        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
    return xml_name


def xml_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument,too-many-return-statements
    # Extract an attribute value from an ElementTree node; returns None for
    # non-XML input so the next extractor in the chain can try.
    if isinstance(data, dict):
        return None

    # Test if this model is XML ready first
    if not isinstance(data, ET.Element):
        return None

    xml_desc = attr_desc.get("xml", {})
    xml_name = xml_desc.get("name", attr_desc["key"])

    # Look for a children
    is_iter_type = attr_desc["type"].startswith("[")
    is_wrapped = xml_desc.get("wrapped", False)
    internal_type = attr_desc.get("internalType", None)
    internal_type_xml_map = getattr(internal_type, "_xml_map", {})

    # Integrate namespace if necessary
    xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
    if xml_ns:
        xml_name = "{{{}}}{}".format(xml_ns, xml_name)

    # If it's an attribute, that's simple
    if xml_desc.get("attr", False):
        return data.get(xml_name)

    # If it's x-ms-text, that's simple too
    if xml_desc.get("text", False):
        return data.text

    # Scenario where I take the local name:
    # - Wrapped node
    # - Internal type is an enum (considered basic types)
    # - Internal type has no XML/Name node
    if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
        children = data.findall(xml_name)
    # If internal type has a local name and it's not a list, I use that name
    elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
        xml_name = _extract_name_from_internal_type(internal_type)
        children = data.findall(xml_name)
    # That's an array
    else:
        if internal_type:  # Complex type, ignore itemsName and use the complex type name
            items_name = _extract_name_from_internal_type(internal_type)
        else:
            items_name = xml_desc.get("itemsName", xml_name)
        children = data.findall(items_name)

    if len(children) == 0:
        if is_iter_type:
            if is_wrapped:
                return None  # is_wrapped no node, we want None
            return []  # not wrapped, assume empty list
        return None  # Assume it's not there, maybe an optional node.

    # If is_iter_type and not wrapped, return all found children
    if is_iter_type:
        if not is_wrapped:
            return children
        # Iter and wrapped, should have found one node only (the wrap one)
        if len(children) != 1:
            raise DeserializationError(
                "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(
                    xml_name
                )
            )
        return list(children[0])  # Might be empty list and that's ok.

    # Here it's not a itertype, we should have found one element only or empty
    if len(children) > 1:
        raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name))
    return children[0]


class Deserializer:
    """Response object model deserializer.
# --- Deserializer core methods (flattened from the generated _serialization.py;
# each function below is a method of the ``Deserializer`` class) ---

def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
    """Build the deserializer dispatch tables.

    :param classes: Optional mapping of model-class name to model type, used to
        resolve named (non-basic) target types during deserialization.
    """
    # Dispatch table: wire-format type name -> deserializer callable.
    self.deserialize_type = {
        "iso-8601": Deserializer.deserialize_iso,
        "rfc-1123": Deserializer.deserialize_rfc,
        "unix-time": Deserializer.deserialize_unix,
        "duration": Deserializer.deserialize_duration,
        "date": Deserializer.deserialize_date,
        "time": Deserializer.deserialize_time,
        "decimal": Deserializer.deserialize_decimal,
        "long": Deserializer.deserialize_long,
        "bytearray": Deserializer.deserialize_bytearray,
        "base64": Deserializer.deserialize_base64,
        "object": self.deserialize_object,
        "[]": self.deserialize_iter,
        "{}": self.deserialize_dict,
    }
    # Values of these types may arrive already deserialized and pass through as-is.
    self.deserialize_expected_types = {
        "duration": (isodate.Duration, datetime.timedelta),
        "iso-8601": (datetime.datetime),
    }
    self.dependencies: Dict[str, type] = dict(classes) if classes else {}
    self.key_extractors = [rest_key_extractor, xml_key_extractor]
    # Additional-properties detection only works when "rest_key_extractor" is
    # used to extract keys. Supporting it for arbitrary extractors is too
    # complicated with no real scenario for now, so this flag disables the
    # detection when the payload is NOT JSON/REST shaped; otherwise results
    # are unexpected.
    self.additional_properties_detection = True

def __call__(self, target_obj, response_data, content_type=None):
    """Deserialize a REST response into ``target_obj``.

    :param str target_obj: Target data type to deserialize to.
    :param requests.Response response_data: REST response object.
    :param str content_type: Swagger "produces" value, if available.
    :raises DeserializationError: if deserialization fails.
    :return: Deserialized object.
    :rtype: object
    """
    unpacked = self._unpack_content(response_data, content_type)
    return self._deserialize(target_obj, unpacked)

def _deserialize(self, target_obj, data):  # pylint: disable=inconsistent-return-statements
    """Deserialize ``data`` (already parsed JSON or an XML ElementTree) into a model.

    :param str target_obj: Target data type to deserialize to.
    :param object data: Object to deserialize.
    :raises DeserializationError: if deserialization fails.
    :return: Deserialized object.
    :rtype: object
    """
    # Already a model instance: recurse into its attributes in case some of
    # them still hold raw payload fragments.
    if hasattr(data, "_attribute_map"):
        constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")]
        try:
            for attr, attr_conf in data._attribute_map.items():  # pylint: disable=protected-access
                if attr in constants:
                    continue
                value = getattr(data, attr)
                if value is None:
                    continue
                local_type = attr_conf["type"]
                inner_type = local_type.strip("[]{}")
                if inner_type not in self.dependencies or isinstance(inner_type, Enum):
                    continue
                setattr(data, attr, self._deserialize(local_type, value))
            return data
        except AttributeError:
            return

    response, class_name = self._classify_target(target_obj, data)

    if isinstance(response, str):
        return self.deserialize_data(data, response)
    if isinstance(response, type) and issubclass(response, Enum):
        return self.deserialize_enum(data, response)

    if data is None or data is CoreNull:
        return data
    try:
        attributes = response._attribute_map  # type: ignore # pylint: disable=protected-access
        d_attrs = {}
        for attr, attr_desc in attributes.items():
            # An empty "key" marks the additional-properties slot; skip it here.
            if attr == "additional_properties" and attr_desc["key"] == "":
                continue
            raw_value = None
            attr_desc = attr_desc.copy()  # Never mutate the shared attribute map.
            inner_type = attr_desc["type"].strip("[]{}")
            if inner_type in self.dependencies:
                attr_desc["internalType"] = self.dependencies[inner_type]

            # First extractor wins; later conflicting values are logged and dropped.
            for key_extractor in self.key_extractors:
                found_value = key_extractor(attr, attr_desc, data)
                if found_value is not None:
                    if raw_value is not None and raw_value != found_value:
                        msg = (
                            "Ignoring extracted value '%s' from %s for key '%s'"
                            " (duplicate extraction, follow extractors order)"
                        )
                        _LOGGER.warning(msg, found_value, key_extractor, attr)
                        continue
                    raw_value = found_value

            d_attrs[attr] = self.deserialize_data(raw_value, attr_desc["type"])
    except (AttributeError, TypeError, KeyError) as err:
        msg = "Unable to deserialize to object: " + class_name  # type: ignore
        raise DeserializationError(msg) from err
    additional_properties = self._build_additional_properties(attributes, data)
    return self._instantiate_model(response, d_attrs, additional_properties)

def _build_additional_properties(self, attribute_map, data):
    """Collect payload keys that are not declared in ``attribute_map``.

    Returns None when detection is disabled or a real "additionalProperties"
    attribute exists; otherwise a dict of the undeclared key/value pairs.
    """
    if not self.additional_properties_detection:
        return None
    if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "":
        # A non-empty key means someone modeled a genuine "additionalProperties".
        return None
    if isinstance(data, ET.Element):
        data = {el.tag: el.text for el in data}

    known_keys = {
        _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0])
        for desc in attribute_map.values()
        if desc["key"] != ""
    }
    extra_keys = set(data.keys()) - known_keys
    return {key: data[key] for key in extra_keys}

def _classify_target(self, target, data):
    """Resolve ``target`` to a concrete class, following polymorphic discriminators.

    :param str target: The target object type to deserialize to.
    :param str/dict data: The response data to deserialize.
    :return: The classified target object and its class name.
    :rtype: tuple
    """
    if target is None:
        return None, None

    if isinstance(target, str):
        try:
            target = self.dependencies[target]
        except KeyError:
            # Unknown model name: hand it back unchanged.
            return target, target

    try:
        target = target._classify(data, self.dependencies)  # type: ignore # pylint: disable=protected-access
    except AttributeError:
        pass  # Not a Model subclass: nothing to classify.
    return target, target.__class__.__name__  # type: ignore

def failsafe_deserialize(self, target_obj, data, content_type=None):
    """Deserialize, swallowing every error and returning None on failure.

    Recommended for error deserialization, so users get the HttpResponseError
    rather than a secondary deserialization error.

    :param str target_obj: The target object type to deserialize to.
    :param str/dict data: The response data to deserialize.
    :param str content_type: Swagger "produces" if available.
    :return: Deserialized object, or None if deserialization failed.
    :rtype: object
    """
    try:
        return self(target_obj, data, content_type=content_type)
    except:  # pylint: disable=bare-except
        _LOGGER.debug(
            "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
        )
    return None
@staticmethod
def _unpack_content(raw_data, content_type=None):
    """Extract the deserializable payload from ``raw_data``.

    If raw_data looks like a PipelineResponse, the RawDeserializer result is
    pulled from its context (and it is an error if that policy did not run).
    HTTP-response look-alikes are decoded from their text/headers. Plain
    str/bytes/streams are decoded via content_type (JSON by default).
    Anything else is returned untouched.

    :param obj raw_data: Data to be processed.
    :param str content_type: How to parse if raw_data is a string/bytes.
    :raises JSONDecodeError: If JSON is requested and parsing is impossible.
    :raises UnicodeDecodeError: If bytes is not UTF8
    :rtype: object
    :return: Unpacked content.
    """
    # Duck-typed PipelineResponse detection (avoids importing it).
    context = getattr(raw_data, "context", {})
    if context:
        if RawDeserializer.CONTEXT_NAME in context:
            return context[RawDeserializer.CONTEXT_NAME]
        raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")

    # Duck-typed universal_http.ClientResponse (has a ``body`` attribute).
    if hasattr(raw_data, "body"):
        return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)

    # Duck-typed requests.Response (has ``_content_consumed``).
    if hasattr(raw_data, "_content_consumed"):
        return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)

    if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
        return RawDeserializer.deserialize_from_text(raw_data, content_type)  # type: ignore
    return raw_data

def _instantiate_model(self, response, attrs, additional_properties=None):
    """Instantiate a response model from deserialized attributes.

    :param Response response: The response model class (or an existing instance).
    :param dict attrs: The deserialized response attributes.
    :param dict additional_properties: Additional properties to be set.
    :rtype: Response
    :return: The instantiated response model.
    """
    if callable(response):
        subtype = getattr(response, "_subtype_map", {})
        try:
            validation = response._validation  # pylint: disable=protected-access # type: ignore
            readonly = [k for k, v in validation.items() if v.get("readonly")]
            const = [k for k, v in validation.items() if v.get("constant")]
            # Readonly/constant/discriminator attributes cannot be passed to __init__.
            kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
            response_obj = response(**kwargs)
            for attr in readonly:
                setattr(response_obj, attr, attrs.get(attr))
            if additional_properties:
                response_obj.additional_properties = additional_properties  # type: ignore
            return response_obj
        except TypeError as err:
            msg = "Unable to deserialize {} into model {}. ".format(kwargs, response)  # type: ignore
            raise DeserializationError(msg + str(err)) from err
    else:
        # Pre-built instance: just populate its attributes.
        try:
            for attr, value in attrs.items():
                setattr(response, attr, value)
            return response
        except Exception as exp:
            msg = "Unable to populate response model. "
            msg += "Type: {}, Error: {}".format(type(response), exp)
            raise DeserializationError(msg) from exp

def deserialize_data(self, data, data_type):  # pylint: disable=too-many-return-statements
    """Dispatch ``data`` to the right deserializer for ``data_type``.

    :param str data: The response string to be deserialized.
    :param str data_type: The type to deserialize to.
    :raises DeserializationError: if deserialization fails.
    :return: Deserialized object.
    :rtype: object
    """
    if data is None:
        return data

    try:
        if not data_type:
            return data
        if data_type in self.basic_types.values():
            return self.deserialize_basic(data, data_type)
        if data_type in self.deserialize_type:
            # Pass through values that are already in their final Python type.
            if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
                return data

            is_a_text_parsing_type = lambda x: x not in [  # pylint: disable=unnecessary-lambda-assignment
                "object",
                "[]",
                r"{}",
            ]
            # Empty XML text node with a scalar target type means "no value".
            if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
                return None
            return self.deserialize_type[data_type](data)

        # "[type]" / "{type}" container syntax: first+last char select the handler.
        iter_type = data_type[0] + data_type[-1]
        if iter_type in self.deserialize_type:
            return self.deserialize_type[iter_type](data, data_type[1:-1])

        obj_type = self.dependencies[data_type]
        if issubclass(obj_type, Enum):
            if isinstance(data, ET.Element):
                data = data.text
            return self.deserialize_enum(data, obj_type)

    except (ValueError, TypeError, AttributeError) as err:
        msg = "Unable to deserialize response data."
        msg += " Data: {}, {}".format(data, data_type)
        raise DeserializationError(msg) from err
    # Named model type: recurse into full model deserialization.
    return self._deserialize(obj_type, data)

def deserialize_iter(self, attr, iter_type):
    """Deserialize an iterable.

    :param list attr: Iterable to be deserialized.
    :param str iter_type: The type of object in the iterable.
    :return: Deserialized iterable.
    :rtype: list
    """
    if attr is None:
        return None
    if isinstance(attr, ET.Element):
        # XML: the children of the node are the sequence items.
        attr = list(attr)
    if not isinstance(attr, (list, set)):
        raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr)))
    return [self.deserialize_data(item, iter_type) for item in attr]
+ :param str dict_type: The object type of the items in the dictionary. + :return: Deserialized dictionary. + :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :return: Deserialized object. + :rtype: dict + :raises TypeError: if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. 
+ + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :return: Deserialized basic type. + :rtype: str, int, float or bool + :raises TypeError: if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + if isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + if attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec # pylint: disable=eval-used + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :return: Deserialized string. + :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :return: Deserialized enum object. 
+ :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. + try: + return list(enum_obj.__members__.values())[data] + except IndexError as exc: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) from exc + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :return: Deserialized bytearray + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :return: Deserialized base64 string + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :return: Deserialized decimal + :raises DeserializationError: if string format invalid. 
+ :rtype: decimal + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :return: Deserialized int + :rtype: long or int + :raises ValueError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :return: Deserialized duration + :rtype: TimeDelta + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :return: Deserialized date + :rtype: Date + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=0, defaultday=0) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. 
+ :return: Deserialized time + :rtype: datetime.time + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized RFC datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized ISO datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. 
+ """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() # type: ignore + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param int attr: Object to be serialized. + :return: Deserialized datetime + :rtype: Datetime + :raises DeserializationError: if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) # type: ignore + try: + attr = int(attr) + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." + raise DeserializationError(msg) from err + return date_obj diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py new file mode 100644 index 000000000000..1c059e5809cc --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from typing import TYPE_CHECKING, Union

if TYPE_CHECKING:
    # Import models only for type checking to avoid a circular import at runtime.
    from . import models as _models

# NOTE: the generator emitted a duplicated ``str`` member in two of these
# unions; it is removed here. ``Union`` collapses duplicates, so the resulting
# types are identical and fully backward-compatible.

# Accepted forms for an agent's API response-format setting.
AgentsApiResponseFormatOption = Union[
    str,
    "_models.AgentsApiResponseFormatMode",
    "_models.AgentsApiResponseFormat",
    "_models.ResponseFormatJsonSchemaType",
]

# Tool definitions that may back a message attachment.
MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"]

# Accepted forms for an agent's tool-choice setting.
AgentsApiToolChoiceOption = Union[str, "_models.AgentsApiToolChoiceOptionMode", "_models.AgentsNamedToolChoice"]
class AIAssistantClientMixinABC(ABC):
    """DO NOT use this class. It is for internal typing use only."""

    # Annotations are quoted: this is a typing-only stub and must not require
    # the referenced types at class-creation time.
    _client: "PipelineClient"
    _config: "AIAssistantClientConfiguration"
    _serialize: "Serializer"
    _deserialize: "Deserializer"


# Raw file payload: a string/bytes value or a text/binary stream.
FileContent = Union[str, bytes, IO[str], IO[bytes]]

# Multipart file entry: bare content, ``(filename, content)`` or
# ``(filename, content, content_type)``.
FileType = Union[
    # file (or bytes)
    FileContent,
    # (filename, file (or bytes))
    Tuple[Optional[str], FileContent],
    # (filename, file (or bytes), content_type)
    Tuple[Optional[str], FileContent, Optional[str]],
]


def serialize_multipart_data_entry(data_entry: Any) -> Any:
    """JSON-encode structured values (lists, dicts, models); pass scalars through."""
    if isinstance(data_entry, (list, tuple, dict, Model)):
        return json.dumps(data_entry, cls=SdkJSONEncoder, exclude_readonly=True)
    return data_entry


def prepare_multipart_form_data(
    body: Mapping[str, Any], multipart_fields: List[str], data_fields: List[str]
) -> Tuple[List[FileType], Dict[str, Any]]:
    """Split ``body`` into multipart file entries and plain form-data fields.

    :param body: The request body keyed by field name.
    :param multipart_fields: Field names to emit as multipart file entries.
    :param data_fields: Field names to emit as (JSON-serialized) form data.
    :return: The ``(files, data)`` pair expected by the transport layer.
    """
    files: List[FileType] = []
    data: Dict[str, Any] = {}

    for field in multipart_fields:
        entry = body.get(field)
        if isinstance(entry, list):
            # One file part per list element, all under the same field name.
            files.extend((field, item) for item in entry)
        elif entry:
            files.append((field, entry))

    for field in data_fields:
        entry = body.get(field)
        if entry:
            data[field] = serialize_multipart_data_entry(entry)

    return files, data
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py new file mode 100644 index 000000000000..6219b129c895 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import AIAssistantClient # type: ignore + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AIAssistantClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py new file mode 100644 index 000000000000..5ec076d2444f --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py @@ -0,0 +1,105 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
class AIAssistantClient(AIAssistantClientOperationsMixin):
    """Async client for the AI Assistant service.

    :param endpoint: Project endpoint in the form of:
     https://.services.ai.azure.com/api/projects/. Required.
    :type endpoint: str
    :param credential: Credential used to authenticate requests to the service. Is either a key
     credential type or a token credential type. Required.
    :type credential: ~azure.core.credentials.AzureKeyCredential or
     ~azure.core.credentials_async.AsyncTokenCredential
    :keyword api_version: The API version to use for this operation. Default value is "latest".
     Note that overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any
    ) -> None:
        _endpoint = "{endpoint}"
        self._config = AIAssistantClientConfiguration(endpoint=endpoint, credential=credential, **kwargs)

        # Callers may inject a fully custom policy chain; otherwise build the
        # standard one (order matters: auth runs after redirect/retry).
        _policies = kwargs.pop("policies", None)
        if _policies is None:
            _policies = [
                policies.RequestIdPolicy(**kwargs),
                self._config.headers_policy,
                self._config.user_agent_policy,
                self._config.proxy_policy,
                policies.ContentDecodePolicy(**kwargs),
                self._config.redirect_policy,
                self._config.retry_policy,
                self._config.authentication_policy,
                self._config.custom_hook_policy,
                self._config.logging_policy,
                policies.DistributedTracingPolicy(**kwargs),
                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
                self._config.http_logging_policy,
            ]
        self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs)

        self._serialize = Serializer()
        self._deserialize = Deserializer()
        # Validation is handled server-side for this service.
        self._serialize.client_side_validation = False

    def send_request(
        self, request: HttpRequest, *, stream: bool = False, **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        
        >>> response = await client.send_request(request)
        

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Work on a copy so the caller's request object is never mutated.
        outgoing = deepcopy(request)
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        outgoing.url = self._client.format_url(outgoing.url, **path_format_arguments)
        return self._client.send_request(outgoing, stream=stream, **kwargs)  # type: ignore

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> Self:
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details: Any) -> None:
        await self._client.__aexit__(*exc_details)
# --- Methods of AIAssistantClientConfiguration (flattened; the enclosing
# ``class AIAssistantClientConfiguration`` header precedes this section) ---

def __init__(
    self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any
) -> None:
    """Validate and store the client configuration.

    :param endpoint: Project endpoint in the form of:
     https://.services.ai.azure.com/api/projects/. Required.
    :param credential: Key credential or async token credential. Required.
    :keyword api_version: The API version to use. Default value is "latest".
    """
    api_version: str = kwargs.pop("api_version", "latest")

    if endpoint is None:
        raise ValueError("Parameter 'endpoint' must not be None.")
    if credential is None:
        raise ValueError("Parameter 'credential' must not be None.")

    self.endpoint = endpoint
    self.credential = credential
    self.api_version = api_version
    # AAD scope requested for bearer tokens; overridable via keyword.
    self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"])
    kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION))
    self.polling_interval = kwargs.get("polling_interval", 30)
    self._configure(**kwargs)

def _infer_policy(self, **kwargs):
    """Pick an authentication policy matching the credential type."""
    if isinstance(self.credential, AzureKeyCredential):
        return policies.AzureKeyCredentialPolicy(self.credential, "Authorization", prefix="Bearer", **kwargs)
    if hasattr(self.credential, "get_token"):
        # Duck-typed async token credential.
        return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
    raise TypeError(f"Unsupported credential: {self.credential}")

def _configure(self, **kwargs: Any) -> None:
    """Create the pipeline policies, honoring any caller-supplied overrides."""
    self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
    self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
    self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
    self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
    self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
    self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
    self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
    self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
    self.authentication_policy = kwargs.get("authentication_policy")
    if self.credential and not self.authentication_policy:
        self.authentication_policy = self._infer_policy(**kwargs)
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import AIAssistantClientOperationsMixin # type: ignore + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AIAssistantClientOperationsMixin", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py new file mode 100644 index 000000000000..da13d2fbdfcc --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py @@ -0,0 +1,4788 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, AsyncIterator, Callable, Dict, IO, List, Optional, TYPE_CHECKING, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import _model_base, models as _models +from ..._model_base import SdkJSONEncoder, _deserialize +from ..._operations._operations import ( + build_ai_assistant_cancel_run_request, + build_ai_assistant_cancel_vector_store_file_batch_request, + build_ai_assistant_create_agent_request, + build_ai_assistant_create_message_request, + build_ai_assistant_create_run_request, + build_ai_assistant_create_thread_and_run_request, + build_ai_assistant_create_thread_request, + build_ai_assistant_create_vector_store_file_batch_request, + build_ai_assistant_create_vector_store_file_request, + build_ai_assistant_create_vector_store_request, + build_ai_assistant_delete_agent_request, + build_ai_assistant_delete_file_request, + build_ai_assistant_delete_thread_request, + build_ai_assistant_delete_vector_store_file_request, + build_ai_assistant_delete_vector_store_request, + build_ai_assistant_get_agent_request, + build_ai_assistant_get_file_content_request, + build_ai_assistant_get_file_request, + build_ai_assistant_get_message_request, + build_ai_assistant_get_run_request, + build_ai_assistant_get_run_step_request, + build_ai_assistant_get_thread_request, + build_ai_assistant_get_vector_store_file_batch_request, + build_ai_assistant_get_vector_store_file_request, + 
build_ai_assistant_get_vector_store_request, + build_ai_assistant_list_agents_request, + build_ai_assistant_list_files_request, + build_ai_assistant_list_messages_request, + build_ai_assistant_list_run_steps_request, + build_ai_assistant_list_runs_request, + build_ai_assistant_list_vector_store_file_batch_files_request, + build_ai_assistant_list_vector_store_files_request, + build_ai_assistant_list_vector_stores_request, + build_ai_assistant_modify_vector_store_request, + build_ai_assistant_submit_tool_outputs_to_run_request, + build_ai_assistant_update_agent_request, + build_ai_assistant_update_message_request, + build_ai_assistant_update_run_request, + build_ai_assistant_update_thread_request, + build_ai_assistant_upload_file_request, +) +from ..._vendor import prepare_multipart_form_data +from .._vendor import AIAssistantClientMixinABC + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore + +if TYPE_CHECKING: + from ... import _types +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class AIAssistantClientOperationsMixin(AIAssistantClientMixinABC): # pylint: disable=too-many-public-methods + + @overload + async def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. 
+ + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. 
Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_agent_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: 
+ try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_agents( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfAgent: + """Gets a list of agents that were previously created. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. 
+ :paramtype before: str + :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfAgent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_agents_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: + 
"""Retrieves an existing agent. + + :param agent_id: Identifier of the agent. Required. + :type agent_id: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_agent_request( + agent_id=agent_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_agent( + self, + agent_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = 
None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. 
+ :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_agent( + self, + agent_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param agent_id: The ID of the agent to modify. Required. + :type agent_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. 
+ :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
        :paramtype response_format: str or str or
         ~azure.ai.assistants.models.AgentsApiResponseFormatMode or
         ~azure.ai.assistants.models.AgentsApiResponseFormat or
         ~azure.ai.assistants.models.ResponseFormatJsonSchemaType
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: Agent. The Agent is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.Agent
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP error statuses to azure-core exception types; callers may
        # extend/override the mapping via an "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        # Explicit content_type kwarg wins over any Content-Type header supplied by the caller.
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Agent] = kwargs.pop("cls", None)

        # No explicit body given: assemble the JSON payload from the keyword
        # arguments, dropping fields the caller left as None.
        if body is _Unset:
            body = {
                "description": description,
                "instructions": instructions,
                "metadata": metadata,
                "model": model,
                "name": name,
                "response_format": response_format,
                "temperature": temperature,
                "tool_resources": tool_resources,
                "tools": tools,
                "top_p": top_p,
            }
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        # Raw payloads (bytes / file-like) are sent as-is; anything else is JSON-encoded.
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_ai_assistant_update_agent_request(
            agent_id=agent_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.Agent, response.json())

        # "cls" is azure-core's custom response-processing hook.
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus:
        """Deletes an agent.

        :param agent_id: Identifier of the agent. Required.
        :type agent_id: str
        :return: AgentDeletionStatus. The AgentDeletionStatus is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentDeletionStatus
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code to exception mapping, overridable via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None)

        _request = build_ai_assistant_delete_agent_request(
            agent_id=agent_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.AgentDeletionStatus, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def create_thread(
        self,
        *,
        content_type: str = "application/json",
        messages: Optional[List[_models.ThreadMessageOptions]] = None,
        tool_resources: Optional[_models.ToolResources] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.AgentThread:
        """Creates a new thread. Threads contain messages and can be run by agents.

        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword messages: The initial messages to associate with the new thread. Default value is
         None.
        :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
        :keyword tool_resources: A set of resources that are made available to the agent's tools in
         this thread. The resources are specific to the
         type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
         the ``file_search`` tool requires
         a list of vector store IDs. Default value is None.
        :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_thread(
        self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.AgentThread:
        """Creates a new thread. Threads contain messages and can be run by agents.

        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_thread(
        self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.AgentThread:
        """Creates a new thread. Threads contain messages and can be run by agents.

        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def create_thread(
        self,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        messages: Optional[List[_models.ThreadMessageOptions]] = None,
        tool_resources: Optional[_models.ToolResources] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.AgentThread:
        """Creates a new thread. Threads contain messages and can be run by agents.

        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword messages: The initial messages to associate with the new thread. Default value is
         None.
        :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
        :keyword tool_resources: A set of resources that are made available to the agent's tools in
         this thread. The resources are specific to the
         type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
         the ``file_search`` tool requires
         a list of vector store IDs. Default value is None.
        :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code to exception mapping, overridable via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None)

        # No explicit body given: build the payload from keyword arguments, dropping None fields.
        if body is _Unset:
            body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources}
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        # Raw payloads (bytes / file-like) are sent as-is; anything else is JSON-encoded.
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_ai_assistant_create_thread_request(
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.AgentThread, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread:
        """Gets information about an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code to exception mapping, overridable via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None)

        _request = build_ai_assistant_get_thread_request(
            thread_id=thread_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.AgentThread, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def update_thread(
        self,
        thread_id: str,
        *,
        content_type: str = "application/json",
        tool_resources: Optional[_models.ToolResources] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.AgentThread:
        """Modifies an existing thread.

        :param thread_id: The ID of the thread to modify. Required.
        :type thread_id: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword tool_resources: A set of resources that are made available to the agent's tools in
         this thread. The resources are specific to the
         type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
         the ``file_search`` tool requires
         a list of vector store IDs. Default value is None.
        :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def update_thread(
        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.AgentThread:
        """Modifies an existing thread.

        :param thread_id: The ID of the thread to modify. Required.
        :type thread_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def update_thread(
        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.AgentThread:
        """Modifies an existing thread.

        :param thread_id: The ID of the thread to modify. Required.
        :type thread_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def update_thread(
        self,
        thread_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        tool_resources: Optional[_models.ToolResources] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.AgentThread:
        """Modifies an existing thread.

        :param thread_id: The ID of the thread to modify. Required.
        :type thread_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword tool_resources: A set of resources that are made available to the agent's tools in
         this thread. The resources are specific to the
         type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
         the ``file_search`` tool requires
         a list of vector store IDs. Default value is None.
        :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: AgentThread. The AgentThread is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.AgentThread
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code to exception mapping, overridable via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None)

        # No explicit body given: build the payload from keyword arguments, dropping None fields.
        if body is _Unset:
            body = {"metadata": metadata, "tool_resources": tool_resources}
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        # Raw payloads (bytes / file-like) are sent as-is; anything else is JSON-encoded.
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_ai_assistant_update_thread_request(
            thread_id=thread_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.AgentThread, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus:
        """Deletes an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :return: ThreadDeletionStatus. The ThreadDeletionStatus is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadDeletionStatus
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code to exception mapping, overridable via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None)

        _request = build_ai_assistant_delete_thread_request(
            thread_id=thread_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.ThreadDeletionStatus, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def create_message(
        self,
        thread_id: str,
        *,
        role: Union[str, _models.MessageRole],
        content: str,
        content_type: str = "application/json",
        attachments: Optional[List[_models.MessageAttachment]] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.ThreadMessage:
        """Creates a new message on a specified thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :keyword role: The role of the entity that is creating the message. Allowed values include:

         * `user`: Indicates the message is sent by an actual user and should be used in most
           cases to represent user-generated messages.
         * `assistant`: Indicates the message is generated by the agent. Use this value to insert
           messages from the agent into the
           conversation. Known values are: "user" and "assistant". Required.
        :paramtype role: str or ~azure.ai.assistants.models.MessageRole
        :keyword content: The textual content of the initial message. Currently, robust input including
         images and annotated text may only be provided via
         a separate call to the create message API. Required.
        :paramtype content: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword attachments: A list of files attached to the message, and the tools they should be
         added to. Default value is None.
        :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment]
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_message(
        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadMessage:
        """Creates a new message on a specified thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_message(
        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadMessage:
        """Creates a new message on a specified thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def create_message(
        self,
        thread_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        role: Union[str, _models.MessageRole] = _Unset,
        content: str = _Unset,
        attachments: Optional[List[_models.MessageAttachment]] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.ThreadMessage:
        """Creates a new message on a specified thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword role: The role of the entity that is creating the message. Allowed values include:

         * `user`: Indicates the message is sent by an actual user and should be used in most
           cases to represent user-generated messages.
         * `assistant`: Indicates the message is generated by the agent. Use this value to insert
           messages from the agent into the
           conversation. Known values are: "user" and "assistant". Required.
        :paramtype role: str or ~azure.ai.assistants.models.MessageRole
        :keyword content: The textual content of the initial message. Currently, robust input including
         images and annotated text may only be provided via
         a separate call to the create message API. Required.
        :paramtype content: str
        :keyword attachments: A list of files attached to the message, and the tools they should be
         added to. Default value is None.
        :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment]
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code to exception mapping, overridable via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None)

        # Keyword form: role and content become required once no explicit body is passed.
        if body is _Unset:
            if role is _Unset:
                raise TypeError("missing required argument: role")
            if content is _Unset:
                raise TypeError("missing required argument: content")
            body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role}
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        # Raw payloads (bytes / file-like) are sent as-is; anything else is JSON-encoded.
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_ai_assistant_create_message_request(
            thread_id=thread_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.ThreadMessage, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def list_messages(
        self,
        thread_id: str,
        *,
        run_id: Optional[str] = None,
        limit: Optional[int] = None,
        order: Optional[Union[str, _models.ListSortOrder]] = None,
        after: Optional[str] = None,
        before: Optional[str] = None,
        **kwargs: Any
    ) -> _models.OpenAIPageableListOfThreadMessage:
        """Gets a list of messages that exist on a thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :keyword run_id: Filter messages by the run ID that generated them. Default value is None.
        :paramtype run_id: str
        :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and
         100, and the default is 20. Default value is None.
        :paramtype limit: int
        :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order
         and desc for descending order. Known values are: "asc" and "desc". Default value is None.
        :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder
        :keyword after: A cursor for use in pagination. after is an object ID that defines your place
         in the list. For instance, if you make a list request and receive 100 objects, ending with
         obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the
         list. Default value is None.
        :paramtype after: str
        :keyword before: A cursor for use in pagination. before is an object ID that defines your place
         in the list. For instance, if you make a list request and receive 100 objects, ending with
         obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of
         the list. Default value is None.
        :paramtype before: str
        :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible
         with MutableMapping
        :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code to exception mapping, overridable via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None)

        _request = build_ai_assistant_list_messages_request(
            thread_id=thread_id,
            run_id=run_id,
            limit=limit,
            order=order,
            after=after,
            before=before,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage:
        """Gets an existing message from an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code to exception mapping, overridable via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None)

        _request = build_ai_assistant_get_message_request(
            thread_id=thread_id,
            message_id=message_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.ThreadMessage, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def update_message(
        self,
        thread_id: str,
        message_id: str,
        *,
        content_type: str = "application/json",
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.ThreadMessage:
        """Modifies an existing message on an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def update_message(
        self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadMessage:
        """Modifies an existing message on an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def update_message(
        self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.ThreadMessage:
        """Modifies an existing message on an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def update_message(
        self,
        thread_id: str,
        message_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.ThreadMessage:
        """Modifies an existing message on an existing thread.

        :param thread_id: Identifier of the thread. Required.
        :type thread_id: str
        :param message_id: Identifier of the message. Required.
        :type message_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadMessage
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code to exception mapping, overridable via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None)

        # No explicit body given: build the payload from keyword arguments, dropping None fields.
        if body is _Unset:
            body = {"metadata": metadata}
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        # Raw payloads (bytes / file-like) are sent as-is; anything else is JSON-encoded.
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_ai_assistant_update_message_request(
            thread_id=thread_id,
            message_id=message_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        # Substitute the client-level endpoint into the request URL template.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise
HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_run( + self, + thread_id: str, + *, + agent_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword agent_id: The ID of the agent that should run the thread. Required. + :paramtype agent_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. 
+ :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.assistants.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None.
+ :paramtype response_format: str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + body: JSON, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + body: IO[bytes], + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + agent_id: str = _Unset, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword agent_id: The ID of the agent that should run the thread. Required. + :paramtype agent_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. 
+ :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. 
The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.assistants.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword parallel_tool_calls: If ``true``, functions will run in parallel during tool use. + Default value is None. 
+ :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if agent_id is _Unset: + raise TypeError("missing required argument: agent_id") + body = { + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "assistant_id": agent_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "parallel_tool_calls": parallel_tool_calls, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = 
build_ai_assistant_create_run_request( + thread_id=thread_id, + include=include, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_runs( + self, + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadRun: + """Gets a list of runs for a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. 
Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # 
type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. 
+ + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. 
+ :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_update_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + 
+ if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. 
+ + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. 
+ + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if tool_outputs is _Unset: + raise TypeError("missing required argument: tool_outputs") + body = {"stream": stream_parameter, "tool_outputs": tool_outputs} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_submit_tool_outputs_to_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + 
params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Cancels a run of an in progress thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_ai_assistant_cancel_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_thread_and_run( + self, + *, + agent_id: str, + content_type: str = "application/json", + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: 
Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :keyword agent_id: The ID of the agent for which the thread should be created. Required. + :paramtype agent_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.assistants.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. 
+ :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. 
+ :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.assistants.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. 
+ + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread_and_run( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_thread_and_run( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + agent_id: str = _Unset, + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread 
and immediately starts a run using that new thread. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword agent_id: The ID of the agent for which the thread should be created. Required. + :paramtype agent_id: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.assistants.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run + as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.assistants.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
+ :paramtype response_format: str or str or + ~azure.ai.assistants.models.AgentsApiResponseFormatMode or + ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if agent_id is _Unset: + raise TypeError("missing required argument: agent_id") + body = { + "assistant_id": agent_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "parallel_tool_calls": parallel_tool_calls, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "thread": thread, + "tool_choice": tool_choice, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + 
body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_thread_and_run_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_run_step( + self, + thread_id: str, + run_id: str, + step_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + **kwargs: Any + ) -> _models.RunStep: + """Gets a single run step from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param step_id: Identifier of the run step. 
Required. + :type step_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :return: RunStep. The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + include=include, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfRunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_run_steps_request( + thread_id=thread_id, + run_id=run_id, + include=include, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(_models.OpenAIPageableListOfRunStep, response.json())  # NOTE: completes the `deserialized = ...` assignment begun on the previous source line

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def list_files(
        self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any
    ) -> _models.FileListResponse:
        """Gets a list of previously uploaded files.

        :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results",
         "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is
         None.
        :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose
        :return: FileListResponse. The FileListResponse is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.FileListResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map selected HTTP status codes to azure-core exception types; callers may
        # extend or override this mapping via the `error_map` keyword argument.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None)

        _request = build_ai_assistant_list_files_request(
            purpose=purpose,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        # The endpoint is a templated host component; skip_quote keeps it unescaped.
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        # When the caller requested streaming, hand back the raw byte iterator;
        # otherwise deserialize the JSON payload into the typed model.
        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.FileListResponse, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def upload_file(self, body: _models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile:
        """Uploads a file for use by other operations.

        :param body: Multipart body. Required.
        :type body: ~azure.ai.assistants.models.UploadFileRequest
        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.OpenAIFile
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile:
        """Uploads a file for use by other operations.

        :param body: Multipart body. Required.
        :type body: JSON
        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.OpenAIFile
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwargs: Any) -> _models.OpenAIFile:
        """Uploads a file for use by other operations.

        :param body: Multipart body. Is either a UploadFileRequest type or a JSON type. Required.
        :type body: ~azure.ai.assistants.models.UploadFileRequest or JSON
        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.OpenAIFile
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)

        # Split the request body into file parts ("file") and plain form-data fields
        # ("purpose", "filename") for the multipart/form-data upload.
        _body = body.as_dict() if isinstance(body, _model_base.Model) else body
        _file_fields: List[str] = ["file"]
        _data_fields: List[str] = ["purpose", "filename"]
        _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields)

        _request = build_ai_assistant_upload_file_request(
            api_version=self._config.api_version,
            files=_files,
            data=_data,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.OpenAIFile, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
# --- File deletion/retrieval and vector-store listing (generated async client methods) ---

    @distributed_trace_async
    async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus:
        """Delete a previously uploaded file.

        :param file_id: The ID of the file to delete. Required.
        :type file_id: str
        :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.FileDeletionStatus
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map selected HTTP status codes to azure-core exception types; callers may
        # extend or override this mapping via the `error_map` keyword argument.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None)

        _request = build_ai_assistant_delete_file_request(
            file_id=file_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.FileDeletionStatus, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile:
        """Returns information about a specific file. Does not retrieve file content.

        :param file_id: The ID of the file to retrieve. Required.
        :type file_id: str
        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.OpenAIFile
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)

        _request = build_ai_assistant_get_file_request(
            file_id=file_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.OpenAIFile, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def _get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[bytes]:
        """Retrieves the raw content of a specific file.

        :param file_id: The ID of the file to retrieve. Required.
        :type file_id: str
        :return: AsyncIterator[bytes]
        :rtype: AsyncIterator[bytes]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)

        _request = build_ai_assistant_get_file_content_request(
            file_id=file_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        # Unlike the JSON operations, this downloads raw content, so streaming
        # defaults to True and the result is always the response byte iterator.
        _stream = kwargs.pop("stream", True)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = response.iter_bytes()

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def list_vector_stores(
        self,
        *,
        limit: Optional[int] = None,
        order: Optional[Union[str, _models.ListSortOrder]] = None,
        after: Optional[str] = None,
        before: Optional[str] = None,
        **kwargs: Any
    ) -> _models.OpenAIPageableListOfVectorStore:
        """Returns a list of vector stores.

        :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and
         100, and the default is 20. Default value is None.
        :paramtype limit: int
        :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order
         and desc for descending order. Known values are: "asc" and "desc". Default value is None.
        :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder
        :keyword after: A cursor for use in pagination. after is an object ID that defines your place
         in the list. For instance, if you make a list request and receive 100 objects, ending with
         obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the
         list. Default value is None.
        :paramtype after: str
        :keyword before: A cursor for use in pagination. before is an object ID that defines your place
         in the list. For instance, if you make a list request and receive 100 objects, ending with
         obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of
         the list. Default value is None.
        :paramtype before: str
        :return: OpenAIPageableListOfVectorStore. The OpenAIPageableListOfVectorStore is compatible
         with MutableMapping
        :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None)

        _request = build_ai_assistant_list_vector_stores_request(
            limit=limit,
            order=order,
            after=after,
            before=before,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def create_vector_store(
        self,
        *,
        content_type: str = "application/json",
        file_ids: Optional[List[str]] = None,  # NOTE: this overload's parameter list continues on the next source line
name: Optional[str] = None,  # NOTE: continues the create_vector_store overload parameter list opened on the previous source line
        store_configuration: Optional[_models.VectorStoreConfiguration] = None,
        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.VectorStore:
        """Creates a vector store.

        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
         ``file_search`` that can access files. Default value is None.
        :paramtype file_ids: list[str]
        :keyword name: The name of the vector store. Default value is None.
        :paramtype name: str
        :keyword store_configuration: The vector store configuration, used when vector store is created
         from Azure asset URIs. Default value is None.
        :paramtype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration
        :keyword expires_after: Details on when this vector store expires. Default value is None.
        :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
         use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
        :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: VectorStore. The VectorStore is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_vector_store(
        self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.VectorStore:
        """Creates a vector store.

        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: VectorStore. The VectorStore is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_vector_store(
        self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.VectorStore:
        """Creates a vector store.

        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: VectorStore. The VectorStore is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def create_vector_store(
        self,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        file_ids: Optional[List[str]] = None,
        name: Optional[str] = None,
        store_configuration: Optional[_models.VectorStoreConfiguration] = None,
        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.VectorStore:
        """Creates a vector store.

        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
         ``file_search`` that can access files. Default value is None.
        :paramtype file_ids: list[str]
        :keyword name: The name of the vector store. Default value is None.
        :paramtype name: str
        :keyword store_configuration: The vector store configuration, used when vector store is created
         from Azure asset URIs. Default value is None.
        :paramtype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration
        :keyword expires_after: Details on when this vector store expires. Default value is None.
        :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
         use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
        :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: VectorStore. The VectorStore is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # Content-Type may come from kwargs or from explicit headers; headers win last.
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)

        if body is _Unset:
            # Assemble the JSON body from the keyword arguments, dropping unset entries.
            body = {
                "chunking_strategy": chunking_strategy,
                "configuration": store_configuration,
                "expires_after": expires_after,
                "file_ids": file_ids,
                "metadata": metadata,
                "name": name,
            }
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        if isinstance(body, (IOBase, bytes)):
            # Binary payloads are passed through untouched.
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_ai_assistant_create_vector_store_request(
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.VectorStore, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore:
        """Returns the vector store object matching the specified ID.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :return: VectorStore. The VectorStore is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)

        _request = build_ai_assistant_get_vector_store_request(
            vector_store_id=vector_store_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.VectorStore, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def modify_vector_store(
        self,
        vector_store_id: str,
        *,
        content_type: str = "application/json",
        name: Optional[str] = None,
        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.VectorStore:
        """The ID of the vector store to modify.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword name: The name of the vector store. Default value is None.
        :paramtype name: str
        :keyword expires_after: Details on when this vector store expires. Default value is None.
        :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: VectorStore. The VectorStore is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def modify_vector_store(
        self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.VectorStore:
        """The ID of the vector store to modify.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :param body: Required.
        :type body: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: VectorStore. The VectorStore is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def modify_vector_store(
        self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.VectorStore:
        """The ID of the vector store to modify.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: VectorStore. The VectorStore is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def modify_vector_store(
        self,
        vector_store_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        name: Optional[str] = None,
        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> _models.VectorStore:
        """The ID of the vector store to modify.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword name: The name of the vector store. Default value is None.
        :paramtype name: str
        :keyword expires_after: Details on when this vector store expires. Default value is None.
        :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: VectorStore. The VectorStore is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStore
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}

        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)

        if body is _Unset:
            # Assemble the JSON body from the keyword arguments, dropping unset entries.
            body = {"expires_after": expires_after, "metadata": metadata, "name": name}
            body = {k: v for k, v in body.items() if v is not None}
        content_type = content_type or "application/json"
        _content = None
        if isinstance(body, (IOBase, bytes)):
            _content = body
        else:
            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore

        _request = build_ai_assistant_modify_vector_store_request(
            vector_store_id=vector_store_id,
            content_type=content_type,
            api_version=self._config.api_version,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.VectorStore, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus:
        """Deletes the vector store object matching the specified ID.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :return: VectorStoreDeletionStatus. The VectorStoreDeletionStatus is compatible with
         MutableMapping
        :rtype: ~azure.ai.assistants.models.VectorStoreDeletionStatus
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None)

        _request = build_ai_assistant_delete_vector_store_request(
            vector_store_id=vector_store_id,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def list_vector_store_files(
        self,
        vector_store_id: str,
        *,
        filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None,
        limit: Optional[int] = None,
        order: Optional[Union[str, _models.ListSortOrder]] = None,
        after: Optional[str] = None,
        before: Optional[str] = None,
        **kwargs: Any
    ) -> _models.OpenAIPageableListOfVectorStoreFile:
        """Returns a list of vector store files.

        :param vector_store_id: Identifier of the vector store. Required.
        :type vector_store_id: str
        :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed",
         and "cancelled". Default value is None.
        :paramtype filter: str or ~azure.ai.assistants.models.VectorStoreFileStatusFilter
        :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and
         100, and the default is 20. Default value is None.
        :paramtype limit: int
        :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order
         and desc for descending order. Known values are: "asc" and "desc". Default value is None.
        :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder
        :keyword after: A cursor for use in pagination. after is an object ID that defines your place
         in the list. For instance, if you make a list request and receive 100 objects, ending with
         obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the
         list. Default value is None.
        :paramtype after: str
        :keyword before: A cursor for use in pagination. before is an object ID that defines your place
         in the list. For instance, if you make a list request and receive 100 objects, ending with
         obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of
         the list. Default value is None.
        :paramtype before: str
        :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is
         compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}

        cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None)

        _request = build_ai_assistant_list_vector_store_files_request(
            vector_store_id=vector_store_id,
            filter=filter,
            limit=limit,
            order=order,
            after=after,
            before=before,
            api_version=self._config.api_version,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        _request.url = self._client.format_url(_request.url, **path_format_arguments)

        _stream = kwargs.pop("stream", False)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            if _stream:
                try:
                    await response.read()  # Load the body in memory and close the socket
                except (StreamConsumedError, StreamClosedError):
                    pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if _stream:
            deserialized = response.iter_bytes()
        else:
            deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json())

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def create_vector_store_file(
        self,  # NOTE: this overload's parameter list continues on the next source line
vector_store_id: str, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. 
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"chunking_strategy": chunking_strategy, "data_source": data_source, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in 
memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, 
stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_ai_assistant_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_ids: 
Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file_batch( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. 
+ :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_ai_assistant_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await 
response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_ai_assistant_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await 
self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_ai_assistant_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, 
_models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.assistants.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.assistants.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. 
The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_ai_assistant_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py new file mode 100644 index 000000000000..46e0725a5b24 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import TYPE_CHECKING + +from ._configuration import AIAssistantClientConfiguration + +if TYPE_CHECKING: + from azure.core import AsyncPipelineClient + + from .._serialization import Deserializer, Serializer + + +class AIAssistantClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: AIAssistantClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py new file mode 100644 index 000000000000..081376de319f --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py @@ -0,0 +1,416 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models import ( # type: ignore + AISearchIndexResource, + Agent, + AgentDeletionStatus, + AgentThread, + AgentThreadCreationOptions, + AgentsApiResponseFormat, + AgentsNamedToolChoice, + AzureAISearchResource, + AzureAISearchToolDefinition, + AzureFunctionBinding, + AzureFunctionDefinition, + AzureFunctionStorageQueue, + AzureFunctionToolDefinition, + BingCustomSearchToolDefinition, + BingGroundingToolDefinition, + CodeInterpreterToolDefinition, + CodeInterpreterToolResource, + ErrorResponse, + FileDeletionStatus, + FileListResponse, + FileSearchRankingOptions, + FileSearchToolCallContent, + FileSearchToolDefinition, + FileSearchToolDefinitionDetails, + FileSearchToolResource, + FunctionDefinition, + FunctionName, + FunctionToolDefinition, + IncompleteRunDetails, + MessageAttachment, + MessageContent, + MessageDelta, + MessageDeltaChunk, + MessageDeltaContent, + MessageDeltaImageFileContent, + MessageDeltaImageFileContentObject, + MessageDeltaTextAnnotation, + MessageDeltaTextContent, + MessageDeltaTextContentObject, + MessageDeltaTextFileCitationAnnotation, + MessageDeltaTextFileCitationAnnotationObject, + MessageDeltaTextFilePathAnnotation, + MessageDeltaTextFilePathAnnotationObject, + MessageDeltaTextUrlCitationAnnotation, + MessageDeltaTextUrlCitationDetails, + MessageImageFileContent, + MessageImageFileDetails, + MessageIncompleteDetails, + MessageTextAnnotation, + MessageTextContent, + MessageTextDetails, + MessageTextFileCitationAnnotation, + MessageTextFileCitationDetails, + MessageTextFilePathAnnotation, + MessageTextFilePathDetails, + 
MessageTextUrlCitationAnnotation, + MessageTextUrlCitationDetails, + MicrosoftFabricToolDefinition, + OpenAIFile, + OpenAIPageableListOfAgent, + OpenAIPageableListOfRunStep, + OpenAIPageableListOfThreadMessage, + OpenAIPageableListOfThreadRun, + OpenAIPageableListOfVectorStore, + OpenAIPageableListOfVectorStoreFile, + OpenApiAnonymousAuthDetails, + OpenApiAuthDetails, + OpenApiConnectionAuthDetails, + OpenApiConnectionSecurityScheme, + OpenApiFunctionDefinition, + OpenApiManagedAuthDetails, + OpenApiManagedSecurityScheme, + OpenApiToolDefinition, + RequiredAction, + RequiredFunctionToolCall, + RequiredFunctionToolCallDetails, + RequiredToolCall, + ResponseFormatJsonSchema, + ResponseFormatJsonSchemaType, + RunCompletionUsage, + RunError, + RunStep, + RunStepAzureAISearchToolCall, + RunStepBingGroundingToolCall, + RunStepCodeInterpreterImageOutput, + RunStepCodeInterpreterImageReference, + RunStepCodeInterpreterLogOutput, + RunStepCodeInterpreterToolCall, + RunStepCodeInterpreterToolCallDetails, + RunStepCodeInterpreterToolCallOutput, + RunStepCompletionUsage, + RunStepCustomSearchToolCall, + RunStepDelta, + RunStepDeltaChunk, + RunStepDeltaCodeInterpreterDetailItemObject, + RunStepDeltaCodeInterpreterImageOutput, + RunStepDeltaCodeInterpreterImageOutputObject, + RunStepDeltaCodeInterpreterLogOutput, + RunStepDeltaCodeInterpreterOutput, + RunStepDeltaCodeInterpreterToolCall, + RunStepDeltaDetail, + RunStepDeltaFileSearchToolCall, + RunStepDeltaFunction, + RunStepDeltaFunctionToolCall, + RunStepDeltaMessageCreation, + RunStepDeltaMessageCreationObject, + RunStepDeltaToolCall, + RunStepDeltaToolCallObject, + RunStepDetails, + RunStepError, + RunStepFileSearchToolCall, + RunStepFileSearchToolCallResult, + RunStepFileSearchToolCallResults, + RunStepFunctionToolCall, + RunStepFunctionToolCallDetails, + RunStepMessageCreationDetails, + RunStepMessageCreationReference, + RunStepMicrosoftFabricToolCall, + RunStepOpenAPIToolCall, + RunStepSharepointToolCall, + 
RunStepToolCall, + RunStepToolCallDetails, + SearchConfiguration, + SearchConfigurationList, + SharepointToolDefinition, + SubmitToolOutputsAction, + SubmitToolOutputsDetails, + ThreadDeletionStatus, + ThreadMessage, + ThreadMessageOptions, + ThreadRun, + ToolConnection, + ToolConnectionList, + ToolDefinition, + ToolOutput, + ToolResources, + TruncationObject, + UpdateCodeInterpreterToolResourceOptions, + UpdateFileSearchToolResourceOptions, + UpdateToolResourcesOptions, + UploadFileRequest, + VectorStore, + VectorStoreAutoChunkingStrategyRequest, + VectorStoreAutoChunkingStrategyResponse, + VectorStoreChunkingStrategyRequest, + VectorStoreChunkingStrategyResponse, + VectorStoreConfiguration, + VectorStoreConfigurations, + VectorStoreDataSource, + VectorStoreDeletionStatus, + VectorStoreExpirationPolicy, + VectorStoreFile, + VectorStoreFileBatch, + VectorStoreFileCount, + VectorStoreFileDeletionStatus, + VectorStoreFileError, + VectorStoreStaticChunkingStrategyOptions, + VectorStoreStaticChunkingStrategyRequest, + VectorStoreStaticChunkingStrategyResponse, +) + +from ._enums import ( # type: ignore + AgentStreamEvent, + AgentsApiResponseFormatMode, + AgentsApiToolChoiceOptionMode, + AgentsNamedToolChoiceType, + AzureAISearchQueryType, + DoneEvent, + ErrorEvent, + FilePurpose, + FileState, + IncompleteDetailsReason, + ListSortOrder, + MessageIncompleteDetailsReason, + MessageRole, + MessageStatus, + MessageStreamEvent, + OpenApiAuthType, + ResponseFormat, + RunAdditionalFieldList, + RunStatus, + RunStepErrorCode, + RunStepStatus, + RunStepStreamEvent, + RunStepType, + RunStreamEvent, + ThreadStreamEvent, + TruncationStrategy, + VectorStoreChunkingStrategyRequestType, + VectorStoreChunkingStrategyResponseType, + VectorStoreDataSourceAssetType, + VectorStoreExpirationPolicyAnchor, + VectorStoreFileBatchStatus, + VectorStoreFileErrorCode, + VectorStoreFileStatus, + VectorStoreFileStatusFilter, + VectorStoreStatus, +) +from ._patch import __all__ as _patch_all +from 
._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AISearchIndexResource", + "Agent", + "AgentDeletionStatus", + "AgentThread", + "AgentThreadCreationOptions", + "AgentsApiResponseFormat", + "AgentsNamedToolChoice", + "AzureAISearchResource", + "AzureAISearchToolDefinition", + "AzureFunctionBinding", + "AzureFunctionDefinition", + "AzureFunctionStorageQueue", + "AzureFunctionToolDefinition", + "BingCustomSearchToolDefinition", + "BingGroundingToolDefinition", + "CodeInterpreterToolDefinition", + "CodeInterpreterToolResource", + "ErrorResponse", + "FileDeletionStatus", + "FileListResponse", + "FileSearchRankingOptions", + "FileSearchToolCallContent", + "FileSearchToolDefinition", + "FileSearchToolDefinitionDetails", + "FileSearchToolResource", + "FunctionDefinition", + "FunctionName", + "FunctionToolDefinition", + "IncompleteRunDetails", + "MessageAttachment", + "MessageContent", + "MessageDelta", + "MessageDeltaChunk", + "MessageDeltaContent", + "MessageDeltaImageFileContent", + "MessageDeltaImageFileContentObject", + "MessageDeltaTextAnnotation", + "MessageDeltaTextContent", + "MessageDeltaTextContentObject", + "MessageDeltaTextFileCitationAnnotation", + "MessageDeltaTextFileCitationAnnotationObject", + "MessageDeltaTextFilePathAnnotation", + "MessageDeltaTextFilePathAnnotationObject", + "MessageDeltaTextUrlCitationAnnotation", + "MessageDeltaTextUrlCitationDetails", + "MessageImageFileContent", + "MessageImageFileDetails", + "MessageIncompleteDetails", + "MessageTextAnnotation", + "MessageTextContent", + "MessageTextDetails", + "MessageTextFileCitationAnnotation", + "MessageTextFileCitationDetails", + "MessageTextFilePathAnnotation", + "MessageTextFilePathDetails", + "MessageTextUrlCitationAnnotation", + "MessageTextUrlCitationDetails", + "MicrosoftFabricToolDefinition", + "OpenAIFile", + "OpenAIPageableListOfAgent", + "OpenAIPageableListOfRunStep", + "OpenAIPageableListOfThreadMessage", + "OpenAIPageableListOfThreadRun", + 
"OpenAIPageableListOfVectorStore", + "OpenAIPageableListOfVectorStoreFile", + "OpenApiAnonymousAuthDetails", + "OpenApiAuthDetails", + "OpenApiConnectionAuthDetails", + "OpenApiConnectionSecurityScheme", + "OpenApiFunctionDefinition", + "OpenApiManagedAuthDetails", + "OpenApiManagedSecurityScheme", + "OpenApiToolDefinition", + "RequiredAction", + "RequiredFunctionToolCall", + "RequiredFunctionToolCallDetails", + "RequiredToolCall", + "ResponseFormatJsonSchema", + "ResponseFormatJsonSchemaType", + "RunCompletionUsage", + "RunError", + "RunStep", + "RunStepAzureAISearchToolCall", + "RunStepBingGroundingToolCall", + "RunStepCodeInterpreterImageOutput", + "RunStepCodeInterpreterImageReference", + "RunStepCodeInterpreterLogOutput", + "RunStepCodeInterpreterToolCall", + "RunStepCodeInterpreterToolCallDetails", + "RunStepCodeInterpreterToolCallOutput", + "RunStepCompletionUsage", + "RunStepCustomSearchToolCall", + "RunStepDelta", + "RunStepDeltaChunk", + "RunStepDeltaCodeInterpreterDetailItemObject", + "RunStepDeltaCodeInterpreterImageOutput", + "RunStepDeltaCodeInterpreterImageOutputObject", + "RunStepDeltaCodeInterpreterLogOutput", + "RunStepDeltaCodeInterpreterOutput", + "RunStepDeltaCodeInterpreterToolCall", + "RunStepDeltaDetail", + "RunStepDeltaFileSearchToolCall", + "RunStepDeltaFunction", + "RunStepDeltaFunctionToolCall", + "RunStepDeltaMessageCreation", + "RunStepDeltaMessageCreationObject", + "RunStepDeltaToolCall", + "RunStepDeltaToolCallObject", + "RunStepDetails", + "RunStepError", + "RunStepFileSearchToolCall", + "RunStepFileSearchToolCallResult", + "RunStepFileSearchToolCallResults", + "RunStepFunctionToolCall", + "RunStepFunctionToolCallDetails", + "RunStepMessageCreationDetails", + "RunStepMessageCreationReference", + "RunStepMicrosoftFabricToolCall", + "RunStepOpenAPIToolCall", + "RunStepSharepointToolCall", + "RunStepToolCall", + "RunStepToolCallDetails", + "SearchConfiguration", + "SearchConfigurationList", + "SharepointToolDefinition", + 
"SubmitToolOutputsAction", + "SubmitToolOutputsDetails", + "ThreadDeletionStatus", + "ThreadMessage", + "ThreadMessageOptions", + "ThreadRun", + "ToolConnection", + "ToolConnectionList", + "ToolDefinition", + "ToolOutput", + "ToolResources", + "TruncationObject", + "UpdateCodeInterpreterToolResourceOptions", + "UpdateFileSearchToolResourceOptions", + "UpdateToolResourcesOptions", + "UploadFileRequest", + "VectorStore", + "VectorStoreAutoChunkingStrategyRequest", + "VectorStoreAutoChunkingStrategyResponse", + "VectorStoreChunkingStrategyRequest", + "VectorStoreChunkingStrategyResponse", + "VectorStoreConfiguration", + "VectorStoreConfigurations", + "VectorStoreDataSource", + "VectorStoreDeletionStatus", + "VectorStoreExpirationPolicy", + "VectorStoreFile", + "VectorStoreFileBatch", + "VectorStoreFileCount", + "VectorStoreFileDeletionStatus", + "VectorStoreFileError", + "VectorStoreStaticChunkingStrategyOptions", + "VectorStoreStaticChunkingStrategyRequest", + "VectorStoreStaticChunkingStrategyResponse", + "AgentStreamEvent", + "AgentsApiResponseFormatMode", + "AgentsApiToolChoiceOptionMode", + "AgentsNamedToolChoiceType", + "AzureAISearchQueryType", + "DoneEvent", + "ErrorEvent", + "FilePurpose", + "FileState", + "IncompleteDetailsReason", + "ListSortOrder", + "MessageIncompleteDetailsReason", + "MessageRole", + "MessageStatus", + "MessageStreamEvent", + "OpenApiAuthType", + "ResponseFormat", + "RunAdditionalFieldList", + "RunStatus", + "RunStepErrorCode", + "RunStepStatus", + "RunStepStreamEvent", + "RunStepType", + "RunStreamEvent", + "ThreadStreamEvent", + "TruncationStrategy", + "VectorStoreChunkingStrategyRequestType", + "VectorStoreChunkingStrategyResponseType", + "VectorStoreDataSourceAssetType", + "VectorStoreExpirationPolicyAnchor", + "VectorStoreFileBatchStatus", + "VectorStoreFileErrorCode", + "VectorStoreFileStatus", + "VectorStoreFileStatusFilter", + "VectorStoreStatus", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore 
+_patch_sdk() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py new file mode 100644 index 000000000000..405ab0478f62 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py @@ -0,0 +1,520 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AgentsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Represents the mode in which the model will handle the return format of a tool call.""" + + AUTO = "auto" + """Default value. 
Let the model handle the return format.""" + NONE = "none" + """Setting the value to ``none`` will result in a 400 Bad request.""" + + +class AgentsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies how the tool choice will be used.""" + + NONE = "none" + """The model will not call a function and instead generates a message.""" + AUTO = "auto" + """The model can pick between generating a message or calling a function.""" + + +class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Available tool types for agents named tools.""" + + FUNCTION = "function" + """Tool type ``function``""" + CODE_INTERPRETER = "code_interpreter" + """Tool type ``code_interpreter``""" + FILE_SEARCH = "file_search" + """Tool type ``file_search``""" + BING_GROUNDING = "bing_grounding" + """Tool type ``bing_grounding``""" + MICROSOFT_FABRIC = "fabric_dataagent" + """Tool type ``fabric_dataagent``""" + SHAREPOINT = "sharepoint_grounding" + """Tool type ``sharepoint_grounding``""" + AZURE_AI_SEARCH = "azure_ai_search" + """Tool type ``azure_ai_search``""" + BING_CUSTOM_SEARCH = "bing_custom_search" + """Tool type ``bing_custom_search``""" + + +class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Each event in a server-sent events stream has an ``event`` and ``data`` property: + + + + .. code-block:: + + event: thread.created + data: {"id": "thread_123", "object": "thread", ...} + + We emit events whenever a new object is created, transitions to a new state, or is being + streamed in parts (deltas). For example, we emit ``thread.run.created`` when a new run + is created, ``thread.run.completed`` when a run completes, and so on. When an Agent chooses + to create a message during a run, we emit a ``thread.message.created`` event, a + ``thread.message.in_progress`` event, many ``thread.message.delta`` events, and finally a + ``thread.message.completed`` event. 
+ + We may add additional events over time, so we recommend handling unknown events gracefully + in your code. + """ + + THREAD_CREATED = "thread.created" + """Event sent when a new thread is created. The data of this event is of type AgentThread""" + THREAD_RUN_CREATED = "thread.run.created" + """Event sent when a new run is created. The data of this event is of type ThreadRun""" + THREAD_RUN_QUEUED = "thread.run.queued" + """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" + THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" + """Event sent when a run moves to ``in_progress`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" + """Event sent when a run moves to ``requires_action`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_COMPLETED = "thread.run.completed" + """Event sent when a run is completed. The data of this event is of type ThreadRun""" + THREAD_RUN_INCOMPLETE = "thread.run.incomplete" + """Event sent when a run ends incomplete. The data of this event is of type ThreadRun""" + THREAD_RUN_FAILED = "thread.run.failed" + """Event sent when a run fails. The data of this event is of type ThreadRun""" + THREAD_RUN_CANCELLING = "thread.run.cancelling" + """Event sent when a run moves to ``cancelling`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_CANCELLED = "thread.run.cancelled" + """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" + THREAD_RUN_EXPIRED = "thread.run.expired" + """Event sent when a run is expired. The data of this event is of type ThreadRun""" + THREAD_RUN_STEP_CREATED = "thread.run.step.created" + """Event sent when a new thread run step is created. The data of this event is of type RunStep""" + THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" + """Event sent when a run step moves to ``in_progress`` status. 
The data of this event is of type + RunStep""" + THREAD_RUN_STEP_DELTA = "thread.run.step.delta" + """Event sent when a run step is being streamed. The data of this event is of type + RunStepDeltaChunk""" + THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" + """Event sent when a run step is completed. The data of this event is of type RunStep""" + THREAD_RUN_STEP_FAILED = "thread.run.step.failed" + """Event sent when a run step fails. The data of this event is of type RunStep""" + THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" + """Event sent when a run step is cancelled. The data of this event is of type RunStep""" + THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" + """Event sent when a run step is expired. The data of this event is of type RunStep""" + THREAD_MESSAGE_CREATED = "thread.message.created" + """Event sent when a new message is created. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" + """Event sent when a message moves to ``in_progress`` status. The data of this event is of type + ThreadMessage""" + THREAD_MESSAGE_DELTA = "thread.message.delta" + """Event sent when a message is being streamed. The data of this event is of type + MessageDeltaChunk""" + THREAD_MESSAGE_COMPLETED = "thread.message.completed" + """Event sent when a message is completed. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" + """Event sent before a message is completed. 
The data of this event is of type ThreadMessage""" + ERROR = "error" + """Event sent when an error occurs, such as an internal server error or a timeout.""" + DONE = "done" + """Event sent when the stream is done.""" + + +class AzureAISearchQueryType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Available query types for Azure AI Search tool.""" + + SIMPLE = "simple" + """Query type ``simple``""" + SEMANTIC = "semantic" + """Query type ``semantic``""" + VECTOR = "vector" + """Query type ``vector``""" + VECTOR_SIMPLE_HYBRID = "vector_simple_hybrid" + """Query type ``vector_simple_hybrid``""" + VECTOR_SEMANTIC_HYBRID = "vector_semantic_hybrid" + """Query type ``vector_semantic_hybrid``""" + + +class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Terminal event indicating the successful end of a stream.""" + + DONE = "done" + """Event sent when the stream is done.""" + + +class ErrorEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Terminal event indicating a server side error while streaming.""" + + ERROR = "error" + """Event sent when an error occurs, such as an internal server error or a timeout.""" + + +class FilePurpose(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible values denoting the intended usage of a file.""" + + FINE_TUNE = "fine-tune" + """Indicates a file is used for fine tuning input.""" + FINE_TUNE_RESULTS = "fine-tune-results" + """Indicates a file is used for fine tuning results.""" + AGENTS = "assistants" + """Indicates a file is used as input to agents.""" + AGENTS_OUTPUT = "assistants_output" + """Indicates a file is used as output by agents.""" + BATCH = "batch" + """Indicates a file is used as input to a batch operation.""" + BATCH_OUTPUT = "batch_output" + """Indicates a file is used as output by a vector store batch operation.""" + VISION = "vision" + """Indicates a file is used as input to a vision operation.""" + + +class FileState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The state of the file.""" + + 
UPLOADED = "uploaded" + """The file has been uploaded but it's not yet processed. This state is not returned by Azure + OpenAI and exposed only for + compatibility. It can be categorized as an inactive state.""" + PENDING = "pending" + """The operation was created and is now queued to be processed in the future. It can be + categorized as an inactive state.""" + RUNNING = "running" + """The operation has started to be processed. It can be categorized as an active state.""" + PROCESSED = "processed" + """The operation has successfully processed and is ready for consumption. It can be categorized as + a terminal state.""" + ERROR = "error" + """The operation has completed processing with a failure and cannot be further consumed. It can be + categorized as a terminal state.""" + DELETING = "deleting" + """The entity is in the process to be deleted. This state is not returned by Azure OpenAI and + exposed only for compatibility. + It can be categorized as an active state.""" + DELETED = "deleted" + """The entity has been deleted but may still be referenced by other entities predating the + deletion. It can be categorized as a + terminal state.""" + + +class IncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The reason why the run is incomplete. This will point to which specific token limit was reached + over the course of the run. 
+ """ + + MAX_COMPLETION_TOKENS = "max_completion_tokens" + """Maximum completion tokens exceeded""" + MAX_PROMPT_TOKENS = "max_prompt_tokens" + """Maximum prompt tokens exceeded""" + + +class ListSortOrder(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The available sorting options when requesting a list of response objects.""" + + ASCENDING = "asc" + """Specifies an ascending sort order.""" + DESCENDING = "desc" + """Specifies a descending sort order.""" + + +class MessageIncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """A set of reasons describing why a message is marked as incomplete.""" + + CONTENT_FILTER = "content_filter" + """The run generating the message was terminated due to content filter flagging.""" + MAX_TOKENS = "max_tokens" + """The run generating the message exhausted available tokens before completion.""" + RUN_CANCELLED = "run_cancelled" + """The run generating the message was cancelled before completion.""" + RUN_FAILED = "run_failed" + """The run generating the message failed.""" + RUN_EXPIRED = "run_expired" + """The run generating the message expired.""" + + +class MessageRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible values for roles attributed to messages in a thread.""" + + USER = "user" + """The role representing the end-user.""" + AGENT = "assistant" + """The role representing the agent.""" + + +class MessageStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible execution status values for a thread message.""" + + IN_PROGRESS = "in_progress" + """A run is currently creating this message.""" + INCOMPLETE = "incomplete" + """This message is incomplete. 
See incomplete_details for more information.""" + COMPLETED = "completed" + """This message was successfully completed by a run.""" + + +class MessageStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Message operation related streaming events.""" + + THREAD_MESSAGE_CREATED = "thread.message.created" + """Event sent when a new message is created. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" + """Event sent when a message moves to ``in_progress`` status. The data of this event is of type + ThreadMessage""" + THREAD_MESSAGE_DELTA = "thread.message.delta" + """Event sent when a message is being streamed. The data of this event is of type + MessageDeltaChunk""" + THREAD_MESSAGE_COMPLETED = "thread.message.completed" + """Event sent when a message is completed. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" + """Event sent before a message is completed. The data of this event is of type ThreadMessage""" + + +class OpenApiAuthType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Authentication type for OpenApi endpoint. Allowed types are: + + * Anonymous (no authentication required) + * Connection (requires connection_id to endpoint, as setup in AI Foundry) + * Managed_Identity (requires audience for identity based auth). 
+ """ + + ANONYMOUS = "anonymous" + CONNECTION = "connection" + MANAGED_IDENTITY = "managed_identity" + + +class ResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible API response formats.""" + + TEXT = "text" + """``text`` format should be used for requests involving any sort of ToolCall.""" + JSON_OBJECT = "json_object" + """Using ``json_object`` format will limit the usage of ToolCall to only functions.""" + + +class RunAdditionalFieldList(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """A list of additional fields to include in the response.""" + + FILE_SEARCH_CONTENTS = "step_details.tool_calls[*].file_search.results[*].content" + """File search result content.""" + + +class RunStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible values for the status of an agent thread run.""" + + QUEUED = "queued" + """Represents a run that is queued to start.""" + IN_PROGRESS = "in_progress" + """Represents a run that is in progress.""" + REQUIRES_ACTION = "requires_action" + """Represents a run that needs another operation, such as tool output submission, to continue.""" + CANCELLING = "cancelling" + """Represents a run that is in the process of cancellation.""" + CANCELLED = "cancelled" + """Represents a run that has been cancelled.""" + FAILED = "failed" + """Represents a run that failed.""" + COMPLETED = "completed" + """Represents a run that successfully completed.""" + EXPIRED = "expired" + """Represents a run that expired before it could otherwise finish.""" + + +class RunStepErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible error code values attributable to a failed run step.""" + + SERVER_ERROR = "server_error" + """Represents a server error.""" + RATE_LIMIT_EXCEEDED = "rate_limit_exceeded" + """Represents an error indicating configured rate limits were exceeded.""" + + +class RunStepStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible values for the status of a run step.""" + + IN_PROGRESS 
= "in_progress" + """Represents a run step still in progress.""" + CANCELLED = "cancelled" + """Represents a run step that was cancelled.""" + FAILED = "failed" + """Represents a run step that failed.""" + COMPLETED = "completed" + """Represents a run step that successfully completed.""" + EXPIRED = "expired" + """Represents a run step that expired before otherwise finishing.""" + + +class RunStepStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Run step operation related streaming events.""" + + THREAD_RUN_STEP_CREATED = "thread.run.step.created" + """Event sent when a new thread run step is created. The data of this event is of type RunStep""" + THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" + """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type + RunStep""" + THREAD_RUN_STEP_DELTA = "thread.run.step.delta" + """Event sent when a run step is being streamed. The data of this event is of type + RunStepDeltaChunk""" + THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" + """Event sent when a run step is completed. The data of this event is of type RunStep""" + THREAD_RUN_STEP_FAILED = "thread.run.step.failed" + """Event sent when a run step fails. The data of this event is of type RunStep""" + THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" + """Event sent when a run step is cancelled. The data of this event is of type RunStep""" + THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" + """Event sent when a run step is expired. 
The data of this event is of type RunStep""" + + +class RunStepType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible types of run steps.""" + + MESSAGE_CREATION = "message_creation" + """Represents a run step to create a message.""" + TOOL_CALLS = "tool_calls" + """Represents a run step that calls tools.""" + + +class RunStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Run operation related streaming events.""" + + THREAD_RUN_CREATED = "thread.run.created" + """Event sent when a new run is created. The data of this event is of type ThreadRun""" + THREAD_RUN_QUEUED = "thread.run.queued" + """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" + THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" + """Event sent when a run moves to ``in_progress`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" + """Event sent when a run moves to ``requires_action`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_COMPLETED = "thread.run.completed" + """Event sent when a run is completed. The data of this event is of type ThreadRun""" + THREAD_RUN_INCOMPLETE = "thread.run.incomplete" + """Event sent when a run ends incomplete. The data of this event is of type ThreadRun""" + THREAD_RUN_FAILED = "thread.run.failed" + """Event sent when a run fails. The data of this event is of type ThreadRun""" + THREAD_RUN_CANCELLING = "thread.run.cancelling" + """Event sent when a run moves to ``cancelling`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_CANCELLED = "thread.run.cancelled" + """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" + THREAD_RUN_EXPIRED = "thread.run.expired" + """Event sent when a run is expired. 
The data of this event is of type ThreadRun""" + + +class ThreadStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Thread operation related streaming events.""" + + THREAD_CREATED = "thread.created" + """Event sent when a new thread is created. The data of this event is of type AgentThread""" + + +class TruncationStrategy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible truncation strategies for the thread.""" + + AUTO = "auto" + """Default value. Messages in the middle of the thread will be dropped to fit the context length + of the model.""" + LAST_MESSAGES = "last_messages" + """The thread will truncate to the ``lastMessages`` count of recent messages.""" + + +class VectorStoreChunkingStrategyRequestType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of chunking strategy.""" + + AUTO = "auto" + STATIC = "static" + + +class VectorStoreChunkingStrategyResponseType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of chunking strategy.""" + + OTHER = "other" + STATIC = "static" + + +class VectorStoreDataSourceAssetType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of vector storage asset. Asset type may be a uri_asset, in this case it should contain + asset URI ID, + in the case of id_asset it should contain the data ID. 
+ """ + + URI_ASSET = "uri_asset" + """Azure URI""" + ID_ASSET = "id_asset" + """The data ID""" + + +class VectorStoreExpirationPolicyAnchor(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Describes the relationship between the days and the expiration of this vector store.""" + + LAST_ACTIVE_AT = "last_active_at" + """The expiration policy is based on the last time the vector store was active.""" + + +class VectorStoreFileBatchStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The status of the vector store file batch.""" + + IN_PROGRESS = "in_progress" + """The vector store is still processing this file batch.""" + COMPLETED = "completed" + """The vector store file batch is ready for use.""" + CANCELLED = "cancelled" + """The vector store file batch was cancelled.""" + FAILED = "failed" + """The vector store file batch failed to process.""" + + +class VectorStoreFileErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Error code variants for vector store file processing.""" + + SERVER_ERROR = "server_error" + """A server error occurred.""" + INVALID_FILE = "invalid_file" + """The file is not valid.""" + UNSUPPORTED_FILE = "unsupported_file" + """The file is of unsupported type.""" + + +class VectorStoreFileStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Vector store file status.""" + + IN_PROGRESS = "in_progress" + """The file is currently being processed.""" + COMPLETED = "completed" + """The file has been successfully processed.""" + FAILED = "failed" + """The file has failed to process.""" + CANCELLED = "cancelled" + """The file was cancelled.""" + + +class VectorStoreFileStatusFilter(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Query parameter filter for vector store file retrieval endpoint.""" + + IN_PROGRESS = "in_progress" + """Retrieve only files that are currently being processed""" + COMPLETED = "completed" + """Retrieve only files that have been successfully processed""" + FAILED = "failed" + """Retrieve only 
files that have failed to process""" + CANCELLED = "cancelled" + """Retrieve only files that were cancelled""" + + +class VectorStoreStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Vector store possible status.""" + + EXPIRED = "expired" + """expired status indicates that this vector store has expired and is no longer available for use.""" + IN_PROGRESS = "in_progress" + """in_progress status indicates that this vector store is still processing files.""" + COMPLETED = "completed" + """completed status indicates that this vector store is ready for use.""" diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py new file mode 100644 index 000000000000..6b6d52e9c14e --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py @@ -0,0 +1,6686 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=useless-super-delegation + +import datetime +from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload + +from .. import _model_base +from .._model_base import rest_discriminator, rest_field +from .._vendor import FileType +from ._enums import ( + OpenApiAuthType, + RunStepType, + VectorStoreChunkingStrategyRequestType, + VectorStoreChunkingStrategyResponseType, +) + +if TYPE_CHECKING: + from .. 
class Agent(_model_base.Model):
    """Represents an agent that can call the model and use tools.

    :ivar id: The identifier, which can be referenced in API endpoints. Required.
    :vartype id: str
    :ivar object: The object type, which is always assistant. Required. Default value is
     "assistant".
    :vartype object: str
    :ivar created_at: The Unix timestamp, in seconds, representing when this object was created.
     Required.
    :vartype created_at: ~datetime.datetime
    :ivar name: The name of the agent. Required.
    :vartype name: str
    :ivar description: The description of the agent. Required.
    :vartype description: str
    :ivar model: The ID of the model to use. Required.
    :vartype model: str
    :ivar instructions: The system instructions for the agent to use. Required.
    :vartype instructions: str
    :ivar tools: The collection of tools enabled for the agent. Required.
    :vartype tools: list[~azure.ai.assistants.models.ToolDefinition]
    :ivar tool_resources: A set of resources that are used by the agent's tools. The resources are
     specific to the type of tool. For example, the ``code_interpreter``
     tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector
     store IDs. Required.
    :vartype tool_resources: ~azure.ai.assistants.models.ToolResources
    :ivar temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
     will make the output more random,
     while lower values like 0.2 will make it more focused and deterministic. Required.
    :vartype temperature: float
    :ivar top_p: An alternative to sampling with temperature, called nucleus sampling, where the
     model considers the results of the tokens with top_p probability mass.
     So 0.1 means only the tokens comprising the top 10% probability mass are considered.

     We generally recommend altering this or temperature but not both. Required.
    :vartype top_p: float
    :ivar response_format: The response format of the tool calls used by this agent. Is one of the
     following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
     AgentsApiResponseFormat, ResponseFormatJsonSchemaType
    :vartype response_format: str or str or ~azure.ai.assistants.models.AgentsApiResponseFormatMode
     or ~azure.ai.assistants.models.AgentsApiResponseFormat or
     ~azure.ai.assistants.models.ResponseFormatJsonSchemaType
    :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
     storing additional information about that object in a structured format. Keys may be up to 64
     characters in length and values may be up to 512 characters in length. Required.
    :vartype metadata: dict[str, str]
    """

    # Each rest_field below declares a JSON wire property; the visibility list is the set of
    # operations in which the generator includes the property during (de)serialization.
    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The identifier, which can be referenced in API endpoints. Required."""
    object: Literal["assistant"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The object type, which is always assistant. Required. Default value is \"assistant\"."""
    created_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp, in seconds, representing when this object was created. Required."""
    name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The name of the agent. Required."""
    description: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The description of the agent. Required."""
    model: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The ID of the model to use. Required."""
    instructions: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The system instructions for the agent to use. Required."""
    tools: List["_models.ToolDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The collection of tools enabled for the agent. Required."""
    tool_resources: "_models.ToolResources" = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A set of resources that are used by the agent's tools. The resources are specific to the type
     of tool. For example, the ``code_interpreter``
     tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector
     store IDs. Required."""
    temperature: float = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
     more random,
     while lower values like 0.2 will make it more focused and deterministic. Required."""
    top_p: float = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """An alternative to sampling with temperature, called nucleus sampling, where the model considers
     the results of the tokens with top_p probability mass.
     So 0.1 means only the tokens comprising the top 10% probability mass are considered.

     We generally recommend altering this or temperature but not both. Required."""
    response_format: Optional["_types.AgentsApiResponseFormatOption"] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The response format of the tool calls used by this agent. Is one of the following types: str,
     Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat,
     ResponseFormatJsonSchemaType"""
    metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A set of up to 16 key/value pairs that can be attached to an object, used for storing
     additional information about that object in a structured format. Keys may be up to 64
     characters in length and values may be up to 512 characters in length. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        created_at: datetime.datetime,
        name: str,
        description: str,
        model: str,
        instructions: str,
        tools: List["_models.ToolDefinition"],
        tool_resources: "_models.ToolResources",
        temperature: float,
        top_p: float,
        metadata: Dict[str, str],
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # 'object' is a constant discriminant; it is pinned here rather than accepted as input,
        # which is why it is absent from the keyword overload above.
        self.object: Literal["assistant"] = "assistant"
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["assistant.deleted"] = "assistant.deleted" + + +class AgentsApiResponseFormat(_model_base.Model): + """An object describing the expected output of the model. If ``json_object`` only ``function`` + type ``tools`` are allowed to be passed to the Run. + If ``text`` the model can return text or any value needed. + + :ivar type: Must be one of ``text`` or ``json_object``. Known values are: "text" and + "json_object". + :vartype type: str or ~azure.ai.assistants.models.ResponseFormat + """ + + type: Optional[Union[str, "_models.ResponseFormat"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Must be one of ``text`` or ``json_object``. Known values are: \"text\" and \"json_object\".""" + + @overload + def __init__( + self, + *, + type: Optional[Union[str, "_models.ResponseFormat"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AgentsNamedToolChoice(_model_base.Model): + """Specifies a tool the model should use. Use to force the model to call a specific tool. + + :ivar type: the type of tool. If type is ``function``, the function name must be set. Required. + Known values are: "function", "code_interpreter", "file_search", "bing_grounding", + "fabric_dataagent", "sharepoint_grounding", "azure_ai_search", and "bing_custom_search". + :vartype type: str or ~azure.ai.assistants.models.AgentsNamedToolChoiceType + :ivar function: The name of the function to call. 
class AgentThread(_model_base.Model):
    """Information about a single thread associated with an agent.

    :ivar id: The identifier, which can be referenced in API endpoints. Required.
    :vartype id: str
    :ivar object: The object type, which is always 'thread'. Required. Default value is "thread".
    :vartype object: str
    :ivar created_at: The Unix timestamp, in seconds, representing when this object was created.
     Required.
    :vartype created_at: ~datetime.datetime
    :ivar tool_resources: A set of resources that are made available to the agent's tools in this
     thread. The resources are specific to the type
     of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the
     ``file_search`` tool requires a list
     of vector store IDs. Required.
    :vartype tool_resources: ~azure.ai.assistants.models.ToolResources
    :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
     storing additional information about that object in a structured format. Keys may be up to 64
     characters in length and values may be up to 512 characters in length. Required.
    :vartype metadata: dict[str, str]
    """

    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The identifier, which can be referenced in API endpoints. Required."""
    object: Literal["thread"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The object type, which is always 'thread'. Required. Default value is \"thread\"."""
    # Wire value is an integer epoch; format="unix-timestamp" converts to/from datetime.
    created_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp, in seconds, representing when this object was created. Required."""
    tool_resources: "_models.ToolResources" = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A set of resources that are made available to the agent's tools in this thread. The resources
     are specific to the type
     of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the
     ``file_search`` tool requires a list
     of vector store IDs. Required."""
    metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A set of up to 16 key/value pairs that can be attached to an object, used for storing
     additional information about that object in a structured format. Keys may be up to 64
     characters in length and values may be up to 512 characters in length. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        created_at: datetime.datetime,
        tool_resources: "_models.ToolResources",
        metadata: Dict[str, str],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Constant object-type tag; not settable by callers.
        self.object: Literal["thread"] = "thread"
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread"] = "thread" + + +class AgentThreadCreationOptions(_model_base.Model): + """The details used to create a new agent thread. + + :ivar messages: The initial messages to associate with the new thread. + :vartype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] + :ivar tool_resources: A set of resources that are made available to the agent's tools in this + thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires + a list of vector store IDs. + :vartype tool_resources: ~azure.ai.assistants.models.ToolResources + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + messages: Optional[List["_models.ThreadMessageOptions"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The initial messages to associate with the new thread.""" + tool_resources: Optional["_models.ToolResources"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A set of resources that are made available to the agent's tools in this thread. The resources + are specific to the + type of tool. 
For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires + a list of vector store IDs.""" + metadata: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + messages: Optional[List["_models.ThreadMessageOptions"]] = None, + tool_resources: Optional["_models.ToolResources"] = None, + metadata: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AISearchIndexResource(_model_base.Model): + """A AI Search Index resource. + + :ivar index_connection_id: An index connection id in an IndexResource attached to this agent. + Required. + :vartype index_connection_id: str + :ivar index_name: The name of an index in an IndexResource attached to this agent. Required. + :vartype index_name: str + :ivar query_type: Type of query in an AIIndexResource attached to this agent. Known values are: + "simple", "semantic", "vector", "vector_simple_hybrid", and "vector_semantic_hybrid". + :vartype query_type: str or ~azure.ai.assistants.models.AzureAISearchQueryType + :ivar top_k: Number of documents to retrieve from search and present to the model. + :vartype top_k: int + :ivar filter: Odata filter string for search resource. + :vartype filter: str + """ + + index_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An index connection id in an IndexResource attached to this agent. 
Required.""" + index_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of an index in an IndexResource attached to this agent. Required.""" + query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Type of query in an AIIndexResource attached to this agent. Known values are: \"simple\", + \"semantic\", \"vector\", \"vector_simple_hybrid\", and \"vector_semantic_hybrid\".""" + top_k: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Number of documents to retrieve from search and present to the model.""" + filter: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Odata filter string for search resource.""" + + @overload + def __init__( + self, + *, + index_connection_id: str, + index_name: str, + query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = None, + top_k: Optional[int] = None, + filter: Optional[str] = None, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureAISearchResource(_model_base.Model): + """A set of index resources used by the ``azure_ai_search`` tool. + + :ivar index_list: The indices attached to this agent. There can be a maximum of 1 index + resource attached to the agent. + :vartype index_list: list[~azure.ai.assistants.models.AISearchIndexResource] + """ + + index_list: Optional[List["_models.AISearchIndexResource"]] = rest_field( + name="indexes", visibility=["read", "create", "update", "delete", "query"] + ) + """The indices attached to this agent. 
There can be a maximum of 1 index + resource attached to the agent.""" + + @overload + def __init__( + self, + *, + index_list: Optional[List["_models.AISearchIndexResource"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolDefinition(_model_base.Model): + """An abstract representation of an input tool definition that an agent can use. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AzureAISearchToolDefinition, AzureFunctionToolDefinition, BingCustomSearchToolDefinition, + BingGroundingToolDefinition, CodeInterpreterToolDefinition, MicrosoftFabricToolDefinition, + FileSearchToolDefinition, FunctionToolDefinition, OpenApiToolDefinition, + SharepointToolDefinition + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureAISearchToolDefinition(ToolDefinition, discriminator="azure_ai_search"): + """The input definition information for an Azure AI search tool as used to configure an agent. + + :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is + "azure_ai_search". 
+ :vartype type: str + """ + + type: Literal["azure_ai_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'azure_ai_search'. Required. Default value is + \"azure_ai_search\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="azure_ai_search", **kwargs) + + +class AzureFunctionBinding(_model_base.Model): + """The structure for keeping storage queue name and URI. + + :ivar type: The type of binding, which is always 'storage_queue'. Required. Default value is + "storage_queue". + :vartype type: str + :ivar storage_queue: Storage queue. Required. + :vartype storage_queue: ~azure.ai.assistants.models.AzureFunctionStorageQueue + """ + + type: Literal["storage_queue"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The type of binding, which is always 'storage_queue'. Required. Default value is + \"storage_queue\".""" + storage_queue: "_models.AzureFunctionStorageQueue" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Storage queue. Required.""" + + @overload + def __init__( + self, + *, + storage_queue: "_models.AzureFunctionStorageQueue", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.type: Literal["storage_queue"] = "storage_queue" + + +class AzureFunctionDefinition(_model_base.Model): + """The definition of Azure function. + + :ivar function: The definition of azure function and its parameters. 
Required. + :vartype function: ~azure.ai.assistants.models.FunctionDefinition + :ivar input_binding: Input storage queue. The queue storage trigger runs a function as messages + are added to it. Required. + :vartype input_binding: ~azure.ai.assistants.models.AzureFunctionBinding + :ivar output_binding: Output storage queue. The function writes output to this queue when the + input items are processed. Required. + :vartype output_binding: ~azure.ai.assistants.models.AzureFunctionBinding + """ + + function: "_models.FunctionDefinition" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The definition of azure function and its parameters. Required.""" + input_binding: "_models.AzureFunctionBinding" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Input storage queue. The queue storage trigger runs a function as messages are added to it. + Required.""" + output_binding: "_models.AzureFunctionBinding" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Output storage queue. The function writes output to this queue when the input items are + processed. Required.""" + + @overload + def __init__( + self, + *, + function: "_models.FunctionDefinition", + input_binding: "_models.AzureFunctionBinding", + output_binding: "_models.AzureFunctionBinding", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureFunctionStorageQueue(_model_base.Model): + """The structure for keeping storage queue name and URI. + + :ivar storage_service_endpoint: URI to the Azure Storage Queue service allowing you to + manipulate a queue. Required. + :vartype storage_service_endpoint: str + :ivar queue_name: The name of an Azure function storage queue. Required. 
+ :vartype queue_name: str + """ + + storage_service_endpoint: str = rest_field( + name="queue_service_endpoint", visibility=["read", "create", "update", "delete", "query"] + ) + """URI to the Azure Storage Queue service allowing you to manipulate a queue. Required.""" + queue_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of an Azure function storage queue. Required.""" + + @overload + def __init__( + self, + *, + storage_service_endpoint: str, + queue_name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureFunctionToolDefinition(ToolDefinition, discriminator="azure_function"): + """The input definition information for a azure function tool as used to configure an agent. + + :ivar type: The object type, which is always 'azure_function'. Required. Default value is + "azure_function". + :vartype type: str + :ivar azure_function: The definition of the concrete function that the function tool should + call. Required. + :vartype azure_function: ~azure.ai.assistants.models.AzureFunctionDefinition + """ + + type: Literal["azure_function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'azure_function'. Required. Default value is + \"azure_function\".""" + azure_function: "_models.AzureFunctionDefinition" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The definition of the concrete function that the function tool should call. Required.""" + + @overload + def __init__( + self, + *, + azure_function: "_models.AzureFunctionDefinition", + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="azure_function", **kwargs) + + +class BingCustomSearchToolDefinition(ToolDefinition, discriminator="bing_custom_search"): + """The input definition information for a Bing custom search tool as used to configure an agent. + + :ivar type: The object type, which is always 'bing_custom_search'. Required. Default value is + "bing_custom_search". + :vartype type: str + :ivar bing_custom_search: The list of search configurations used by the bing custom search + tool. Required. + :vartype bing_custom_search: ~azure.ai.assistants.models.SearchConfigurationList + """ + + type: Literal["bing_custom_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'bing_custom_search'. Required. Default value is + \"bing_custom_search\".""" + bing_custom_search: "_models.SearchConfigurationList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The list of search configurations used by the bing custom search tool. Required.""" + + @overload + def __init__( + self, + *, + bing_custom_search: "_models.SearchConfigurationList", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_custom_search", **kwargs) + + +class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding"): + """The input definition information for a bing grounding search tool as used to configure an + agent. + + :ivar type: The object type, which is always 'bing_grounding'. Required. 
Default value is + "bing_grounding". + :vartype type: str + :ivar bing_grounding: The list of connections used by the bing grounding tool. Required. + :vartype bing_grounding: ~azure.ai.assistants.models.ToolConnectionList + """ + + type: Literal["bing_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'bing_grounding'. Required. Default value is + \"bing_grounding\".""" + bing_grounding: "_models.ToolConnectionList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The list of connections used by the bing grounding tool. Required.""" + + @overload + def __init__( + self, + *, + bing_grounding: "_models.ToolConnectionList", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_grounding", **kwargs) + + +class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"): + """The input definition information for a code interpreter tool as used to configure an agent. + + :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is + "code_interpreter". + :vartype type: str + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'code_interpreter'. Required. Default value is + \"code_interpreter\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="code_interpreter", **kwargs) + + +class CodeInterpreterToolResource(_model_base.Model): + """A set of resources that are used by the ``code_interpreter`` tool. + + :ivar file_ids: A list of file IDs made available to the ``code_interpreter`` tool. There can + be a maximum of 20 files + associated with the tool. + :vartype file_ids: list[str] + :ivar data_sources: The data sources to be used. This option is mutually exclusive with the + ``fileIds`` property. + :vartype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + """ + + file_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A list of file IDs made available to the ``code_interpreter`` tool. There can be a maximum of + 20 files + associated with the tool.""" + data_sources: Optional[List["_models.VectorStoreDataSource"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The data sources to be used. This option is mutually exclusive with the ``fileIds`` property.""" + + @overload + def __init__( + self, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List["_models.VectorStoreDataSource"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ErrorResponse(_model_base.Model): + """Common error response for all Azure Resource Manager APIs to return error details for failed + operations. + + :ivar error: The error object. 
+ :vartype error: ~azure.ai.assistants.models.ErrorDetail + """ + + error: Optional["_models.ErrorDetail"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The error object.""" + + @overload + def __init__( + self, + *, + error: Optional["_models.ErrorDetail"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileDeletionStatus(_model_base.Model): + """A status response from a file deletion operation. + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'file'. Required. Default value is "file". + :vartype object: str + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether deletion was successful. Required.""" + object: Literal["file"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'file'. Required. Default value is \"file\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class FileListResponse(_model_base.Model): + """The response data from a file list operation. + + :ivar object: The object type, which is always 'list'. Required. Default value is "list". + :vartype object: str + :ivar data: The files returned for the request. Required. + :vartype data: list[~azure.ai.assistants.models.OpenAIFile] + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'list'. Required. Default value is \"list\".""" + data: List["_models.OpenAIFile"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The files returned for the request. Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.OpenAIFile"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class FileSearchRankingOptions(_model_base.Model): + """Ranking options for file search. + + :ivar ranker: File search ranker. Required. + :vartype ranker: str + :ivar score_threshold: Ranker search threshold. Required. + :vartype score_threshold: float + """ + + ranker: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """File search ranker. Required.""" + score_threshold: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Ranker search threshold. Required.""" + + @overload + def __init__( + self, + *, + ranker: str, + score_threshold: float, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileSearchToolCallContent(_model_base.Model): + """The file search result content object. + + :ivar type: The type of the content. Required. Default value is "text". + :vartype type: str + :ivar text: The text content of the file. Required. + :vartype text: str + """ + + type: Literal["text"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The type of the content. Required. Default value is \"text\".""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text content of the file. Required.""" + + @overload + def __init__( + self, + *, + text: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.type: Literal["text"] = "text" + + +class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): + """The input definition information for a file search tool as used to configure an agent. + + :ivar type: The object type, which is always 'file_search'. Required. Default value is + "file_search". + :vartype type: str + :ivar file_search: Options overrides for the file search tool. + :vartype file_search: ~azure.ai.assistants.models.FileSearchToolDefinitionDetails + """ + + type: Literal["file_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'file_search'. Required. 
Default value is \"file_search\".""" + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Options overrides for the file search tool.""" + + @overload + def __init__( + self, + *, + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_search", **kwargs) + + +class FileSearchToolDefinitionDetails(_model_base.Model): + """Options overrides for the file search tool. + + :ivar max_num_results: The maximum number of results the file search tool should output. The + default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 + inclusive. + + Note that the file search tool may output fewer than ``max_num_results`` results. See the file + search tool documentation for more information. + :vartype max_num_results: int + :ivar ranking_options: Ranking options for file search. + :vartype ranking_options: ~azure.ai.assistants.models.FileSearchRankingOptions + """ + + max_num_results: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The maximum number of results the file search tool should output. The default is 20 for gpt-4* + models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than ``max_num_results`` results. 
See the file + search tool documentation for more information.""" + ranking_options: Optional["_models.FileSearchRankingOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Ranking options for file search.""" + + @overload + def __init__( + self, + *, + max_num_results: Optional[int] = None, + ranking_options: Optional["_models.FileSearchRankingOptions"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileSearchToolResource(_model_base.Model): + """A set of resources that are used by the ``file_search`` tool. + + :ivar vector_store_ids: The ID of the vector store attached to this agent. There can be a + maximum of 1 vector + store attached to the agent. + :vartype vector_store_ids: list[str] + :ivar vector_stores: The list of vector store configuration objects from Azure. + This list is limited to one element. + The only element of this list contains the list of azure asset IDs used by the search tool. + :vartype vector_stores: list[~azure.ai.assistants.models.VectorStoreConfigurations] + """ + + vector_store_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the vector store attached to this agent. There can be a maximum of 1 vector + store attached to the agent.""" + vector_stores: Optional[List["_models.VectorStoreConfigurations"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The list of vector store configuration objects from Azure. + This list is limited to one element. 
+ The only element of this list contains the list of Azure asset IDs used by the search tool.""" + + @overload + def __init__( + self, + *, + vector_store_ids: Optional[List[str]] = None, + vector_stores: Optional[List["_models.VectorStoreConfigurations"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FunctionDefinition(_model_base.Model): + """The input definition information for a function. + + :ivar name: The name of the function to be called. Required. + :vartype name: str + :ivar description: A description of what the function does, used by the model to choose when + and how to call the function. + :vartype description: str + :ivar parameters: The parameters the function accepts, described as a JSON Schema object. + Required. + :vartype parameters: any + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the function to be called. Required.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A description of what the function does, used by the model to choose when and how to call the + function.""" + parameters: Any = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The parameters the function accepts, described as a JSON Schema object. Required.""" + + @overload + def __init__( + self, + *, + name: str, + parameters: Any, + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FunctionName(_model_base.Model): + """The function name that will be used, if using the ``function`` tool. + + :ivar name: The name of the function to call. Required. + :vartype name: str + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the function to call. Required.""" + + @overload + def __init__( + self, + *, + name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FunctionToolDefinition(ToolDefinition, discriminator="function"): + """The input definition information for a function tool as used to configure an agent. + + :ivar type: The object type, which is always 'function'. Required. Default value is "function". + :vartype type: str + :ivar function: The definition of the concrete function that the function tool should call. + Required. + :vartype function: ~azure.ai.assistants.models.FunctionDefinition + """ + + type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'function'. Required. Default value is \"function\".""" + function: "_models.FunctionDefinition" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The definition of the concrete function that the function tool should call. Required.""" + + @overload + def __init__( + self, + *, + function: "_models.FunctionDefinition", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="function", **kwargs) + + +class IncompleteRunDetails(_model_base.Model): + """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. + + :ivar reason: The reason why the run is incomplete. This indicates which specific token limit + was reached during the run. Required. Known values are: "max_completion_tokens" and + "max_prompt_tokens". + :vartype reason: str or ~azure.ai.assistants.models.IncompleteDetailsReason + """ + + reason: Union[str, "_models.IncompleteDetailsReason"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The reason why the run is incomplete. This indicates which specific token limit was reached + during the run. Required. Known values are: \"max_completion_tokens\" and + \"max_prompt_tokens\".""" + + @overload + def __init__( + self, + *, + reason: Union[str, "_models.IncompleteDetailsReason"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageAttachment(_model_base.Model): + """This describes to which tools a file has been attached. + + :ivar file_id: The ID of the file to attach to the message. + :vartype file_id: str + :ivar data_source: Azure asset ID. + :vartype data_source: ~azure.ai.assistants.models.VectorStoreDataSource + :ivar tools: The tools to add to this file. Required. 
+ :vartype tools: list[~azure.ai.assistants.models.CodeInterpreterToolDefinition or + ~azure.ai.assistants.models.FileSearchToolDefinition] + """ + + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the file to attach to the message.""" + data_source: Optional["_models.VectorStoreDataSource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Azure asset ID.""" + tools: List["_types.MessageAttachmentToolDefinition"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The tools to add to this file. Required.""" + + @overload + def __init__( + self, + *, + tools: List["_types.MessageAttachmentToolDefinition"], + file_id: Optional[str] = None, + data_source: Optional["_models.VectorStoreDataSource"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageContent(_model_base.Model): + """An abstract representation of a single item of thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageImageFileContent, MessageTextContent + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDelta(_model_base.Model): + """Represents the typed 'delta' payload within a streaming message delta chunk. + + :ivar role: The entity that produced the message. Required. Known values are: "user" and + "assistant". + :vartype role: str or ~azure.ai.assistants.models.MessageRole + :ivar content: The content of the message as an array of text and/or images. Required. + :vartype content: list[~azure.ai.assistants.models.MessageDeltaContent] + """ + + role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The entity that produced the message. Required. Known values are: \"user\" and \"assistant\".""" + content: List["_models.MessageDeltaContent"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The content of the message as an array of text and/or images. Required.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: List["_models.MessageDeltaContent"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaChunk(_model_base.Model): + """Represents a message delta i.e. any changed fields on a message during streaming. + + :ivar id: The identifier of the message, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``thread.message.delta``. Required. Default + value is "thread.message.delta". + :vartype object: str + :ivar delta: The delta containing the fields that have changed on the Message. Required. 
+ :vartype delta: ~azure.ai.assistants.models.MessageDelta + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier of the message, which can be referenced in API endpoints. Required.""" + object: Literal["thread.message.delta"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always ``thread.message.delta``. Required. Default value is + \"thread.message.delta\".""" + delta: "_models.MessageDelta" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The delta containing the fields that have changed on the Message. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + delta: "_models.MessageDelta", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.message.delta"] = "thread.message.delta" + + +class MessageDeltaContent(_model_base.Model): + """The abstract base representation of a partial streamed message content payload. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageDeltaImageFileContent, MessageDeltaTextContent + + :ivar index: The index of the content part of the message. Required. + :vartype index: int + :ivar type: The type of content for this content part. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The index of the content part of the message. 
Required.""" + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The type of content for this content part. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + index: int, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaImageFileContent(MessageDeltaContent, discriminator="image_file"): + """Represents a streamed image file content part within a streaming message delta chunk. + + :ivar index: The index of the content part of the message. Required. + :vartype index: int + :ivar type: The type of content for this content part, which is always "image_file". Required. + Default value is "image_file". + :vartype type: str + :ivar image_file: The image_file data. + :vartype image_file: ~azure.ai.assistants.models.MessageDeltaImageFileContentObject + """ + + type: Literal["image_file"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The type of content for this content part, which is always \"image_file\". Required. Default + value is \"image_file\".""" + image_file: Optional["_models.MessageDeltaImageFileContentObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The image_file data.""" + + @overload + def __init__( + self, + *, + index: int, + image_file: Optional["_models.MessageDeltaImageFileContentObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image_file", **kwargs) + + +class MessageDeltaImageFileContentObject(_model_base.Model): + """Represents the 'image_file' payload within streaming image file content. + + :ivar file_id: The file ID of the image in the message content. + :vartype file_id: str + """ + + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The file ID of the image in the message content.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextAnnotation(_model_base.Model): + """The abstract base representation of a streamed text content part's text annotation. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageDeltaTextFileCitationAnnotation, MessageDeltaTextFilePathAnnotation, + MessageDeltaTextUrlCitationAnnotation + + :ivar index: The index of the annotation within a text content part. Required. + :vartype index: int + :ivar type: The type of the text content annotation. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The index of the annotation within a text content part. Required.""" + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The type of the text content annotation. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + index: int, + type: str, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextContent(MessageDeltaContent, discriminator="text"): + """Represents a streamed text content part within a streaming message delta chunk. + + :ivar index: The index of the content part of the message. Required. + :vartype index: int + :ivar type: The type of content for this content part, which is always "text". Required. + Default value is "text". + :vartype type: str + :ivar text: The text content details. + :vartype text: ~azure.ai.assistants.models.MessageDeltaTextContentObject + """ + + type: Literal["text"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The type of content for this content part, which is always \"text\". Required. Default value + is \"text\".""" + text: Optional["_models.MessageDeltaTextContentObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The text content details.""" + + @overload + def __init__( + self, + *, + index: int, + text: Optional["_models.MessageDeltaTextContentObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="text", **kwargs) + + +class MessageDeltaTextContentObject(_model_base.Model): + """Represents the data of a streamed text content part within a streaming message delta chunk. + + :ivar value: The data that makes up the text. + :vartype value: str + :ivar annotations: Annotations for the text. 
+ :vartype annotations: list[~azure.ai.assistants.models.MessageDeltaTextAnnotation] + """ + + value: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The data that makes up the text.""" + annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Annotations for the text.""" + + @overload + def __init__( + self, + *, + value: Optional[str] = None, + annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextFileCitationAnnotation(MessageDeltaTextAnnotation, discriminator="file_citation"): + """Represents a streamed file citation applied to a streaming text content part. + + :ivar index: The index of the annotation within a text content part. Required. + :vartype index: int + :ivar type: The type of the text content annotation, which is always "file_citation". + Required. Default value is "file_citation". + :vartype type: str + :ivar file_citation: The file citation information. + :vartype file_citation: + ~azure.ai.assistants.models.MessageDeltaTextFileCitationAnnotationObject + :ivar text: The text in the message content that needs to be replaced. + :vartype text: str + :ivar start_index: The start index of this annotation in the content text. + :vartype start_index: int + :ivar end_index: The end index of this annotation in the content text. + :vartype end_index: int + """ + + type: Literal["file_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The type of the text content annotation, which is always \"file_citation\". Required. 
Default + value is \"file_citation\".""" + file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The file citation information.""" + text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text in the message content that needs to be replaced.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The start index of this annotation in the content text.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The end index of this annotation in the content text.""" + + @overload + def __init__( + self, + *, + index: int, + file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = None, + text: Optional[str] = None, + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_citation", **kwargs) + + +class MessageDeltaTextFileCitationAnnotationObject(_model_base.Model): # pylint: disable=name-too-long + """Represents the data of a streamed file citation as applied to a streaming text content part. + + :ivar file_id: The ID of the specific file the citation is from. + :vartype file_id: str + :ivar quote: The specific quote in the cited file. 
+ :vartype quote: str + """ + + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the specific file the citation is from.""" + quote: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The specific quote in the cited file.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + quote: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextFilePathAnnotation(MessageDeltaTextAnnotation, discriminator="file_path"): + """Represents a streamed file path annotation applied to a streaming text content part. + + :ivar index: The index of the annotation within a text content part. Required. + :vartype index: int + :ivar type: The type of the text content annotation, which is always "file_path". Required. + Default value is "file_path". + :vartype type: str + :ivar file_path: The file path information. + :vartype file_path: ~azure.ai.assistants.models.MessageDeltaTextFilePathAnnotationObject + :ivar start_index: The start index of this annotation in the content text. + :vartype start_index: int + :ivar end_index: The end index of this annotation in the content text. + :vartype end_index: int + :ivar text: The text in the message content that needs to be replaced. + :vartype text: str + """ + + type: Literal["file_path"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The type of the text content annotation, which is always \"file_path\". Required. 
Default + value is \"file_path\".""" + file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The file path information.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The start index of this annotation in the content text.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The end index of this annotation in the content text.""" + text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text in the message content that needs to be replaced.""" + + @overload + def __init__( + self, + *, + index: int, + file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = None, + start_index: Optional[int] = None, + end_index: Optional[int] = None, + text: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_path", **kwargs) + + +class MessageDeltaTextFilePathAnnotationObject(_model_base.Model): + """Represents the data of a streamed file path annotation as applied to a streaming text content + part. + + :ivar file_id: The file ID for the annotation. + :vartype file_id: str + """ + + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The file ID for the annotation.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextUrlCitationAnnotation(MessageDeltaTextAnnotation, discriminator="url_citation"): + """A citation within the message that points to a specific URL associated with the message. + Generated when the agent uses tools such as 'bing_grounding' to search the Internet. + + :ivar index: The index of the annotation within a text content part. Required. + :vartype index: int + :ivar type: The object type, which is always 'url_citation'. Required. Default value is + "url_citation". + :vartype type: str + :ivar url_citation: The details of the URL citation. Required. + :vartype url_citation: ~azure.ai.assistants.models.MessageDeltaTextUrlCitationDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["url_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'url_citation'. Required. Default value is \"url_citation\".""" + url_citation: "_models.MessageDeltaTextUrlCitationDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the URL citation. 
Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + index: int, + url_citation: "_models.MessageDeltaTextUrlCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="url_citation", **kwargs) + + +class MessageDeltaTextUrlCitationDetails(_model_base.Model): + """A representation of a URL citation, as used in text thread message content. + + :ivar url: The URL associated with this citation. Required. + :vartype url: str + :ivar title: The title of the URL. + :vartype title: str + """ + + url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The URL associated with this citation. Required.""" + title: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The title of the URL.""" + + @overload + def __init__( + self, + *, + url: str, + title: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageImageFileContent(MessageContent, discriminator="image_file"): + """A representation of image file content in a thread message. + + :ivar type: The object type, which is always 'image_file'. Required. 
Default value is + "image_file". + :vartype type: str + :ivar image_file: The image file for this thread message content item. Required. + :vartype image_file: ~azure.ai.assistants.models.MessageImageFileDetails + """ + + type: Literal["image_file"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'image_file'. Required. Default value is \"image_file\".""" + image_file: "_models.MessageImageFileDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The image file for this thread message content item. Required.""" + + @overload + def __init__( + self, + *, + image_file: "_models.MessageImageFileDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image_file", **kwargs) + + +class MessageImageFileDetails(_model_base.Model): + """An image reference, as represented in thread message content. + + :ivar file_id: The ID for the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID for the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageIncompleteDetails(_model_base.Model): + """Information providing additional detail about a message entering an incomplete status. 
+ + :ivar reason: The provided reason describing why the message was marked as incomplete. + Required. Known values are: "content_filter", "max_tokens", "run_cancelled", "run_failed", and + "run_expired". + :vartype reason: str or ~azure.ai.assistants.models.MessageIncompleteDetailsReason + """ + + reason: Union[str, "_models.MessageIncompleteDetailsReason"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The provided reason describing why the message was marked as incomplete. Required. Known values + are: \"content_filter\", \"max_tokens\", \"run_cancelled\", \"run_failed\", and + \"run_expired\".""" + + @overload + def __init__( + self, + *, + reason: Union[str, "_models.MessageIncompleteDetailsReason"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextAnnotation(_model_base.Model): + """An abstract representation of an annotation to text thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation, + MessageTextUrlCitationAnnotation + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Default value is None.""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The textual content associated with this text annotation item. 
Required.""" + + @overload + def __init__( + self, + *, + type: str, + text: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextContent(MessageContent, discriminator="text"): + """A representation of a textual item of thread message content. + + :ivar type: The object type, which is always 'text'. Required. Default value is "text". + :vartype type: str + :ivar text: The text and associated annotations for this thread message content item. Required. + :vartype text: ~azure.ai.assistants.models.MessageTextDetails + """ + + type: Literal["text"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'text'. Required. Default value is \"text\".""" + text: "_models.MessageTextDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text and associated annotations for this thread message content item. Required.""" + + @overload + def __init__( + self, + *, + text: "_models.MessageTextDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="text", **kwargs) + + +class MessageTextDetails(_model_base.Model): + """The text and associated annotations for a single item of agent thread message content. + + :ivar value: The text data. Required. + :vartype value: str + :ivar annotations: A list of annotations associated with this text. Required. 
+ :vartype annotations: list[~azure.ai.assistants.models.MessageTextAnnotation] + """ + + value: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text data. Required.""" + annotations: List["_models.MessageTextAnnotation"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A list of annotations associated with this text. Required.""" + + @overload + def __init__( + self, + *, + value: str, + annotations: List["_models.MessageTextAnnotation"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="file_citation"): + """A citation within the message that points to a specific quote from a specific File associated + with the agent or the message. Generated when the agent uses the 'file_search' tool to search + files. + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'file_citation'. Required. Default value is + "file_citation". + :vartype type: str + :ivar file_citation: A citation within the message that points to a specific quote from a + specific file. + Generated when the agent uses the "file_search" tool to search files. Required. + :vartype file_citation: ~azure.ai.assistants.models.MessageTextFileCitationDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. 
+ :vartype end_index: int + """ + + type: Literal["file_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'file_citation'. Required. Default value is \"file_citation\".""" + file_citation: "_models.MessageTextFileCitationDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A citation within the message that points to a specific quote from a specific file. + Generated when the agent uses the \"file_search\" tool to search files. Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + file_citation: "_models.MessageTextFileCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_citation", **kwargs) + + +class MessageTextFileCitationDetails(_model_base.Model): + """A representation of a file-based text citation, as used in a file-based annotation of text + thread message content. + + :ivar file_id: The ID of the file associated with this citation. Required. + :vartype file_id: str + :ivar quote: The specific quote cited in the associated file. Required. + :vartype quote: str + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the file associated with this citation. 
Required.""" + quote: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The specific quote cited in the associated file. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + quote: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_path"): + """A citation within the message that points to a file located at a specific path. + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'file_path'. Required. Default value is + "file_path". + :vartype type: str + :ivar file_path: A URL for the file that's generated when the agent used the code_interpreter + tool to generate a file. Required. + :vartype file_path: ~azure.ai.assistants.models.MessageTextFilePathDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["file_path"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'file_path'. Required. Default value is \"file_path\".""" + file_path: "_models.MessageTextFilePathDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A URL for the file that's generated when the agent used the code_interpreter tool to generate a + file. 
Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + file_path: "_models.MessageTextFilePathDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_path", **kwargs) + + +class MessageTextFilePathDetails(_model_base.Model): + """An encapsulation of the ID of the file referenced by a file path annotation in message content. + + :ivar file_id: The ID of the specific file that the citation is from. Required. + :vartype file_id: str + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the specific file that the citation is from. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextUrlCitationAnnotation(MessageTextAnnotation, discriminator="url_citation"): + """A citation within the message that points to a specific URL associated with the message. + Generated when the agent uses tools such as 'bing_grounding' to search the Internet. + + :ivar text: The textual content associated with this text annotation item. Required. 
+ :vartype text: str + :ivar type: The object type, which is always 'url_citation'. Required. Default value is + "url_citation". + :vartype type: str + :ivar url_citation: The details of the URL citation. Required. + :vartype url_citation: ~azure.ai.assistants.models.MessageTextUrlCitationDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["url_citation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'url_citation'. Required. Default value is \"url_citation\".""" + url_citation: "_models.MessageTextUrlCitationDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the URL citation. Required.""" + start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + url_citation: "_models.MessageTextUrlCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="url_citation", **kwargs) + + +class MessageTextUrlCitationDetails(_model_base.Model): + """A representation of a URL citation, as used in text thread message content. + + :ivar url: The URL associated with this citation. Required. 
+ :vartype url: str + :ivar title: The title of the URL. + :vartype title: str + """ + + url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The URL associated with this citation. Required.""" + title: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The title of the URL.""" + + @overload + def __init__( + self, + *, + url: str, + title: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="fabric_dataagent"): + """The input definition information for a Microsoft Fabric tool as used to configure an agent. + + :ivar type: The object type, which is always 'fabric_dataagent'. Required. Default value is + "fabric_dataagent". + :vartype type: str + :ivar fabric_dataagent: The list of connections used by the Microsoft Fabric tool. Required. + :vartype fabric_dataagent: ~azure.ai.assistants.models.ToolConnectionList + """ + + type: Literal["fabric_dataagent"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'fabric_dataagent'. Required. Default value is + \"fabric_dataagent\".""" + fabric_dataagent: "_models.ToolConnectionList" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The list of connections used by the Microsoft Fabric tool. Required.""" + + @overload + def __init__( + self, + *, + fabric_dataagent: "_models.ToolConnectionList", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="fabric_dataagent", **kwargs) + + +class OpenAIFile(_model_base.Model): + """Represents a file that has been uploaded to the service for use by agents and tools. + + :ivar object: The object type, which is always 'file'. Required. Default value is "file". + :vartype object: str + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar bytes: The size of the file, in bytes. Required. + :vartype bytes: int + :ivar filename: The name of the file. Required. + :vartype filename: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar purpose: The intended purpose of a file. Required. Known values are: "fine-tune", + "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". + :vartype purpose: str or ~azure.ai.assistants.models.FilePurpose + :ivar status: The state of the file. This field is available in Azure OpenAI only. Known values + are: "uploaded", "pending", "running", "processed", "error", "deleting", and "deleted". + :vartype status: str or ~azure.ai.assistants.models.FileState + :ivar status_details: The error message with details in case processing of this file failed. + This field is available in Azure OpenAI only. + :vartype status_details: str + """ + + object: Literal["file"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'file'. Required. Default value is \"file\".""" + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + bytes: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The size of the file, in bytes. 
Required.""" + filename: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the file. Required.""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + purpose: Union[str, "_models.FilePurpose"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The intended purpose of a file. Required. Known values are: \"fine-tune\", + \"fine-tune-results\", \"assistants\", \"assistants_output\", \"batch\", \"batch_output\", and + \"vision\".""" + status: Optional[Union[str, "_models.FileState"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The state of the file. This field is available in Azure OpenAI only. Known values are: + \"uploaded\", \"pending\", \"running\", \"processed\", \"error\", \"deleting\", and + \"deleted\".""" + status_details: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The error message with details in case processing of this file failed. This field is available + in Azure OpenAI only.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bytes: int, + filename: str, + created_at: datetime.datetime, + purpose: Union[str, "_models.FilePurpose"], + status: Optional[Union[str, "_models.FileState"]] = None, + status_details: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class OpenAIPageableListOfAgent(_model_base.Model): + """The response data for a requested list of items. 
+ + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.assistants.models.Agent] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.Agent"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The requested list of items. Required.""" + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first ID represented in this list. Required.""" + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last ID represented in this list. Required.""" + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.Agent"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfRunStep(_model_base.Model): + """The response data for a requested list of items. 
+ + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.assistants.models.RunStep] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.RunStep"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The requested list of items. Required.""" + first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The first ID represented in this list. Required.""" + last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last ID represented in this list. Required.""" + has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.RunStep"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfThreadMessage(_model_base.Model): + """The response data for a requested list of items. 

    :ivar object: The object type, which is always list. Required. Default value is "list".
    :vartype object: str
    :ivar data: The requested list of items. Required.
    :vartype data: list[~azure.ai.assistants.models.ThreadMessage]
    :ivar first_id: The first ID represented in this list. Required.
    :vartype first_id: str
    :ivar last_id: The last ID represented in this list. Required.
    :vartype last_id: str
    :ivar has_more: A value indicating whether there are additional values available not captured
     in this list. Required.
    :vartype has_more: bool
    """

    object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The object type, which is always list. Required. Default value is \"list\"."""
    data: List["_models.ThreadMessage"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The requested list of items. Required."""
    first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The first ID represented in this list. Required."""
    last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The last ID represented in this list. Required."""
    has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A value indicating whether there are additional values available not captured in this list.
     Required."""

    @overload
    def __init__(
        self,
        *,
        data: List["_models.ThreadMessage"],
        first_id: str,
        last_id: str,
        has_more: bool,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # "object" is a fixed wire-protocol constant; set here rather than accepted from callers.
        self.object: Literal["list"] = "list"


class OpenAIPageableListOfThreadRun(_model_base.Model):
    """The response data for a requested list of items.

    :ivar object: The object type, which is always list. Required. Default value is "list".
    :vartype object: str
    :ivar data: The requested list of items. Required.
    :vartype data: list[~azure.ai.assistants.models.ThreadRun]
    :ivar first_id: The first ID represented in this list. Required.
    :vartype first_id: str
    :ivar last_id: The last ID represented in this list. Required.
    :vartype last_id: str
    :ivar has_more: A value indicating whether there are additional values available not captured
     in this list. Required.
    :vartype has_more: bool
    """

    object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The object type, which is always list. Required. Default value is \"list\"."""
    data: List["_models.ThreadRun"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The requested list of items. Required."""
    first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The first ID represented in this list. Required."""
    last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The last ID represented in this list. Required."""
    has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A value indicating whether there are additional values available not captured in this list.
     Required."""

    @overload
    def __init__(
        self,
        *,
        data: List["_models.ThreadRun"],
        first_id: str,
        last_id: str,
        has_more: bool,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # "object" is a fixed wire-protocol constant; set here rather than accepted from callers.
        self.object: Literal["list"] = "list"


class OpenAIPageableListOfVectorStore(_model_base.Model):
    """The response data for a requested list of items.

    :ivar object: The object type, which is always list. Required. Default value is "list".
    :vartype object: str
    :ivar data: The requested list of items. Required.
    :vartype data: list[~azure.ai.assistants.models.VectorStore]
    :ivar first_id: The first ID represented in this list. Required.
    :vartype first_id: str
    :ivar last_id: The last ID represented in this list. Required.
    :vartype last_id: str
    :ivar has_more: A value indicating whether there are additional values available not captured
     in this list. Required.
    :vartype has_more: bool
    """

    object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The object type, which is always list. Required. Default value is \"list\"."""
    data: List["_models.VectorStore"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The requested list of items. Required."""
    first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The first ID represented in this list. Required."""
    last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The last ID represented in this list. Required."""
    has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A value indicating whether there are additional values available not captured in this list.
     Required."""

    @overload
    def __init__(
        self,
        *,
        data: List["_models.VectorStore"],
        first_id: str,
        last_id: str,
        has_more: bool,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # "object" is a fixed wire-protocol constant; set here rather than accepted from callers.
        self.object: Literal["list"] = "list"


class OpenAIPageableListOfVectorStoreFile(_model_base.Model):
    """The response data for a requested list of items.

    :ivar object: The object type, which is always list. Required. Default value is "list".
    :vartype object: str
    :ivar data: The requested list of items. Required.
    :vartype data: list[~azure.ai.assistants.models.VectorStoreFile]
    :ivar first_id: The first ID represented in this list. Required.
    :vartype first_id: str
    :ivar last_id: The last ID represented in this list. Required.
    :vartype last_id: str
    :ivar has_more: A value indicating whether there are additional values available not captured
     in this list. Required.
    :vartype has_more: bool
    """

    object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The object type, which is always list. Required. Default value is \"list\"."""
    data: List["_models.VectorStoreFile"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The requested list of items. Required."""
    first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The first ID represented in this list. Required."""
    last_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The last ID represented in this list. Required."""
    has_more: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A value indicating whether there are additional values available not captured in this list.
     Required."""

    @overload
    def __init__(
        self,
        *,
        data: List["_models.VectorStoreFile"],
        first_id: str,
        last_id: str,
        has_more: bool,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # "object" is a fixed wire-protocol constant; set here rather than accepted from callers.
        self.object: Literal["list"] = "list"


class OpenApiAuthDetails(_model_base.Model):
    """authentication details for OpenApiFunctionDefinition.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    OpenApiAnonymousAuthDetails, OpenApiConnectionAuthDetails, OpenApiManagedAuthDetails

    :ivar type: The type of authentication, must be anonymous/connection/managed_identity.
     Required. Known values are: "anonymous", "connection", and "managed_identity".
    :vartype type: str or ~azure.ai.assistants.models.OpenApiAuthType
    """

    # Polymorphic base: __mapping__ is populated by subclasses keyed on the "type" discriminator.
    __mapping__: Dict[str, _model_base.Model] = {}
    type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
    """The type of authentication, must be anonymous/connection/managed_identity. Required. Known
     values are: \"anonymous\", \"connection\", and \"managed_identity\"."""

    @overload
    def __init__(
        self,
        *,
        type: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class OpenApiAnonymousAuthDetails(OpenApiAuthDetails, discriminator="anonymous"):
    """Security details for OpenApi anonymous authentication.

    :ivar type: The object type, which is always 'anonymous'. Required.
    :vartype type: str or ~azure.ai.assistants.models.ANONYMOUS
    """

    type: Literal[OpenApiAuthType.ANONYMOUS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'anonymous'. Required."""

    @overload
    def __init__(
        self,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator for this subclass; callers never pass "type" themselves.
        super().__init__(*args, type=OpenApiAuthType.ANONYMOUS, **kwargs)


class OpenApiConnectionAuthDetails(OpenApiAuthDetails, discriminator="connection"):
    """Security details for OpenApi connection authentication.

    :ivar type: The object type, which is always 'connection'. Required.
    :vartype type: str or ~azure.ai.assistants.models.CONNECTION
    :ivar security_scheme: Connection auth security details. Required.
    :vartype security_scheme: ~azure.ai.assistants.models.OpenApiConnectionSecurityScheme
    """

    type: Literal[OpenApiAuthType.CONNECTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'connection'. Required."""
    security_scheme: "_models.OpenApiConnectionSecurityScheme" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """Connection auth security details. Required."""

    @overload
    def __init__(
        self,
        *,
        security_scheme: "_models.OpenApiConnectionSecurityScheme",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator for this subclass; callers never pass "type" themselves.
        super().__init__(*args, type=OpenApiAuthType.CONNECTION, **kwargs)


class OpenApiConnectionSecurityScheme(_model_base.Model):
    """Security scheme for OpenApi managed_identity authentication.

    :ivar connection_id: Connection id for Connection auth type. Required.
    :vartype connection_id: str
    """

    connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Connection id for Connection auth type. Required."""

    @overload
    def __init__(
        self,
        *,
        connection_id: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class OpenApiFunctionDefinition(_model_base.Model):
    """The input definition information for an openapi function.

    :ivar name: The name of the function to be called. Required.
    :vartype name: str
    :ivar description: A description of what the function does, used by the model to choose when
     and how to call the function.
    :vartype description: str
    :ivar spec: The openapi function shape, described as a JSON Schema object. Required.
    :vartype spec: any
    :ivar auth: Open API authentication details. Required.
    :vartype auth: ~azure.ai.assistants.models.OpenApiAuthDetails
    :ivar default_params: List of OpenAPI spec parameters that will use user-provided defaults.
    :vartype default_params: list[str]
    :ivar functions: List of functions returned in response.
    :vartype functions: list[~azure.ai.assistants.models.FunctionDefinition]
    """

    name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The name of the function to be called. Required."""
    description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A description of what the function does, used by the model to choose when and how to call the
     function."""
    spec: Any = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The openapi function shape, described as a JSON Schema object. Required."""
    auth: "_models.OpenApiAuthDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Open API authentication details. Required."""
    default_params: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """List of OpenAPI spec parameters that will use user-provided defaults."""
    functions: Optional[List["_models.FunctionDefinition"]] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """List of functions returned in response."""

    @overload
    def __init__(
        self,
        *,
        name: str,
        spec: Any,
        auth: "_models.OpenApiAuthDetails",
        description: Optional[str] = None,
        default_params: Optional[List[str]] = None,
        functions: Optional[List["_models.FunctionDefinition"]] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class OpenApiManagedAuthDetails(OpenApiAuthDetails, discriminator="managed_identity"):
    """Security details for OpenApi managed_identity authentication.

    :ivar type: The object type, which is always 'managed_identity'. Required.
    :vartype type: str or ~azure.ai.assistants.models.MANAGED_IDENTITY
    :ivar security_scheme: Connection auth security details. Required.
    :vartype security_scheme: ~azure.ai.assistants.models.OpenApiManagedSecurityScheme
    """

    type: Literal[OpenApiAuthType.MANAGED_IDENTITY] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'managed_identity'. Required."""
    security_scheme: "_models.OpenApiManagedSecurityScheme" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """Connection auth security details. Required."""

    @overload
    def __init__(
        self,
        *,
        security_scheme: "_models.OpenApiManagedSecurityScheme",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator for this subclass; callers never pass "type" themselves.
        super().__init__(*args, type=OpenApiAuthType.MANAGED_IDENTITY, **kwargs)


class OpenApiManagedSecurityScheme(_model_base.Model):
    """Security scheme for OpenApi managed_identity authentication.

    :ivar audience: Authentication scope for managed_identity auth type. Required.
    :vartype audience: str
    """

    audience: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Authentication scope for managed_identity auth type. Required."""

    @overload
    def __init__(
        self,
        *,
        audience: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class OpenApiToolDefinition(ToolDefinition, discriminator="openapi"):
    """The input definition information for an OpenAPI tool as used to configure an agent.

    :ivar type: The object type, which is always 'openapi'. Required. Default value is "openapi".
    :vartype type: str
    :ivar openapi: The openapi function definition. Required.
    :vartype openapi: ~azure.ai.assistants.models.OpenApiFunctionDefinition
    """

    type: Literal["openapi"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'openapi'. Required. Default value is \"openapi\"."""
    openapi: "_models.OpenApiFunctionDefinition" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The openapi function definition. Required."""

    @overload
    def __init__(
        self,
        *,
        openapi: "_models.OpenApiFunctionDefinition",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator for this subclass; callers never pass "type" themselves.
        super().__init__(*args, type="openapi", **kwargs)


class RequiredAction(_model_base.Model):
    """An abstract representation of a required action for an agent thread run to continue.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    SubmitToolOutputsAction

    :ivar type: The object type. Required. Default value is None.
    :vartype type: str
    """

    # Polymorphic base: __mapping__ is populated by subclasses keyed on the "type" discriminator.
    __mapping__: Dict[str, _model_base.Model] = {}
    type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
    """The object type. Required. Default value is None."""

    @overload
    def __init__(
        self,
        *,
        type: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RequiredToolCall(_model_base.Model):
    """An abstract representation of a tool invocation needed by the model to continue a run.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    RequiredFunctionToolCall

    :ivar type: The object type for the required tool call. Required. Default value is None.
    :vartype type: str
    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
     Required.
    :vartype id: str
    """

    # Polymorphic base: __mapping__ is populated by subclasses keyed on the "type" discriminator.
    __mapping__: Dict[str, _model_base.Model] = {}
    type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
    """The object type for the required tool call. Required. Default value is None."""
    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The ID of the tool call. This ID must be referenced when submitting tool outputs. Required."""

    @overload
    def __init__(
        self,
        *,
        type: str,
        id: str,  # pylint: disable=redefined-builtin
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RequiredFunctionToolCall(RequiredToolCall, discriminator="function"):
    """A representation of a requested call to a function tool, needed by the model to continue
    evaluation of a run.

    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
     Required.
    :vartype id: str
    :ivar type: The object type of the required tool call. Always 'function' for function tools.
     Required. Default value is "function".
    :vartype type: str
    :ivar function: Detailed information about the function to be executed by the tool that
     includes name and arguments. Required.
    :vartype function: ~azure.ai.assistants.models.RequiredFunctionToolCallDetails
    """

    type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type of the required tool call. Always 'function' for function tools. Required.
     Default value is \"function\"."""
    function: "_models.RequiredFunctionToolCallDetails" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """Detailed information about the function to be executed by the tool that includes name and
     arguments. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        function: "_models.RequiredFunctionToolCallDetails",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator for this subclass; callers never pass "type" themselves.
        super().__init__(*args, type="function", **kwargs)


class RequiredFunctionToolCallDetails(_model_base.Model):
    """The detailed information for a function invocation, as provided by a required action invoking a
    function tool, that includes the name of and arguments to the function.

    :ivar name: The name of the function. Required.
    :vartype name: str
    :ivar arguments: The arguments to use when invoking the named function, as provided by the
     model. Arguments are presented as a JSON document that should be validated and parsed for
     evaluation. Required.
    :vartype arguments: str
    """

    name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The name of the function. Required."""
    arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The arguments to use when invoking the named function, as provided by the model. Arguments are
     presented as a JSON document that should be validated and parsed for evaluation. Required."""

    @overload
    def __init__(
        self,
        *,
        name: str,
        arguments: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class ResponseFormatJsonSchema(_model_base.Model):
    """A description of what the response format is for, used by the model to determine how to respond
    in the format.

    :ivar description: A description of what the response format is for, used by the model to
     determine how to respond in the format.
    :vartype description: str
    :ivar name: The name of a schema. Required.
    :vartype name: str
    :ivar schema: The JSON schema object, describing the response format. Required.
    :vartype schema: any
    """

    description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A description of what the response format is for, used by the model to determine how to respond
     in the format."""
    name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The name of a schema. Required."""
    schema: Any = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The JSON schema object, describing the response format. Required."""

    @overload
    def __init__(
        self,
        *,
        name: str,
        schema: Any,
        description: Optional[str] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class ResponseFormatJsonSchemaType(_model_base.Model):
    """The type of response format being defined: ``json_schema``.

    :ivar type: Type. Required. Default value is "json_schema".
    :vartype type: str
    :ivar json_schema: The JSON schema, describing response format. Required.
    :vartype json_schema: ~azure.ai.assistants.models.ResponseFormatJsonSchema
    """

    type: Literal["json_schema"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Type. Required. Default value is \"json_schema\"."""
    json_schema: "_models.ResponseFormatJsonSchema" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The JSON schema, describing response format. Required."""

    @overload
    def __init__(
        self,
        *,
        json_schema: "_models.ResponseFormatJsonSchema",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # "type" is a fixed wire-protocol constant; set here rather than accepted from callers.
        self.type: Literal["json_schema"] = "json_schema"


class RunCompletionUsage(_model_base.Model):
    """Usage statistics related to the run. This value will be ``null`` if the run is not in a
    terminal state (i.e. ``in_progress``, ``queued``, etc.).

    :ivar completion_tokens: Number of completion tokens used over the course of the run. Required.
    :vartype completion_tokens: int
    :ivar prompt_tokens: Number of prompt tokens used over the course of the run. Required.
    :vartype prompt_tokens: int
    :ivar total_tokens: Total number of tokens used (prompt + completion). Required.
    :vartype total_tokens: int
    """

    completion_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Number of completion tokens used over the course of the run. Required."""
    prompt_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Number of prompt tokens used over the course of the run. Required."""
    total_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Total number of tokens used (prompt + completion). Required."""

    @overload
    def __init__(
        self,
        *,
        completion_tokens: int,
        prompt_tokens: int,
        total_tokens: int,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunError(_model_base.Model):
    """The details of an error as encountered by an agent thread run.

    :ivar code: The status for the error. Required.
    :vartype code: str
    :ivar message: The human-readable text associated with the error. Required.
    :vartype message: str
    """

    code: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The status for the error. Required."""
    message: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The human-readable text associated with the error. Required."""

    @overload
    def __init__(
        self,
        *,
        code: str,
        message: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunStep(_model_base.Model):
    """Detailed information about a single step of an agent thread run.

    :ivar id: The identifier, which can be referenced in API endpoints. Required.
    :vartype id: str
    :ivar object: The object type, which is always 'thread.run.step'. Required. Default value is
     "thread.run.step".
    :vartype object: str
    :ivar type: The type of run step, which can be either message_creation or tool_calls. Required.
     Known values are: "message_creation" and "tool_calls".
    :vartype type: str or ~azure.ai.assistants.models.RunStepType
    :ivar agent_id: The ID of the agent associated with the run step. Required.
    :vartype agent_id: str
    :ivar thread_id: The ID of the thread that was run. Required.
    :vartype thread_id: str
    :ivar run_id: The ID of the run that this run step is a part of. Required.
    :vartype run_id: str
    :ivar status: The status of this run step. Required. Known values are: "in_progress",
     "cancelled", "failed", "completed", and "expired".
    :vartype status: str or ~azure.ai.assistants.models.RunStepStatus
    :ivar step_details: The details for this run step. Required.
    :vartype step_details: ~azure.ai.assistants.models.RunStepDetails
    :ivar last_error: If applicable, information about the last error encountered by this run step.
     Required.
    :vartype last_error: ~azure.ai.assistants.models.RunStepError
    :ivar created_at: The Unix timestamp, in seconds, representing when this object was created.
     Required.
    :vartype created_at: ~datetime.datetime
    :ivar expired_at: The Unix timestamp, in seconds, representing when this item expired.
     Required.
    :vartype expired_at: ~datetime.datetime
    :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required.
    :vartype completed_at: ~datetime.datetime
    :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled.
     Required.
    :vartype cancelled_at: ~datetime.datetime
    :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required.
    :vartype failed_at: ~datetime.datetime
    :ivar usage: Usage statistics related to the run step. This value will be ``null`` while the
     run step's status is ``in_progress``.
    :vartype usage: ~azure.ai.assistants.models.RunStepCompletionUsage
    :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
     storing additional information about that object in a structured format. Keys may be up to 64
     characters in length and values may be up to 512 characters in length. Required.
    :vartype metadata: dict[str, str]
    """

    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The identifier, which can be referenced in API endpoints. Required."""
    object: Literal["thread.run.step"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The object type, which is always 'thread.run.step'. Required. Default value is
     \"thread.run.step\"."""
    type: Union[str, "_models.RunStepType"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The type of run step, which can be either message_creation or tool_calls. Required. Known
     values are: \"message_creation\" and \"tool_calls\"."""
    # Exposed as "agent_id" in Python, but serialized to/from the wire as "assistant_id".
    agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"])
    """The ID of the agent associated with the run step. Required."""
    thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The ID of the thread that was run. Required."""
    run_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The ID of the run that this run step is a part of. Required."""
    status: Union[str, "_models.RunStepStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The status of this run step. Required. Known values are: \"in_progress\", \"cancelled\",
     \"failed\", \"completed\", and \"expired\"."""
    step_details: "_models.RunStepDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The details for this run step. Required."""
    last_error: "_models.RunStepError" = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """If applicable, information about the last error encountered by this run step. Required."""
    # Timestamps are integer Unix seconds on the wire; format="unix-timestamp" converts to datetime.
    created_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp, in seconds, representing when this object was created. Required."""
    expired_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp, in seconds, representing when this item expired. Required."""
    completed_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp, in seconds, representing when this completed. Required."""
    cancelled_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp, in seconds, representing when this was cancelled. Required."""
    failed_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp, in seconds, representing when this failed. Required."""
    usage: Optional["_models.RunStepCompletionUsage"] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """Usage statistics related to the run step. This value will be ``null`` while the run step's
     status is ``in_progress``."""
    metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A set of up to 16 key/value pairs that can be attached to an object, used for storing
     additional information about that object in a structured format. Keys may be up to 64
     characters in length and values may be up to 512 characters in length. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        type: Union[str, "_models.RunStepType"],
        agent_id: str,
        thread_id: str,
        run_id: str,
        status: Union[str, "_models.RunStepStatus"],
        step_details: "_models.RunStepDetails",
        last_error: "_models.RunStepError",
        created_at: datetime.datetime,
        expired_at: datetime.datetime,
        completed_at: datetime.datetime,
        cancelled_at: datetime.datetime,
        failed_at: datetime.datetime,
        metadata: Dict[str, str],
        usage: Optional["_models.RunStepCompletionUsage"] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.

        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # "object" is a fixed wire-protocol constant; set here rather than accepted from callers.
        self.object: Literal["thread.run.step"] = "thread.run.step"


class RunStepToolCall(_model_base.Model):
    """An abstract representation of a detailed tool call as recorded within a run step for an
    existing run.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    RunStepAzureAISearchToolCall, RunStepCustomSearchToolCall, RunStepBingGroundingToolCall,
    RunStepCodeInterpreterToolCall, RunStepMicrosoftFabricToolCall, RunStepFileSearchToolCall,
    RunStepFunctionToolCall, RunStepOpenAPIToolCall, RunStepSharepointToolCall

    :ivar type: The object type. Required. Default value is None.
    :vartype type: str
    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
     Required.
    :vartype id: str
    """

    # Polymorphic base: __mapping__ is populated by subclasses keyed on the "type" discriminator.
    __mapping__: Dict[str, _model_base.Model] = {}
    type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
    """The object type. Required. Default value is None."""
    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required."""

    @overload
    def __init__(
        self,
        *,
        type: str,
        id: str,  # pylint: disable=redefined-builtin
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunStepAzureAISearchToolCall(RunStepToolCall, discriminator="azure_ai_search"):
    """A record of a call to an Azure AI Search tool, issued by the model in evaluation of a defined
    tool, that represents
    executed Azure AI search.

    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
     Required.
    :vartype id: str
    :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is
     "azure_ai_search".
    :vartype type: str
    :ivar azure_ai_search: Reserved for future use. Required.
    :vartype azure_ai_search: dict[str, str]
    """

    type: Literal["azure_ai_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'azure_ai_search'. Required. Default value is
     \"azure_ai_search\"."""
    azure_ai_search: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Reserved for future use. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        azure_ai_search: Dict[str, str],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator for this subclass; callers never pass "type" themselves.
        super().__init__(*args, type="azure_ai_search", **kwargs)


class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_grounding"):
    """A record of a call to a bing grounding tool, issued by the model in evaluation of a defined
    tool, that represents
    executed search with bing grounding.

    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
     Required.
    :vartype id: str
    :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is
     "bing_grounding".
    :vartype type: str
    :ivar bing_grounding: Reserved for future use. Required.
    :vartype bing_grounding: dict[str, str]
    """

    type: Literal["bing_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'bing_grounding'. Required. Default value is
     \"bing_grounding\"."""
    bing_grounding: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Reserved for future use. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        bing_grounding: Dict[str, str],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator for this subclass; callers never pass "type" themselves.
        super().__init__(*args, type="bing_grounding", **kwargs)


class RunStepCodeInterpreterToolCallOutput(_model_base.Model):
    """An abstract representation of an emitted output from a code interpreter tool.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    RunStepCodeInterpreterImageOutput, RunStepCodeInterpreterLogOutput

    :ivar type: The object type. Required. Default value is None.
    :vartype type: str
    """

    # Polymorphic base: __mapping__ is populated by subclasses keyed on the "type" discriminator.
    __mapping__: Dict[str, _model_base.Model] = {}
    type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
    """The object type. Required. Default value is None."""

    @overload
    def __init__(
        self,
        *,
        type: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunStepCodeInterpreterImageOutput(RunStepCodeInterpreterToolCallOutput, discriminator="image"):
    """A representation of an image output emitted by a code interpreter tool in response to a tool
    call by the model.

    :ivar type: The object type, which is always 'image'. Required. Default value is "image".
+ :vartype type: str + :ivar image: Referential information for the image associated with this output. Required. + :vartype image: ~azure.ai.assistants.models.RunStepCodeInterpreterImageReference + """ + + type: Literal["image"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'image'. Required. Default value is \"image\".""" + image: "_models.RunStepCodeInterpreterImageReference" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Referential information for the image associated with this output. Required.""" + + @overload + def __init__( + self, + *, + image: "_models.RunStepCodeInterpreterImageReference", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image", **kwargs) + + +class RunStepCodeInterpreterImageReference(_model_base.Model): + """An image reference emitted by a code interpreter tool in response to a tool call by the model. + + :ivar file_id: The ID of the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterLogOutput(RunStepCodeInterpreterToolCallOutput, discriminator="logs"): + """A representation of a log output emitted by a code interpreter tool in response to a tool call + by the model. + + :ivar type: The object type, which is always 'logs'. Required. Default value is "logs". + :vartype type: str + :ivar logs: The serialized log output emitted by the code interpreter. Required. + :vartype logs: str + """ + + type: Literal["logs"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'logs'. Required. Default value is \"logs\".""" + logs: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The serialized log output emitted by the code interpreter. Required.""" + + @overload + def __init__( + self, + *, + logs: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="logs", **kwargs) + + +class RunStepCodeInterpreterToolCall(RunStepToolCall, discriminator="code_interpreter"): + """A record of a call to a code interpreter tool, issued by the model in evaluation of a defined + tool, that + represents inputs and outputs consumed and emitted by the code interpreter. + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is + "code_interpreter". + :vartype type: str + :ivar code_interpreter: The details of the tool call to the code interpreter tool. Required. 
+ :vartype code_interpreter: ~azure.ai.assistants.models.RunStepCodeInterpreterToolCallDetails + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'code_interpreter'. Required. Default value is + \"code_interpreter\".""" + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the tool call to the code interpreter tool. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="code_interpreter", **kwargs) + + +class RunStepCodeInterpreterToolCallDetails(_model_base.Model): + """The detailed information about a code interpreter invocation by the model. + + :ivar input: The input provided by the model to the code interpreter tool. Required. + :vartype input: str + :ivar outputs: The outputs produced by the code interpreter tool back to the model in response + to the tool call. Required. + :vartype outputs: list[~azure.ai.assistants.models.RunStepCodeInterpreterToolCallOutput] + """ + + input: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The input provided by the model to the code interpreter tool. Required.""" + outputs: List["_models.RunStepCodeInterpreterToolCallOutput"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The outputs produced by the code interpreter tool back to the model in response to the tool + call. 
Required.""" + + @overload + def __init__( + self, + *, + input: str, + outputs: List["_models.RunStepCodeInterpreterToolCallOutput"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCompletionUsage(_model_base.Model): + """Usage statistics related to the run step. + + :ivar completion_tokens: Number of completion tokens used over the course of the run step. + Required. + :vartype completion_tokens: int + :ivar prompt_tokens: Number of prompt tokens used over the course of the run step. Required. + :vartype prompt_tokens: int + :ivar total_tokens: Total number of tokens used (prompt + completion). Required. + :vartype total_tokens: int + """ + + completion_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Number of completion tokens used over the course of the run step. Required.""" + prompt_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Number of prompt tokens used over the course of the run step. Required.""" + total_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Total number of tokens used (prompt + completion). Required.""" + + @overload + def __init__( + self, + *, + completion_tokens: int, + prompt_tokens: int, + total_tokens: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCustomSearchToolCall(RunStepToolCall, discriminator="bing_custom_search"): + """A record of a call to a bing custom search tool, issued by the model in evaluation of a defined + tool, that represents + executed search with bing custom search. + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'bing_custom_search'. Required. Default value is + "bing_custom_search". + :vartype type: str + :ivar bing_custom_search: Reserved for future use. Required. + :vartype bing_custom_search: dict[str, str] + """ + + type: Literal["bing_custom_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'bing_custom_search'. Required. Default value is + \"bing_custom_search\".""" + bing_custom_search: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bing_custom_search: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_custom_search", **kwargs) + + +class RunStepDelta(_model_base.Model): + """Represents the delta payload in a streaming run step delta chunk. + + :ivar step_details: The details of the run step. 
+ :vartype step_details: ~azure.ai.assistants.models.RunStepDeltaDetail + """ + + step_details: Optional["_models.RunStepDeltaDetail"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the run step.""" + + @overload + def __init__( + self, + *, + step_details: Optional["_models.RunStepDeltaDetail"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaChunk(_model_base.Model): + """Represents a run step delta i.e. any changed fields on a run step during streaming. + + :ivar id: The identifier of the run step, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``thread.run.step.delta``. Required. Default + value is "thread.run.step.delta". + :vartype object: str + :ivar delta: The delta containing the fields that have changed on the run step. Required. + :vartype delta: ~azure.ai.assistants.models.RunStepDelta + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier of the run step, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run.step.delta"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always ``thread.run.step.delta``. Required. Default value is + \"thread.run.step.delta\".""" + delta: "_models.RunStepDelta" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The delta containing the fields that have changed on the run step. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + delta: "_models.RunStepDelta", + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.run.step.delta"] = "thread.run.step.delta" + + +class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model): # pylint: disable=name-too-long + """Represents the Code Interpreter tool call data in a streaming run step's tool calls. + + :ivar input: The input into the Code Interpreter tool call. + :vartype input: str + :ivar outputs: The outputs from the Code Interpreter tool call. Code Interpreter can output one + or more + items, including text (``logs``) or images (``image``). Each of these are represented by a + different object type. + :vartype outputs: list[~azure.ai.assistants.models.RunStepDeltaCodeInterpreterOutput] + """ + + input: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The input into the Code Interpreter tool call.""" + outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more + items, including text (``logs``) or images (``image``). Each of these are represented by a + different object type.""" + + @overload + def __init__( + self, + *, + input: Optional[str] = None, + outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaCodeInterpreterOutput(_model_base.Model): + """The abstract base representation of a streaming run step tool call's Code Interpreter tool + output. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepDeltaCodeInterpreterImageOutput, RunStepDeltaCodeInterpreterLogOutput + + :ivar index: The index of the output in the streaming run step tool call's Code Interpreter + outputs array. Required. + :vartype index: int + :ivar type: The type of the streaming run step tool call's Code Interpreter output. Required. + Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The index of the output in the streaming run step tool call's Code Interpreter outputs array. + Required.""" + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The type of the streaming run step tool call's Code Interpreter output. Required. Default value + is None.""" + + @overload + def __init__( + self, + *, + index: int, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaCodeInterpreterImageOutput(RunStepDeltaCodeInterpreterOutput, discriminator="image"): + """Represents an image output as produced the Code interpreter tool and as represented in a + streaming run step's delta tool calls collection. + + :ivar index: The index of the output in the streaming run step tool call's Code Interpreter + outputs array. Required. 
+ :vartype index: int + :ivar type: The object type, which is always "image.". Required. Default value is "image". + :vartype type: str + :ivar image: The image data for the Code Interpreter tool call output. + :vartype image: ~azure.ai.assistants.models.RunStepDeltaCodeInterpreterImageOutputObject + """ + + type: Literal["image"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"image.\". Required. Default value is \"image\".""" + image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The image data for the Code Interpreter tool call output.""" + + @overload + def __init__( + self, + *, + index: int, + image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image", **kwargs) + + +class RunStepDeltaCodeInterpreterImageOutputObject(_model_base.Model): # pylint: disable=name-too-long + """Represents the data for a streaming run step's Code Interpreter tool call image output. + + :ivar file_id: The file ID for the image. + :vartype file_id: str + """ + + file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The file ID for the image.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaCodeInterpreterLogOutput(RunStepDeltaCodeInterpreterOutput, discriminator="logs"): + """Represents a log output as produced by the Code Interpreter tool and as represented in a + streaming run step's delta tool calls collection. + + :ivar index: The index of the output in the streaming run step tool call's Code Interpreter + outputs array. Required. + :vartype index: int + :ivar type: The type of the object, which is always "logs.". Required. Default value is "logs". + :vartype type: str + :ivar logs: The text output from the Code Interpreter tool call. + :vartype logs: str + """ + + type: Literal["logs"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The type of the object, which is always \"logs.\". Required. Default value is \"logs\".""" + logs: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text output from the Code Interpreter tool call.""" + + @overload + def __init__( + self, + *, + index: int, + logs: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="logs", **kwargs) + + +class RunStepDeltaToolCall(_model_base.Model): + """The abstract base representation of a single tool call within a streaming run step's delta tool + call details. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepDeltaCodeInterpreterToolCall, RunStepDeltaFileSearchToolCall, + RunStepDeltaFunctionToolCall + + :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. 
+ :vartype index: int + :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. + :vartype id: str + :ivar type: The type of the tool call detail item in a streaming run step's details. Required. + Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The index of the tool call detail in the run step's tool_calls array. Required.""" + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the tool call, used when submitting outputs to the run. Required.""" + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The type of the tool call detail item in a streaming run step's details. Required. Default + value is None.""" + + @overload + def __init__( + self, + *, + index: int, + id: str, # pylint: disable=redefined-builtin + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaCodeInterpreterToolCall(RunStepDeltaToolCall, discriminator="code_interpreter"): + """Represents a Code Interpreter tool call within a streaming run step's tool call details. + + :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. + :vartype index: int + :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. + :vartype id: str + :ivar type: The object type, which is always "code_interpreter.". Required. Default value is + "code_interpreter". + :vartype type: str + :ivar code_interpreter: The Code Interpreter data for the tool call. 
+ :vartype code_interpreter: + ~azure.ai.assistants.models.RunStepDeltaCodeInterpreterDetailItemObject + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"code_interpreter.\". Required. Default value is + \"code_interpreter\".""" + code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The Code Interpreter data for the tool call.""" + + @overload + def __init__( + self, + *, + index: int, + id: str, # pylint: disable=redefined-builtin + code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="code_interpreter", **kwargs) + + +class RunStepDeltaDetail(_model_base.Model): + """Represents a single run step detail item in a streaming run step's delta payload. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepDeltaMessageCreation, RunStepDeltaToolCallObject + + :ivar type: The object type for the run step detail object. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type for the run step detail object. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaFileSearchToolCall(RunStepDeltaToolCall, discriminator="file_search"): + """Represents a file search tool call within a streaming run step's tool call details. + + :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. + :vartype index: int + :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. + :vartype id: str + :ivar type: The object type, which is always "file_search.". Required. Default value is + "file_search". + :vartype type: str + :ivar file_search: Reserved for future use. + :vartype file_search: ~azure.ai.assistants.models.RunStepFileSearchToolCallResults + """ + + type: Literal["file_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"file_search.\". Required. Default value is \"file_search\".""" + file_search: Optional["_models.RunStepFileSearchToolCallResults"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Reserved for future use.""" + + @overload + def __init__( + self, + *, + index: int, + id: str, # pylint: disable=redefined-builtin + file_search: Optional["_models.RunStepFileSearchToolCallResults"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_search", **kwargs) + + +class RunStepDeltaFunction(_model_base.Model): + """Represents the function data in a streaming run step delta's function tool call. + + :ivar name: The name of the function. + :vartype name: str + :ivar arguments: The arguments passed to the function as input. 
+ :vartype arguments: str + :ivar output: The output of the function, null if outputs have not yet been submitted. + :vartype output: str + """ + + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the function.""" + arguments: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The arguments passed to the function as input.""" + output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The output of the function, null if outputs have not yet been submitted.""" + + @overload + def __init__( + self, + *, + name: Optional[str] = None, + arguments: Optional[str] = None, + output: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaFunctionToolCall(RunStepDeltaToolCall, discriminator="function"): + """Represents a function tool call within a streaming run step's tool call details. + + :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. + :vartype index: int + :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. + :vartype id: str + :ivar type: The object type, which is always "function.". Required. Default value is + "function". + :vartype type: str + :ivar function: The function data for the tool call. + :vartype function: ~azure.ai.assistants.models.RunStepDeltaFunction + """ + + type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"function.\". Required. 
Default value is \"function\".""" + function: Optional["_models.RunStepDeltaFunction"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The function data for the tool call.""" + + @overload + def __init__( + self, + *, + index: int, + id: str, # pylint: disable=redefined-builtin + function: Optional["_models.RunStepDeltaFunction"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="function", **kwargs) + + +class RunStepDeltaMessageCreation(RunStepDeltaDetail, discriminator="message_creation"): + """Represents a message creation within a streaming run step delta. + + :ivar type: The object type, which is always "message_creation.". Required. Default value is + "message_creation". + :vartype type: str + :ivar message_creation: The message creation data. + :vartype message_creation: ~azure.ai.assistants.models.RunStepDeltaMessageCreationObject + """ + + type: Literal["message_creation"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"message_creation.\". Required. Default value is + \"message_creation\".""" + message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The message creation data.""" + + @overload + def __init__( + self, + *, + message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="message_creation", **kwargs) + + +class RunStepDeltaMessageCreationObject(_model_base.Model): + """Represents the data within a streaming run step message creation response object. + + :ivar message_id: The ID of the newly-created message. + :vartype message_id: str + """ + + message_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the newly-created message.""" + + @overload + def __init__( + self, + *, + message_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaToolCallObject(RunStepDeltaDetail, discriminator="tool_calls"): + """Represents an invocation of tool calls as part of a streaming run step. + + :ivar type: The object type, which is always "tool_calls.". Required. Default value is + "tool_calls". + :vartype type: str + :ivar tool_calls: The collection of tool calls for the tool call detail item. + :vartype tool_calls: list[~azure.ai.assistants.models.RunStepDeltaToolCall] + """ + + type: Literal["tool_calls"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always \"tool_calls.\". Required. Default value is \"tool_calls\".""" + tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The collection of tool calls for the tool call detail item.""" + + @overload + def __init__( + self, + *, + tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = None, + ) -> None: ... 
    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator so instances always carry type="tool_calls".
        super().__init__(*args, type="tool_calls", **kwargs)


class RunStepDetails(_model_base.Model):
    """An abstract representation of the details for a run step.

    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
    RunStepMessageCreationDetails, RunStepToolCallDetails

    :ivar type: The object type. Required. Known values are: "message_creation" and "tool_calls".
    :vartype type: str or ~azure.ai.assistants.models.RunStepType
    """

    # Registry of discriminator value -> subclass, populated by the model base machinery.
    __mapping__: Dict[str, _model_base.Model] = {}
    type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
    """The object type. Required. Known values are: \"message_creation\" and \"tool_calls\"."""

    @overload
    def __init__(
        self,
        *,
        type: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunStepError(_model_base.Model):
    """The error information associated with a failed run step.

    :ivar code: The error code for this error. Required. Known values are: "server_error" and
     "rate_limit_exceeded".
    :vartype code: str or ~azure.ai.assistants.models.RunStepErrorCode
    :ivar message: The human-readable text associated with this error. Required.
    :vartype message: str
    """

    code: Union[str, "_models.RunStepErrorCode"] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The error code for this error. Required. Known values are: \"server_error\" and
    \"rate_limit_exceeded\"."""
    message: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The human-readable text associated with this error. Required."""

    @overload
    def __init__(
        self,
        *,
        code: Union[str, "_models.RunStepErrorCode"],
        message: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunStepFileSearchToolCall(RunStepToolCall, discriminator="file_search"):
    """A record of a call to a file search tool, issued by the model in evaluation of a defined tool,
    that represents
    executed file search.

    :ivar type: The object type, which is always 'file_search'. Required. Default value is
     "file_search".
    :vartype type: str
    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
     Required.
    :vartype id: str
    :ivar file_search: For now, this is always going to be an empty object. Required.
    :vartype file_search: ~azure.ai.assistants.models.RunStepFileSearchToolCallResults
    """

    # Discriminator value: routes polymorphic deserialization of RunStepToolCall here.
    type: Literal["file_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'file_search'. Required. Default value is \"file_search\"."""
    file_search: "_models.RunStepFileSearchToolCallResults" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """For now, this is always going to be an empty object. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        file_search: "_models.RunStepFileSearchToolCallResults",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator so instances always carry type="file_search".
        super().__init__(*args, type="file_search", **kwargs)


class RunStepFileSearchToolCallResult(_model_base.Model):
    """File search tool call result.

    :ivar file_id: The ID of the file that result was found in. Required.
    :vartype file_id: str
    :ivar file_name: The name of the file that result was found in. Required.
    :vartype file_name: str
    :ivar score: The score of the result. All values must be a floating point number between 0 and
     1. Required.
    :vartype score: float
    :ivar content: The content of the result that was found. The content is only included if
     requested via the include query parameter.
    :vartype content: list[~azure.ai.assistants.models.FileSearchToolCallContent]
    """

    file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The ID of the file that result was found in. Required."""
    file_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The name of the file that result was found in. Required."""
    score: float = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The score of the result. All values must be a floating point number between 0 and 1. Required."""
    content: Optional[List["_models.FileSearchToolCallContent"]] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The content of the result that was found. The content is only included if requested via the
    include query parameter."""

    @overload
    def __init__(
        self,
        *,
        file_id: str,
        file_name: str,
        score: float,
        content: Optional[List["_models.FileSearchToolCallContent"]] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunStepFileSearchToolCallResults(_model_base.Model):
    """The results of the file search.

    :ivar ranking_options: Ranking options for file search.
    :vartype ranking_options: ~azure.ai.assistants.models.FileSearchRankingOptions
    :ivar results: The array of a file search results. Required.
    :vartype results: list[~azure.ai.assistants.models.RunStepFileSearchToolCallResult]
    """

    ranking_options: Optional["_models.FileSearchRankingOptions"] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """Ranking options for file search."""
    results: List["_models.RunStepFileSearchToolCallResult"] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The array of a file search results. Required."""

    @overload
    def __init__(
        self,
        *,
        results: List["_models.RunStepFileSearchToolCallResult"],
        ranking_options: Optional["_models.FileSearchRankingOptions"] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunStepFunctionToolCall(RunStepToolCall, discriminator="function"):
    """A record of a call to a function tool, issued by the model in evaluation of a defined tool,
    that represents the inputs
    and output consumed and emitted by the specified function.

    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
     Required.
    :vartype id: str
    :ivar type: The object type, which is always 'function'. Required. Default value is "function".
    :vartype type: str
    :ivar function: The detailed information about the function called by the model. Required.
    :vartype function: ~azure.ai.assistants.models.RunStepFunctionToolCallDetails
    """

    # Discriminator value: routes polymorphic deserialization of RunStepToolCall here.
    type: Literal["function"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'function'. Required. Default value is \"function\"."""
    function: "_models.RunStepFunctionToolCallDetails" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The detailed information about the function called by the model. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        function: "_models.RunStepFunctionToolCallDetails",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator so instances always carry type="function".
        super().__init__(*args, type="function", **kwargs)


class RunStepFunctionToolCallDetails(_model_base.Model):
    """The detailed information about the function called by the model.

    :ivar name: The name of the function. Required.
    :vartype name: str
    :ivar arguments: The arguments that the model requires are provided to the named function.
     Required.
    :vartype arguments: str
    :ivar output: The output of the function, only populated for function calls that have already
     have had their outputs submitted. Required.
    :vartype output: str
    """

    name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The name of the function. Required."""
    arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The arguments that the model requires are provided to the named function. Required."""
    output: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The output of the function, only populated for function calls that have already have had their
    outputs submitted. Required."""

    @overload
    def __init__(
        self,
        *,
        name: str,
        arguments: str,
        output: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunStepMessageCreationDetails(RunStepDetails, discriminator="message_creation"):
    """The detailed information associated with a message creation run step.

    :ivar type: The object type, which is always 'message_creation'. Required. Represents a run
     step to create a message.
    :vartype type: str or ~azure.ai.assistants.models.MESSAGE_CREATION
    :ivar message_creation: Information about the message creation associated with this run step.
     Required.
    :vartype message_creation: ~azure.ai.assistants.models.RunStepMessageCreationReference
    """

    # Discriminator value: routes polymorphic deserialization of RunStepDetails here.
    type: Literal[RunStepType.MESSAGE_CREATION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'message_creation'. Required. Represents a run step to create
    a message."""
    message_creation: "_models.RunStepMessageCreationReference" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """Information about the message creation associated with this run step. Required."""

    @overload
    def __init__(
        self,
        *,
        message_creation: "_models.RunStepMessageCreationReference",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator using the enum member rather than the raw string.
        super().__init__(*args, type=RunStepType.MESSAGE_CREATION, **kwargs)


class RunStepMessageCreationReference(_model_base.Model):
    """The details of a message created as a part of a run step.

    :ivar message_id: The ID of the message created by this run step. Required.
    :vartype message_id: str
    """

    message_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The ID of the message created by this run step. Required."""

    @overload
    def __init__(
        self,
        *,
        message_id: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="fabric_dataagent"):
    """A record of a call to a Microsoft Fabric tool, issued by the model in evaluation of a defined
    tool, that represents
    executed Microsoft Fabric operations.

    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
     Required.
    :vartype id: str
    :ivar type: The object type, which is always 'fabric_dataagent'. Required. Default value is
     "fabric_dataagent".
    :vartype type: str
    :ivar microsoft_fabric: Reserved for future use. Required.
    :vartype microsoft_fabric: dict[str, str]
    """

    # Discriminator value: routes polymorphic deserialization of RunStepToolCall here.
    type: Literal["fabric_dataagent"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'fabric_dataagent'. Required. Default value is
    \"fabric_dataagent\"."""
    # Wire name differs from the Python attribute: serialized as "fabric_dataagent".
    microsoft_fabric: Dict[str, str] = rest_field(
        name="fabric_dataagent", visibility=["read", "create", "update", "delete", "query"]
    )
    """Reserved for future use. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        microsoft_fabric: Dict[str, str],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator so instances always carry type="fabric_dataagent".
        super().__init__(*args, type="fabric_dataagent", **kwargs)


class RunStepOpenAPIToolCall(RunStepToolCall, discriminator="openapi"):
    """A record of a call to an OpenAPI tool, issued by the model in evaluation of a defined tool,
    that represents
    executed OpenAPI operations.

    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
     Required.
    :vartype id: str
    :ivar type: The object type, which is always 'openapi'. Required. Default value is "openapi".
    :vartype type: str
    :ivar open_api: Reserved for future use. Required.
    :vartype open_api: dict[str, str]
    """

    # Discriminator value: routes polymorphic deserialization of RunStepToolCall here.
    type: Literal["openapi"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'openapi'. Required. Default value is \"openapi\"."""
    # Wire name differs from the Python attribute: serialized as "openapi".
    open_api: Dict[str, str] = rest_field(name="openapi", visibility=["read", "create", "update", "delete", "query"])
    """Reserved for future use. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        open_api: Dict[str, str],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator so instances always carry type="openapi".
        super().__init__(*args, type="openapi", **kwargs)


class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint_grounding"):
    """A record of a call to a SharePoint tool, issued by the model in evaluation of a defined tool,
    that represents
    executed SharePoint actions.

    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
     Required.
    :vartype id: str
    :ivar type: The object type, which is always 'sharepoint_grounding'. Required. Default value is
     "sharepoint_grounding".
    :vartype type: str
    :ivar share_point: Reserved for future use. Required.
    :vartype share_point: dict[str, str]
    """

    # Discriminator value: routes polymorphic deserialization of RunStepToolCall here.
    type: Literal["sharepoint_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'sharepoint_grounding'. Required. Default value is
    \"sharepoint_grounding\"."""
    # Wire name differs from the Python attribute: serialized as "sharepoint_grounding".
    share_point: Dict[str, str] = rest_field(
        name="sharepoint_grounding", visibility=["read", "create", "update", "delete", "query"]
    )
    """Reserved for future use. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        share_point: Dict[str, str],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator so instances always carry type="sharepoint_grounding".
        super().__init__(*args, type="sharepoint_grounding", **kwargs)


class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"):
    """The detailed information associated with a run step calling tools.

    :ivar type: The object type, which is always 'tool_calls'. Required. Represents a run step that
     calls tools.
    :vartype type: str or ~azure.ai.assistants.models.TOOL_CALLS
    :ivar tool_calls: A list of tool call details for this run step. Required.
    :vartype tool_calls: list[~azure.ai.assistants.models.RunStepToolCall]
    """

    # Discriminator value: routes polymorphic deserialization of RunStepDetails here.
    type: Literal[RunStepType.TOOL_CALLS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'tool_calls'. Required. Represents a run step that calls
    tools."""
    tool_calls: List["_models.RunStepToolCall"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A list of tool call details for this run step. Required."""

    @overload
    def __init__(
        self,
        *,
        tool_calls: List["_models.RunStepToolCall"],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator using the enum member rather than the raw string.
        super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs)


class SearchConfiguration(_model_base.Model):
    """A custom search configuration.

    :ivar connection_id: A connection in a ToolConnectionList attached to this tool. Required.
    :vartype connection_id: str
    :ivar instance_name: Name of the custom configuration instance given to config. Required.
    :vartype instance_name: str
    """

    connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A connection in a ToolConnectionList attached to this tool. Required."""
    instance_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """Name of the custom configuration instance given to config. Required."""

    @overload
    def __init__(
        self,
        *,
        connection_id: str,
        instance_name: str,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class SearchConfigurationList(_model_base.Model):
    """A list of search configurations currently used by the ``bing_custom_search`` tool.

    :ivar search_configurations: The connections attached to this tool. There can be a maximum of 1
     connection
     resource attached to the tool. Required.
    :vartype search_configurations: list[~azure.ai.assistants.models.SearchConfiguration]
    """

    search_configurations: List["_models.SearchConfiguration"] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The connections attached to this tool. There can be a maximum of 1 connection
    resource attached to the tool. Required."""

    @overload
    def __init__(
        self,
        *,
        search_configurations: List["_models.SearchConfiguration"],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint_grounding"):
    """The input definition information for a sharepoint tool as used to configure an agent.

    :ivar type: The object type, which is always 'sharepoint_grounding'. Required. Default value is
     "sharepoint_grounding".
    :vartype type: str
    :ivar sharepoint_grounding: The list of connections used by the SharePoint tool. Required.
    :vartype sharepoint_grounding: ~azure.ai.assistants.models.ToolConnectionList
    """

    # Discriminator value: routes polymorphic deserialization of ToolDefinition here.
    type: Literal["sharepoint_grounding"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'sharepoint_grounding'. Required. Default value is
    \"sharepoint_grounding\"."""
    sharepoint_grounding: "_models.ToolConnectionList" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The list of connections used by the SharePoint tool. Required."""

    @overload
    def __init__(
        self,
        *,
        sharepoint_grounding: "_models.ToolConnectionList",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator so instances always carry type="sharepoint_grounding".
        super().__init__(*args, type="sharepoint_grounding", **kwargs)


class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"):
    """The details for required tool calls that must be submitted for an agent thread run to continue.

    :ivar type: The object type, which is always 'submit_tool_outputs'. Required. Default value is
     "submit_tool_outputs".
    :vartype type: str
    :ivar submit_tool_outputs: The details describing tools that should be called to submit tool
     outputs. Required.
    :vartype submit_tool_outputs: ~azure.ai.assistants.models.SubmitToolOutputsDetails
    """

    # Discriminator value: routes polymorphic deserialization of RequiredAction here.
    type: Literal["submit_tool_outputs"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])  # type: ignore
    """The object type, which is always 'submit_tool_outputs'. Required. Default value is
    \"submit_tool_outputs\"."""
    submit_tool_outputs: "_models.SubmitToolOutputsDetails" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The details describing tools that should be called to submit tool outputs. Required."""

    @overload
    def __init__(
        self,
        *,
        submit_tool_outputs: "_models.SubmitToolOutputsDetails",
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin the discriminator so instances always carry type="submit_tool_outputs".
        super().__init__(*args, type="submit_tool_outputs", **kwargs)


class SubmitToolOutputsDetails(_model_base.Model):
    """The details describing tools that should be called to submit tool outputs.

    :ivar tool_calls: The list of tool calls that must be resolved for the agent thread run to
     continue. Required.
    :vartype tool_calls: list[~azure.ai.assistants.models.RequiredToolCall]
    """

    tool_calls: List["_models.RequiredToolCall"] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """The list of tool calls that must be resolved for the agent thread run to continue. Required."""

    @overload
    def __init__(
        self,
        *,
        tool_calls: List["_models.RequiredToolCall"],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)


class ThreadDeletionStatus(_model_base.Model):
    """The status of a thread deletion operation.

    :ivar id: The ID of the resource specified for deletion. Required.
    :vartype id: str
    :ivar deleted: A value indicating whether deletion was successful. Required.
    :vartype deleted: bool
    :ivar object: The object type, which is always 'thread.deleted'. Required. Default value is
     "thread.deleted".
    :vartype object: str
    """

    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The ID of the resource specified for deletion. Required."""
    deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A value indicating whether deletion was successful. Required."""
    object: Literal["thread.deleted"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The object type, which is always 'thread.deleted'. Required. Default value is
    \"thread.deleted\"."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        deleted: bool,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # The constant object marker is not a caller-settable field; assign it after init.
        self.object: Literal["thread.deleted"] = "thread.deleted"


class ThreadMessage(_model_base.Model):
    """A single, existing message within an agent thread.

    :ivar id: The identifier, which can be referenced in API endpoints. Required.
    :vartype id: str
    :ivar object: The object type, which is always 'thread.message'. Required. Default value is
     "thread.message".
    :vartype object: str
    :ivar created_at: The Unix timestamp, in seconds, representing when this object was created.
     Required.
    :vartype created_at: ~datetime.datetime
    :ivar thread_id: The ID of the thread that this message belongs to. Required.
    :vartype thread_id: str
    :ivar status: The status of the message. Required. Known values are: "in_progress",
     "incomplete", and "completed".
    :vartype status: str or ~azure.ai.assistants.models.MessageStatus
    :ivar incomplete_details: On an incomplete message, details about why the message is
     incomplete. Required.
    :vartype incomplete_details: ~azure.ai.assistants.models.MessageIncompleteDetails
    :ivar completed_at: The Unix timestamp (in seconds) for when the message was completed.
     Required.
    :vartype completed_at: ~datetime.datetime
    :ivar incomplete_at: The Unix timestamp (in seconds) for when the message was marked as
     incomplete. Required.
    :vartype incomplete_at: ~datetime.datetime
    :ivar role: The role associated with the agent thread message. Required. Known values are:
     "user" and "assistant".
    :vartype role: str or ~azure.ai.assistants.models.MessageRole
    :ivar content: The list of content items associated with the agent thread message. Required.
    :vartype content: list[~azure.ai.assistants.models.MessageContent]
    :ivar agent_id: If applicable, the ID of the agent that authored this message. Required.
    :vartype agent_id: str
    :ivar run_id: If applicable, the ID of the run associated with the authoring of this message.
     Required.
    :vartype run_id: str
    :ivar attachments: A list of files attached to the message, and the tools they were added to.
     Required.
    :vartype attachments: list[~azure.ai.assistants.models.MessageAttachment]
    :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
     storing additional information about that object in a structured format. Keys may be up to 64
     characters in length and values may be up to 512 characters in length. Required.
    :vartype metadata: dict[str, str]
    """

    id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The identifier, which can be referenced in API endpoints. Required."""
    object: Literal["thread.message"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The object type, which is always 'thread.message'. Required. Default value is
    \"thread.message\"."""
    created_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp, in seconds, representing when this object was created. Required."""
    thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The ID of the thread that this message belongs to. Required."""
    status: Union[str, "_models.MessageStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The status of the message. Required. Known values are: \"in_progress\", \"incomplete\", and
    \"completed\"."""
    incomplete_details: "_models.MessageIncompleteDetails" = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """On an incomplete message, details about why the message is incomplete. Required."""
    completed_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp (in seconds) for when the message was completed. Required."""
    incomplete_at: datetime.datetime = rest_field(
        visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
    )
    """The Unix timestamp (in seconds) for when the message was marked as incomplete. Required."""
    role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The role associated with the agent thread message. Required. Known values are: \"user\" and
    \"assistant\"."""
    content: List["_models.MessageContent"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """The list of content items associated with the agent thread message. Required."""
    # Wire name differs from the Python attribute: serialized as "assistant_id".
    agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"])
    """If applicable, the ID of the agent that authored this message. Required."""
    run_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """If applicable, the ID of the run associated with the authoring of this message. Required."""
    attachments: List["_models.MessageAttachment"] = rest_field(
        visibility=["read", "create", "update", "delete", "query"]
    )
    """A list of files attached to the message, and the tools they were added to. Required."""
    metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
    """A set of up to 16 key/value pairs that can be attached to an object, used for storing
    additional information about that object in a structured format. Keys may be up to 64
    characters in length and values may be up to 512 characters in length. Required."""

    @overload
    def __init__(
        self,
        *,
        id: str,  # pylint: disable=redefined-builtin
        created_at: datetime.datetime,
        thread_id: str,
        status: Union[str, "_models.MessageStatus"],
        incomplete_details: "_models.MessageIncompleteDetails",
        completed_at: datetime.datetime,
        incomplete_at: datetime.datetime,
        role: Union[str, "_models.MessageRole"],
        content: List["_models.MessageContent"],
        agent_id: str,
        run_id: str,
        attachments: List["_models.MessageAttachment"],
        metadata: Dict[str, str],
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # The constant object marker is not a caller-settable field; assign it after init.
        self.object: Literal["thread.message"] = "thread.message"


class ThreadMessageOptions(_model_base.Model):
    """A single message within an agent thread, as provided during that thread's creation for its
    initial state.

    :ivar role: The role of the entity that is creating the message. Allowed values include:

     * `user`: Indicates the message is sent by an actual user and should be used in most
       cases to represent user-generated messages.
     * `assistant`: Indicates the message is generated by the agent. Use this value to insert
       messages from the agent into the
       conversation. Required. Known values are: "user" and "assistant".
    :vartype role: str or ~azure.ai.assistants.models.MessageRole
    :ivar content: The textual content of the initial message.
Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :vartype content: str + :ivar attachments: A list of files attached to the message, and the tools they should be added + to. + :vartype attachments: list[~azure.ai.assistants.models.MessageAttachment] + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The role of the entity that is creating the message. Allowed values include: + + * `user`: Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * `assistant`: Indicates the message is generated by the agent. Use this value to insert + messages from the agent into the + conversation. Required. Known values are: \"user\" and \"assistant\".""" + content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The textual content of the initial message. Currently, robust input including images and + annotated text may only be provided via + a separate call to the create message API. Required.""" + attachments: Optional[List["_models.MessageAttachment"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A list of files attached to the message, and the tools they should be added to.""" + metadata: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. 
Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: str, + attachments: Optional[List["_models.MessageAttachment"]] = None, + metadata: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ThreadRun(_model_base.Model): + """Data representing a single evaluation run of an agent thread. + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.run'. Required. Default value is + "thread.run". + :vartype object: str + :ivar thread_id: The ID of the thread associated with this run. Required. + :vartype thread_id: str + :ivar agent_id: The ID of the agent associated with the thread this run was performed against. + Required. + :vartype agent_id: str + :ivar status: The status of the agent thread run. Required. Known values are: "queued", + "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", and + "expired". + :vartype status: str or ~azure.ai.assistants.models.RunStatus + :ivar required_action: The details of the action required for the agent thread run to continue. + :vartype required_action: ~azure.ai.assistants.models.RequiredAction + :ivar last_error: The last error, if any, encountered by this agent thread run. Required. + :vartype last_error: ~azure.ai.assistants.models.RunError + :ivar model: The ID of the model to use. Required. + :vartype model: str + :ivar instructions: The overridden system instructions used for this agent thread run. + Required. 
+ :vartype instructions: str + :ivar tools: The overridden enabled tools used for this agent thread run. Required. + :vartype tools: list[~azure.ai.assistants.models.ToolDefinition] + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar expires_at: The Unix timestamp, in seconds, representing when this item expires. + Required. + :vartype expires_at: ~datetime.datetime + :ivar started_at: The Unix timestamp, in seconds, representing when this item was started. + Required. + :vartype started_at: ~datetime.datetime + :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. + :vartype completed_at: ~datetime.datetime + :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. + Required. + :vartype cancelled_at: ~datetime.datetime + :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. + :vartype failed_at: ~datetime.datetime + :ivar incomplete_details: Details on why the run is incomplete. Will be ``null`` if the run is + not incomplete. Required. + :vartype incomplete_details: ~azure.ai.assistants.models.IncompleteRunDetails + :ivar usage: Usage statistics related to the run. This value will be ``null`` if the run is not + in a terminal state (i.e. ``in_progress``, ``queued``, etc.). Required. + :vartype usage: ~azure.ai.assistants.models.RunCompletionUsage + :ivar temperature: The sampling temperature used for this run. If not set, defaults to 1. + :vartype temperature: float + :ivar top_p: The nucleus sampling value used for this run. If not set, defaults to 1. + :vartype top_p: float + :ivar max_prompt_tokens: The maximum number of prompt tokens specified to have been used over + the course of the run. Required. 
+ :vartype max_prompt_tokens: int + :ivar max_completion_tokens: The maximum number of completion tokens specified to have been + used over the course of the run. Required. + :vartype max_completion_tokens: int + :ivar truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Required. + :vartype truncation_strategy: ~azure.ai.assistants.models.TruncationObject + :ivar tool_choice: Controls whether or not and which tool is called by the model. Required. Is + one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice + :vartype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.assistants.models.AgentsNamedToolChoice + :ivar response_format: The response format of the tool calls used in this run. Required. Is one + of the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat, ResponseFormatJsonSchemaType + :vartype response_format: str or str or ~azure.ai.assistants.models.AgentsApiResponseFormatMode + or ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.ResponseFormatJsonSchemaType + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + :ivar tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. + :vartype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions + :ivar parallel_tool_calls: Determines if tools can be executed in parallel within the run. + Required. 
+ :vartype parallel_tool_calls: bool + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'thread.run'. Required. Default value is \"thread.run\".""" + thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the thread associated with this run. Required.""" + agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"]) + """The ID of the agent associated with the thread this run was performed against. Required.""" + status: Union[str, "_models.RunStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The status of the agent thread run. Required. Known values are: \"queued\", \"in_progress\", + \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", and \"expired\".""" + required_action: Optional["_models.RequiredAction"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The details of the action required for the agent thread run to continue.""" + last_error: "_models.RunError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last error, if any, encountered by this agent thread run. Required.""" + model: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the model to use. Required.""" + instructions: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The overridden system instructions used for this agent thread run. Required.""" + tools: List["_models.ToolDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The overridden enabled tools used for this agent thread run. 
Required.""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + expires_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this item expires. Required.""" + started_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this item was started. Required.""" + completed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this completed. Required.""" + cancelled_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" + failed_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp, in seconds, representing when this failed. Required.""" + incomplete_details: "_models.IncompleteRunDetails" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required.""" + usage: "_models.RunCompletionUsage" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Usage statistics related to the run. This value will be ``null`` if the run is not in a + terminal state (i.e. ``in_progress``, ``queued``, etc.). 
Required.""" + temperature: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The sampling temperature used for this run. If not set, defaults to 1.""" + top_p: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The nucleus sampling value used for this run. If not set, defaults to 1.""" + max_prompt_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The maximum number of prompt tokens specified to have been used over the course of the run. + Required.""" + max_completion_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The maximum number of completion tokens specified to have been used over the course of the run. + Required.""" + truncation_strategy: "_models.TruncationObject" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The strategy to use for dropping messages as the context windows moves forward. Required.""" + tool_choice: "_types.AgentsApiToolChoiceOption" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Controls whether or not and which tool is called by the model. Required. Is one of the + following types: str, Union[str, \"_models.AgentsApiToolChoiceOptionMode\"], + AgentsNamedToolChoice""" + response_format: "_types.AgentsApiResponseFormatOption" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The response format of the tool calls used in this run. Required. Is one of the following + types: str, Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat, + ResponseFormatJsonSchemaType""" + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. 
Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Override the tools the agent can use for this run. This is useful for modifying the behavior on + a per-run basis.""" + parallel_tool_calls: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Determines if tools can be executed in parallel within the run. Required.""" + + @overload + def __init__( # pylint: disable=too-many-locals + self, + *, + id: str, # pylint: disable=redefined-builtin + thread_id: str, + agent_id: str, + status: Union[str, "_models.RunStatus"], + last_error: "_models.RunError", + model: str, + instructions: str, + tools: List["_models.ToolDefinition"], + created_at: datetime.datetime, + expires_at: datetime.datetime, + started_at: datetime.datetime, + completed_at: datetime.datetime, + cancelled_at: datetime.datetime, + failed_at: datetime.datetime, + incomplete_details: "_models.IncompleteRunDetails", + usage: "_models.RunCompletionUsage", + max_prompt_tokens: int, + max_completion_tokens: int, + truncation_strategy: "_models.TruncationObject", + tool_choice: "_types.AgentsApiToolChoiceOption", + response_format: "_types.AgentsApiResponseFormatOption", + metadata: Dict[str, str], + parallel_tool_calls: bool, + required_action: Optional["_models.RequiredAction"] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + tool_resources: Optional["_models.UpdateToolResourcesOptions"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.run"] = "thread.run" + + +class ToolConnection(_model_base.Model): + """A connection resource. + + :ivar connection_id: A connection in a ToolConnectionList attached to this tool. Required. + :vartype connection_id: str + """ + + connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A connection in a ToolConnectionList attached to this tool. Required.""" + + @overload + def __init__( + self, + *, + connection_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolConnectionList(_model_base.Model): + """A set of connection resources currently used by either the ``bing_grounding``, + ``fabric_dataagent``, or ``sharepoint_grounding`` tools. + + :ivar connection_list: The connections attached to this tool. There can be a maximum of 1 + connection + resource attached to the tool. + :vartype connection_list: list[~azure.ai.assistants.models.ToolConnection] + """ + + connection_list: Optional[List["_models.ToolConnection"]] = rest_field( + name="connections", visibility=["read", "create", "update", "delete", "query"] + ) + """The connections attached to this tool. There can be a maximum of 1 connection + resource attached to the tool.""" + + @overload + def __init__( + self, + *, + connection_list: Optional[List["_models.ToolConnection"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolOutput(_model_base.Model): + """The data provided during a tool outputs submission to resolve pending tool calls and allow the + model to continue. + + :ivar tool_call_id: The ID of the tool call being resolved, as provided in the tool calls of a + required action from a run. + :vartype tool_call_id: str + :ivar output: The output from the tool to be submitted. + :vartype output: str + """ + + tool_call_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the tool call being resolved, as provided in the tool calls of a required action from + a run.""" + output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The output from the tool to be submitted.""" + + @overload + def __init__( + self, + *, + tool_call_id: Optional[str] = None, + output: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolResources(_model_base.Model): + """A set of resources that are used by the agent's tools. The resources are specific to the type + of + tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` + tool requires a list of vector store IDs. + + :ivar code_interpreter: Resources to be used by the ``code_interpreter`` tool consisting of + file IDs. + :vartype code_interpreter: ~azure.ai.assistants.models.CodeInterpreterToolResource + :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store + IDs. 
+ :vartype file_search: ~azure.ai.assistants.models.FileSearchToolResource + :ivar azure_ai_search: Resources to be used by the ``azure_ai_search`` tool consisting of index + IDs and names. + :vartype azure_ai_search: ~azure.ai.assistants.models.AzureAISearchResource + """ + + code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Resources to be used by the ``code_interpreter`` tool consisting of file IDs.""" + file_search: Optional["_models.FileSearchToolResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Resources to be used by the ``file_search`` tool consisting of vector store IDs.""" + azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names.""" + + @overload + def __init__( + self, + *, + code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None, + file_search: Optional["_models.FileSearchToolResource"] = None, + azure_ai_search: Optional["_models.AzureAISearchResource"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TruncationObject(_model_base.Model): + """Controls for how a thread will be truncated prior to the run. Use this to control the initial + context window of the run. + + :ivar type: The truncation strategy to use for the thread. The default is ``auto``. If set to + ``last_messages``, the thread will + be truncated to the ``lastMessages`` count most recent messages in the thread. 
When set to + ``auto``, messages in the middle of the thread + will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known + values are: "auto" and "last_messages". + :vartype type: str or ~azure.ai.assistants.models.TruncationStrategy + :ivar last_messages: The number of most recent messages from the thread when constructing the + context for the run. + :vartype last_messages: int + """ + + type: Union[str, "_models.TruncationStrategy"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The truncation strategy to use for the thread. The default is ``auto``. If set to + ``last_messages``, the thread will + be truncated to the ``lastMessages`` count most recent messages in the thread. When set to + ``auto``, messages in the middle of the thread + will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known + values are: \"auto\" and \"last_messages\".""" + last_messages: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The number of most recent messages from the thread when constructing the context for the run.""" + + @overload + def __init__( + self, + *, + type: Union[str, "_models.TruncationStrategy"], + last_messages: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class UpdateCodeInterpreterToolResourceOptions(_model_base.Model): + """Request object to update ``code_interpreter`` tool resources. + + :ivar file_ids: A list of file IDs to override the current list of the agent. 
+ :vartype file_ids: list[str] + """ + + file_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A list of file IDs to override the current list of the agent.""" + + @overload + def __init__( + self, + *, + file_ids: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class UpdateFileSearchToolResourceOptions(_model_base.Model): + """Request object to update ``file_search`` tool resources. + + :ivar vector_store_ids: A list of vector store IDs to override the current list of the agent. + :vartype vector_store_ids: list[str] + """ + + vector_store_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A list of vector store IDs to override the current list of the agent.""" + + @overload + def __init__( + self, + *, + vector_store_ids: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class UpdateToolResourcesOptions(_model_base.Model): + """Request object. A set of resources that are used by the agent's tools. The resources are + specific to the type of tool. + For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list of + vector store IDs. + + :ivar code_interpreter: Overrides the list of file IDs made available to the + ``code_interpreter`` tool. There can be a maximum of 20 files + associated with the tool. 
+ :vartype code_interpreter: ~azure.ai.assistants.models.UpdateCodeInterpreterToolResourceOptions + :ivar file_search: Overrides the vector store attached to this agent. There can be a maximum of + 1 vector store attached to the agent. + :vartype file_search: ~azure.ai.assistants.models.UpdateFileSearchToolResourceOptions + :ivar azure_ai_search: Overrides the resources to be used by the ``azure_ai_search`` tool + consisting of index IDs and names. + :vartype azure_ai_search: ~azure.ai.assistants.models.AzureAISearchResource + """ + + code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Overrides the list of file IDs made available to the ``code_interpreter`` tool. There can be a + maximum of 20 files + associated with the tool.""" + file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store + attached to the agent.""" + azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and + names.""" + + @overload + def __init__( + self, + *, + code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None, + file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None, + azure_ai_search: Optional["_models.AzureAISearchResource"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class UploadFileRequest(_model_base.Model): + """UploadFileRequest. + + :ivar file: The file data, in bytes. Required. + :vartype file: ~azure.ai.assistants._vendor.FileType + :ivar purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Required. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". + :vartype purpose: str or ~azure.ai.assistants.models.FilePurpose + :ivar filename: The name of the file. + :vartype filename: str + """ + + file: FileType = rest_field( + visibility=["read", "create", "update", "delete", "query"], is_multipart_file_input=True + ) + """The file data, in bytes. Required.""" + purpose: Union[str, "_models.FilePurpose"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The intended purpose of the uploaded file. Use ``assistants`` for Agents and Message files, + ``vision`` for Agents image file inputs, ``batch`` for Batch API, and ``fine-tune`` for + Fine-tuning. Required. Known values are: \"fine-tune\", \"fine-tune-results\", \"assistants\", + \"assistants_output\", \"batch\", \"batch_output\", and \"vision\".""" + filename: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the file.""" + + @overload + def __init__( + self, + *, + file: FileType, + purpose: Union[str, "_models.FilePurpose"], + filename: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStore(_model_base.Model): + """A vector store is a collection of processed files that can be used by the ``file_search`` tool. + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``vector_store``. Required. Default value is + "vector_store". + :vartype object: str + :ivar created_at: The Unix timestamp (in seconds) for when the vector store was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar name: The name of the vector store. Required. + :vartype name: str + :ivar usage_bytes: The total number of bytes used by the files in the vector store. Required. + :vartype usage_bytes: int + :ivar file_counts: Files count grouped by status processed or being processed by this vector + store. Required. + :vartype file_counts: ~azure.ai.assistants.models.VectorStoreFileCount + :ivar status: The status of the vector store, which can be either ``expired``, ``in_progress``, + or ``completed``. A status of ``completed`` indicates that the vector store is ready for use. + Required. Known values are: "expired", "in_progress", and "completed". + :vartype status: str or ~azure.ai.assistants.models.VectorStoreStatus + :ivar expires_after: Details on when this vector store expires. + :vartype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy + :ivar expires_at: The Unix timestamp (in seconds) for when the vector store will expire. + :vartype expires_at: ~datetime.datetime + :ivar last_active_at: The Unix timestamp (in seconds) for when the vector store was last + active. Required. + :vartype last_active_at: ~datetime.datetime + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. 
Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always ``vector_store``. Required. Default value is \"vector_store\".""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the vector store was created. Required.""" + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the vector store. Required.""" + usage_bytes: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The total number of bytes used by the files in the vector store. Required.""" + file_counts: "_models.VectorStoreFileCount" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Files count grouped by status processed or being processed by this vector store. Required.""" + status: Union[str, "_models.VectorStoreStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The status of the vector store, which can be either ``expired``, ``in_progress``, or + ``completed``. A status of ``completed`` indicates that the vector store is ready for use. + Required. 
Known values are: \"expired\", \"in_progress\", and \"completed\".""" + expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Details on when this vector store expires.""" + expires_at: Optional[datetime.datetime] = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the vector store will expire.""" + last_active_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the vector store was last active. Required.""" + metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + name: str, + usage_bytes: int, + file_counts: "_models.VectorStoreFileCount", + status: Union[str, "_models.VectorStoreStatus"], + last_active_at: datetime.datetime, + metadata: Dict[str, str], + expires_after: Optional["_models.VectorStoreExpirationPolicy"] = None, + expires_at: Optional[datetime.datetime] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store"] = "vector_store" + + +class VectorStoreChunkingStrategyRequest(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyRequest, VectorStoreStaticChunkingStrategyRequest + + :ivar type: The object type. Required. Known values are: "auto" and "static". + :vartype type: str or ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequestType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Known values are: \"auto\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreAutoChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="auto"): + """The default strategy. This strategy currently uses a max_chunk_size_tokens of 800 and + chunk_overlap_tokens of 400. + + :ivar type: The object type, which is always 'auto'. Required. + :vartype type: str or ~azure.ai.assistants.models.AUTO + """ + + type: Literal[VectorStoreChunkingStrategyRequestType.AUTO] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'auto'. Required.""" + + @overload + def __init__( + self, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.AUTO, **kwargs) + + +class VectorStoreChunkingStrategyResponse(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyResponse, VectorStoreStaticChunkingStrategyResponse + + :ivar type: The object type. Required. Known values are: "other" and "static". + :vartype type: str or ~azure.ai.assistants.models.VectorStoreChunkingStrategyResponseType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """The object type. Required. Known values are: \"other\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreAutoChunkingStrategyResponse(VectorStoreChunkingStrategyResponse, discriminator="other"): + """This is returned when the chunking strategy is unknown. Typically, this is because the file was + indexed before the chunking_strategy concept was introduced in the API. + + :ivar type: The object type, which is always 'other'. Required. 
+ :vartype type: str or ~azure.ai.assistants.models.OTHER + """ + + type: Literal[VectorStoreChunkingStrategyResponseType.OTHER] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'other'. Required.""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.OTHER, **kwargs) + + +class VectorStoreConfiguration(_model_base.Model): + """Vector storage configuration is the list of data sources, used when multiple + files can be used for the enterprise file search. + + :ivar data_sources: Data sources. Required. + :vartype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] + """ + + data_sources: List["_models.VectorStoreDataSource"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Data sources. Required.""" + + @overload + def __init__( + self, + *, + data_sources: List["_models.VectorStoreDataSource"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreConfigurations(_model_base.Model): + """The structure, containing the list of vector storage configurations i.e. the list of azure + asset IDs. + + :ivar store_name: Name. Required. + :vartype store_name: str + :ivar store_configuration: Configurations. Required. 
+ :vartype store_configuration: ~azure.ai.assistants.models.VectorStoreConfiguration + """ + + store_name: str = rest_field(name="name", visibility=["read", "create", "update", "delete", "query"]) + """Name. Required.""" + store_configuration: "_models.VectorStoreConfiguration" = rest_field( + name="configuration", visibility=["read", "create", "update", "delete", "query"] + ) + """Configurations. Required.""" + + @overload + def __init__( + self, + *, + store_name: str, + store_configuration: "_models.VectorStoreConfiguration", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreDataSource(_model_base.Model): + """The structure, containing Azure asset URI path and the asset type of the file used as a data + source + for the enterprise file search. + + :ivar asset_identifier: Asset URI. Required. + :vartype asset_identifier: str + :ivar asset_type: The asset type. Required. Known values are: "uri_asset" and "id_asset". + :vartype asset_type: str or ~azure.ai.assistants.models.VectorStoreDataSourceAssetType + """ + + asset_identifier: str = rest_field(name="uri", visibility=["read", "create", "update", "delete", "query"]) + """Asset URI. Required.""" + asset_type: Union[str, "_models.VectorStoreDataSourceAssetType"] = rest_field( + name="type", visibility=["read", "create", "update", "delete", "query"] + ) + """The asset type. Required. Known values are: \"uri_asset\" and \"id_asset\".""" + + @overload + def __init__( + self, + *, + asset_identifier: str, + asset_type: Union[str, "_models.VectorStoreDataSourceAssetType"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreDeletionStatus(_model_base.Model): + """Response object for deleting a vector store. + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'vector_store.deleted'. Required. Default value + is "vector_store.deleted". + :vartype object: str + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether deletion was successful. Required.""" + object: Literal["vector_store.deleted"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always 'vector_store.deleted'. Required. Default value is + \"vector_store.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.deleted"] = "vector_store.deleted" + + +class VectorStoreExpirationPolicy(_model_base.Model): + """The expiration policy for a vector store. + + :ivar anchor: Anchor timestamp after which the expiration policy applies. Supported anchors: + ``last_active_at``. Required. 
"last_active_at" + :vartype anchor: str or ~azure.ai.assistants.models.VectorStoreExpirationPolicyAnchor + :ivar days: The number of days after the anchor time that the vector store will expire. Required. + :vartype days: int + """ + + anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Anchor timestamp after which the expiration policy applies. Supported anchors: + ``last_active_at``. Required. \"last_active_at\"""" + days: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The number of days after the anchor time that the vector store will expire. Required.""" + + @overload + def __init__( + self, + *, + anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"], + days: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreFile(_model_base.Model): + """Description of a file attached to a vector store. + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``vector_store.file``. Required. Default value + is "vector_store.file". + :vartype object: str + :ivar usage_bytes: The total vector store usage in bytes. Note that this may be different from + the original file + size. Required. + :vartype usage_bytes: int + :ivar created_at: The Unix timestamp (in seconds) for when the vector store file was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. + :vartype vector_store_id: str + :ivar status: The status of the vector store file, which can be either ``in_progress``, + ``completed``, ``cancelled``, or ``failed``.
The status ``completed`` indicates that the vector + store file is ready for use. Required. Known values are: "in_progress", "completed", "failed", + and "cancelled". + :vartype status: str or ~azure.ai.assistants.models.VectorStoreFileStatus + :ivar last_error: The last error associated with this vector store file. Will be ``null`` if + there are no errors. Required. + :vartype last_error: ~azure.ai.assistants.models.VectorStoreFileError + :ivar chunking_strategy: The strategy used to chunk the file. Required. + :vartype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyResponse + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store.file"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always ``vector_store.file``. Required. Default value is + \"vector_store.file\".""" + usage_bytes: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The total vector store usage in bytes. Note that this may be different from the original file + size. Required.""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the vector store file was created. Required.""" + vector_store_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the vector store that the file is attached to. Required.""" + status: Union[str, "_models.VectorStoreFileStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The status of the vector store file, which can be either ``in_progress``, ``completed``, + ``cancelled``, or ``failed``. The status ``completed`` indicates that the vector store file is + ready for use. Required. 
Known values are: \"in_progress\", \"completed\", \"failed\", and + \"cancelled\".""" + last_error: "_models.VectorStoreFileError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The last error associated with this vector store file. Will be ``null`` if there are no errors. + Required.""" + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The strategy used to chunk the file. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + usage_bytes: int, + created_at: datetime.datetime, + vector_store_id: str, + status: Union[str, "_models.VectorStoreFileStatus"], + last_error: "_models.VectorStoreFileError", + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.file"] = "vector_store.file" + + +class VectorStoreFileBatch(_model_base.Model): + """A batch of files attached to a vector store. + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``vector_store.file_batch``. Required. Default + value is "vector_store.files_batch". + :vartype object: str + :ivar created_at: The Unix timestamp (in seconds) for when the vector store files batch was + created. Required. + :vartype created_at: ~datetime.datetime + :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. + :vartype vector_store_id: str + :ivar status: The status of the vector store files batch, which can be either ``in_progress``, + ``completed``, ``cancelled`` or ``failed``. 
Required. Known values are: "in_progress", + "completed", "cancelled", and "failed". + :vartype status: str or ~azure.ai.assistants.models.VectorStoreFileBatchStatus + :ivar file_counts: Files count grouped by status processed or being processed by this vector + store. Required. + :vartype file_counts: ~azure.ai.assistants.models.VectorStoreFileCount + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store.files_batch"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The object type, which is always ``vector_store.file_batch``. Required. Default value is + \"vector_store.files_batch\".""" + created_at: datetime.datetime = rest_field( + visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" + ) + """The Unix timestamp (in seconds) for when the vector store files batch was created. Required.""" + vector_store_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the vector store that the file is attached to. Required.""" + status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The status of the vector store files batch, which can be either ``in_progress``, ``completed``, + ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", \"completed\", + \"cancelled\", and \"failed\".""" + file_counts: "_models.VectorStoreFileCount" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Files count grouped by status processed or being processed by this vector store. 
Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + vector_store_id: str, + status: Union[str, "_models.VectorStoreFileBatchStatus"], + file_counts: "_models.VectorStoreFileCount", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.files_batch"] = "vector_store.files_batch" + + +class VectorStoreFileCount(_model_base.Model): + """Counts of files processed or being processed by this vector store grouped by status. + + :ivar in_progress: The number of files that are currently being processed. Required. + :vartype in_progress: int + :ivar completed: The number of files that have been successfully processed. Required. + :vartype completed: int + :ivar failed: The number of files that have failed to process. Required. + :vartype failed: int + :ivar cancelled: The number of files that were cancelled. Required. + :vartype cancelled: int + :ivar total: The total number of files. Required. + :vartype total: int + """ + + in_progress: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The number of files that are currently being processed. Required.""" + completed: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The number of files that have been successfully processed. Required.""" + failed: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The number of files that have failed to process. Required.""" + cancelled: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The number of files that were cancelled. 
Required.""" + total: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The total number of files. Required.""" + + @overload + def __init__( + self, + *, + in_progress: int, + completed: int, + failed: int, + cancelled: int, + total: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreFileDeletionStatus(_model_base.Model): + """Response object for deleting a vector store file relationship. + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'vector_store.file.deleted'. Required. Default value + is "vector_store.file.deleted". + :vartype object: str + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A value indicating whether deletion was successful. Required.""" + object: Literal["vector_store.file.deleted"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The object type, which is always 'vector_store.file.deleted'. Required. Default value is + \"vector_store.file.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.file.deleted"] = "vector_store.file.deleted" + + +class VectorStoreFileError(_model_base.Model): + """Details on the error that may have occurred while processing a file for this vector store. + + :ivar code: One of ``server_error``, ``invalid_file``, or ``unsupported_file``. Required. Known values are: + "server_error", "invalid_file", and "unsupported_file". + :vartype code: str or ~azure.ai.assistants.models.VectorStoreFileErrorCode + :ivar message: A human-readable description of the error. Required. + :vartype message: str + """ + + code: Union[str, "_models.VectorStoreFileErrorCode"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """One of ``server_error``, ``invalid_file``, or ``unsupported_file``. Required. Known values are: + \"server_error\", \"invalid_file\", and \"unsupported_file\".""" + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A human-readable description of the error. Required.""" + + @overload + def __init__( + self, + *, + code: Union[str, "_models.VectorStoreFileErrorCode"], + message: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreStaticChunkingStrategyOptions(_model_base.Model): + """Options to configure a vector store static chunking strategy. + + :ivar max_chunk_size_tokens: The maximum number of tokens in each chunk. The default value is + 800. The minimum value is 100 and the maximum value is 4096. Required. + :vartype max_chunk_size_tokens: int + :ivar chunk_overlap_tokens: The number of tokens that overlap between chunks. The default value + is 400.
+ Note that the overlap must not exceed half of max_chunk_size_tokens. Required. + :vartype chunk_overlap_tokens: int + """ + + max_chunk_size_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The maximum number of tokens in each chunk. The default value is 800. The minimum value is 100 + and the maximum value is 4096. Required.""" + chunk_overlap_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The number of tokens that overlap between chunks. The default value is 400. + Note that the overlap must not exceed half of max_chunk_size_tokens. Required.""" + + @overload + def __init__( + self, + *, + max_chunk_size_tokens: int, + chunk_overlap_tokens: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreStaticChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="static"): + """A statically configured chunking strategy. + + :ivar type: The object type, which is always 'static'. Required. + :vartype type: str or ~azure.ai.assistants.models.STATIC + :ivar static: The options for the static chunking strategy. Required. + :vartype static: ~azure.ai.assistants.models.VectorStoreStaticChunkingStrategyOptions + """ + + type: Literal[VectorStoreChunkingStrategyRequestType.STATIC] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'static'. Required.""" + static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The options for the static chunking strategy. 
Required.""" + + @overload + def __init__( + self, + *, + static: "_models.VectorStoreStaticChunkingStrategyOptions", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.STATIC, **kwargs) + + +class VectorStoreStaticChunkingStrategyResponse( + VectorStoreChunkingStrategyResponse, discriminator="static" +): # pylint: disable=name-too-long + """A statically configured chunking strategy. + + :ivar type: The object type, which is always 'static'. Required. + :vartype type: str or ~azure.ai.assistants.models.STATIC + :ivar static: The options for the static chunking strategy. Required. + :vartype static: ~azure.ai.assistants.models.VectorStoreStaticChunkingStrategyOptions + """ + + type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'static'. Required.""" + static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The options for the static chunking strategy. Required.""" + + @overload + def __init__( + self, + *, + static: "_models.VectorStoreStaticChunkingStrategyOptions", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.STATIC, **kwargs) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/py.typed b/sdk/ai/azure-ai-assistants/azure/ai/assistants/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. 
\ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/dev_requirements.txt b/sdk/ai/azure-ai-assistants/dev_requirements.txt new file mode 100644 index 000000000000..105486471444 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/dev_requirements.txt @@ -0,0 +1,3 @@ +-e ../../../tools/azure-sdk-tools +../../core/azure-core +aiohttp \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/setup.py b/sdk/ai/azure-ai-assistants/setup.py new file mode 100644 index 000000000000..2752c3ba025c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/setup.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# coding: utf-8 + +import os +import re +from setuptools import setup, find_packages + + +PACKAGE_NAME = "azure-ai-assistants" +PACKAGE_PPRINT_NAME = "Azure Ai Assistants" + +# a-b-c => a/b/c +package_folder_path = PACKAGE_NAME.replace("-", "/") + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: + version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError("Cannot find version information") + + +setup( + name=PACKAGE_NAME, + version=version, + description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + long_description=open("README.md", "r").read(), + long_description_content_type="text/markdown", + license="MIT License", + author="Microsoft Corporation", + author_email="azpysdkhelp@microsoft.com", + 
url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", + keywords="azure, azure sdk", + classifiers=[ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "License :: OSI Approved :: MIT License", + ], + zip_safe=False, + packages=find_packages( + exclude=[ + "tests", + # Exclude packages that will be covered by PEP420 or nspkg + "azure", + "azure.ai", + ] + ), + include_package_data=True, + package_data={ + "azure.ai.assistants": ["py.typed"], + }, + install_requires=[ + "isodate>=0.6.1", + "azure-core>=1.30.0", + "typing-extensions>=4.6.0", + ], + python_requires=">=3.8", +) diff --git a/sdk/ai/azure-ai-assistants/tsp-location.yaml b/sdk/ai/azure-ai-assistants/tsp-location.yaml new file mode 100644 index 000000000000..cac157fb46ba --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/ai/Azure.AI.Assistants +commit: 9d38aaedbb624278ee9dc60c1f6ffe11ff38bd30 +repo: Azure/azure-rest-api-specs +additionalDirectories: From daf5732981e0d6a328a40f3ee61832c31b75d791 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Mon, 7 Apr 2025 10:38:27 -0700 Subject: [PATCH 2/7] Regenerate code --- .../apiview-properties.json | 101 +- .../azure/ai/assistants/__init__.py | 4 +- .../azure/ai/assistants/_client.py | 52 +- .../azure/ai/assistants/_configuration.py | 38 +- .../ai/assistants/_operations/__init__.py | 4 +- .../ai/assistants/_operations/_operations.py | 1088 +++++++++++------ .../azure/ai/assistants/_types.py | 10 +- .../azure/ai/assistants/_vendor.py | 6 +- .../azure/ai/assistants/aio/__init__.py | 4 +- .../azure/ai/assistants/aio/_client.py | 50 +- 
.../azure/ai/assistants/aio/_configuration.py | 36 +- .../ai/assistants/aio/_operations/__init__.py | 4 +- .../assistants/aio/_operations/_operations.py | 1074 ++++++++++------ .../azure/ai/assistants/aio/_vendor.py | 6 +- .../azure/ai/assistants/models/__init__.py | 46 +- .../azure/ai/assistants/models/_enums.py | 30 +- .../azure/ai/assistants/models/_models.py | 464 ++++--- sdk/ai/azure-ai-assistants/tsp-location.yaml | 2 +- 18 files changed, 1926 insertions(+), 1093 deletions(-) diff --git a/sdk/ai/azure-ai-assistants/apiview-properties.json b/sdk/ai/azure-ai-assistants/apiview-properties.json index 840508d6d84d..407356d45103 100644 --- a/sdk/ai/azure-ai-assistants/apiview-properties.json +++ b/sdk/ai/azure-ai-assistants/apiview-properties.json @@ -1,13 +1,13 @@ { "CrossLanguagePackageId": "Azure.AI.Assistants", "CrossLanguageDefinitionId": { - "azure.ai.assistants.models.Agent": "Azure.AI.Assistants.Agent", - "azure.ai.assistants.models.AgentDeletionStatus": "Azure.AI.Assistants.AgentDeletionStatus", - "azure.ai.assistants.models.AgentsApiResponseFormat": "Azure.AI.Assistants.AgentsApiResponseFormat", - "azure.ai.assistants.models.AgentsNamedToolChoice": "Azure.AI.Assistants.AgentsNamedToolChoice", - "azure.ai.assistants.models.AgentThread": "Azure.AI.Assistants.AgentThread", - "azure.ai.assistants.models.AgentThreadCreationOptions": "Azure.AI.Assistants.AgentThreadCreationOptions", "azure.ai.assistants.models.AISearchIndexResource": "Azure.AI.Assistants.AISearchIndexResource", + "azure.ai.assistants.models.Assistant": "Azure.AI.Assistants.Assistant", + "azure.ai.assistants.models.AssistantDeletionStatus": "Azure.AI.Assistants.AssistantDeletionStatus", + "azure.ai.assistants.models.AssistantsApiResponseFormat": "Azure.AI.Assistants.AssistantsApiResponseFormat", + "azure.ai.assistants.models.AssistantsNamedToolChoice": "Azure.AI.Assistants.AssistantsNamedToolChoice", + "azure.ai.assistants.models.AssistantThread": "Azure.AI.Assistants.AssistantThread", + 
"azure.ai.assistants.models.AssistantThreadCreationOptions": "Azure.AI.Assistants.AssistantThreadCreationOptions", "azure.ai.assistants.models.AzureAISearchResource": "Azure.AI.Assistants.AzureAISearchResource", "azure.ai.assistants.models.ToolDefinition": "Azure.AI.Assistants.ToolDefinition", "azure.ai.assistants.models.AzureAISearchToolDefinition": "Azure.AI.Assistants.AzureAISearchToolDefinition", @@ -19,7 +19,6 @@ "azure.ai.assistants.models.BingGroundingToolDefinition": "Azure.AI.Assistants.BingGroundingToolDefinition", "azure.ai.assistants.models.CodeInterpreterToolDefinition": "Azure.AI.Assistants.CodeInterpreterToolDefinition", "azure.ai.assistants.models.CodeInterpreterToolResource": "Azure.AI.Assistants.CodeInterpreterToolResource", - "azure.ai.assistants.models.ErrorResponse": "Azure.ResourceManager.CommonTypes.ErrorResponse", "azure.ai.assistants.models.FileDeletionStatus": "Azure.AI.Assistants.FileDeletionStatus", "azure.ai.assistants.models.FileListResponse": "Azure.AI.Assistants.FileListResponse", "azure.ai.assistants.models.FileSearchRankingOptions": "Azure.AI.Assistants.FileSearchRankingOptions", @@ -61,7 +60,7 @@ "azure.ai.assistants.models.MessageTextUrlCitationDetails": "Azure.AI.Assistants.MessageTextUrlCitationDetails", "azure.ai.assistants.models.MicrosoftFabricToolDefinition": "Azure.AI.Assistants.MicrosoftFabricToolDefinition", "azure.ai.assistants.models.OpenAIFile": "Azure.AI.Assistants.OpenAIFile", - "azure.ai.assistants.models.OpenAIPageableListOfAgent": "Azure.AI.Assistants.OpenAIPageableListOf", + "azure.ai.assistants.models.OpenAIPageableListOfAssistant": "Azure.AI.Assistants.OpenAIPageableListOf", "azure.ai.assistants.models.OpenAIPageableListOfRunStep": "Azure.AI.Assistants.OpenAIPageableListOf", "azure.ai.assistants.models.OpenAIPageableListOfThreadMessage": "Azure.AI.Assistants.OpenAIPageableListOf", "azure.ai.assistants.models.OpenAIPageableListOfThreadRun": "Azure.AI.Assistants.OpenAIPageableListOf", @@ -163,7 +162,7 @@ 
"azure.ai.assistants.models.OpenApiAuthType": "Azure.AI.Assistants.OpenApiAuthType", "azure.ai.assistants.models.VectorStoreDataSourceAssetType": "Azure.AI.Assistants.VectorStoreDataSourceAssetType", "azure.ai.assistants.models.AzureAISearchQueryType": "Azure.AI.Assistants.AzureAISearchQueryType", - "azure.ai.assistants.models.AgentsApiResponseFormatMode": "Azure.AI.Assistants.AgentsApiResponseFormatMode", + "azure.ai.assistants.models.AssistantsApiResponseFormatMode": "Azure.AI.Assistants.AssistantsApiResponseFormatMode", "azure.ai.assistants.models.ResponseFormat": "Azure.AI.Assistants.ResponseFormat", "azure.ai.assistants.models.ListSortOrder": "Azure.AI.Assistants.ListSortOrder", "azure.ai.assistants.models.MessageRole": "Azure.AI.Assistants.MessageRole", @@ -172,8 +171,8 @@ "azure.ai.assistants.models.RunStatus": "Azure.AI.Assistants.RunStatus", "azure.ai.assistants.models.IncompleteDetailsReason": "Azure.AI.Assistants.IncompleteDetailsReason", "azure.ai.assistants.models.TruncationStrategy": "Azure.AI.Assistants.TruncationStrategy", - "azure.ai.assistants.models.AgentsApiToolChoiceOptionMode": "Azure.AI.Assistants.AgentsApiToolChoiceOptionMode", - "azure.ai.assistants.models.AgentsNamedToolChoiceType": "Azure.AI.Assistants.AgentsNamedToolChoiceType", + "azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode": "Azure.AI.Assistants.AssistantsApiToolChoiceOptionMode", + "azure.ai.assistants.models.AssistantsNamedToolChoiceType": "Azure.AI.Assistants.AssistantsNamedToolChoiceType", "azure.ai.assistants.models.RunAdditionalFieldList": "Azure.AI.Assistants.RunAdditionalFieldList", "azure.ai.assistants.models.RunStepType": "Azure.AI.Assistants.RunStepType", "azure.ai.assistants.models.RunStepStatus": "Azure.AI.Assistants.RunStepStatus", @@ -194,45 +193,45 @@ "azure.ai.assistants.models.MessageStreamEvent": "Azure.AI.Assistants.MessageStreamEvent", "azure.ai.assistants.models.ErrorEvent": "Azure.AI.Assistants.ErrorEvent", "azure.ai.assistants.models.DoneEvent": 
"Azure.AI.Assistants.DoneEvent", - "azure.ai.assistants.models.AgentStreamEvent": "Azure.AI.Assistants.AgentStreamEvent", - "azure.ai.assistants.AIAssistantClient.create_agent": "Azure.AI.Assistants.createAgent", - "azure.ai.assistants.AIAssistantClient.list_agents": "Azure.AI.Assistants.listAgents", - "azure.ai.assistants.AIAssistantClient.get_agent": "Azure.AI.Assistants.getAgent", - "azure.ai.assistants.AIAssistantClient.update_agent": "Azure.AI.Assistants.updateAgent", - "azure.ai.assistants.AIAssistantClient.delete_agent": "Azure.AI.Assistants.deleteAgent", - "azure.ai.assistants.AIAssistantClient.create_thread": "Azure.AI.Assistants.createThread", - "azure.ai.assistants.AIAssistantClient.get_thread": "Azure.AI.Assistants.getThread", - "azure.ai.assistants.AIAssistantClient.update_thread": "Azure.AI.Assistants.updateThread", - "azure.ai.assistants.AIAssistantClient.delete_thread": "Azure.AI.Assistants.deleteThread", - "azure.ai.assistants.AIAssistantClient.create_message": "Azure.AI.Assistants.createMessage", - "azure.ai.assistants.AIAssistantClient.list_messages": "Azure.AI.Assistants.listMessages", - "azure.ai.assistants.AIAssistantClient.get_message": "Azure.AI.Assistants.getMessage", - "azure.ai.assistants.AIAssistantClient.update_message": "Azure.AI.Assistants.updateMessage", - "azure.ai.assistants.AIAssistantClient.create_run": "Azure.AI.Assistants.createRun", - "azure.ai.assistants.AIAssistantClient.list_runs": "Azure.AI.Assistants.listRuns", - "azure.ai.assistants.AIAssistantClient.get_run": "Azure.AI.Assistants.getRun", - "azure.ai.assistants.AIAssistantClient.update_run": "Azure.AI.Assistants.updateRun", - "azure.ai.assistants.AIAssistantClient.submit_tool_outputs_to_run": "Azure.AI.Assistants.submitToolOutputsToRun", - "azure.ai.assistants.AIAssistantClient.cancel_run": "Azure.AI.Assistants.cancelRun", - "azure.ai.assistants.AIAssistantClient.create_thread_and_run": "Azure.AI.Assistants.createThreadAndRun", - 
"azure.ai.assistants.AIAssistantClient.get_run_step": "Azure.AI.Assistants.getRunStep", - "azure.ai.assistants.AIAssistantClient.list_run_steps": "Azure.AI.Assistants.listRunSteps", - "azure.ai.assistants.AIAssistantClient.list_files": "Azure.AI.Assistants.listFiles", - "azure.ai.assistants.AIAssistantClient.upload_file": "Azure.AI.Assistants.uploadFile", - "azure.ai.assistants.AIAssistantClient.delete_file": "Azure.AI.Assistants.deleteFile", - "azure.ai.assistants.AIAssistantClient.get_file": "Azure.AI.Assistants.getFile", - "azure.ai.assistants.AIAssistantClient.list_vector_stores": "Azure.AI.Assistants.listVectorStores", - "azure.ai.assistants.AIAssistantClient.create_vector_store": "Azure.AI.Assistants.createVectorStore", - "azure.ai.assistants.AIAssistantClient.get_vector_store": "Azure.AI.Assistants.getVectorStore", - "azure.ai.assistants.AIAssistantClient.modify_vector_store": "Azure.AI.Assistants.modifyVectorStore", - "azure.ai.assistants.AIAssistantClient.delete_vector_store": "Azure.AI.Assistants.deleteVectorStore", - "azure.ai.assistants.AIAssistantClient.list_vector_store_files": "Azure.AI.Assistants.listVectorStoreFiles", - "azure.ai.assistants.AIAssistantClient.create_vector_store_file": "Azure.AI.Assistants.createVectorStoreFile", - "azure.ai.assistants.AIAssistantClient.get_vector_store_file": "Azure.AI.Assistants.getVectorStoreFile", - "azure.ai.assistants.AIAssistantClient.delete_vector_store_file": "Azure.AI.Assistants.deleteVectorStoreFile", - "azure.ai.assistants.AIAssistantClient.create_vector_store_file_batch": "Azure.AI.Assistants.createVectorStoreFileBatch", - "azure.ai.assistants.AIAssistantClient.get_vector_store_file_batch": "Azure.AI.Assistants.getVectorStoreFileBatch", - "azure.ai.assistants.AIAssistantClient.cancel_vector_store_file_batch": "Azure.AI.Assistants.cancelVectorStoreFileBatch", - "azure.ai.assistants.AIAssistantClient.list_vector_store_file_batch_files": "Azure.AI.Assistants.listVectorStoreFileBatchFiles" + 
"azure.ai.assistants.models.AssistantStreamEvent": "Azure.AI.Assistants.AssistantStreamEvent", + "azure.ai.assistants.AssistantsClient.create_assistant": "Azure.AI.Assistants.createAssistant", + "azure.ai.assistants.AssistantsClient.list_assistants": "Azure.AI.Assistants.listAssistants", + "azure.ai.assistants.AssistantsClient.get_assistant": "Azure.AI.Assistants.getAssistant", + "azure.ai.assistants.AssistantsClient.update_assistant": "Azure.AI.Assistants.updateAssistant", + "azure.ai.assistants.AssistantsClient.delete_assistant": "Azure.AI.Assistants.deleteAssistant", + "azure.ai.assistants.AssistantsClient.create_thread": "Azure.AI.Assistants.createThread", + "azure.ai.assistants.AssistantsClient.get_thread": "Azure.AI.Assistants.getThread", + "azure.ai.assistants.AssistantsClient.update_thread": "Azure.AI.Assistants.updateThread", + "azure.ai.assistants.AssistantsClient.delete_thread": "Azure.AI.Assistants.deleteThread", + "azure.ai.assistants.AssistantsClient.create_message": "Azure.AI.Assistants.createMessage", + "azure.ai.assistants.AssistantsClient.list_messages": "Azure.AI.Assistants.listMessages", + "azure.ai.assistants.AssistantsClient.get_message": "Azure.AI.Assistants.getMessage", + "azure.ai.assistants.AssistantsClient.update_message": "Azure.AI.Assistants.updateMessage", + "azure.ai.assistants.AssistantsClient.create_run": "Azure.AI.Assistants.createRun", + "azure.ai.assistants.AssistantsClient.list_runs": "Azure.AI.Assistants.listRuns", + "azure.ai.assistants.AssistantsClient.get_run": "Azure.AI.Assistants.getRun", + "azure.ai.assistants.AssistantsClient.update_run": "Azure.AI.Assistants.updateRun", + "azure.ai.assistants.AssistantsClient.submit_tool_outputs_to_run": "Azure.AI.Assistants.submitToolOutputsToRun", + "azure.ai.assistants.AssistantsClient.cancel_run": "Azure.AI.Assistants.cancelRun", + "azure.ai.assistants.AssistantsClient.create_thread_and_run": "Azure.AI.Assistants.createThreadAndRun", + 
"azure.ai.assistants.AssistantsClient.get_run_step": "Azure.AI.Assistants.getRunStep", + "azure.ai.assistants.AssistantsClient.list_run_steps": "Azure.AI.Assistants.listRunSteps", + "azure.ai.assistants.AssistantsClient.list_files": "Azure.AI.Assistants.listFiles", + "azure.ai.assistants.AssistantsClient.upload_file": "Azure.AI.Assistants.uploadFile", + "azure.ai.assistants.AssistantsClient.delete_file": "Azure.AI.Assistants.deleteFile", + "azure.ai.assistants.AssistantsClient.get_file": "Azure.AI.Assistants.getFile", + "azure.ai.assistants.AssistantsClient.list_vector_stores": "Azure.AI.Assistants.listVectorStores", + "azure.ai.assistants.AssistantsClient.create_vector_store": "Azure.AI.Assistants.createVectorStore", + "azure.ai.assistants.AssistantsClient.get_vector_store": "Azure.AI.Assistants.getVectorStore", + "azure.ai.assistants.AssistantsClient.modify_vector_store": "Azure.AI.Assistants.modifyVectorStore", + "azure.ai.assistants.AssistantsClient.delete_vector_store": "Azure.AI.Assistants.deleteVectorStore", + "azure.ai.assistants.AssistantsClient.list_vector_store_files": "Azure.AI.Assistants.listVectorStoreFiles", + "azure.ai.assistants.AssistantsClient.create_vector_store_file": "Azure.AI.Assistants.createVectorStoreFile", + "azure.ai.assistants.AssistantsClient.get_vector_store_file": "Azure.AI.Assistants.getVectorStoreFile", + "azure.ai.assistants.AssistantsClient.delete_vector_store_file": "Azure.AI.Assistants.deleteVectorStoreFile", + "azure.ai.assistants.AssistantsClient.create_vector_store_file_batch": "Azure.AI.Assistants.createVectorStoreFileBatch", + "azure.ai.assistants.AssistantsClient.get_vector_store_file_batch": "Azure.AI.Assistants.getVectorStoreFileBatch", + "azure.ai.assistants.AssistantsClient.cancel_vector_store_file_batch": "Azure.AI.Assistants.cancelVectorStoreFileBatch", + "azure.ai.assistants.AssistantsClient.list_vector_store_file_batch_files": "Azure.AI.Assistants.listVectorStoreFileBatchFiles" } } \ No newline at end of file diff 
--git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/__init__.py index 9b2280c14ff9..2484b50c5378 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/__init__.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._client import AIAssistantClient # type: ignore +from ._client import AssistantsClient # type: ignore from ._version import VERSION __version__ = VERSION @@ -25,7 +25,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "AIAssistantClient", + "AssistantsClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py index 1f43c04fda7f..433f72ca45b5 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -15,20 +16,29 @@ from azure.core.pipeline import policies from azure.core.rest import HttpRequest, HttpResponse -from ._configuration import AIAssistantClientConfiguration -from ._operations import AIAssistantClientOperationsMixin +from ._configuration import AssistantsClientConfiguration +from ._operations import AssistantsClientOperationsMixin from ._serialization import Deserializer, Serializer if TYPE_CHECKING: from azure.core.credentials import TokenCredential -class AIAssistantClient(AIAssistantClientOperationsMixin): - """AIAssistantClient. +class AssistantsClient(AssistantsClientOperationsMixin): + """AssistantsClient. 
- :param endpoint: Project endpoint in the form of: - https://.services.ai.azure.com/api/projects/. Required. + :param endpoint: The Azure AI Foundry project endpoint, in the form + ``https://.api.azureml.ms`` or + ``https://..api.azureml.ms``, where is the + Azure region where the project is deployed (e.g. westus) and is the GUID of + the Enterprise private link. Required. :type endpoint: str + :param subscription_id: The Azure subscription ID. Required. + :type subscription_id: str + :param resource_group_name: The name of the Azure Resource Group. Required. + :type resource_group_name: str + :param project_name: The Azure AI Foundry project name. Required. + :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or @@ -38,9 +48,24 @@ class AIAssistantClient(AIAssistantClientOperationsMixin): :paramtype api_version: str """ - def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - _endpoint = "{endpoint}" - self._config = AIAssistantClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: Union[AzureKeyCredential, "TokenCredential"], + **kwargs: Any + ) -> None: + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" + self._config = AssistantsClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + **kwargs + ) _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -85,6 +110,15 @@ def send_request(self, request: HttpRequest, *, 
stream: bool = False, **kwargs: request_copy = deepcopy(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py index 4cfdb063c073..2b2e6944f1dd 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py @@ -17,15 +17,24 @@ from azure.core.credentials import TokenCredential -class AIAssistantClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for AIAssistantClient. +class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for AssistantsClient. Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Project endpoint in the form of: - https://.services.ai.azure.com/api/projects/. Required. + :param endpoint: The Azure AI Foundry project endpoint, in the form + ``https://.api.azureml.ms`` or + ``https://..api.azureml.ms``, where is the + Azure region where the project is deployed (e.g. westus) and is the GUID of + the Enterprise private link. Required. :type endpoint: str + :param subscription_id: The Azure subscription ID. Required. + :type subscription_id: str + :param resource_group_name: The name of the Azure Resource Group. Required. 
+ :type resource_group_name: str + :param project_name: The Azure AI Foundry project name. Required. + :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or @@ -35,18 +44,35 @@ class AIAssistantClientConfiguration: # pylint: disable=too-many-instance-attri :paramtype api_version: str """ - def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: Union[AzureKeyCredential, "TokenCredential"], + **kwargs: Any, + ) -> None: api_version: str = kwargs.pop("api_version", "latest") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if resource_group_name is None: + raise ValueError("Parameter 'resource_group_name' must not be None.") + if project_name is None: + raise ValueError("Parameter 'project_name' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint + self.subscription_id = subscription_id + self.resource_group_name = resource_group_name + self.project_name = project_name self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py 
b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py index 28950a5960c0..ee3f17d82ddc 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/__init__.py @@ -12,14 +12,14 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import AIAssistantClientOperationsMixin # type: ignore +from ._operations import AssistantsClientOperationsMixin # type: ignore from ._patch import __all__ as _patch_all from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ - "AIAssistantClientOperationsMixin", + "AssistantsClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py index 210901feac86..bf3858e5c3c9 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py @@ -29,7 +29,7 @@ from .. 
import _model_base, models as _models from .._model_base import SdkJSONEncoder, _deserialize from .._serialization import Serializer -from .._vendor import AIAssistantClientMixinABC, prepare_multipart_form_data +from .._vendor import AssistantsClientMixinABC, prepare_multipart_form_data if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -47,7 +47,7 @@ _SERIALIZER.client_side_validation = False -def build_ai_assistant_create_agent_request(**kwargs: Any) -> HttpRequest: +def build_assistants_create_assistant_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -69,7 +69,7 @@ def build_ai_assistant_create_agent_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_list_agents_request( +def build_assistants_list_assistants_request( *, limit: Optional[int] = None, order: Optional[Union[str, _models.ListSortOrder]] = None, @@ -103,7 +103,7 @@ def build_ai_assistant_list_agents_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_agent_request(agent_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_get_assistant_request(assistant_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -113,7 +113,7 @@ def build_ai_assistant_get_agent_request(agent_id: str, **kwargs: Any) -> HttpRe # Construct URL _url = "/assistants/{assistantId}" path_format_arguments = { - "assistantId": _SERIALIZER.url("agent_id", agent_id, "str"), + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -127,7 +127,9 @@ def 
build_ai_assistant_get_agent_request(agent_id: str, **kwargs: Any) -> HttpRe return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_update_agent_request(agent_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_update_assistant_request( # pylint: disable=name-too-long + assistant_id: str, **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -138,7 +140,7 @@ def build_ai_assistant_update_agent_request(agent_id: str, **kwargs: Any) -> Htt # Construct URL _url = "/assistants/{assistantId}" path_format_arguments = { - "assistantId": _SERIALIZER.url("agent_id", agent_id, "str"), + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -154,7 +156,9 @@ def build_ai_assistant_update_agent_request(agent_id: str, **kwargs: Any) -> Htt return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_delete_agent_request(agent_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_delete_assistant_request( # pylint: disable=name-too-long + assistant_id: str, **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -164,7 +168,7 @@ def build_ai_assistant_delete_agent_request(agent_id: str, **kwargs: Any) -> Htt # Construct URL _url = "/assistants/{assistantId}" path_format_arguments = { - "assistantId": _SERIALIZER.url("agent_id", agent_id, "str"), + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -178,7 +182,7 @@ def build_ai_assistant_delete_agent_request(agent_id: str, **kwargs: Any) -> Htt return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, 
**kwargs) -def build_ai_assistant_create_thread_request(**kwargs: Any) -> HttpRequest: +def build_assistants_create_thread_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -200,7 +204,7 @@ def build_ai_assistant_create_thread_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -224,7 +228,7 @@ def build_ai_assistant_get_thread_request(thread_id: str, **kwargs: Any) -> Http return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_update_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_update_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -251,7 +255,7 @@ def build_ai_assistant_update_thread_request(thread_id: str, **kwargs: Any) -> H return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_delete_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_delete_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -275,9 +279,7 @@ def build_ai_assistant_delete_thread_request(thread_id: str, **kwargs: Any) -> H return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def 
build_ai_assistant_create_message_request( # pylint: disable=name-too-long - thread_id: str, **kwargs: Any -) -> HttpRequest: +def build_assistants_create_message_request(thread_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -304,7 +306,7 @@ def build_ai_assistant_create_message_request( # pylint: disable=name-too-long return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_list_messages_request( +def build_assistants_list_messages_request( thread_id: str, *, run_id: Optional[str] = None, @@ -347,7 +349,7 @@ def build_ai_assistant_list_messages_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_get_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -372,9 +374,7 @@ def build_ai_assistant_get_message_request(thread_id: str, message_id: str, **kw return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_update_message_request( # pylint: disable=name-too-long - thread_id: str, message_id: str, **kwargs: Any -) -> HttpRequest: +def build_assistants_update_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -402,7 +402,7 @@ def build_ai_assistant_update_message_request( # pylint: disable=name-too-long return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_create_run_request( +def 
build_assistants_create_run_request( thread_id: str, *, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -433,7 +433,7 @@ def build_ai_assistant_create_run_request( return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_list_runs_request( +def build_assistants_list_runs_request( thread_id: str, *, limit: Optional[int] = None, @@ -473,7 +473,7 @@ def build_ai_assistant_list_runs_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_get_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -498,7 +498,7 @@ def build_ai_assistant_get_run_request(thread_id: str, run_id: str, **kwargs: An return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_update_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_update_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -526,7 +526,7 @@ def build_ai_assistant_update_run_request(thread_id: str, run_id: str, **kwargs: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_submit_tool_outputs_to_run_request( # pylint: disable=name-too-long +def build_assistants_submit_tool_outputs_to_run_request( # pylint: disable=name-too-long thread_id: str, run_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", 
{}) or {}) @@ -556,7 +556,7 @@ def build_ai_assistant_submit_tool_outputs_to_run_request( # pylint: disable=na return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_cancel_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_cancel_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -581,7 +581,7 @@ def build_ai_assistant_cancel_run_request(thread_id: str, run_id: str, **kwargs: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_create_thread_and_run_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long +def build_assistants_create_thread_and_run_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -603,7 +603,7 @@ def build_ai_assistant_create_thread_and_run_request(**kwargs: Any) -> HttpReque return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_run_step_request( +def build_assistants_get_run_step_request( thread_id: str, run_id: str, step_id: str, @@ -638,7 +638,7 @@ def build_ai_assistant_get_run_step_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_list_run_steps_request( # pylint: disable=name-too-long +def build_assistants_list_run_steps_request( thread_id: str, run_id: str, *, @@ -683,7 +683,7 @@ def build_ai_assistant_list_run_steps_request( # pylint: disable=name-too-long return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_list_files_request( +def build_assistants_list_files_request( *, purpose: 
Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -706,7 +706,7 @@ def build_ai_assistant_list_files_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_upload_file_request(**kwargs: Any) -> HttpRequest: +def build_assistants_upload_file_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -725,7 +725,7 @@ def build_ai_assistant_upload_file_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_delete_file_request(file_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_delete_file_request(file_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -749,7 +749,7 @@ def build_ai_assistant_delete_file_request(file_id: str, **kwargs: Any) -> HttpR return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_file_request(file_id: str, **kwargs: Any) -> HttpRequest: +def build_assistants_get_file_request(file_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -773,7 +773,7 @@ def build_ai_assistant_get_file_request(file_id: str, **kwargs: Any) -> HttpRequ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_file_content_request( # pylint: disable=name-too-long +def build_assistants_get_file_content_request( # pylint: disable=name-too-long file_id: str, **kwargs: Any ) -> HttpRequest: _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -799,7 +799,7 @@ def build_ai_assistant_get_file_content_request( # pylint: disable=name-too-lon return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_list_vector_stores_request( # pylint: disable=name-too-long +def build_assistants_list_vector_stores_request( # pylint: disable=name-too-long *, limit: Optional[int] = None, order: Optional[Union[str, _models.ListSortOrder]] = None, @@ -833,7 +833,7 @@ def build_ai_assistant_list_vector_stores_request( # pylint: disable=name-too-l return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_create_vector_store_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long +def build_assistants_create_vector_store_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -855,7 +855,7 @@ def build_ai_assistant_create_vector_store_request(**kwargs: Any) -> HttpRequest return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_vector_store_request( # pylint: disable=name-too-long +def build_assistants_get_vector_store_request( # pylint: disable=name-too-long vector_store_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -881,7 +881,7 @@ def build_ai_assistant_get_vector_store_request( # pylint: disable=name-too-lon return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_modify_vector_store_request( # pylint: disable=name-too-long +def build_assistants_modify_vector_store_request( # pylint: disable=name-too-long vector_store_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -910,7 
+910,7 @@ def build_ai_assistant_modify_vector_store_request( # pylint: disable=name-too- return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_delete_vector_store_request( # pylint: disable=name-too-long +def build_assistants_delete_vector_store_request( # pylint: disable=name-too-long vector_store_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -936,7 +936,7 @@ def build_ai_assistant_delete_vector_store_request( # pylint: disable=name-too- return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_list_vector_store_files_request( # pylint: disable=name-too-long +def build_assistants_list_vector_store_files_request( # pylint: disable=name-too-long vector_store_id: str, *, filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, @@ -979,7 +979,7 @@ def build_ai_assistant_list_vector_store_files_request( # pylint: disable=name- return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_create_vector_store_file_request( # pylint: disable=name-too-long +def build_assistants_create_vector_store_file_request( # pylint: disable=name-too-long vector_store_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -1008,7 +1008,7 @@ def build_ai_assistant_create_vector_store_file_request( # pylint: disable=name return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_vector_store_file_request( # pylint: disable=name-too-long +def build_assistants_get_vector_store_file_request( # pylint: disable=name-too-long vector_store_id: str, file_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -1035,7 +1035,7 @@ def build_ai_assistant_get_vector_store_file_request( # pylint: 
disable=name-to return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_delete_vector_store_file_request( # pylint: disable=name-too-long +def build_assistants_delete_vector_store_file_request( # pylint: disable=name-too-long vector_store_id: str, file_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -1062,7 +1062,7 @@ def build_ai_assistant_delete_vector_store_file_request( # pylint: disable=name return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_create_vector_store_file_batch_request( # pylint: disable=name-too-long +def build_assistants_create_vector_store_file_batch_request( # pylint: disable=name-too-long vector_store_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -1091,7 +1091,7 @@ def build_ai_assistant_create_vector_store_file_batch_request( # pylint: disabl return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_get_vector_store_file_batch_request( # pylint: disable=name-too-long +def build_assistants_get_vector_store_file_batch_request( # pylint: disable=name-too-long vector_store_id: str, batch_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -1118,7 +1118,7 @@ def build_ai_assistant_get_vector_store_file_batch_request( # pylint: disable=n return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_cancel_vector_store_file_batch_request( # pylint: disable=name-too-long +def build_assistants_cancel_vector_store_file_batch_request( # pylint: disable=name-too-long vector_store_id: str, batch_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -1145,7 +1145,7 @@ def 
build_ai_assistant_cancel_vector_store_file_batch_request( # pylint: disabl return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_ai_assistant_list_vector_store_file_batch_files_request( # pylint: disable=name-too-long +def build_assistants_list_vector_store_file_batch_files_request( # pylint: disable=name-too-long vector_store_id: str, batch_id: str, *, @@ -1190,10 +1190,10 @@ def build_ai_assistant_list_vector_store_file_batch_files_request( # pylint: di return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -class AIAssistantClientOperationsMixin(AIAssistantClientMixinABC): # pylint: disable=too-many-public-methods +class AssistantsClientOperationsMixin(AssistantsClientMixinABC): # pylint: disable=too-many-public-methods @overload - def create_agent( + def create_assistant( self, *, model: str, @@ -1205,27 +1205,28 @@ def create_agent( tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. + ) -> _models.Assistant: + """Creates a new assistant. :keyword model: The ID of the model to use. Required. :paramtype model: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. + :keyword name: The name of the new assistant. Default value is None. :paramtype name: str - :keyword description: The description of the new agent. Default value is None. + :keyword description: The description of the new assistant. Default value is None. 
:paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. + :keyword instructions: The system instructions for the new assistant to use. Default value is + None. :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources @@ -1240,53 +1241,57 @@ def create_agent( We generally recommend altering this or temperature but not both. Default value is None. :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
:paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. + def create_assistant( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. 
+ def create_assistant( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def create_agent( + def create_assistant( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -1298,26 +1303,27 @@ def create_agent( tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. + ) -> _models.Assistant: + """Creates a new assistant. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword model: The ID of the model to use. Required. :paramtype model: str - :keyword name: The name of the new agent. Default value is None. + :keyword name: The name of the new assistant. Default value is None. :paramtype name: str - :keyword description: The description of the new agent. Default value is None. + :keyword description: The description of the new assistant. Default value is None. :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. + :keyword instructions: The system instructions for the new assistant to use. Default value is + None. 
:paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources @@ -1332,20 +1338,20 @@ def create_agent( We generally recommend altering this or temperature but not both. Default value is None. :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
:paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1360,7 +1366,7 @@ def create_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) if body is _Unset: if model is _Unset: @@ -1385,7 +1391,7 @@ def create_agent( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_agent_request( + _request = build_assistants_create_assistant_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1394,6 +1400,15 @@ def create_agent( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + 
"self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1416,7 +1431,7 @@ def create_agent( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models.Assistant, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1424,7 +1439,7 @@ def create_agent( return deserialized # type: ignore @distributed_trace - def list_agents( + def list_assistants( self, *, limit: Optional[int] = None, @@ -1432,8 +1447,8 @@ def list_agents( after: Optional[str] = None, before: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIPageableListOfAgent: - """Gets a list of agents that were previously created. + ) -> _models.OpenAIPageableListOfAssistant: + """Gets a list of assistants that were previously created. :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. Default value is None. @@ -1451,9 +1466,9 @@ def list_agents( obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. Default value is None. :paramtype before: str - :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with + :return: OpenAIPageableListOfAssistant. 
The OpenAIPageableListOfAssistant is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfAgent + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfAssistant :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1467,9 +1482,9 @@ def list_agents( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) + cls: ClsType[_models.OpenAIPageableListOfAssistant] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_agents_request( + _request = build_assistants_list_assistants_request( limit=limit, order=order, after=after, @@ -1480,6 +1495,15 @@ def list_agents( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1502,7 +1526,7 @@ def list_agents( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) + deserialized = _deserialize(_models.OpenAIPageableListOfAssistant, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1510,13 +1534,13 @@ def list_agents( return deserialized # type: ignore @distributed_trace - def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: - """Retrieves an existing agent. 
+ def get_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assistant: + """Retrieves an existing assistant. - :param agent_id: Identifier of the agent. Required. - :type agent_id: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1530,16 +1554,25 @@ def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_agent_request( - agent_id=agent_id, + _request = build_assistants_get_assistant_request( + assistant_id=assistant_id, api_version=self._config.api_version, headers=_headers, params=_params, ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1562,7 +1595,7 @@ def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models.Assistant, response.json()) if 
cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1570,9 +1603,9 @@ def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: return deserialized # type: ignore @overload - def update_agent( + def update_assistant( self, - agent_id: str, + assistant_id: str, *, content_type: str = "application/json", model: Optional[str] = None, @@ -1583,31 +1616,31 @@ def update_agent( tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. + ) -> _models.Assistant: + """Modifies an existing assistant. - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword model: The ID of the model to use. Default value is None. :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. + :keyword name: The modified name for the assistant to use. Default value is None. :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. + :keyword description: The modified description for the assistant to use. Default value is None. :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. + :keyword instructions: The modified system instructions for the new assistant to use. Default + value is None. 
:paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is + :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources @@ -1622,63 +1655,63 @@ def update_agent( We generally recommend altering this or temperature but not both. Default value is None. :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. 
Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def update_agent( - self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. + def update_assistant( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def update_agent( - self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. + def update_assistant( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str + :param assistant_id: The ID of the assistant to modify. Required. 
+ :type assistant_id: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def update_agent( + def update_assistant( self, - agent_id: str, + assistant_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, model: Optional[str] = None, @@ -1689,30 +1722,30 @@ def update_agent( tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. + ) -> _models.Assistant: + """Modifies an existing assistant. - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword model: The ID of the model to use. Default value is None. :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. + :keyword name: The modified name for the assistant to use. Default value is None. :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. + :keyword description: The modified description for the assistant to use. Default value is None. 
:paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. + :keyword instructions: The modified system instructions for the new assistant to use. Default + value is None. :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is + :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources @@ -1727,20 +1760,20 @@ def update_agent( We generally recommend altering this or temperature but not both. Default value is None. :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
:paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1755,7 +1788,7 @@ def update_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) if body is _Unset: body = { @@ -1778,8 +1811,8 @@ def update_agent( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_update_agent_request( - agent_id=agent_id, + _request = build_assistants_update_assistant_request( + assistant_id=assistant_id, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1788,6 +1821,15 @@ def update_agent( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": 
self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1810,7 +1852,7 @@ def update_agent( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models.Assistant, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1818,13 +1860,13 @@ def update_agent( return deserialized # type: ignore @distributed_trace - def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. + def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: + """Deletes an assistant. - :param agent_id: Identifier of the agent. Required. - :type agent_id: str - :return: AgentDeletionStatus. The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentDeletionStatus + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: AssistantDeletionStatus. 
The AssistantDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1838,16 +1880,25 @@ def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionSta _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models.AssistantDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_agent_request( - agent_id=agent_id, + _request = build_assistants_delete_assistant_request( + assistant_id=assistant_id, api_version=self._config.api_version, headers=_headers, params=_params, ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1870,7 +1921,7 @@ def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionSta if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + deserialized = _deserialize(_models.AssistantDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1886,8 +1937,8 @@ def create_thread( tool_resources: Optional[_models.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. 
Threads contain messages and can be run by agents. + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -1895,7 +1946,7 @@ def create_thread( :keyword messages: The initial messages to associate with the new thread. Default value is None. :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in + :keyword tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires @@ -1906,40 +1957,40 @@ def create_thread( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @overload def create_thread( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: AgentThread. 
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @overload def create_thread( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1952,15 +2003,15 @@ def create_thread( tool_resources: Optional[_models.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword messages: The initial messages to associate with the new thread. Default value is None. :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in + :keyword tool_resources: A set of resources that are made available to the assistant's tools in this thread. 
The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires @@ -1971,8 +2022,8 @@ def create_thread( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1987,7 +2038,7 @@ def create_thread( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) if body is _Unset: body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} @@ -1999,7 +2050,7 @@ def create_thread( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_thread_request( + _request = build_assistants_create_thread_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2008,6 +2059,15 @@ def create_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } 
_request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2030,7 +2090,7 @@ def create_thread( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models.AssistantThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2038,13 +2098,13 @@ def create_thread( return deserialized # type: ignore @distributed_trace - def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: + def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AssistantThread: """Gets information about an existing thread. :param thread_id: Identifier of the thread. Required. :type thread_id: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -2058,9 +2118,9 @@ def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_thread_request( + _request = build_assistants_get_thread_request( thread_id=thread_id, api_version=self._config.api_version, headers=_headers, @@ -2068,6 +2128,15 @@ def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": 
self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2090,7 +2159,7 @@ def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models.AssistantThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2106,7 +2175,7 @@ def update_thread( tool_resources: Optional[_models.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models.AssistantThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -2114,7 +2183,7 @@ def update_thread( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword tool_resources: A set of resources that are made available to the agent's tools in + :keyword tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires @@ -2125,15 +2194,15 @@ def update_thread( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @overload def update_thread( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: + ) -> _models.AssistantThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -2143,15 +2212,15 @@ def update_thread( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @overload def update_thread( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: + ) -> _models.AssistantThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -2161,8 +2230,8 @@ def update_thread( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2175,14 +2244,14 @@ def update_thread( tool_resources: Optional[_models.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models.AssistantThread: """Modifies an existing thread. 
:param thread_id: The ID of the thread to modify. Required. :type thread_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword tool_resources: A set of resources that are made available to the agent's tools in + :keyword tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires @@ -2193,8 +2262,8 @@ def update_thread( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -2209,7 +2278,7 @@ def update_thread( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) if body is _Unset: body = {"metadata": metadata, "tool_resources": tool_resources} @@ -2221,7 +2290,7 @@ def update_thread( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_update_thread_request( + _request = build_assistants_update_thread_request( thread_id=thread_id, content_type=content_type, api_version=self._config.api_version, @@ -2231,6 +2300,15 @@ def update_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + 
"self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2253,7 +2331,7 @@ def update_thread( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models.AssistantThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -2283,7 +2361,7 @@ def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletion cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_thread_request( + _request = build_assistants_delete_thread_request( thread_id=thread_id, api_version=self._config.api_version, headers=_headers, @@ -2291,6 +2369,15 @@ def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletion ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2340,8 +2427,8 @@ def create_message( * `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. 
- * `assistant`: Indicates the message is generated by the agent. Use this value to insert - messages from the agent into the + * `assistant`: Indicates the message is generated by the assistant. Use this value to insert + messages from the assistant into the conversation. Known values are: "user" and "assistant". Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including @@ -2422,8 +2509,8 @@ def create_message( * `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the agent. Use this value to insert - messages from the agent into the + * `assistant`: Indicates the message is generated by the assistant. Use this value to insert + messages from the assistant into the conversation. Known values are: "user" and "assistant". Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole :keyword content: The textual content of the initial message. 
Currently, robust input including @@ -2470,7 +2557,7 @@ def create_message( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_message_request( + _request = build_assistants_create_message_request( thread_id=thread_id, content_type=content_type, api_version=self._config.api_version, @@ -2480,6 +2567,15 @@ def create_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2561,7 +2657,7 @@ def list_messages( cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_messages_request( + _request = build_assistants_list_messages_request( thread_id=thread_id, run_id=run_id, limit=limit, @@ -2574,6 +2670,15 @@ def list_messages( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2628,7 +2733,7 @@ def 
get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_message_request( + _request = build_assistants_get_message_request( thread_id=thread_id, message_id=message_id, api_version=self._config.api_version, @@ -2637,6 +2742,15 @@ def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2786,7 +2900,7 @@ def update_message( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_update_message_request( + _request = build_assistants_update_message_request( thread_id=thread_id, message_id=message_id, content_type=content_type, @@ -2797,6 +2911,15 @@ def update_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, 
**path_format_arguments) @@ -2831,7 +2954,7 @@ def create_run( self, thread_id: str, *, - agent_id: str, + assistant_id: str, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, content_type: str = "application/json", model: Optional[str] = None, @@ -2845,18 +2968,18 @@ def create_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any ) -> _models.ThreadRun: - """Creates a new run for an agent thread. + """Creates a new run for an assistant thread. :param thread_id: Identifier of the thread. Required. :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str :keyword include: A list of additional fields to include in the response. Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result @@ -2865,11 +2988,11 @@ def create_run( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. + :keyword model: The overridden model name that the assistant should use to run the thread. + Default value is None. 
:paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. :paramtype instructions: str :keyword additional_instructions: Additional instructions to append at the end of the instructions for the run. This is useful for modifying the behavior @@ -2878,7 +3001,7 @@ def create_run( :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the + :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run @@ -2916,16 +3039,17 @@ def create_run( moves forward. Default value is None. :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode - or ~azure.ai.assistants.models.AgentsNamedToolChoice + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. 
Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. @@ -2950,7 +3074,7 @@ def create_run( content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: - """Creates a new run for an agent thread. + """Creates a new run for an assistant thread. :param thread_id: Identifier of the thread. Required. :type thread_id: str @@ -2979,7 +3103,7 @@ def create_run( content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: - """Creates a new run for an agent thread. + """Creates a new run for an assistant thread. :param thread_id: Identifier of the thread. Required. 
:type thread_id: str @@ -3004,7 +3128,7 @@ def create_run( thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - agent_id: str = _Unset, + assistant_id: str = _Unset, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, model: Optional[str] = None, instructions: Optional[str] = None, @@ -3017,30 +3141,30 @@ def create_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any ) -> _models.ThreadRun: - """Creates a new run for an agent thread. + """Creates a new run for an assistant thread. :param thread_id: Identifier of the thread. Required. :type thread_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str :keyword include: A list of additional fields to include in the response. Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. + :keyword model: The overridden model name that the assistant should use to run the thread. + Default value is None. 
:paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. :paramtype instructions: str :keyword additional_instructions: Additional instructions to append at the end of the instructions for the run. This is useful for modifying the behavior @@ -3049,7 +3173,7 @@ def create_run( :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the + :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run @@ -3087,16 +3211,17 @@ def create_run( moves forward. Default value is None. :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode - or ~azure.ai.assistants.models.AgentsNamedToolChoice + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. 
Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. @@ -3125,12 +3250,12 @@ def create_run( cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: - if agent_id is _Unset: - raise TypeError("missing required argument: agent_id") + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") body = { "additional_instructions": additional_instructions, "additional_messages": additional_messages, - "assistant_id": agent_id, + "assistant_id": assistant_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, @@ -3153,7 +3278,7 @@ def create_run( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_run_request( + _request = build_assistants_create_run_request( thread_id=thread_id, include=include, content_type=content_type, @@ -3164,6 +3289,15 @@ def create_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": 
self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3242,7 +3376,7 @@ def list_runs( cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_runs_request( + _request = build_assistants_list_runs_request( thread_id=thread_id, limit=limit, order=order, @@ -3254,6 +3388,15 @@ def list_runs( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3308,7 +3451,7 @@ def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadR cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_run_request( + _request = build_assistants_get_run_request( thread_id=thread_id, run_id=run_id, api_version=self._config.api_version, @@ -3317,6 +3460,15 @@ def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadR ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + 
"self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3466,7 +3618,7 @@ def update_run( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_update_run_request( + _request = build_assistants_update_run_request( thread_id=thread_id, run_id=run_id, content_type=content_type, @@ -3477,6 +3629,15 @@ def update_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3638,7 +3799,7 @@ def submit_tool_outputs_to_run( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_submit_tool_outputs_to_run_request( + _request = build_assistants_submit_tool_outputs_to_run_request( thread_id=thread_id, run_id=run_id, content_type=content_type, @@ -3649,6 +3810,15 @@ def submit_tool_outputs_to_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", 
self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3703,7 +3873,7 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - _request = build_ai_assistant_cancel_run_request( + _request = build_assistants_cancel_run_request( thread_id=thread_id, run_id=run_id, api_version=self._config.api_version, @@ -3712,6 +3882,15 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3745,9 +3924,9 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre def create_thread_and_run( self, *, - agent_id: str, + assistant_id: str, content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, + thread: Optional[_models.AssistantThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, tools: Optional[List[_models.ToolDefinition]] = None, @@ -3758,33 +3937,34 @@ def create_thread_and_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - 
tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. + """Creates a new assistant thread and immediately starts a run using that new thread. - :keyword agent_id: The ID of the agent for which the thread should be created. Required. - :paramtype agent_id: str + :keyword assistant_id: The ID of the assistant for which the thread should be created. + Required. + :paramtype assistant_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword thread: The details used to create the new thread. If no thread is provided, an empty one will be created. Default value is None. - :paramtype thread: ~azure.ai.assistants.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. + :paramtype thread: ~azure.ai.assistants.models.AssistantThreadCreationOptions + :keyword model: The overridden model that the assistant should use to run the thread. Default + value is None. :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the + :keyword instructions: The overridden system instructions the assistant should use to run the thread. Default value is None. :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. 
+ :keyword tools: The overridden list of enabled tools the assistant should use to run the + thread. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. + :keyword tool_resources: Override the tools the assistant can use for this run. This is useful + for modifying the behavior on a per-run basis. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run as server-sent events, @@ -3821,16 +4001,17 @@ def create_thread_and_run( moves forward. Default value is None. :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode - or ~azure.ai.assistants.models.AgentsNamedToolChoice + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
:paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. @@ -3849,7 +4030,7 @@ def create_thread_and_run( def create_thread_and_run( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. + """Creates a new assistant thread and immediately starts a run using that new thread. :param body: Required. :type body: JSON @@ -3865,7 +4046,7 @@ def create_thread_and_run( def create_thread_and_run( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. + """Creates a new assistant thread and immediately starts a run using that new thread. :param body: Required. 
:type body: IO[bytes] @@ -3882,8 +4063,8 @@ def create_thread_and_run( self, body: Union[JSON, IO[bytes]] = _Unset, *, - agent_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, + assistant_id: str = _Unset, + thread: Optional[_models.AssistantThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, tools: Optional[List[_models.ToolDefinition]] = None, @@ -3894,32 +4075,33 @@ def create_thread_and_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. + """Creates a new assistant thread and immediately starts a run using that new thread. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword agent_id: The ID of the agent for which the thread should be created. Required. - :paramtype agent_id: str + :keyword assistant_id: The ID of the assistant for which the thread should be created. + Required. + :paramtype assistant_id: str :keyword thread: The details used to create the new thread. If no thread is provided, an empty one will be created. Default value is None. - :paramtype thread: ~azure.ai.assistants.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. 
+ :paramtype thread: ~azure.ai.assistants.models.AssistantThreadCreationOptions + :keyword model: The overridden model that the assistant should use to run the thread. Default + value is None. :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the + :keyword instructions: The overridden system instructions the assistant should use to run the thread. Default value is None. :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. + :keyword tools: The overridden list of enabled tools the assistant should use to run the + thread. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. + :keyword tool_resources: Override the tools the assistant can use for this run. This is useful + for modifying the behavior on a per-run basis. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run as server-sent events, @@ -3956,16 +4138,17 @@ def create_thread_and_run( moves forward. Default value is None. :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode - or ~azure.ai.assistants.models.AgentsNamedToolChoice + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. 
+ :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. 
@@ -3994,10 +4177,10 @@ def create_thread_and_run( cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: - if agent_id is _Unset: - raise TypeError("missing required argument: agent_id") + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") body = { - "assistant_id": agent_id, + "assistant_id": assistant_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, @@ -4022,7 +4205,7 @@ def create_thread_and_run( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_thread_and_run_request( + _request = build_assistants_create_thread_and_run_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -4031,6 +4214,15 @@ def create_thread_and_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4100,7 +4292,7 @@ def get_run_step( cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_run_step_request( + _request = build_assistants_get_run_step_request( thread_id=thread_id, run_id=run_id, step_id=step_id, @@ -4111,6 +4303,15 @@ def get_run_step( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + 
"self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4198,7 +4399,7 @@ def list_run_steps( cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_run_steps_request( + _request = build_assistants_list_run_steps_request( thread_id=thread_id, run_id=run_id, include=include, @@ -4212,6 +4413,15 @@ def list_run_steps( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4268,7 +4478,7 @@ def list_files( cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_files_request( + _request = build_assistants_list_files_request( purpose=purpose, api_version=self._config.api_version, headers=_headers, @@ -4276,6 +4486,15 @@ def list_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": 
self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4355,7 +4574,7 @@ def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwargs: An _data_fields: List[str] = ["purpose", "filename"] _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) - _request = build_ai_assistant_upload_file_request( + _request = build_assistants_upload_file_request( api_version=self._config.api_version, files=_files, data=_data, @@ -4364,6 +4583,15 @@ def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwargs: An ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4416,7 +4644,7 @@ def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_file_request( + _request = build_assistants_delete_file_request( file_id=file_id, api_version=self._config.api_version, headers=_headers, @@ -4424,6 +4652,15 @@ def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", 
self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4476,7 +4713,7 @@ def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_file_request( + _request = build_assistants_get_file_request( file_id=file_id, api_version=self._config.api_version, headers=_headers, @@ -4484,6 +4721,15 @@ def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4536,7 +4782,7 @@ def _get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_file_content_request( + _request = build_assistants_get_file_content_request( file_id=file_id, api_version=self._config.api_version, headers=_headers, @@ -4544,6 +4790,15 @@ def _get_file_content(self, file_id: str, **kwargs: Any) -> 
Iterator[bytes]: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4616,7 +4871,7 @@ def list_vector_stores( cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_vector_stores_request( + _request = build_assistants_list_vector_stores_request( limit=limit, order=order, after=after, @@ -4627,6 +4882,15 @@ def list_vector_stores( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4799,7 +5063,7 @@ def create_vector_store( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_vector_store_request( + _request = build_assistants_create_vector_store_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -4808,6 +5072,15 @@ def create_vector_store( ) path_format_arguments = { 
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4860,7 +5133,7 @@ def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Vecto cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_vector_store_request( + _request = build_assistants_get_vector_store_request( vector_store_id=vector_store_id, api_version=self._config.api_version, headers=_headers, @@ -4868,6 +5141,15 @@ def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Vecto ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5019,7 +5301,7 @@ def modify_vector_store( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_modify_vector_store_request( + _request = build_assistants_modify_vector_store_request( vector_store_id=vector_store_id, content_type=content_type, 
api_version=self._config.api_version, @@ -5029,6 +5311,15 @@ def modify_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5082,7 +5373,7 @@ def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Ve cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_vector_store_request( + _request = build_assistants_delete_vector_store_request( vector_store_id=vector_store_id, api_version=self._config.api_version, headers=_headers, @@ -5090,6 +5381,15 @@ def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Ve ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5172,7 +5472,7 @@ def list_vector_store_files( cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - _request = 
build_ai_assistant_list_vector_store_files_request( + _request = build_assistants_list_vector_store_files_request( vector_store_id=vector_store_id, filter=filter, limit=limit, @@ -5185,6 +5485,15 @@ def list_vector_store_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5332,7 +5641,7 @@ def create_vector_store_file( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_vector_store_file_request( + _request = build_assistants_create_vector_store_file_request( vector_store_id=vector_store_id, content_type=content_type, api_version=self._config.api_version, @@ -5342,6 +5651,15 @@ def create_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5396,7 +5714,7 @@ def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: An cls: 
ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_vector_store_file_request( + _request = build_assistants_get_vector_store_file_request( vector_store_id=vector_store_id, file_id=file_id, api_version=self._config.api_version, @@ -5405,6 +5723,15 @@ def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: An ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5464,7 +5791,7 @@ def delete_vector_store_file( cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_vector_store_file_request( + _request = build_assistants_delete_vector_store_file_request( vector_store_id=vector_store_id, file_id=file_id, api_version=self._config.api_version, @@ -5473,6 +5800,15 @@ def delete_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, 
**path_format_arguments) @@ -5620,7 +5956,7 @@ def create_vector_store_file_batch( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_vector_store_file_batch_request( + _request = build_assistants_create_vector_store_file_batch_request( vector_store_id=vector_store_id, content_type=content_type, api_version=self._config.api_version, @@ -5630,6 +5966,15 @@ def create_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5686,7 +6031,7 @@ def get_vector_store_file_batch( cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_vector_store_file_batch_request( + _request = build_assistants_get_vector_store_file_batch_request( vector_store_id=vector_store_id, batch_id=batch_id, api_version=self._config.api_version, @@ -5695,6 +6040,15 @@ def get_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, 
"str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5752,7 +6106,7 @@ def cancel_vector_store_file_batch( cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - _request = build_ai_assistant_cancel_vector_store_file_batch_request( + _request = build_assistants_cancel_vector_store_file_batch_request( vector_store_id=vector_store_id, batch_id=batch_id, api_version=self._config.api_version, @@ -5761,6 +6115,15 @@ def cancel_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5846,7 +6209,7 @@ def list_vector_store_file_batch_files( cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_vector_store_file_batch_files_request( + _request = build_assistants_list_vector_store_file_batch_files_request( vector_store_id=vector_store_id, batch_id=batch_id, filter=filter, @@ -5860,6 +6223,15 @@ def list_vector_store_file_batch_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": 
self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py index 1c059e5809cc..a3cd7f954106 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py @@ -10,12 +10,14 @@ if TYPE_CHECKING: from . import models as _models -AgentsApiResponseFormatOption = Union[ +AssistantsApiResponseFormatOption = Union[ str, str, - "_models.AgentsApiResponseFormatMode", - "_models.AgentsApiResponseFormat", + "_models.AssistantsApiResponseFormatMode", + "_models.AssistantsApiResponseFormat", "_models.ResponseFormatJsonSchemaType", ] MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"] -AgentsApiToolChoiceOption = Union[str, str, "_models.AgentsApiToolChoiceOptionMode", "_models.AgentsNamedToolChoice"] +AssistantsApiToolChoiceOption = Union[ + str, str, "_models.AssistantsApiToolChoiceOptionMode", "_models.AssistantsNamedToolChoice" +] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_vendor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_vendor.py index e130f325a0a7..08b71686c335 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_vendor.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_vendor.py @@ -9,7 +9,7 @@ import json from typing import Any, Dict, IO, List, Mapping, Optional, TYPE_CHECKING, Tuple, Union -from ._configuration import AIAssistantClientConfiguration +from ._configuration import AssistantsClientConfiguration from ._model_base import Model, SdkJSONEncoder if TYPE_CHECKING: @@ -18,11 +18,11 @@ from ._serialization import Deserializer, Serializer -class AIAssistantClientMixinABC(ABC): +class AssistantsClientMixinABC(ABC): """DO NOT use this class. 
It is for internal typing use only.""" _client: "PipelineClient" - _config: AIAssistantClientConfiguration + _config: AssistantsClientConfiguration _serialize: "Serializer" _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py index 6219b129c895..4fea30ca6925 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/__init__.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._client import AIAssistantClient # type: ignore +from ._client import AssistantsClient # type: ignore try: from ._patch import __all__ as _patch_all @@ -22,7 +22,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "AIAssistantClient", + "AssistantsClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py index 5ec076d2444f..5efb9a86adcc 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -16,19 +17,28 @@ from azure.core.rest import AsyncHttpResponse, HttpRequest from .._serialization import Deserializer, Serializer -from ._configuration import AIAssistantClientConfiguration -from ._operations import AIAssistantClientOperationsMixin +from ._configuration import AssistantsClientConfiguration +from ._operations import AssistantsClientOperationsMixin if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential -class AIAssistantClient(AIAssistantClientOperationsMixin): - """AIAssistantClient. +class AssistantsClient(AssistantsClientOperationsMixin): + """AssistantsClient. - :param endpoint: Project endpoint in the form of: - https://<your-ai-services-account-name>.services.ai.azure.com/api/projects/<your-project-name>. Required. + :param endpoint: The Azure AI Foundry project endpoint, in the form + ``https://<region>.api.azureml.ms`` or + ``https://<private-link-guid>.<region>.api.azureml.ms``, where <region> is the + Azure region where the project is deployed (e.g. westus) and <private-link-guid> is the GUID of + the Enterprise private link. Required. :type endpoint: str + :param subscription_id: The Azure subscription ID. Required. + :type subscription_id: str + :param resource_group_name: The name of the Azure Resource Group. Required. + :type resource_group_name: str + :param project_name: The Azure AI Foundry project name. Required. + :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required.
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -39,10 +49,23 @@ class AIAssistantClient(AIAssistantClientOperationsMixin): """ def __init__( - self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: Union[AzureKeyCredential, "AsyncTokenCredential"], + **kwargs: Any ) -> None: - _endpoint = "{endpoint}" - self._config = AIAssistantClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" + self._config = AssistantsClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + **kwargs + ) _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -89,6 +112,15 @@ def send_request( request_copy = deepcopy(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py index 9d4bc36de261..b5a7bf17f277 100644 --- 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py @@ -17,15 +17,24 @@ from azure.core.credentials_async import AsyncTokenCredential -class AIAssistantClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for AIAssistantClient. +class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for AssistantsClient. Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Project endpoint in the form of: - https://<your-ai-services-account-name>.services.ai.azure.com/api/projects/<your-project-name>. Required. + :param endpoint: The Azure AI Foundry project endpoint, in the form + ``https://<region>.api.azureml.ms`` or + ``https://<private-link-guid>.<region>.api.azureml.ms``, where <region> is the + Azure region where the project is deployed (e.g. westus) and <private-link-guid> is the GUID of + the Enterprise private link. Required. :type endpoint: str + :param subscription_id: The Azure subscription ID. Required. + :type subscription_id: str + :param resource_group_name: The name of the Azure Resource Group. Required. + :type resource_group_name: str + :param project_name: The Azure AI Foundry project name. Required. + :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required.
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -36,19 +45,34 @@ class AIAssistantClientConfiguration: # pylint: disable=too-many-instance-attri """ def __init__( - self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: Union[AzureKeyCredential, "AsyncTokenCredential"], + **kwargs: Any, ) -> None: api_version: str = kwargs.pop("api_version", "latest") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if resource_group_name is None: + raise ValueError("Parameter 'resource_group_name' must not be None.") + if project_name is None: + raise ValueError("Parameter 'project_name' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint + self.subscription_id = subscription_id + self.resource_group_name = resource_group_name + self.project_name = project_name self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/__init__.py index 28950a5960c0..ee3f17d82ddc 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/__init__.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/__init__.py @@ -12,14 +12,14 @@ if TYPE_CHECKING: from ._patch import * # pylint: 
disable=unused-wildcard-import -from ._operations import AIAssistantClientOperationsMixin # type: ignore +from ._operations import AssistantsClientOperationsMixin # type: ignore from ._patch import __all__ as _patch_all from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ - "AIAssistantClientOperationsMixin", + "AssistantsClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py index da13d2fbdfcc..5db7010cfdf3 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py @@ -29,49 +29,49 @@ from ... import _model_base, models as _models from ..._model_base import SdkJSONEncoder, _deserialize from ..._operations._operations import ( - build_ai_assistant_cancel_run_request, - build_ai_assistant_cancel_vector_store_file_batch_request, - build_ai_assistant_create_agent_request, - build_ai_assistant_create_message_request, - build_ai_assistant_create_run_request, - build_ai_assistant_create_thread_and_run_request, - build_ai_assistant_create_thread_request, - build_ai_assistant_create_vector_store_file_batch_request, - build_ai_assistant_create_vector_store_file_request, - build_ai_assistant_create_vector_store_request, - build_ai_assistant_delete_agent_request, - build_ai_assistant_delete_file_request, - build_ai_assistant_delete_thread_request, - build_ai_assistant_delete_vector_store_file_request, - build_ai_assistant_delete_vector_store_request, - build_ai_assistant_get_agent_request, - build_ai_assistant_get_file_content_request, - build_ai_assistant_get_file_request, - build_ai_assistant_get_message_request, - build_ai_assistant_get_run_request, - build_ai_assistant_get_run_step_request, - 
build_ai_assistant_get_thread_request, - build_ai_assistant_get_vector_store_file_batch_request, - build_ai_assistant_get_vector_store_file_request, - build_ai_assistant_get_vector_store_request, - build_ai_assistant_list_agents_request, - build_ai_assistant_list_files_request, - build_ai_assistant_list_messages_request, - build_ai_assistant_list_run_steps_request, - build_ai_assistant_list_runs_request, - build_ai_assistant_list_vector_store_file_batch_files_request, - build_ai_assistant_list_vector_store_files_request, - build_ai_assistant_list_vector_stores_request, - build_ai_assistant_modify_vector_store_request, - build_ai_assistant_submit_tool_outputs_to_run_request, - build_ai_assistant_update_agent_request, - build_ai_assistant_update_message_request, - build_ai_assistant_update_run_request, - build_ai_assistant_update_thread_request, - build_ai_assistant_upload_file_request, + build_assistants_cancel_run_request, + build_assistants_cancel_vector_store_file_batch_request, + build_assistants_create_assistant_request, + build_assistants_create_message_request, + build_assistants_create_run_request, + build_assistants_create_thread_and_run_request, + build_assistants_create_thread_request, + build_assistants_create_vector_store_file_batch_request, + build_assistants_create_vector_store_file_request, + build_assistants_create_vector_store_request, + build_assistants_delete_assistant_request, + build_assistants_delete_file_request, + build_assistants_delete_thread_request, + build_assistants_delete_vector_store_file_request, + build_assistants_delete_vector_store_request, + build_assistants_get_assistant_request, + build_assistants_get_file_content_request, + build_assistants_get_file_request, + build_assistants_get_message_request, + build_assistants_get_run_request, + build_assistants_get_run_step_request, + build_assistants_get_thread_request, + build_assistants_get_vector_store_file_batch_request, + build_assistants_get_vector_store_file_request, + 
build_assistants_get_vector_store_request, + build_assistants_list_assistants_request, + build_assistants_list_files_request, + build_assistants_list_messages_request, + build_assistants_list_run_steps_request, + build_assistants_list_runs_request, + build_assistants_list_vector_store_file_batch_files_request, + build_assistants_list_vector_store_files_request, + build_assistants_list_vector_stores_request, + build_assistants_modify_vector_store_request, + build_assistants_submit_tool_outputs_to_run_request, + build_assistants_update_assistant_request, + build_assistants_update_message_request, + build_assistants_update_run_request, + build_assistants_update_thread_request, + build_assistants_upload_file_request, ) from ..._vendor import prepare_multipart_form_data -from .._vendor import AIAssistantClientMixinABC +from .._vendor import AssistantsClientMixinABC if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -86,10 +86,10 @@ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] -class AIAssistantClientOperationsMixin(AIAssistantClientMixinABC): # pylint: disable=too-many-public-methods +class AssistantsClientOperationsMixin(AssistantsClientMixinABC): # pylint: disable=too-many-public-methods @overload - async def create_agent( + async def create_assistant( self, *, model: str, @@ -101,27 +101,28 @@ async def create_agent( tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. + ) -> _models.Assistant: + """Creates a new assistant. :keyword model: The ID of the model to use. Required. :paramtype model: str :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. + :keyword name: The name of the new assistant. Default value is None. :paramtype name: str - :keyword description: The description of the new agent. Default value is None. + :keyword description: The description of the new assistant. Default value is None. :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. + :keyword instructions: The system instructions for the new assistant to use. Default value is + None. :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources @@ -136,55 +137,57 @@ async def create_agent( We generally recommend altering this or temperature but not both. Default value is None. :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :keyword response_format: The response format of the tool calls used by this assistant. 
Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. + async def create_assistant( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def create_agent( + async def create_assistant( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. + ) -> _models.Assistant: + """Creates a new assistant. :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def create_agent( + async def create_assistant( self, body: Union[JSON, IO[bytes]] = _Unset, *, @@ -196,26 +199,27 @@ async def create_agent( tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. + ) -> _models.Assistant: + """Creates a new assistant. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword model: The ID of the model to use. Required. :paramtype model: str - :keyword name: The name of the new agent. Default value is None. + :keyword name: The name of the new assistant. Default value is None. :paramtype name: str - :keyword description: The description of the new agent. Default value is None. + :keyword description: The description of the new assistant. Default value is None. 
:paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. + :keyword instructions: The system instructions for the new assistant to use. Default value is + None. :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources @@ -230,20 +234,20 @@ async def create_agent( We generally recommend altering this or temperature but not both. Default value is None. :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
:paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -258,7 +262,7 @@ async def create_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) if body is _Unset: if model is _Unset: @@ -283,7 +287,7 @@ async def create_agent( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_agent_request( + _request = build_assistants_create_assistant_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -292,6 +296,15 @@ async def create_agent( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + 
"self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -314,7 +327,7 @@ async def create_agent( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models.Assistant, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -322,7 +335,7 @@ async def create_agent( return deserialized # type: ignore @distributed_trace_async - async def list_agents( + async def list_assistants( self, *, limit: Optional[int] = None, @@ -330,8 +343,8 @@ async def list_agents( after: Optional[str] = None, before: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIPageableListOfAgent: - """Gets a list of agents that were previously created. + ) -> _models.OpenAIPageableListOfAssistant: + """Gets a list of assistants that were previously created. :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. Default value is None. @@ -349,9 +362,9 @@ async def list_agents( obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. Default value is None. :paramtype before: str - :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with + :return: OpenAIPageableListOfAssistant. 
The OpenAIPageableListOfAssistant is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfAgent + :rtype: ~azure.ai.assistants.models.OpenAIPageableListOfAssistant :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -365,9 +378,9 @@ async def list_agents( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) + cls: ClsType[_models.OpenAIPageableListOfAssistant] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_agents_request( + _request = build_assistants_list_assistants_request( limit=limit, order=order, after=after, @@ -378,6 +391,15 @@ async def list_agents( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -400,7 +422,7 @@ async def list_agents( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) + deserialized = _deserialize(_models.OpenAIPageableListOfAssistant, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -408,13 +430,13 @@ async def list_agents( return deserialized # type: ignore @distributed_trace_async - async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: - """Retrieves an existing agent. 
+ async def get_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assistant: + """Retrieves an existing assistant. - :param agent_id: Identifier of the agent. Required. - :type agent_id: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -428,16 +450,25 @@ async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_agent_request( - agent_id=agent_id, + _request = build_assistants_get_assistant_request( + assistant_id=assistant_id, api_version=self._config.api_version, headers=_headers, params=_params, ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -460,7 +491,7 @@ async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models.Assistant, 
response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -468,9 +499,9 @@ async def get_agent(self, agent_id: str, **kwargs: Any) -> _models.Agent: return deserialized # type: ignore @overload - async def update_agent( + async def update_assistant( self, - agent_id: str, + assistant_id: str, *, content_type: str = "application/json", model: Optional[str] = None, @@ -481,31 +512,31 @@ async def update_agent( tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. + ) -> _models.Assistant: + """Modifies an existing assistant. - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword model: The ID of the model to use. Default value is None. :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. + :keyword name: The modified name for the assistant to use. Default value is None. :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. + :keyword description: The modified description for the assistant to use. Default value is None. :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. + :keyword instructions: The modified system instructions for the new assistant to use. Default + value is None. 
:paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is + :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources @@ -520,63 +551,63 @@ async def update_agent( We generally recommend altering this or temperature but not both. Default value is None. :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. 
Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def update_agent( - self, agent_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. + async def update_assistant( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @overload - async def update_agent( - self, agent_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. + async def update_assistant( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str + :param assistant_id: The ID of the assistant to modify. Required. 
+ :type assistant_id: str :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace_async - async def update_agent( + async def update_assistant( self, - agent_id: str, + assistant_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, model: Optional[str] = None, @@ -587,30 +618,30 @@ async def update_agent( tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. + ) -> _models.Assistant: + """Modifies an existing assistant. - :param agent_id: The ID of the agent to modify. Required. - :type agent_id: str + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword model: The ID of the model to use. Default value is None. :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. + :keyword name: The modified name for the assistant to use. Default value is None. :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. + :keyword description: The modified description for the assistant to use. Default value is None. 
:paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. + :keyword instructions: The modified system instructions for the new assistant to use. Default + value is None. :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is + :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, + :keyword tool_resources: A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources @@ -625,20 +656,20 @@ async def update_agent( We generally recommend altering this or temperature but not both. Default value is None. :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + :keyword response_format: The response format of the tool calls used by this assistant. Is one + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. 
:paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.Agent + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -653,7 +684,7 @@ async def update_agent( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + cls: ClsType[_models.Assistant] = kwargs.pop("cls", None) if body is _Unset: body = { @@ -676,8 +707,8 @@ async def update_agent( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_update_agent_request( - agent_id=agent_id, + _request = build_assistants_update_assistant_request( + assistant_id=assistant_id, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -686,6 +717,15 @@ async def update_agent( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + 
"resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -708,7 +748,7 @@ async def update_agent( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Agent, response.json()) + deserialized = _deserialize(_models.Assistant, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -716,13 +756,13 @@ async def update_agent( return deserialized # type: ignore @distributed_trace_async - async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. + async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: + """Deletes an assistant. - :param agent_id: Identifier of the agent. Required. - :type agent_id: str - :return: AgentDeletionStatus. The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentDeletionStatus + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: AssistantDeletionStatus. 
The AssistantDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -736,16 +776,25 @@ async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDelet _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + cls: ClsType[_models.AssistantDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_agent_request( - agent_id=agent_id, + _request = build_assistants_delete_assistant_request( + assistant_id=assistant_id, api_version=self._config.api_version, headers=_headers, params=_params, ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -768,7 +817,7 @@ async def delete_agent(self, agent_id: str, **kwargs: Any) -> _models.AgentDelet if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + deserialized = _deserialize(_models.AssistantDeletionStatus, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -784,8 +833,8 @@ async def create_thread( tool_resources: Optional[_models.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. 
Threads contain messages and can be run by agents. + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -793,7 +842,7 @@ async def create_thread( :keyword messages: The initial messages to associate with the new thread. Default value is None. :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in + :keyword tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires @@ -804,40 +853,40 @@ async def create_thread( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def create_thread( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: AgentThread. 
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def create_thread( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @@ -850,15 +899,15 @@ async def create_thread( tool_resources: Optional[_models.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. + ) -> _models.AssistantThread: + """Creates a new thread. Threads contain messages and can be run by assistants. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword messages: The initial messages to associate with the new thread. Default value is None. :paramtype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in + :keyword tool_resources: A set of resources that are made available to the assistant's tools in this thread. 
The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires @@ -869,8 +918,8 @@ async def create_thread( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -885,7 +934,7 @@ async def create_thread( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) if body is _Unset: body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} @@ -897,7 +946,7 @@ async def create_thread( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_thread_request( + _request = build_assistants_create_thread_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -906,6 +955,15 @@ async def create_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + 
), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -928,7 +986,7 @@ async def create_thread( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models.AssistantThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -936,13 +994,13 @@ async def create_thread( return deserialized # type: ignore @distributed_trace_async - async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: + async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AssistantThread: """Gets information about an existing thread. :param thread_id: Identifier of the thread. Required. :type thread_id: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -956,9 +1014,9 @@ async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_thread_request( + _request = build_assistants_get_thread_request( thread_id=thread_id, api_version=self._config.api_version, headers=_headers, @@ -966,6 +1024,15 @@ async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), 
+ "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -988,7 +1055,7 @@ async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models.AssistantThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1004,7 +1071,7 @@ async def update_thread( tool_resources: Optional[_models.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models.AssistantThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -1012,7 +1079,7 @@ async def update_thread( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword tool_resources: A set of resources that are made available to the agent's tools in + :keyword tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires @@ -1023,15 +1090,15 @@ async def update_thread( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def update_thread( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: + ) -> _models.AssistantThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -1041,15 +1108,15 @@ async def update_thread( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @overload async def update_thread( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: + ) -> _models.AssistantThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. @@ -1059,8 +1126,8 @@ async def update_thread( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1073,14 +1140,14 @@ async def update_thread( tool_resources: Optional[_models.ToolResources] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any - ) -> _models.AgentThread: + ) -> _models.AssistantThread: """Modifies an existing thread. :param thread_id: The ID of the thread to modify. Required. :type thread_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword tool_resources: A set of resources that are made available to the agent's tools in + :keyword tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires @@ -1091,8 +1158,8 @@ async def update_thread( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.AgentThread + :return: AssistantThread. 
The AssistantThread is compatible with MutableMapping + :rtype: ~azure.ai.assistants.models.AssistantThread :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1107,7 +1174,7 @@ async def update_thread( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + cls: ClsType[_models.AssistantThread] = kwargs.pop("cls", None) if body is _Unset: body = {"metadata": metadata, "tool_resources": tool_resources} @@ -1119,7 +1186,7 @@ async def update_thread( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_update_thread_request( + _request = build_assistants_update_thread_request( thread_id=thread_id, content_type=content_type, api_version=self._config.api_version, @@ -1129,6 +1196,15 @@ async def update_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1151,7 +1227,7 @@ async def update_thread( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.AgentThread, response.json()) + deserialized = _deserialize(_models.AssistantThread, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1181,7 +1257,7 @@ async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDe cls: 
ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_thread_request( + _request = build_assistants_delete_thread_request( thread_id=thread_id, api_version=self._config.api_version, headers=_headers, @@ -1189,6 +1265,15 @@ async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDe ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1238,8 +1323,8 @@ async def create_message( * `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the agent. Use this value to insert - messages from the agent into the + * `assistant`: Indicates the message is generated by the assistant. Use this value to insert + messages from the assistant into the conversation. Known values are: "user" and "assistant". Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including @@ -1320,8 +1405,8 @@ async def create_message( * `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the agent. 
Use this value to insert - messages from the agent into the + * `assistant`: Indicates the message is generated by the assistant. Use this value to insert + messages from the assistant into the conversation. Known values are: "user" and "assistant". Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including @@ -1368,7 +1453,7 @@ async def create_message( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_message_request( + _request = build_assistants_create_message_request( thread_id=thread_id, content_type=content_type, api_version=self._config.api_version, @@ -1378,6 +1463,15 @@ async def create_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1459,7 +1553,7 @@ async def list_messages( cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_messages_request( + _request = build_assistants_list_messages_request( thread_id=thread_id, run_id=run_id, limit=limit, @@ -1472,6 +1566,15 @@ async def list_messages( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", 
skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1526,7 +1629,7 @@ async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _ cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_message_request( + _request = build_assistants_get_message_request( thread_id=thread_id, message_id=message_id, api_version=self._config.api_version, @@ -1535,6 +1638,15 @@ async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _ ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1684,7 +1796,7 @@ async def update_message( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_update_message_request( + _request = build_assistants_update_message_request( thread_id=thread_id, message_id=message_id, content_type=content_type, @@ -1695,6 +1807,15 @@ async def update_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + 
"self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1729,7 +1850,7 @@ async def create_run( self, thread_id: str, *, - agent_id: str, + assistant_id: str, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, content_type: str = "application/json", model: Optional[str] = None, @@ -1743,18 +1864,18 @@ async def create_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any ) -> _models.ThreadRun: - """Creates a new run for an agent thread. + """Creates a new run for an assistant thread. :param thread_id: Identifier of the thread. Required. :type thread_id: str - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str :keyword include: A list of additional fields to include in the response. 
Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result @@ -1763,11 +1884,11 @@ async def create_run( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. + :keyword model: The overridden model name that the assistant should use to run the thread. + Default value is None. :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. :paramtype instructions: str :keyword additional_instructions: Additional instructions to append at the end of the instructions for the run. This is useful for modifying the behavior @@ -1776,7 +1897,7 @@ async def create_run( :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the + :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run @@ -1814,16 +1935,17 @@ async def create_run( moves forward. Default value is None. :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. 
Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode - or ~azure.ai.assistants.models.AgentsNamedToolChoice + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. @@ -1848,7 +1970,7 @@ async def create_run( content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: - """Creates a new run for an agent thread. + """Creates a new run for an assistant thread. :param thread_id: Identifier of the thread. Required. :type thread_id: str @@ -1877,7 +1999,7 @@ async def create_run( content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: - """Creates a new run for an agent thread. + """Creates a new run for an assistant thread. :param thread_id: Identifier of the thread. 
Required. :type thread_id: str @@ -1902,7 +2024,7 @@ async def create_run( thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - agent_id: str = _Unset, + assistant_id: str = _Unset, include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, model: Optional[str] = None, instructions: Optional[str] = None, @@ -1915,30 +2037,30 @@ async def create_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any ) -> _models.ThreadRun: - """Creates a new run for an agent thread. + """Creates a new run for an assistant thread. :param thread_id: Identifier of the thread. Required. :type thread_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword agent_id: The ID of the agent that should run the thread. Required. - :paramtype agent_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str :keyword include: A list of additional fields to include in the response. Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. + :keyword model: The overridden model name that the assistant should use to run the thread. + Default value is None. 
:paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. :paramtype instructions: str :keyword additional_instructions: Additional instructions to append at the end of the instructions for the run. This is useful for modifying the behavior @@ -1947,7 +2069,7 @@ async def create_run( :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] - :keyword tools: The overridden list of enabled tools that the agent should use to run the + :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run @@ -1985,16 +2107,17 @@ async def create_run( moves forward. Default value is None. :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode - or ~azure.ai.assistants.models.AgentsNamedToolChoice + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. 
+ :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. 
@@ -2023,12 +2146,12 @@ async def create_run( cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: - if agent_id is _Unset: - raise TypeError("missing required argument: agent_id") + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") body = { "additional_instructions": additional_instructions, "additional_messages": additional_messages, - "assistant_id": agent_id, + "assistant_id": assistant_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, @@ -2051,7 +2174,7 @@ async def create_run( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_run_request( + _request = build_assistants_create_run_request( thread_id=thread_id, include=include, content_type=content_type, @@ -2062,6 +2185,15 @@ async def create_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2140,7 +2272,7 @@ async def list_runs( cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_runs_request( + _request = build_assistants_list_runs_request( thread_id=thread_id, limit=limit, order=order, @@ -2152,6 +2284,15 @@ async def list_runs( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + 
"subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2206,7 +2347,7 @@ async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.T cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_run_request( + _request = build_assistants_get_run_request( thread_id=thread_id, run_id=run_id, api_version=self._config.api_version, @@ -2215,6 +2356,15 @@ async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.T ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2364,7 +2514,7 @@ async def update_run( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_update_run_request( + _request = build_assistants_update_run_request( thread_id=thread_id, run_id=run_id, content_type=content_type, @@ -2375,6 +2525,15 @@ async def update_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", 
skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2536,7 +2695,7 @@ async def submit_tool_outputs_to_run( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_submit_tool_outputs_to_run_request( + _request = build_assistants_submit_tool_outputs_to_run_request( thread_id=thread_id, run_id=run_id, content_type=content_type, @@ -2547,6 +2706,15 @@ async def submit_tool_outputs_to_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2601,7 +2769,7 @@ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - _request = build_ai_assistant_cancel_run_request( + _request = build_assistants_cancel_run_request( thread_id=thread_id, run_id=run_id, api_version=self._config.api_version, @@ -2610,6 +2778,15 @@ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model ) path_format_arguments = { 
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2643,9 +2820,9 @@ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model async def create_thread_and_run( self, *, - agent_id: str, + assistant_id: str, content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, + thread: Optional[_models.AssistantThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, tools: Optional[List[_models.ToolDefinition]] = None, @@ -2656,33 +2833,34 @@ async def create_thread_and_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. + """Creates a new assistant thread and immediately starts a run using that new thread. - :keyword agent_id: The ID of the agent for which the thread should be created. Required. 
- :paramtype agent_id: str + :keyword assistant_id: The ID of the assistant for which the thread should be created. + Required. + :paramtype assistant_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword thread: The details used to create the new thread. If no thread is provided, an empty one will be created. Default value is None. - :paramtype thread: ~azure.ai.assistants.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. + :paramtype thread: ~azure.ai.assistants.models.AssistantThreadCreationOptions + :keyword model: The overridden model that the assistant should use to run the thread. Default + value is None. :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the + :keyword instructions: The overridden system instructions the assistant should use to run the thread. Default value is None. :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. + :keyword tools: The overridden list of enabled tools the assistant should use to run the + thread. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. + :keyword tool_resources: Override the tools the assistant can use for this run. This is useful + for modifying the behavior on a per-run basis. Default value is None. 
:paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run as server-sent events, @@ -2719,16 +2897,17 @@ async def create_thread_and_run( moves forward. Default value is None. :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode - or ~azure.ai.assistants.models.AgentsNamedToolChoice + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. 
@@ -2747,7 +2926,7 @@ async def create_thread_and_run( async def create_thread_and_run( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. + """Creates a new assistant thread and immediately starts a run using that new thread. :param body: Required. :type body: JSON @@ -2763,7 +2942,7 @@ async def create_thread_and_run( async def create_thread_and_run( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. + """Creates a new assistant thread and immediately starts a run using that new thread. :param body: Required. :type body: IO[bytes] @@ -2780,8 +2959,8 @@ async def create_thread_and_run( self, body: Union[JSON, IO[bytes]] = _Unset, *, - agent_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, + assistant_id: str = _Unset, + thread: Optional[_models.AssistantThreadCreationOptions] = None, model: Optional[str] = None, instructions: Optional[str] = None, tools: Optional[List[_models.ToolDefinition]] = None, @@ -2792,32 +2971,33 @@ async def create_thread_and_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. 
+ """Creates a new assistant thread and immediately starts a run using that new thread. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword agent_id: The ID of the agent for which the thread should be created. Required. - :paramtype agent_id: str + :keyword assistant_id: The ID of the assistant for which the thread should be created. + Required. + :paramtype assistant_id: str :keyword thread: The details used to create the new thread. If no thread is provided, an empty one will be created. Default value is None. - :paramtype thread: ~azure.ai.assistants.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. + :paramtype thread: ~azure.ai.assistants.models.AssistantThreadCreationOptions + :keyword model: The overridden model that the assistant should use to run the thread. Default + value is None. :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the + :keyword instructions: The overridden system instructions the assistant should use to run the thread. Default value is None. :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. + :keyword tools: The overridden list of enabled tools the assistant should use to run the + thread. Default value is None. :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. + :keyword tool_resources: Override the tools the assistant can use for this run. This is useful + for modifying the behavior on a per-run basis. Default value is None. 
:paramtype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions :keyword stream_parameter: If ``true``, returns a stream of events that happen during the Run as server-sent events, @@ -2854,16 +3034,17 @@ async def create_thread_and_run( moves forward. Default value is None. :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode - or ~azure.ai.assistants.models.AgentsNamedToolChoice + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType Default value is None. :paramtype response_format: str or str or - ~azure.ai.assistants.models.AgentsApiResponseFormatMode or - ~azure.ai.assistants.models.AgentsApiResponseFormat or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. 
@@ -2892,10 +3073,10 @@ async def create_thread_and_run( cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) if body is _Unset: - if agent_id is _Unset: - raise TypeError("missing required argument: agent_id") + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") body = { - "assistant_id": agent_id, + "assistant_id": assistant_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, @@ -2920,7 +3101,7 @@ async def create_thread_and_run( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_thread_and_run_request( + _request = build_assistants_create_thread_and_run_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2929,6 +3110,15 @@ async def create_thread_and_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2998,7 +3188,7 @@ async def get_run_step( cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_run_step_request( + _request = build_assistants_get_run_step_request( thread_id=thread_id, run_id=run_id, step_id=step_id, @@ -3009,6 +3199,15 @@ async def get_run_step( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( 
+ "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3096,7 +3295,7 @@ async def list_run_steps( cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_run_steps_request( + _request = build_assistants_list_run_steps_request( thread_id=thread_id, run_id=run_id, include=include, @@ -3110,6 +3309,15 @@ async def list_run_steps( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3166,7 +3374,7 @@ async def list_files( cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_files_request( + _request = build_assistants_list_files_request( purpose=purpose, api_version=self._config.api_version, headers=_headers, @@ -3174,6 +3382,15 @@ async def list_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + 
"resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3253,7 +3470,7 @@ async def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwar _data_fields: List[str] = ["purpose", "filename"] _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) - _request = build_ai_assistant_upload_file_request( + _request = build_assistants_upload_file_request( api_version=self._config.api_version, files=_files, data=_data, @@ -3262,6 +3479,15 @@ async def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwar ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3314,7 +3540,7 @@ async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletion cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_file_request( + _request = build_assistants_delete_file_request( file_id=file_id, api_version=self._config.api_version, headers=_headers, @@ -3322,6 +3548,15 @@ async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletion ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", 
self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3374,7 +3609,7 @@ async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_file_request( + _request = build_assistants_get_file_request( file_id=file_id, api_version=self._config.api_version, headers=_headers, @@ -3382,6 +3617,15 @@ async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3434,7 +3678,7 @@ async def _get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_file_content_request( + _request = build_assistants_get_file_content_request( file_id=file_id, api_version=self._config.api_version, headers=_headers, @@ -3442,6 +3686,15 @@ async def _get_file_content(self, file_id: str, 
**kwargs: Any) -> AsyncIterator[ ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3514,7 +3767,7 @@ async def list_vector_stores( cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_vector_stores_request( + _request = build_assistants_list_vector_stores_request( limit=limit, order=order, after=after, @@ -3525,6 +3778,15 @@ async def list_vector_stores( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3697,7 +3959,7 @@ async def create_vector_store( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_vector_store_request( + _request = build_assistants_create_vector_store_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -3706,6 +3968,15 @@ async def 
create_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3758,7 +4029,7 @@ async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_vector_store_request( + _request = build_assistants_get_vector_store_request( vector_store_id=vector_store_id, api_version=self._config.api_version, headers=_headers, @@ -3766,6 +4037,15 @@ async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3917,7 +4197,7 @@ async def modify_vector_store( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_modify_vector_store_request( + _request = build_assistants_modify_vector_store_request( 
vector_store_id=vector_store_id, content_type=content_type, api_version=self._config.api_version, @@ -3927,6 +4207,15 @@ async def modify_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3980,7 +4269,7 @@ async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _mod cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_vector_store_request( + _request = build_assistants_delete_vector_store_request( vector_store_id=vector_store_id, api_version=self._config.api_version, headers=_headers, @@ -3988,6 +4277,15 @@ async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _mod ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4070,7 +4368,7 @@ async def list_vector_store_files( cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = 
kwargs.pop("cls", None) - _request = build_ai_assistant_list_vector_store_files_request( + _request = build_assistants_list_vector_store_files_request( vector_store_id=vector_store_id, filter=filter, limit=limit, @@ -4083,6 +4381,15 @@ async def list_vector_store_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4230,7 +4537,7 @@ async def create_vector_store_file( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_vector_store_file_request( + _request = build_assistants_create_vector_store_file_request( vector_store_id=vector_store_id, content_type=content_type, api_version=self._config.api_version, @@ -4240,6 +4547,15 @@ async def create_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4294,7 +4610,7 @@ async def get_vector_store_file(self, 
vector_store_id: str, file_id: str, **kwar cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_vector_store_file_request( + _request = build_assistants_get_vector_store_file_request( vector_store_id=vector_store_id, file_id=file_id, api_version=self._config.api_version, @@ -4303,6 +4619,15 @@ async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwar ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4362,7 +4687,7 @@ async def delete_vector_store_file( cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) - _request = build_ai_assistant_delete_vector_store_file_request( + _request = build_assistants_delete_vector_store_file_request( vector_store_id=vector_store_id, file_id=file_id, api_version=self._config.api_version, @@ -4371,6 +4696,15 @@ async def delete_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url 
= self._client.format_url(_request.url, **path_format_arguments) @@ -4518,7 +4852,7 @@ async def create_vector_store_file_batch( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_ai_assistant_create_vector_store_file_batch_request( + _request = build_assistants_create_vector_store_file_batch_request( vector_store_id=vector_store_id, content_type=content_type, api_version=self._config.api_version, @@ -4528,6 +4862,15 @@ async def create_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4584,7 +4927,7 @@ async def get_vector_store_file_batch( cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - _request = build_ai_assistant_get_vector_store_file_batch_request( + _request = build_assistants_get_vector_store_file_batch_request( vector_store_id=vector_store_id, batch_id=batch_id, api_version=self._config.api_version, @@ -4593,6 +4936,15 @@ async def get_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": 
self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4650,7 +5002,7 @@ async def cancel_vector_store_file_batch( cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - _request = build_ai_assistant_cancel_vector_store_file_batch_request( + _request = build_assistants_cancel_vector_store_file_batch_request( vector_store_id=vector_store_id, batch_id=batch_id, api_version=self._config.api_version, @@ -4659,6 +5011,15 @@ async def cancel_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4744,7 +5105,7 @@ async def list_vector_store_file_batch_files( cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - _request = build_ai_assistant_list_vector_store_file_batch_files_request( + _request = build_assistants_list_vector_store_file_batch_files_request( vector_store_id=vector_store_id, batch_id=batch_id, filter=filter, @@ -4758,6 +5119,15 @@ async def list_vector_store_file_batch_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True + ), + "resourceGroupName": self._serialize.url( + 
"self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True + ), + "projectName": self._serialize.url( + "self._config.project_name", self._config.project_name, "str", skip_quote=True + ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py index 46e0725a5b24..48ecbadbc90e 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_vendor.py @@ -8,7 +8,7 @@ from abc import ABC from typing import TYPE_CHECKING -from ._configuration import AIAssistantClientConfiguration +from ._configuration import AssistantsClientConfiguration if TYPE_CHECKING: from azure.core import AsyncPipelineClient @@ -16,10 +16,10 @@ from .._serialization import Deserializer, Serializer -class AIAssistantClientMixinABC(ABC): +class AssistantsClientMixinABC(ABC): """DO NOT use this class. 
It is for internal typing use only.""" _client: "AsyncPipelineClient" - _config: AIAssistantClientConfiguration + _config: AssistantsClientConfiguration _serialize: "Serializer" _deserialize: "Deserializer" diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py index 081376de319f..336ad4ce9f17 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py @@ -15,12 +15,12 @@ from ._models import ( # type: ignore AISearchIndexResource, - Agent, - AgentDeletionStatus, - AgentThread, - AgentThreadCreationOptions, - AgentsApiResponseFormat, - AgentsNamedToolChoice, + Assistant, + AssistantDeletionStatus, + AssistantThread, + AssistantThreadCreationOptions, + AssistantsApiResponseFormat, + AssistantsNamedToolChoice, AzureAISearchResource, AzureAISearchToolDefinition, AzureFunctionBinding, @@ -31,7 +31,6 @@ BingGroundingToolDefinition, CodeInterpreterToolDefinition, CodeInterpreterToolResource, - ErrorResponse, FileDeletionStatus, FileListResponse, FileSearchRankingOptions, @@ -73,7 +72,7 @@ MessageTextUrlCitationDetails, MicrosoftFabricToolDefinition, OpenAIFile, - OpenAIPageableListOfAgent, + OpenAIPageableListOfAssistant, OpenAIPageableListOfRunStep, OpenAIPageableListOfThreadMessage, OpenAIPageableListOfThreadRun, @@ -176,10 +175,10 @@ ) from ._enums import ( # type: ignore - AgentStreamEvent, - AgentsApiResponseFormatMode, - AgentsApiToolChoiceOptionMode, - AgentsNamedToolChoiceType, + AssistantStreamEvent, + AssistantsApiResponseFormatMode, + AssistantsApiToolChoiceOptionMode, + AssistantsNamedToolChoiceType, AzureAISearchQueryType, DoneEvent, ErrorEvent, @@ -218,12 +217,12 @@ __all__ = [ "AISearchIndexResource", - "Agent", - "AgentDeletionStatus", - "AgentThread", - "AgentThreadCreationOptions", - "AgentsApiResponseFormat", - "AgentsNamedToolChoice", + "Assistant", + 
"AssistantDeletionStatus", + "AssistantThread", + "AssistantThreadCreationOptions", + "AssistantsApiResponseFormat", + "AssistantsNamedToolChoice", "AzureAISearchResource", "AzureAISearchToolDefinition", "AzureFunctionBinding", @@ -234,7 +233,6 @@ "BingGroundingToolDefinition", "CodeInterpreterToolDefinition", "CodeInterpreterToolResource", - "ErrorResponse", "FileDeletionStatus", "FileListResponse", "FileSearchRankingOptions", @@ -276,7 +274,7 @@ "MessageTextUrlCitationDetails", "MicrosoftFabricToolDefinition", "OpenAIFile", - "OpenAIPageableListOfAgent", + "OpenAIPageableListOfAssistant", "OpenAIPageableListOfRunStep", "OpenAIPageableListOfThreadMessage", "OpenAIPageableListOfThreadRun", @@ -376,10 +374,10 @@ "VectorStoreStaticChunkingStrategyOptions", "VectorStoreStaticChunkingStrategyRequest", "VectorStoreStaticChunkingStrategyResponse", - "AgentStreamEvent", - "AgentsApiResponseFormatMode", - "AgentsApiToolChoiceOptionMode", - "AgentsNamedToolChoiceType", + "AssistantStreamEvent", + "AssistantsApiResponseFormatMode", + "AssistantsApiToolChoiceOptionMode", + "AssistantsNamedToolChoiceType", "AzureAISearchQueryType", "DoneEvent", "ErrorEvent", diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py index 405ab0478f62..3f2e6561663c 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py @@ -10,7 +10,7 @@ from azure.core import CaseInsensitiveEnumMeta -class AgentsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): +class AssistantsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Represents the mode in which the model will handle the return format of a tool call.""" AUTO = "auto" @@ -19,7 +19,7 @@ class AgentsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Setting the value to ``none``, will result in a 400 Bad request.""" 
-class AgentsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): +class AssistantsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies how the tool choice will be used.""" NONE = "none" @@ -28,8 +28,8 @@ class AgentsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta """The model can pick between generating a message or calling a function.""" -class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Available tool types for agents named tools.""" +class AssistantsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Available tool types for assistants named tools.""" FUNCTION = "function" """Tool type ``function``""" @@ -49,7 +49,7 @@ class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Tool type ``bing_custom_search``""" -class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): +class AssistantStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Each event in a server-sent events stream has an ``event`` and ``data`` property: @@ -61,7 +61,7 @@ class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): We emit events whenever a new object is created, transitions to a new state, or is being streamed in parts (deltas). For example, we emit ``thread.run.created`` when a new run - is created, ``thread.run.completed`` when a run completes, and so on. When an Agent chooses + is created, ``thread.run.completed`` when a run completes, and so on. When an Assistant chooses to create a message during a run, we emit a ``thread.message.created event``, a ``thread.message.in_progress`` event, many ``thread.message.delta`` events, and finally a ``thread.message.completed`` event. @@ -71,7 +71,7 @@ class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): """ THREAD_CREATED = "thread.created" - """Event sent when a new thread is created. 
The data of this event is of type AgentThread""" + """Event sent when a new thread is created. The data of this event is of type AssistantThread""" THREAD_RUN_CREATED = "thread.run.created" """Event sent when a new run is created. The data of this event is of type ThreadRun""" THREAD_RUN_QUEUED = "thread.run.queued" @@ -165,10 +165,10 @@ class FilePurpose(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Indicates a file is used for fine tuning input.""" FINE_TUNE_RESULTS = "fine-tune-results" """Indicates a file is used for fine tuning results.""" - AGENTS = "assistants" - """Indicates a file is used as input to agents.""" - AGENTS_OUTPUT = "assistants_output" - """Indicates a file is used as output by agents.""" + ASSISTANTS = "assistants" + """Indicates a file is used as input to assistants.""" + ASSISTANTS_OUTPUT = "assistants_output" + """Indicates a file is used as output by assistants.""" BATCH = "batch" """Indicates a file is used as input to .""" BATCH_OUTPUT = "batch_output" @@ -245,8 +245,8 @@ class MessageRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): USER = "user" """The role representing the end-user.""" - AGENT = "assistant" - """The role representing the agent.""" + ASSISTANT = "assistant" + """The role representing the assistant.""" class MessageStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -307,7 +307,7 @@ class RunAdditionalFieldList(str, Enum, metaclass=CaseInsensitiveEnumMeta): class RunStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible values for the status of an agent thread run.""" + """Possible values for the status of an assistant thread run.""" QUEUED = "queued" """Represents a run that is queued to start.""" @@ -413,7 +413,7 @@ class ThreadStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Thread operation related streaming events.""" THREAD_CREATED = "thread.created" - """Event sent when a new thread is created. 
The data of this event is of type AgentThread""" + """Event sent when a new thread is created. The data of this event is of type AssistantThread""" class TruncationStrategy(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py index 6b6d52e9c14e..3d4ce4433874 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py @@ -25,8 +25,62 @@ from .. import _types, models as _models -class Agent(_model_base.Model): - """Represents an agent that can call the model and use tools. +class AISearchIndexResource(_model_base.Model): + """A AI Search Index resource. + + :ivar index_connection_id: An index connection id in an IndexResource attached to this + assistant. Required. + :vartype index_connection_id: str + :ivar index_name: The name of an index in an IndexResource attached to this assistant. + Required. + :vartype index_name: str + :ivar query_type: Type of query in an AIIndexResource attached to this assistant. Known values + are: "simple", "semantic", "vector", "vector_simple_hybrid", and "vector_semantic_hybrid". + :vartype query_type: str or ~azure.ai.assistants.models.AzureAISearchQueryType + :ivar top_k: Number of documents to retrieve from search and present to the model. + :vartype top_k: int + :ivar filter: Odata filter string for search resource. + :vartype filter: str + """ + + index_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An index connection id in an IndexResource attached to this assistant. Required.""" + index_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of an index in an IndexResource attached to this assistant. 
Required.""" + query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Type of query in an AIIndexResource attached to this assistant. Known values are: \"simple\", + \"semantic\", \"vector\", \"vector_simple_hybrid\", and \"vector_semantic_hybrid\".""" + top_k: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Number of documents to retrieve from search and present to the model.""" + filter: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Odata filter string for search resource.""" + + @overload + def __init__( + self, + *, + index_connection_id: str, + index_name: str, + query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = None, + top_k: Optional[int] = None, + filter: Optional[str] = None, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Assistant(_model_base.Model): + """Represents an assistant that can call the model and use tools. :ivar id: The identifier, which can be referenced in API endpoints. Required. :vartype id: str @@ -36,18 +90,18 @@ class Agent(_model_base.Model): :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. Required. :vartype created_at: ~datetime.datetime - :ivar name: The name of the agent. Required. + :ivar name: The name of the assistant. Required. :vartype name: str - :ivar description: The description of the agent. Required. + :ivar description: The description of the assistant. Required. :vartype description: str :ivar model: The ID of the model to use. Required. 
:vartype model: str - :ivar instructions: The system instructions for the agent to use. Required. + :ivar instructions: The system instructions for the assistant to use. Required. :vartype instructions: str - :ivar tools: The collection of tools enabled for the agent. Required. + :ivar tools: The collection of tools enabled for the assistant. Required. :vartype tools: list[~azure.ai.assistants.models.ToolDefinition] - :ivar tool_resources: A set of resources that are used by the agent's tools. The resources are - specific to the type of tool. For example, the ``code_interpreter`` + :ivar tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Required. :vartype tool_resources: ~azure.ai.assistants.models.ToolResources @@ -61,11 +115,12 @@ class Agent(_model_base.Model): We generally recommend altering this or temperature but not both. Required. :vartype top_p: float - :ivar response_format: The response format of the tool calls used by this agent. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType - :vartype response_format: str or str or ~azure.ai.assistants.models.AgentsApiResponseFormatMode - or ~azure.ai.assistants.models.AgentsApiResponseFormat or + :ivar response_format: The response format of the tool calls used by this assistant. 
Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType + :vartype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 @@ -82,18 +137,18 @@ class Agent(_model_base.Model): ) """The Unix timestamp, in seconds, representing when this object was created. Required.""" name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The name of the agent. Required.""" + """The name of the assistant. Required.""" description: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The description of the agent. Required.""" + """The description of the assistant. Required.""" model: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the model to use. Required.""" instructions: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The system instructions for the agent to use. Required.""" + """The system instructions for the assistant to use. Required.""" tools: List["_models.ToolDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The collection of tools enabled for the agent. Required.""" + """The collection of tools enabled for the assistant. Required.""" tool_resources: "_models.ToolResources" = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """A set of resources that are used by the agent's tools. The resources are specific to the type - of tool. For example, the ``code_interpreter`` + """A set of resources that are used by the assistant's tools. 
The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Required.""" temperature: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -106,11 +161,11 @@ class Agent(_model_base.Model): So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. Required.""" - response_format: Optional["_types.AgentsApiResponseFormatOption"] = rest_field( + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) - """The response format of the tool calls used by this agent. Is one of the following types: str, - Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat, + """The response format of the tool calls used by this assistant. Is one of the following types: + str, Union[str, \"_models.AssistantsApiResponseFormatMode\"], AssistantsApiResponseFormat, ResponseFormatJsonSchemaType""" metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing @@ -132,7 +187,7 @@ def __init__( temperature: float, top_p: float, metadata: Dict[str, str], - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, ) -> None: ... @overload @@ -147,8 +202,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["assistant"] = "assistant" -class AgentDeletionStatus(_model_base.Model): - """The status of an agent deletion operation. +class AssistantDeletionStatus(_model_base.Model): + """The status of an assistant deletion operation. :ivar id: The ID of the resource specified for deletion. Required. 
:vartype id: str @@ -187,7 +242,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["assistant.deleted"] = "assistant.deleted" -class AgentsApiResponseFormat(_model_base.Model): +class AssistantsApiResponseFormat(_model_base.Model): """An object describing the expected output of the model. If ``json_object`` only ``function`` type ``tools`` are allowed to be passed to the Run. If ``text`` the model can return text or any value needed. @@ -220,18 +275,18 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AgentsNamedToolChoice(_model_base.Model): +class AssistantsNamedToolChoice(_model_base.Model): """Specifies a tool the model should use. Use to force the model to call a specific tool. :ivar type: the type of tool. If type is ``function``, the function name must be set. Required. Known values are: "function", "code_interpreter", "file_search", "bing_grounding", "fabric_dataagent", "sharepoint_grounding", "azure_ai_search", and "bing_custom_search". - :vartype type: str or ~azure.ai.assistants.models.AgentsNamedToolChoiceType + :vartype type: str or ~azure.ai.assistants.models.AssistantsNamedToolChoiceType :ivar function: The name of the function to call. :vartype function: ~azure.ai.assistants.models.FunctionName """ - type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field( + type: Union[str, "_models.AssistantsNamedToolChoiceType"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """the type of tool. If type is ``function``, the function name must be set. Required. Known @@ -245,7 +300,7 @@ class AgentsNamedToolChoice(_model_base.Model): def __init__( self, *, - type: Union[str, "_models.AgentsNamedToolChoiceType"], + type: Union[str, "_models.AssistantsNamedToolChoiceType"], function: Optional["_models.FunctionName"] = None, ) -> None: ... 
@@ -260,8 +315,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AgentThread(_model_base.Model): - """Information about a single thread associated with an agent. +class AssistantThread(_model_base.Model): + """Information about a single thread associated with an assistant. :ivar id: The identifier, which can be referenced in API endpoints. Required. :vartype id: str @@ -270,8 +325,8 @@ class AgentThread(_model_base.Model): :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. Required. :vartype created_at: ~datetime.datetime - :ivar tool_resources: A set of resources that are made available to the agent's tools in this - thread. The resources are specific to the type + :ivar tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Required. @@ -291,8 +346,8 @@ class AgentThread(_model_base.Model): ) """The Unix timestamp, in seconds, representing when this object was created. Required.""" tool_resources: "_models.ToolResources" = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """A set of resources that are made available to the agent's tools in this thread. The resources - are specific to the type + """A set of resources that are made available to the assistant's tools in this thread. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Required.""" @@ -323,13 +378,13 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["thread"] = "thread" -class AgentThreadCreationOptions(_model_base.Model): - """The details used to create a new agent thread. 
+class AssistantThreadCreationOptions(_model_base.Model): + """The details used to create a new assistant thread. :ivar messages: The initial messages to associate with the new thread. :vartype messages: list[~azure.ai.assistants.models.ThreadMessageOptions] - :ivar tool_resources: A set of resources that are made available to the agent's tools in this - thread. The resources are specific to the + :ivar tool_resources: A set of resources that are made available to the assistant's tools in + this thread. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. @@ -347,8 +402,8 @@ class AgentThreadCreationOptions(_model_base.Model): tool_resources: Optional["_models.ToolResources"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) - """A set of resources that are made available to the agent's tools in this thread. The resources - are specific to the + """A set of resources that are made available to the assistant's tools in this thread. The + resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs.""" @@ -377,72 +432,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AISearchIndexResource(_model_base.Model): - """A AI Search Index resource. - - :ivar index_connection_id: An index connection id in an IndexResource attached to this agent. - Required. - :vartype index_connection_id: str - :ivar index_name: The name of an index in an IndexResource attached to this agent. Required. - :vartype index_name: str - :ivar query_type: Type of query in an AIIndexResource attached to this agent. Known values are: - "simple", "semantic", "vector", "vector_simple_hybrid", and "vector_semantic_hybrid". 
- :vartype query_type: str or ~azure.ai.assistants.models.AzureAISearchQueryType - :ivar top_k: Number of documents to retrieve from search and present to the model. - :vartype top_k: int - :ivar filter: Odata filter string for search resource. - :vartype filter: str - """ - - index_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """An index connection id in an IndexResource attached to this agent. Required.""" - index_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The name of an index in an IndexResource attached to this agent. Required.""" - query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """Type of query in an AIIndexResource attached to this agent. Known values are: \"simple\", - \"semantic\", \"vector\", \"vector_simple_hybrid\", and \"vector_semantic_hybrid\".""" - top_k: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Number of documents to retrieve from search and present to the model.""" - filter: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Odata filter string for search resource.""" - - @overload - def __init__( - self, - *, - index_connection_id: str, - index_name: str, - query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = None, - top_k: Optional[int] = None, - filter: Optional[str] = None, # pylint: disable=redefined-builtin - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class AzureAISearchResource(_model_base.Model): """A set of index resources used by the ``azure_ai_search`` tool. 
- :ivar index_list: The indices attached to this agent. There can be a maximum of 1 index - resource attached to the agent. + :ivar index_list: The indices attached to this assistant. There can be a maximum of 1 index + resource attached to the assistant. :vartype index_list: list[~azure.ai.assistants.models.AISearchIndexResource] """ index_list: Optional[List["_models.AISearchIndexResource"]] = rest_field( name="indexes", visibility=["read", "create", "update", "delete", "query"] ) - """The indices attached to this agent. There can be a maximum of 1 index - resource attached to the agent.""" + """The indices attached to this assistant. There can be a maximum of 1 index + resource attached to the assistant.""" @overload def __init__( @@ -463,7 +465,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ToolDefinition(_model_base.Model): - """An abstract representation of an input tool definition that an agent can use. + """An abstract representation of an input tool definition that an assistant can use. You probably want to use the sub-classes and not this class directly. Known sub-classes are: AzureAISearchToolDefinition, AzureFunctionToolDefinition, BingCustomSearchToolDefinition, @@ -498,7 +500,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class AzureAISearchToolDefinition(ToolDefinition, discriminator="azure_ai_search"): - """The input definition information for an Azure AI search tool as used to configure an agent. + """The input definition information for an Azure AI search tool as used to configure an assistant. :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is "azure_ai_search". @@ -645,7 +647,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class AzureFunctionToolDefinition(ToolDefinition, discriminator="azure_function"): - """The input definition information for a azure function tool as used to configure an agent. 
+ """The input definition information for a azure function tool as used to configure an assistant. :ivar type: The object type, which is always 'azure_function'. Required. Default value is "azure_function". @@ -682,7 +684,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BingCustomSearchToolDefinition(ToolDefinition, discriminator="bing_custom_search"): - """The input definition information for a Bing custom search tool as used to configure an agent. + """The input definition information for a Bing custom search tool as used to configure an + assistant. :ivar type: The object type, which is always 'bing_custom_search'. Required. Default value is "bing_custom_search". @@ -720,7 +723,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding"): """The input definition information for a bing grounding search tool as used to configure an - agent. + assistant. :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is "bing_grounding". @@ -756,7 +759,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"): - """The input definition information for a code interpreter tool as used to configure an agent. + """The input definition information for a code interpreter tool as used to configure an assistant. :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is "code_interpreter". @@ -823,35 +826,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ErrorResponse(_model_base.Model): - """Common error response for all Azure Resource Manager APIs to return error details for failed - operations. - - :ivar error: The error object. 
- :vartype error: ~azure.ai.assistants.models.ErrorDetail - """ - - error: Optional["_models.ErrorDetail"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The error object.""" - - @overload - def __init__( - self, - *, - error: Optional["_models.ErrorDetail"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class FileDeletionStatus(_model_base.Model): """A status response from a file deletion operation. @@ -990,7 +964,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): - """The input definition information for a file search tool as used to configure an agent. + """The input definition information for a file search tool as used to configure an assistant. :ivar type: The object type, which is always 'file_search'. Required. Default value is "file_search". @@ -1071,9 +1045,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class FileSearchToolResource(_model_base.Model): """A set of resources that are used by the ``file_search`` tool. - :ivar vector_store_ids: The ID of the vector store attached to this agent. There can be a + :ivar vector_store_ids: The ID of the vector store attached to this assistant. There can be a maximum of 1 vector - store attached to the agent. + store attached to the assistant. :vartype vector_store_ids: list[str] :ivar vector_stores: The list of vector store configuration objects from Azure. This list is limited to one element. @@ -1082,8 +1056,8 @@ class FileSearchToolResource(_model_base.Model): """ vector_store_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The ID of the vector store attached to this agent. 
There can be a maximum of 1 vector - store attached to the agent.""" + """The ID of the vector store attached to this assistant. There can be a maximum of 1 vector + store attached to the assistant.""" vector_stores: Optional[List["_models.VectorStoreConfigurations"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1180,7 +1154,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class FunctionToolDefinition(ToolDefinition, discriminator="function"): - """The input definition information for a function tool as used to configure an agent. + """The input definition information for a function tool as used to configure an assistant. :ivar type: The object type, which is always 'function'. Required. Default value is "function". :vartype type: str @@ -1786,7 +1760,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class MessageDeltaTextUrlCitationAnnotation(MessageDeltaTextAnnotation, discriminator="url_citation"): """A citation within the message that points to a specific URL associated with the message. - Generated when the agent uses tools such as 'bing_grounding' to search the Internet. + Generated when the assistant uses tools such as 'bing_grounding' to search the Internet. :ivar index: The index of the annotation within a text content part. Required. :vartype index: int @@ -2034,7 +2008,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class MessageTextDetails(_model_base.Model): - """The text and associated annotations for a single item of agent thread message content. + """The text and associated annotations for a single item of assistant thread message content. :ivar value: The text data. Required. 
:vartype value: str @@ -2070,8 +2044,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="file_citation"): """A citation within the message that points to a specific quote from a specific File associated - with the agent or the message. Generated when the agent uses the 'file_search' tool to search - files. + with the assistant or the message. Generated when the assistant uses the 'file_search' tool to + search files. :ivar text: The textual content associated with this text annotation item. Required. :vartype text: str @@ -2080,7 +2054,7 @@ class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="fi :vartype type: str :ivar file_citation: A citation within the message that points to a specific quote from a specific file. - Generated when the agent uses the "file_search" tool to search files. Required. + Generated when the assistant uses the "file_search" tool to search files. Required. :vartype file_citation: ~azure.ai.assistants.models.MessageTextFileCitationDetails :ivar start_index: The first text index associated with this text annotation. :vartype start_index: int @@ -2094,7 +2068,7 @@ class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="fi visibility=["read", "create", "update", "delete", "query"] ) """A citation within the message that points to a specific quote from a specific file. - Generated when the agent uses the \"file_search\" tool to search files. Required.""" + Generated when the assistant uses the \"file_search\" tool to search files. 
Required.""" start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first text index associated with this text annotation.""" end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -2163,8 +2137,8 @@ class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_p :ivar type: The object type, which is always 'file_path'. Required. Default value is "file_path". :vartype type: str - :ivar file_path: A URL for the file that's generated when the agent used the code_interpreter - tool to generate a file. Required. + :ivar file_path: A URL for the file that's generated when the assistant used the + code_interpreter tool to generate a file. Required. :vartype file_path: ~azure.ai.assistants.models.MessageTextFilePathDetails :ivar start_index: The first text index associated with this text annotation. :vartype start_index: int @@ -2177,8 +2151,8 @@ class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_p file_path: "_models.MessageTextFilePathDetails" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) - """A URL for the file that's generated when the agent used the code_interpreter tool to generate a - file. Required.""" + """A URL for the file that's generated when the assistant used the code_interpreter tool to + generate a file. Required.""" start_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first text index associated with this text annotation.""" end_index: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -2235,7 +2209,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class MessageTextUrlCitationAnnotation(MessageTextAnnotation, discriminator="url_citation"): """A citation within the message that points to a specific URL associated with the message. 
- Generated when the agent uses tools such as 'bing_grounding' to search the Internet. + Generated when the assistant uses tools such as 'bing_grounding' to search the Internet. :ivar text: The textual content associated with this text annotation item. Required. :vartype text: str @@ -2316,7 +2290,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="fabric_dataagent"): - """The input definition information for a Microsoft Fabric tool as used to configure an agent. + """The input definition information for a Microsoft Fabric tool as used to configure an assistant. :ivar type: The object type, which is always 'fabric_dataagent'. Required. Default value is "fabric_dataagent". @@ -2352,7 +2326,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class OpenAIFile(_model_base.Model): - """Represents an agent that can call the model and use tools. + """Represents an assistant that can call the model and use tools. :ivar object: The object type, which is always 'file'. Required. Default value is "file". :vartype object: str @@ -2427,13 +2401,13 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["file"] = "file" -class OpenAIPageableListOfAgent(_model_base.Model): +class OpenAIPageableListOfAssistant(_model_base.Model): """The response data for a requested list of items. :ivar object: The object type, which is always list. Required. Default value is "list". :vartype object: str :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.assistants.models.Agent] + :vartype data: list[~azure.ai.assistants.models.Assistant] :ivar first_id: The first ID represented in this list. Required. :vartype first_id: str :ivar last_id: The last ID represented in this list. Required. 
@@ -2445,7 +2419,7 @@ class OpenAIPageableListOfAgent(_model_base.Model): object: Literal["list"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.Agent"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + data: List["_models.Assistant"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The requested list of items. Required.""" first_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The first ID represented in this list. Required.""" @@ -2459,7 +2433,7 @@ class OpenAIPageableListOfAgent(_model_base.Model): def __init__( self, *, - data: List["_models.Agent"], + data: List["_models.Assistant"], first_id: str, last_id: str, has_more: bool, @@ -2863,8 +2837,6 @@ class OpenApiFunctionDefinition(_model_base.Model): :vartype auth: ~azure.ai.assistants.models.OpenApiAuthDetails :ivar default_params: List of OpenAPI spec parameters that will use user-provided defaults. :vartype default_params: list[str] - :ivar functions: List of functions returned in response. - :vartype functions: list[~azure.ai.assistants.models.FunctionDefinition] """ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -2878,10 +2850,6 @@ class OpenApiFunctionDefinition(_model_base.Model): """Open API authentication details. 
Required.""" default_params: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of OpenAPI spec parameters that will use user-provided defaults.""" - functions: Optional[List["_models.FunctionDefinition"]] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """List of functions returned in response.""" @overload def __init__( @@ -2892,7 +2860,6 @@ def __init__( auth: "_models.OpenApiAuthDetails", description: Optional[str] = None, default_params: Optional[List[str]] = None, - functions: Optional[List["_models.FunctionDefinition"]] = None, ) -> None: ... @overload @@ -2969,7 +2936,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class OpenApiToolDefinition(ToolDefinition, discriminator="openapi"): - """The input definition information for an OpenAPI tool as used to configure an agent. + """The input definition information for an OpenAPI tool as used to configure an assistant. :ivar type: The object type, which is always 'openapi'. Required. Default value is "openapi". :vartype type: str @@ -3003,7 +2970,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class RequiredAction(_model_base.Model): - """An abstract representation of a required action for an agent thread run to continue. + """An abstract representation of a required action for an assistant thread run to continue. You probably want to use the sub-classes and not this class directly. Known sub-classes are: SubmitToolOutputsAction @@ -3268,7 +3235,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class RunError(_model_base.Model): - """The details of an error as encountered by an agent thread run. + """The details of an error as encountered by an assistant thread run. :ivar code: The status for the error. Required. 
:vartype code: str @@ -3301,7 +3268,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class RunStep(_model_base.Model): - """Detailed information about a single step of an agent thread run. + """Detailed information about a single step of an assistant thread run. :ivar id: The identifier, which can be referenced in API endpoints. Required. :vartype id: str @@ -3311,8 +3278,8 @@ class RunStep(_model_base.Model): :ivar type: The type of run step, which can be either message_creation or tool_calls. Required. Known values are: "message_creation" and "tool_calls". :vartype type: str or ~azure.ai.assistants.models.RunStepType - :ivar agent_id: The ID of the agent associated with the run step. Required. - :vartype agent_id: str + :ivar assistant_id: The ID of the assistant associated with the run step. Required. + :vartype assistant_id: str :ivar thread_id: The ID of the thread that was run. Required. :vartype thread_id: str :ivar run_id: The ID of the run that this run step is a part of. Required. @@ -3355,8 +3322,8 @@ class RunStep(_model_base.Model): type: Union[str, "_models.RunStepType"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The type of run step, which can be either message_creation or tool_calls. Required. Known values are: \"message_creation\" and \"tool_calls\".""" - agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"]) - """The ID of the agent associated with the run step. Required.""" + assistant_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the assistant associated with the run step. Required.""" thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the thread that was run. 
Required.""" run_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -3404,7 +3371,7 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin type: Union[str, "_models.RunStepType"], - agent_id: str, + assistant_id: str, thread_id: str, run_id: str, status: Union[str, "_models.RunStepStatus"], @@ -4998,7 +4965,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint_grounding"): - """The input definition information for a sharepoint tool as used to configure an agent. + """The input definition information for a sharepoint tool as used to configure an assistant. :ivar type: The object type, which is always 'sharepoint_grounding'. Required. Default value is "sharepoint_grounding". @@ -5034,7 +5001,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): - """The details for required tool calls that must be submitted for an agent thread run to continue. + """The details for required tool calls that must be submitted for an assistant thread run to + continue. :ivar type: The object type, which is always 'submit_tool_outputs'. Required. Default value is "submit_tool_outputs". @@ -5073,7 +5041,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class SubmitToolOutputsDetails(_model_base.Model): """The details describing tools that should be called to submit tool outputs. - :ivar tool_calls: The list of tool calls that must be resolved for the agent thread run to + :ivar tool_calls: The list of tool calls that must be resolved for the assistant thread run to continue. Required. 
:vartype tool_calls: list[~azure.ai.assistants.models.RequiredToolCall] """ @@ -5081,7 +5049,8 @@ class SubmitToolOutputsDetails(_model_base.Model): tool_calls: List["_models.RequiredToolCall"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) - """The list of tool calls that must be resolved for the agent thread run to continue. Required.""" + """The list of tool calls that must be resolved for the assistant thread run to continue. + Required.""" @overload def __init__( @@ -5142,7 +5111,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ThreadMessage(_model_base.Model): - """A single, existing message within an agent thread. + """A single, existing message within an assistant thread. :ivar id: The identifier, which can be referenced in API endpoints. Required. :vartype id: str @@ -5166,13 +5135,15 @@ class ThreadMessage(_model_base.Model): :ivar incomplete_at: The Unix timestamp (in seconds) for when the message was marked as incomplete. Required. :vartype incomplete_at: ~datetime.datetime - :ivar role: The role associated with the agent thread message. Required. Known values are: + :ivar role: The role associated with the assistant thread message. Required. Known values are: "user" and "assistant". :vartype role: str or ~azure.ai.assistants.models.MessageRole - :ivar content: The list of content items associated with the agent thread message. Required. + :ivar content: The list of content items associated with the assistant thread message. + Required. :vartype content: list[~azure.ai.assistants.models.MessageContent] - :ivar agent_id: If applicable, the ID of the agent that authored this message. Required. - :vartype agent_id: str + :ivar assistant_id: If applicable, the ID of the assistant that authored this message. + Required. + :vartype assistant_id: str :ivar run_id: If applicable, the ID of the run associated with the authoring of this message. Required. 
:vartype run_id: str @@ -5212,12 +5183,12 @@ class ThreadMessage(_model_base.Model): ) """The Unix timestamp (in seconds) for when the message was marked as incomplete. Required.""" role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The role associated with the agent thread message. Required. Known values are: \"user\" and + """The role associated with the assistant thread message. Required. Known values are: \"user\" and \"assistant\".""" content: List["_models.MessageContent"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The list of content items associated with the agent thread message. Required.""" - agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"]) - """If applicable, the ID of the agent that authored this message. Required.""" + """The list of content items associated with the assistant thread message. Required.""" + assistant_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """If applicable, the ID of the assistant that authored this message. Required.""" run_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """If applicable, the ID of the run associated with the authoring of this message. 
Required.""" attachments: List["_models.MessageAttachment"] = rest_field( @@ -5242,7 +5213,7 @@ def __init__( incomplete_at: datetime.datetime, role: Union[str, "_models.MessageRole"], content: List["_models.MessageContent"], - agent_id: str, + assistant_id: str, run_id: str, attachments: List["_models.MessageAttachment"], metadata: Dict[str, str], @@ -5261,15 +5232,15 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ThreadMessageOptions(_model_base.Model): - """A single message within an agent thread, as provided during that thread's creation for its + """A single message within an assistant thread, as provided during that thread's creation for its initial state. :ivar role: The role of the entity that is creating the message. Allowed values include: * `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the agent. Use this value to insert - messages from the agent into the + * `assistant`: Indicates the message is generated by the assistant. Use this value to insert + messages from the assistant into the conversation. Required. Known values are: "user" and "assistant". :vartype role: str or ~azure.ai.assistants.models.MessageRole :ivar content: The textual content of the initial message. Currently, robust input including @@ -5290,8 +5261,8 @@ class ThreadMessageOptions(_model_base.Model): * `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the agent. Use this value to insert - messages from the agent into the + * `assistant`: Indicates the message is generated by the assistant. Use this value to insert + messages from the assistant into the conversation. Required. 
Known values are: \"user\" and \"assistant\".""" content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The textual content of the initial message. Currently, robust input including images and @@ -5328,7 +5299,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ThreadRun(_model_base.Model): - """Data representing a single evaluation run of an agent thread. + """Data representing a single evaluation run of an assistant thread. :ivar id: The identifier, which can be referenced in API endpoints. Required. :vartype id: str @@ -5337,23 +5308,24 @@ class ThreadRun(_model_base.Model): :vartype object: str :ivar thread_id: The ID of the thread associated with this run. Required. :vartype thread_id: str - :ivar agent_id: The ID of the agent associated with the thread this run was performed against. - Required. - :vartype agent_id: str - :ivar status: The status of the agent thread run. Required. Known values are: "queued", + :ivar assistant_id: The ID of the assistant associated with the thread this run was performed + against. Required. + :vartype assistant_id: str + :ivar status: The status of the assistant thread run. Required. Known values are: "queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", and "expired". :vartype status: str or ~azure.ai.assistants.models.RunStatus - :ivar required_action: The details of the action required for the agent thread run to continue. + :ivar required_action: The details of the action required for the assistant thread run to + continue. :vartype required_action: ~azure.ai.assistants.models.RequiredAction - :ivar last_error: The last error, if any, encountered by this agent thread run. Required. + :ivar last_error: The last error, if any, encountered by this assistant thread run. Required. :vartype last_error: ~azure.ai.assistants.models.RunError :ivar model: The ID of the model to use. Required. 
:vartype model: str - :ivar instructions: The overridden system instructions used for this agent thread run. + :ivar instructions: The overridden system instructions used for this assistant thread run. Required. :vartype instructions: str - :ivar tools: The overridden enabled tools used for this agent thread run. Required. + :ivar tools: The overridden enabled tools used for this assistant thread run. Required. :vartype tools: list[~azure.ai.assistants.models.ToolDefinition] :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. Required. @@ -5391,21 +5363,23 @@ class ThreadRun(_model_base.Model): moves forward. Required. :vartype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :ivar tool_choice: Controls whether or not and which tool is called by the model. Required. Is - one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice - :vartype tool_choice: str or str or ~azure.ai.assistants.models.AgentsApiToolChoiceOptionMode - or ~azure.ai.assistants.models.AgentsNamedToolChoice + one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice + :vartype tool_choice: str or str or + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :ivar response_format: The response format of the tool calls used in this run. Required. 
Is one - of the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat, ResponseFormatJsonSchemaType - :vartype response_format: str or str or ~azure.ai.assistants.models.AgentsApiResponseFormatMode - or ~azure.ai.assistants.models.AgentsApiResponseFormat or + of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType + :vartype response_format: str or str or + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat or ~azure.ai.assistants.models.ResponseFormatJsonSchemaType :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Required. :vartype metadata: dict[str, str] - :ivar tool_resources: Override the tools the agent can use for this run. This is useful for + :ivar tool_resources: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. :vartype tool_resources: ~azure.ai.assistants.models.UpdateToolResourcesOptions :ivar parallel_tool_calls: Determines if tools can be executed in parallel within the run. @@ -5419,23 +5393,24 @@ class ThreadRun(_model_base.Model): """The object type, which is always 'thread.run'. Required. Default value is \"thread.run\".""" thread_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the thread associated with this run. Required.""" - agent_id: str = rest_field(name="assistant_id", visibility=["read", "create", "update", "delete", "query"]) - """The ID of the agent associated with the thread this run was performed against. 
Required.""" + assistant_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the assistant associated with the thread this run was performed against. Required.""" status: Union[str, "_models.RunStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The status of the agent thread run. Required. Known values are: \"queued\", \"in_progress\", - \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", and \"expired\".""" + """The status of the assistant thread run. Required. Known values are: \"queued\", + \"in_progress\", \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", + and \"expired\".""" required_action: Optional["_models.RequiredAction"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) - """The details of the action required for the agent thread run to continue.""" + """The details of the action required for the assistant thread run to continue.""" last_error: "_models.RunError" = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The last error, if any, encountered by this agent thread run. Required.""" + """The last error, if any, encountered by this assistant thread run. Required.""" model: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the model to use. Required.""" instructions: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The overridden system instructions used for this agent thread run. Required.""" + """The overridden system instructions used for this assistant thread run. Required.""" tools: List["_models.ToolDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The overridden enabled tools used for this agent thread run. Required.""" + """The overridden enabled tools used for this assistant thread run. 
Required.""" created_at: datetime.datetime = rest_field( visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp" ) @@ -5481,18 +5456,18 @@ class ThreadRun(_model_base.Model): visibility=["read", "create", "update", "delete", "query"] ) """The strategy to use for dropping messages as the context windows moves forward. Required.""" - tool_choice: "_types.AgentsApiToolChoiceOption" = rest_field( + tool_choice: "_types.AssistantsApiToolChoiceOption" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Controls whether or not and which tool is called by the model. Required. Is one of the - following types: str, Union[str, \"_models.AgentsApiToolChoiceOptionMode\"], - AgentsNamedToolChoice""" - response_format: "_types.AgentsApiResponseFormatOption" = rest_field( + following types: str, Union[str, \"_models.AssistantsApiToolChoiceOptionMode\"], + AssistantsNamedToolChoice""" + response_format: "_types.AssistantsApiResponseFormatOption" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """The response format of the tool calls used in this run. Required. Is one of the following - types: str, Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat, - ResponseFormatJsonSchemaType""" + types: str, Union[str, \"_models.AssistantsApiResponseFormatMode\"], + AssistantsApiResponseFormat, ResponseFormatJsonSchemaType""" metadata: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 @@ -5500,8 +5475,8 @@ class ThreadRun(_model_base.Model): tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) - """Override the tools the agent can use for this run. 
This is useful for modifying the behavior on - a per-run basis.""" + """Override the tools the assistant can use for this run. This is useful for modifying the + behavior on a per-run basis.""" parallel_tool_calls: bool = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Determines if tools can be executed in parallel within the run. Required.""" @@ -5511,7 +5486,7 @@ def __init__( # pylint: disable=too-many-locals *, id: str, # pylint: disable=redefined-builtin thread_id: str, - agent_id: str, + assistant_id: str, status: Union[str, "_models.RunStatus"], last_error: "_models.RunError", model: str, @@ -5528,8 +5503,8 @@ def __init__( # pylint: disable=too-many-locals max_prompt_tokens: int, max_completion_tokens: int, truncation_strategy: "_models.TruncationObject", - tool_choice: "_types.AgentsApiToolChoiceOption", - response_format: "_types.AgentsApiResponseFormatOption", + tool_choice: "_types.AssistantsApiToolChoiceOption", + response_format: "_types.AssistantsApiResponseFormatOption", metadata: Dict[str, str], parallel_tool_calls: bool, required_action: Optional["_models.RequiredAction"] = None, @@ -5649,8 +5624,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ToolResources(_model_base.Model): - """A set of resources that are used by the agent's tools. The resources are specific to the type - of + """A set of resources that are used by the assistant's tools. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. @@ -5749,12 +5724,12 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class UpdateCodeInterpreterToolResourceOptions(_model_base.Model): """Request object to update ``code_interpreted`` tool resources. - :ivar file_ids: A list of file IDs to override the current list of the agent. + :ivar file_ids: A list of file IDs to override the current list of the assistant. 
:vartype file_ids: list[str] """ file_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """A list of file IDs to override the current list of the agent.""" + """A list of file IDs to override the current list of the assistant.""" @overload def __init__( @@ -5777,12 +5752,13 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class UpdateFileSearchToolResourceOptions(_model_base.Model): """Request object to update ``file_search`` tool resources. - :ivar vector_store_ids: A list of vector store IDs to override the current list of the agent. + :ivar vector_store_ids: A list of vector store IDs to override the current list of the + assistant. :vartype vector_store_ids: list[str] """ vector_store_ids: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """A list of vector store IDs to override the current list of the agent.""" + """A list of vector store IDs to override the current list of the assistant.""" @overload def __init__( @@ -5803,7 +5779,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class UpdateToolResourcesOptions(_model_base.Model): - """Request object. A set of resources that are used by the agent's tools. The resources are + """Request object. A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of @@ -5813,8 +5789,8 @@ class UpdateToolResourcesOptions(_model_base.Model): ``code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. :vartype code_interpreter: ~azure.ai.assistants.models.UpdateCodeInterpreterToolResourceOptions - :ivar file_search: Overrides the vector store attached to this agent. There can be a maximum of - 1 vector store attached to the agent. + :ivar file_search: Overrides the vector store attached to this assistant. 
There can be a + maximum of 1 vector store attached to the assistant. :vartype file_search: ~azure.ai.assistants.models.UpdateFileSearchToolResourceOptions :ivar azure_ai_search: Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names. @@ -5830,8 +5806,8 @@ class UpdateToolResourcesOptions(_model_base.Model): file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) - """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store - attached to the agent.""" + """Overrides the vector store attached to this assistant. There can be a maximum of 1 vector store + attached to the assistant.""" azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -5863,8 +5839,8 @@ class UploadFileRequest(_model_base.Model): :ivar file: The file data, in bytes. Required. :vartype file: ~azure.ai.assistants._vendor.FileType - :ivar purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and - Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + :ivar purpose: The intended purpose of the uploaded file. Use ``assistants`` for Assistants and + Message files, ``vision`` for Assistants image file inputs, ``batch`` for Batch API, and ``fine-tune`` for Fine-tuning. Required. Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". :vartype purpose: str or ~azure.ai.assistants.models.FilePurpose @@ -5877,8 +5853,8 @@ class UploadFileRequest(_model_base.Model): ) """The file data, in bytes. Required.""" purpose: Union[str, "_models.FilePurpose"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The intended purpose of the uploaded file. 
Use ``assistants`` for Agents and Message files, - ``vision`` for Agents image file inputs, ``batch`` for Batch API, and ``fine-tune`` for + """The intended purpose of the uploaded file. Use ``assistants`` for Assistants and Message files, + ``vision`` for Assistants image file inputs, ``batch`` for Batch API, and ``fine-tune`` for Fine-tuning. Required. Known values are: \"fine-tune\", \"fine-tune-results\", \"assistants\", \"assistants_output\", \"batch\", \"batch_output\", and \"vision\".""" filename: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) diff --git a/sdk/ai/azure-ai-assistants/tsp-location.yaml b/sdk/ai/azure-ai-assistants/tsp-location.yaml index cac157fb46ba..2df370e20752 100644 --- a/sdk/ai/azure-ai-assistants/tsp-location.yaml +++ b/sdk/ai/azure-ai-assistants/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Assistants -commit: 9d38aaedbb624278ee9dc60c1f6ffe11ff38bd30 +commit: 02a554c61c069231f265000c3e94c09d42579ae0 repo: Azure/azure-rest-api-specs additionalDirectories: From 3065db63324cb9760f60edb02fcbfb7606e54dc3 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Wed, 9 Apr 2025 22:00:52 -0700 Subject: [PATCH 3/7] Add samples and tests --- .../azure/ai/assistants/_patch.py | 2606 ++++++++++++- .../ai/assistants/aio/_operations/_patch.py | 44 +- .../azure/ai/assistants/aio/_patch.py | 2608 ++++++++++++- .../azure/ai/assistants/models/_patch.py | 1758 ++++++++- .../azure_ai_assistants_tests.env | 25 + ...tant-5szzLs73bsbQ2k75xUGKv8_image_file.png | Bin 0 -> 162061 bytes ...tant-6Q824dJfHkRzsy46hPatQA_image_file.png | Bin 0 -> 181757 bytes ...tant-WhEPqpcbmXadRJmCzMUeTi_image_file.png | Bin 0 -> 181757 bytes ...Operating_Profit_Transportation_Sector.png | Bin 0 -> 152440 bytes ...tant-WTHWm9BcJfvM6YNr9AY1MJ_image_file.png | Bin 0 -> 151993 bytes ...sample_assistants_azure_functions_async.py | 110 + .../sample_assistants_basics_async.py | 72 + 
...basics_async_with_azure_monitor_tracing.py | 89 + ...tants_basics_async_with_console_tracing.py | 89 + ...ample_assistants_code_interpreter_async.py | 109 + ...tants_code_interpreter_attachment_async.py | 84 + ...eter_attachment_enterprise_search_async.py | 80 + .../sample_assistants_functions_async.py | 109 + .../sample_assistants_json_schema_async.py | 114 + ...ample_assistants_run_with_toolset_async.py | 82 + ...le_assistants_stream_eventhandler_async.py | 97 + ...tream_eventhandler_with_functions_async.py | 138 + ..._stream_eventhandler_with_toolset_async.py | 104 + ...ample_assistants_stream_iteration_async.py | 81 + ...m_with_base_override_eventhandler_async.py | 108 + ...tore_batch_enterprise_file_search_async.py | 116 + ...ts_vector_store_batch_file_search_async.py | 110 + ...ctor_store_enterprise_file_search_async.py | 76 + ...sistants_vector_store_file_search_async.py | 87 + ...tants_with_file_search_attachment_async.py | 82 + .../async_samples/user_async_functions.py | 67 + .../samples/countries.json | 46 + .../azure-ai-assistants/samples/fix_sample.sh | 20 + .../samples/multiassistant/assistant_team.py | 434 +++ .../multiassistant/assistant_team_config.yaml | 43 + .../assistant_trace_configurator.py | 63 + .../sample_assistants_assistant_team.py | 62 + ...tants_assistant_team_custom_team_leader.py | 115 + .../sample_assistants_multi_assistant_team.py | 99 + .../user_functions_with_traces.py | 111 + .../samples/nifty_500_quarterly_results.csv | 502 +++ .../samples/product_info_1.md | 51 + .../sample_assistants_azure_ai_search.py | 132 + .../sample_assistants_azure_functions.py | 103 + .../samples/sample_assistants_basics.py | 86 + ...tants_basics_with_azure_monitor_tracing.py | 82 + ..._assistants_basics_with_console_tracing.py | 81 + ..._with_console_tracing_custom_attributes.py | 109 + .../sample_assistants_bing_grounding.py | 90 + .../sample_assistants_code_interpreter.py | 105 + ...nterpreter_attachment_enterprise_search.py | 82 + 
...ample_assistants_enterprise_file_search.py | 75 + .../samples/sample_assistants_fabric.py | 79 + .../samples/sample_assistants_file_search.py | 96 + .../samples/sample_assistants_functions.py | 102 + ...ts_functions_with_azure_monitor_tracing.py | 149 + ...sistants_functions_with_console_tracing.py | 151 + .../samples/sample_assistants_json_schema.py | 114 + .../samples/sample_assistants_logic_apps.py | 123 + .../samples/sample_assistants_openapi.py | 116 + ...mple_assistants_openapi_connection_auth.py | 97 + .../sample_assistants_run_with_toolset.py | 83 + .../samples/sample_assistants_sharepoint.py | 80 + .../sample_assistants_stream_eventhandler.py | 102 + ...eventhandler_with_azure_monitor_tracing.py | 114 + ...stream_eventhandler_with_bing_grounding.py | 126 + ...tream_eventhandler_with_console_tracing.py | 115 + ...ants_stream_eventhandler_with_functions.py | 137 + ...stants_stream_eventhandler_with_toolset.py | 109 + .../sample_assistants_stream_iteration.py | 86 + ...ts_stream_iteration_with_bing_grounding.py | 116 + ...tants_stream_iteration_with_file_search.py | 105 + ...ssistants_stream_iteration_with_toolset.py | 96 + ..._stream_with_base_override_eventhandler.py | 102 + ...ctor_store_batch_enterprise_file_search.py | 100 + ...sistants_vector_store_batch_file_search.py | 102 + ...ple_assistants_vector_store_file_search.py | 80 + ...s_with_code_interpreter_file_attachment.py | 105 + ...tants_with_enterprise_search_attachment.py | 71 + ..._assistants_with_file_search_attachment.py | 71 + ...ple_assistants_with_resources_in_thread.py | 91 + .../samples/tripadvisor_openapi.json | 1606 ++++++++ .../samples/user_functions.py | 248 ++ .../samples/user_logic_apps.py | 80 + .../samples/weather_openapi.json | 62 + sdk/ai/azure-ai-assistants/tests/README.md | 60 + ...t_datetime_and_weather_stream_response.txt | 255 ++ .../tests/assets/main_stream_response.txt | 45 + .../assets/send_email_stream_response.txt | 213 ++ .../tests/check_sample_name.sh | 20 + 
sdk/ai/azure-ai-assistants/tests/conftest.py | 159 + .../tests/overload_assert_utils.py | 186 + .../tests/test_assistant_mock_overloads.py | 139 + .../tests/test_assistant_models.py | 272 ++ .../tests/test_assistant_models_async.py | 229 ++ .../tests/test_assistants_client.py | 3227 +++++++++++++++++ .../tests/test_assistants_client_async.py | 3092 ++++++++++++++++ .../tests/test_assistants_mock.py | 559 +++ .../tests/test_assistants_mock_async.py | 568 +++ .../tests/test_data/assistant.json | 11 + .../tests/test_data/product_info_1.md | 51 + .../tests/test_data/thread_run.json | 31 + .../tests/test_deserialization.py | 93 + .../tests/test_overload_assert.py | 25 + .../tests/test_vector_store.py | 43 + .../tests/user_functions.py | 228 ++ 106 files changed, 26048 insertions(+), 8 deletions(-) create mode 100644 sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env create mode 100644 sdk/ai/azure-ai-assistants/samples/assistant-5szzLs73bsbQ2k75xUGKv8_image_file.png create mode 100644 sdk/ai/azure-ai-assistants/samples/assistant-6Q824dJfHkRzsy46hPatQA_image_file.png create mode 100644 sdk/ai/azure-ai-assistants/samples/assistant-WhEPqpcbmXadRJmCzMUeTi_image_file.png create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/Operating_Profit_Transportation_Sector.png create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/assistant-WTHWm9BcJfvM6YNr9AY1MJ_image_file.png create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py create 
mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/user_async_functions.py create mode 100644 sdk/ai/azure-ai-assistants/samples/countries.json 
create mode 100644 sdk/ai/azure-ai-assistants/samples/fix_sample.sh create mode 100644 sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py create mode 100644 sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team_config.yaml create mode 100644 sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py create mode 100644 sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py create mode 100644 sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py create mode 100644 sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py create mode 100644 sdk/ai/azure-ai-assistants/samples/multiassistant/user_functions_with_traces.py create mode 100644 sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv create mode 100644 sdk/ai/azure-ai-assistants/samples/product_info_1.md create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py create mode 100644 
sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py create mode 100644 
sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py create mode 100644 sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json create mode 100644 sdk/ai/azure-ai-assistants/samples/user_functions.py create mode 100644 sdk/ai/azure-ai-assistants/samples/user_logic_apps.py create mode 100644 sdk/ai/azure-ai-assistants/samples/weather_openapi.json create mode 100644 sdk/ai/azure-ai-assistants/tests/README.md create mode 100644 sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt create mode 100644 sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt create mode 100644 sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt create mode 100644 sdk/ai/azure-ai-assistants/tests/check_sample_name.sh create mode 100644 sdk/ai/azure-ai-assistants/tests/conftest.py create mode 100644 sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py create mode 100644 sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py create mode 100644 sdk/ai/azure-ai-assistants/tests/test_assistant_models.py create mode 100644 
sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py create mode 100644 sdk/ai/azure-ai-assistants/tests/test_assistants_client.py create mode 100644 sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py create mode 100644 sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py create mode 100644 sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py create mode 100644 sdk/ai/azure-ai-assistants/tests/test_data/assistant.json create mode 100644 sdk/ai/azure-ai-assistants/tests/test_data/product_info_1.md create mode 100644 sdk/ai/azure-ai-assistants/tests/test_data/thread_run.json create mode 100644 sdk/ai/azure-ai-assistants/tests/test_deserialization.py create mode 100644 sdk/ai/azure-ai-assistants/tests/test_overload_assert.py create mode 100644 sdk/ai/azure-ai-assistants/tests/test_vector_store.py create mode 100644 sdk/ai/azure-ai-assistants/tests/user_functions.py diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py index f7dd32510333..de107d2f836f 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -6,9 +6,2611 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List +import io +import logging +import os +import sys +import time +import uuid +from os import PathLike +from pathlib import Path +from typing import ( + IO, + TYPE_CHECKING, + Any, + Dict, + Iterator, + List, + Optional, + Self, + Tuple, + Union, + cast, + overload, +) -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +from azure.core.tracing.decorator import distributed_trace + +from . 
import models as _models +from ._vendor import FileType +from .models._enums import FilePurpose, RunStatus +from ._client import AssistantsClient as AssistantsClientGenerated +from azure.core.credentials import TokenCredential + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from openai import AzureOpenAI + + from . import _types + +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() + +logger = logging.getLogger(__name__) + + +class AssistantsClient(AssistantsClientGenerated): + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._toolset: Dict[str, _models.ToolSet] = {} + + # pylint: disable=arguments-differ + @overload + def create_assistant( # pylint: disable=arguments-differ + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Creates a new assistant. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new assistant. Default value is None. + :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. 
+ :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode + or ~azure.ai.projects.models.AssistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. 
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + # pylint: disable=arguments-differ + @overload + def create_assistant( # pylint: disable=arguments-differ + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Creates a new assistant. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new assistant. Default value is None. + :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. 
+ :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode + or ~azure.ai.projects.models.AssistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_assistant(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Assistant: + """Creates a new assistant. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. 
The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_assistant(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Assistant: + """Creates a new assistant. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_assistant( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.Assistant: + """ + Creates a new assistant with various configurations, delegating to the generated operations. + + :param body: JSON or IO[bytes]. Required if `model` is not provided. + :type body: Union[JSON, IO[bytes]] + :keyword model: The ID of the model to use. Required if `body` is not provided. + :paramtype model: str + :keyword name: The name of the new assistant. + :paramtype name: Optional[str] + :keyword description: A description for the new assistant. + :paramtype description: Optional[str] + :keyword instructions: System instructions for the assistant. 
+ :paramtype instructions: Optional[str] + :keyword tools: List of tools definitions for the assistant. + :paramtype tools: Optional[List[_models.ToolDefinition]] + :keyword tool_resources: Resources used by the assistant's tools. + :paramtype tool_resources: Optional[_models.ToolResources] + :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). + :paramtype toolset: Optional[_models.ToolSet] + :keyword temperature: Sampling temperature for generating assistant responses. + :paramtype temperature: Optional[float] + :keyword top_p: Nucleus sampling parameter. + :paramtype top_p: Optional[float] + :keyword response_format: Response format for tool calls. + :paramtype response_format: Optional["_types.AssistantsApiResponseFormatOption"] + :keyword metadata: Key/value pairs for storing additional information. + :paramtype metadata: Optional[Dict[str, str]] + :keyword content_type: Content type of the body. + :paramtype content_type: str + :return: An Assistant object. + :rtype: _models.Assistant + :raises: HttpResponseError for HTTP errors. 
+ """ + + self._validate_tools_and_tool_resources(tools, tool_resources) + + if body is not _Unset: + if isinstance(body, io.IOBase): + return super().create_assistant(body=body, content_type=content_type, **kwargs) + return super().create_assistant(body=body, **kwargs) + + if toolset is not None: + tools = toolset.definitions + tool_resources = toolset.resources + + new_assistant = super().create_assistant( + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + if toolset is not None: + self._toolset[new_assistant.id] = toolset + return new_assistant + + # pylint: disable=arguments-differ + @overload + def update_assistant( # pylint: disable=arguments-differ + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. 
Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode + or ~azure.ai.projects.models.AssistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. 
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + # pylint: disable=arguments-differ + @overload + def update_assistant( # pylint: disable=arguments-differ + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default value + is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. 
+ :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode + or ~azure.ai.projects.models.AssistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_assistant( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_assistant( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Assistant. The Assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_assistant( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. 
+ :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. 
         Is one of
         the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
         AssistantsApiResponseFormat Default value is None.
        :paramtype response_format: str or str or
         ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: Assistant. The Assistant is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.Assistant
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        self._validate_tools_and_tool_resources(tools, tool_resources)

        # Raw JSON / binary-body overloads bypass the keyword-argument path entirely.
        if body is not _Unset:
            if isinstance(body, io.IOBase):
                return super().update_assistant(body=body, content_type=content_type, **kwargs)
            return super().update_assistant(body=body, **kwargs)

        # A toolset supersedes explicit `tools`/`tool_resources` and is cached by assistant id
        # so later runs can auto-execute its function tools.
        if toolset is not None:
            self._toolset[assistant_id] = toolset
            tools = toolset.definitions
            tool_resources = toolset.resources

        return super().update_assistant(
            assistant_id=assistant_id,
            model=model,
            name=name,
            description=description,
            instructions=instructions,
            tools=tools,
            tool_resources=tool_resources,
            temperature=temperature,
            top_p=top_p,
            response_format=response_format,
            metadata=metadata,
            **kwargs,
        )

    def _validate_tools_and_tool_resources(
        self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]
    ) -> None:
        """Check that every supplied tool resource has a matching tool definition.

        :param tools: The tool definitions being sent with the request, or None.
        :type tools: list[~azure.ai.assistants.models.ToolDefinition] or None
        :param tool_resources: The tool resources being sent with the request, or None.
        :type tool_resources: ~azure.ai.assistants.models.ToolResources or None
        :raises ValueError: If ``tool_resources.file_search`` or
         ``tool_resources.code_interpreter`` is set without a corresponding tool definition.
        """
        # Nothing to cross-check when no resources were supplied.
        if tool_resources is None:
            return
        if tools is None:
            tools = []

        if tool_resources.file_search is not None and not any(
            isinstance(tool, _models.FileSearchToolDefinition) for tool in tools
        ):
            raise ValueError(
                "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided"
            )
        if tool_resources.code_interpreter is not None and not any(
            isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools
        ):
            raise ValueError(
                "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided"
            )

    # pylint: disable=arguments-differ
    @overload
    def create_run(  # pylint: disable=arguments-differ
        self,
        thread_id: str,
        *,
        assistant_id: str,
        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
        content_type: str = "application/json",
        model: Optional[str] = None,
        instructions: Optional[str] = None,
        additional_instructions: Optional[str] = None,
        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
        tools: Optional[List[_models.ToolDefinition]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_prompt_tokens: Optional[int] = None,
        max_completion_tokens: Optional[int] = None,
        truncation_strategy: Optional[_models.TruncationObject] = None,
        tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
        parallel_tool_calls: Optional[bool] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> _models.ThreadRun:
        """Creates a new run for an assistant thread.

        :param thread_id: Required.
        :type thread_id: str
        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
        :paramtype assistant_id: str
        :keyword include: A list of additional fields to include in the response.
         Currently the only supported value is
         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
         content. Default value is None.
        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword model: The overridden model name that the assistant should use to run the thread.
         Default value is None.
        :paramtype model: str
        :keyword instructions: The overridden system instructions that the assistant should use to run
         the thread. Default value is None.
        :paramtype instructions: str
        :keyword additional_instructions: Additional instructions to append at the end of the
         instructions for the run. This is useful for modifying the behavior
         on a per-run basis without overriding other instructions. Default value is None.
        :paramtype additional_instructions: str
        :keyword additional_messages: Adds additional messages to the thread before creating the run.
         Default value is None.
        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
        :keyword tools: The overridden list of enabled tools that the assistant should use to run the
         thread. Default value is None.
        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
         will make the output
         more random, while lower values like 0.2 will make it more focused and deterministic. Default
         value is None.
        :paramtype temperature: float
        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
         model
         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
         comprising the top 10% probability mass are considered.

         We generally recommend altering this or temperature but not both. Default value is None.
        :paramtype top_p: float
        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
         course of the run. The run will make a best effort to use only
         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
         the number of prompt tokens specified,
         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
         value is None.
        :paramtype max_prompt_tokens: int
        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
         the course of the run. The run will make a best effort
         to use only the number of completion tokens specified, across multiple turns of the run. If
         the run exceeds the number of
         completion tokens specified, the run will end with status ``incomplete``. See
         ``incomplete_details`` for more info. Default value is None.
        :paramtype max_completion_tokens: int
        :keyword truncation_strategy: The strategy to use for dropping messages as the context windows
         moves forward. Default value is None.
        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
         AssistantsNamedToolChoice Default value is None.
        :paramtype tool_choice: str or str or
         ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
         ~azure.ai.assistants.models.AssistantsNamedToolChoice
        :keyword response_format: Specifies the format that the model must output. Is one of the
         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
         AssistantsApiResponseFormat Default value is None.
        :paramtype response_format: str or str or
         ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
        :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
         Default value is None.
        :paramtype parallel_tool_calls: bool
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create_run(
        self,
        thread_id: str,
        body: JSON,
        *,
        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
        content_type: str = "application/json",
        **kwargs: Any,
    ) -> _models.ThreadRun:
        """Creates a new run for an assistant thread.

        :param thread_id: Required.
        :type thread_id: str
        :param body: Required.
        :type body: JSON
        :keyword include: A list of additional fields to include in the response.
         Currently the only supported value is
         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
         content. Default value is None.
        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create_run(
        self,
        thread_id: str,
        body: IO[bytes],
        *,
        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
        content_type: str = "application/json",
        **kwargs: Any,
    ) -> _models.ThreadRun:
        """Creates a new run for an assistant thread.

        :param thread_id: Required.
        :type thread_id: str
        :param body: Required.
        :type body: IO[bytes]
        :keyword include: A list of additional fields to include in the response.
         Currently the only supported value is
         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
         content. Default value is None.
        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def create_run(
        self,
        thread_id: str,
        body: Union[JSON, IO[bytes]] = _Unset,
        *,
        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
        assistant_id: str = _Unset,
        model: Optional[str] = None,
        instructions: Optional[str] = None,
        additional_instructions: Optional[str] = None,
        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
        tools: Optional[List[_models.ToolDefinition]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_prompt_tokens: Optional[int] = None,
        max_completion_tokens: Optional[int] = None,
        truncation_strategy: Optional[_models.TruncationObject] = None,
        tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None,
        response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None,
        parallel_tool_calls: Optional[bool] = None,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> _models.ThreadRun:
        """Creates a new run for an assistant thread.

        :param thread_id: Required.
        :type thread_id: str
        :param body: Is either a JSON type or a IO[bytes] type. Required.
        :type body: JSON or IO[bytes]
        :keyword include: A list of additional fields to include in the response.
         Currently the only supported value is
         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
         content. Default value is None.
        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
        :paramtype assistant_id: str
        :keyword model: The overridden model name that the assistant should use to run the thread.
         Default value is None.
        :paramtype model: str
        :keyword instructions: The overridden system instructions that the assistant should use to run
         the thread. Default value is None.
        :paramtype instructions: str
        :keyword additional_instructions: Additional instructions to append at the end of the
         instructions for the run. This is useful for modifying the behavior
         on a per-run basis without overriding other instructions. Default value is None.
        :paramtype additional_instructions: str
        :keyword additional_messages: Adds additional messages to the thread before creating the run.
         Default value is None.
        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
        :keyword tools: The overridden list of enabled tools that the assistant should use to run the
         thread. Default value is None.
        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
         will make the output
         more random, while lower values like 0.2 will make it more focused and deterministic. Default
         value is None.
        :paramtype temperature: float
        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
         model
         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
         comprising the top 10% probability mass are considered.

         We generally recommend altering this or temperature but not both. Default value is None.
        :paramtype top_p: float
        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
         course of the run. The run will make a best effort to use only
         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
         the number of prompt tokens specified,
         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
         value is None.
        :paramtype max_prompt_tokens: int
        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
         the course of the run. The run will make a best effort
         to use only the number of completion tokens specified, across multiple turns of the run. If
         the run exceeds the number of
         completion tokens specified, the run will end with status ``incomplete``. See
         ``incomplete_details`` for more info. Default value is None.
        :paramtype max_completion_tokens: int
        :keyword truncation_strategy: The strategy to use for dropping messages as the context windows
         moves forward. Default value is None.
        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
         AssistantsNamedToolChoice Default value is None.
        :paramtype tool_choice: str or str or
         ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
         ~azure.ai.assistants.models.AssistantsNamedToolChoice
        :keyword response_format: Specifies the format that the model must output. Is one of the
         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
         AssistantsApiResponseFormat Default value is None.
        :paramtype response_format: str or str or
         ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
         or ~azure.ai.assistants.models.AssistantsApiResponseFormat
        :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
         Default value is None.
        :paramtype parallel_tool_calls: bool
        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
         for storing additional information about that object in a structured format. Keys may be up to
         64 characters in length and values may be up to 512 characters in length. Default value is
         None.
        :paramtype metadata: dict[str, str]
        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
        :rtype: ~azure.ai.assistants.models.ThreadRun
        :raises ~azure.core.exceptions.HttpResponseError:
        """

        # Dispatch on which overload was actually used: dict body, keyword args, or binary body.
        if isinstance(body, dict):  # Handle overload with JSON body.
            content_type = kwargs.get("content_type", "application/json")
            response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs)

        elif assistant_id is not _Unset:  # Handle overload with keyword arguments.
            response = super().create_run(
                thread_id,
                include=include,
                assistant_id=assistant_id,
                model=model,
                instructions=instructions,
                additional_instructions=additional_instructions,
                additional_messages=additional_messages,
                tools=tools,
                # Both generated names for the streaming flag are forced off: this method
                # is the non-streaming variant (see create_stream for streaming).
                stream_parameter=False,
                stream=False,
                temperature=temperature,
                top_p=top_p,
                max_prompt_tokens=max_prompt_tokens,
                max_completion_tokens=max_completion_tokens,
                truncation_strategy=truncation_strategy,
                tool_choice=tool_choice,
                response_format=response_format,
                parallel_tool_calls=parallel_tool_calls,
                metadata=metadata,
                **kwargs,
            )

        elif isinstance(body, io.IOBase):  # Handle overload with binary body.
+ content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + return response + + @distributed_trace + def create_and_process_run( + self, + thread_id: str, + *, + assistant_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: int = 1, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread and processes the run. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword model: The overridden model name that the assistant should use to run the thread. + Default value is None. 
+ :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :keyword toolset: The Collection of tools and resources (alternative to `tools` and + `tool_resources`). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. 
+ :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.projects.models.AssistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or + ~azure.ai.projects.models.AssistantsApiResponseFormatMode or + ~azure.ai.projects.models.AssistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. 
+ :paramtype metadata: dict[str, str] + :keyword sleep_interval: The time in seconds to wait between polling the service for run status. + Default value is 1. + :paramtype sleep_interval: int + :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.projects.models.AssistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + # Create and initiate the run with additional parameters + run = self.create_run( + thread_id=thread_id, + include=include, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=toolset.definitions if toolset else None, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + parallel_tool_calls=parallel_tool_calls, + metadata=metadata, + **kwargs, + ) + + # Monitor and process the run status + while run.status in [ + RunStatus.QUEUED, + RunStatus.IN_PROGRESS, + RunStatus.REQUIRES_ACTION, + ]: + time.sleep(sleep_interval) + run = self.get_run(thread_id=thread_id, run_id=run.id) + + if run.status == RunStatus.REQUIRES_ACTION and isinstance( + run.required_action, _models.SubmitToolOutputsAction + ): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + logging.warning("No tool calls provided - cancelling run") + self.cancel_run(thread_id=thread_id, run_id=run.id) + break + # We need tool set only if we are executing local function. In case if + # the tool is azure_function we just need to wait when it will be finished. 
+ if any(tool_call.type == "function" for tool_call in tool_calls): + toolset = toolset or self._toolset.get(run.assistant_id) + if toolset is not None: + tool_outputs = toolset.execute_tool_calls(tool_calls) + else: + raise ValueError("Toolset is not available in the client.") + + logging.info("Tool outputs: %s", tool_outputs) + if tool_outputs: + self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs) + + logging.info("Current run status: %s", run.status) + + return run + + @overload + def create_stream( + self, + thread_id: str, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: None = None, + **kwargs: Any, + ) -> _models.AssistantRunStream[_models.AssistantEventHandler]: + """Creates a new stream for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. 
+ :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. 
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+ :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+ the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+ AssistantsNamedToolChoice Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+ ~azure.ai.assistants.models.AssistantsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+ AssistantsApiResponseFormat Default value is None.
+ :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode + or ~azure.ai.projects.models.AssistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: None + :paramtype event_handler: None. _models.AssistantEventHandler will be applied as default. + :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.projects.models.AssistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_stream( + self, + thread_id: str, + *, + assistant_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: _models.BaseAssistantEventHandlerT, + **kwargs: Any, + ) -> 
_models.AssistantRunStream[_models.BaseAssistantEventHandlerT]: + """Creates a new stream for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. 
Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + AssistantsNamedToolChoice Default value is None. 
+ :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.projects.models.AssistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode + or ~azure.ai.projects.models.AssistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.projects.models.AssistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_stream( + self, + thread_id: str, + body: Union[JSON, IO[bytes]], + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + event_handler: None = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AssistantRunStream[_models.AssistantEventHandler]: + """Creates a new run for an assistant thread. + + Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. 
+ :type body: IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword event_handler: None + :paramtype event_handler: None. _models.AssistantEventHandler will be applied as default. + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.projects.models.AssistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_stream( + self, + thread_id: str, + body: Union[JSON, IO[bytes]], + *, + event_handler: _models.BaseAssistantEventHandlerT, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.AssistantRunStream[_models.BaseAssistantEventHandlerT]: + """Creates a new run for an assistant thread. + + Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. 
+ :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.projects.models.AssistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_stream( # pyright: ignore[reportInconsistentOverload] + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.BaseAssistantEventHandlerT] = None, + **kwargs: Any, + ) -> _models.AssistantRunStream[_models.BaseAssistantEventHandlerT]: + """Creates a new run for an assistant thread. + + Terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword include: A list of additional fields to include in the response. 
+ Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. 
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+ :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+ the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+ AssistantsNamedToolChoice Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+ ~azure.ai.assistants.models.AssistantsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+ AssistantsApiResponseFormat Default value is None.
+ :paramtype response_format: str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode
+ or ~azure.ai.assistants.models.AssistantsApiResponseFormat
+ :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
+ Default value is None.
+ :paramtype parallel_tool_calls: bool
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler
+ :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming.
+ :rtype: ~azure.ai.assistants.models.AssistantRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if isinstance(body, dict): # Handle overload with JSON body.
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs)
+
+ elif assistant_id is not _Unset: # Handle overload with keyword arguments.
+ response = super().create_run( + thread_id, + include=include, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=True, + stream=True, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + parallel_tool_calls=parallel_tool_calls, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) + + if not event_handler: + event_handler = cast(_models.BaseAssistantEventHandlerT, _models.AssistantEventHandler()) + return _models.AssistantRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + + # pylint: disable=arguments-differ + @overload + def submit_tool_outputs_to_run( # pylint: disable=arguments-differ + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: Optional[_models.AssistantEventHandler] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. 
+ :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. 
+ :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, + run_id, + tool_outputs=tool_outputs, + stream_parameter=False, + stream=False, + **kwargs, + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + return response + + @overload + def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]], + *, + event_handler: _models.BaseAssistantEventHandler, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword event_handler: The event handler to use for processing events during the run. + :paramtype event_handler: ~azure.ai.projects.models.BaseAssistantEventHandler + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: _models.BaseAssistantEventHandler, + **kwargs: Any, + ) -> None: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. + :paramtype event_handler: ~azure.ai.projects.models.BaseAssistantEventHandler + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload] + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + event_handler: _models.BaseAssistantEventHandler, + **kwargs: Any, + ) -> None: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. 
+ :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword event_handler: The event handler to use for processing events during the run. + :paramtype event_handler: ~azure.ai.projects.models.BaseAssistantEventHandler + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # Cast the response to Iterator[bytes] for type correctness + response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) + + event_handler.initialize(response_iterator, self._handle_submit_tool_outputs) + + def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: _models.BaseAssistantEventHandler) -> None: + if isinstance(run.required_action, _models.SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + logger.debug("No tool calls to execute.") + return + + # We need tool set only if we are executing local function. In case if + # the tool is azure_function we just need to wait when it will be finished. 
+ if any(tool_call.type == "function" for tool_call in tool_calls): + toolset = self._toolset.get(run.assistant_id) + if toolset: + tool_outputs = toolset.execute_tool_calls(tool_calls) + else: + logger.debug("Toolset is not available in the client.") + return + + logger.info("Tool outputs: %s", tool_outputs) + if tool_outputs: + self.submit_tool_outputs_to_stream( + thread_id=run.thread_id, + run_id=run.id, + tool_outputs=tool_outputs, + event_handler=event_handler, + ) + + # pylint: disable=arguments-differ + @overload + def upload_file( # pylint: disable=arguments-differ + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + # pylint: disable=arguments-differ + @overload + def upload_file( # pylint: disable=arguments-differ + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. + :paramtype file: ~azure.ai.projects._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :param body: Required.
+        :type body: JSON
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def upload_file(
+        self,
+        body: Optional[Union[_models.UploadFileRequest, JSON]] = None,
+        *,
+        file: Optional[FileType] = None,
+        file_path: Optional[str] = None,
+        purpose: Union[str, _models.FilePurpose, None] = None,
+        filename: Optional[str] = None,
+        **kwargs: Any,
+    ) -> _models.OpenAIFile:
+        """
+        Uploads a file for use by other operations, delegating to the generated operations.
+
+        :param body: JSON. Required if `file` and `purpose` are not provided.
+        :type body: Optional[JSON]
+        :keyword file: File content. Required if `body` and `purpose` are not provided.
+        :paramtype file: Optional[FileType]
+        :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided.
+        :paramtype file_path: Optional[str]
+        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+            "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided.
+        :paramtype purpose: Union[str, _models.FilePurpose, None]
+        :keyword filename: The name of the file.
+        :paramtype filename: Optional[str]
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: _models.OpenAIFile
+        :raises FileNotFoundError: If the file_path is invalid.
+        :raises IOError: If there are issues with reading the file.
+        :raises: HttpResponseError for HTTP errors. 
+ """ + if body is not None: + return super().upload_file(body=body, **kwargs) + + if isinstance(purpose, FilePurpose): + purpose = purpose.value + + if file is not None and purpose is not None: + file_body = _models.UploadFileRequest( + file=file, + purpose=purpose, + filename=filename + ) + return super().upload_file(body=file_body, **kwargs) + + if file_path is not None and purpose is not None: + if not os.path.isfile(file_path): + raise FileNotFoundError(f"The file path provided does not exist: {file_path}") + + try: + with open(file_path, "rb") as f: + content = f.read() + + # Determine filename and create correct FileType + base_filename = filename or os.path.basename(file_path) + file_content: FileType = (base_filename, content) + file_body = _models.UploadFileRequest( + file=file_content, + purpose=purpose, + filename=filename + ) + + return super().upload_file(body=file_body, **kwargs) + except IOError as e: + raise IOError(f"Unable to read file: {file_path}") from e + + raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") + + @overload + def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file_and_poll( + self, + *, + file: FileType, + purpose: Union[str, _models.FilePurpose], + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. 
+ :paramtype file: ~azure.ai.projects._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file_and_poll( + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def upload_file_and_poll( + self, + body: Optional[JSON] = None, + *, + file: Optional[FileType] = None, + file_path: Optional[str] = None, + purpose: Union[str, _models.FilePurpose, None] = None, + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. 
Required if `file` and `purpose` are not provided. + :type body: Optional[JSON] + :keyword file: File content. Required if `body` and `purpose` are not provided. + :paramtype file: Optional[FileType] + :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. + :paramtype file_path: Optional[str] + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :paramtype purpose: Union[str, _models.FilePurpose, None] + :keyword filename: The name of the file. + :paramtype filename: Optional[str] + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: _models.OpenAIFile + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. + """ + if body is not None: + uploaded_file = self.upload_file(body=body, **kwargs) + elif file is not None and purpose is not None: + uploaded_file = self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + elif file_path is not None and purpose is not None: + uploaded_file = self.upload_file(file_path=file_path, purpose=purpose, **kwargs) + else: + raise ValueError( + "Invalid parameters for upload_file_and_poll. Please provide either 'body', " + "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
+ ) + + while uploaded_file.status in ["uploaded", "pending", "running"]: + time.sleep(sleep_interval) + uploaded_file = self.get_file(uploaded_file.id) + + return uploaded_file + + @overload + def create_vector_store_and_poll( + self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword data_sources: List of Azure assets. 
Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_and_poll( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. 
+ :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if body is not _Unset: + if isinstance(body, dict): + vector_store = super().create_vector_store( + body=body, content_type=content_type or "application/json", **kwargs + ) + elif isinstance(body, io.IOBase): + vector_store = super().create_vector_store(body=body, content_type=content_type, **kwargs) + else: + raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).") + else: + store_configuration = None + if data_sources: + store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) + + vector_store = super().create_vector_store( + file_ids=file_ids, + store_configuration=store_configuration, + name=name, + expires_after=expires_after, + chunking_strategy=chunking_strategy, + metadata=metadata, + **kwargs, + ) + + while vector_store.status == "in_progress": + time.sleep(sleep_interval) + vector_store = super().get_vector_store(vector_store.id) + + return vector_store + + @overload + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. 
+ + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. 
+ :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. 
+        :paramtype file_ids: list[str]
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+            use the auto strategy. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+        :keyword content_type: Body parameter content-type. Defaults to "application/json".
+        :paramtype content_type: str
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+            is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        if body is not _Unset:
+            if isinstance(body, dict):
+                vector_store_file_batch = super().create_vector_store_file_batch(
+                    vector_store_id=vector_store_id,
+                    body=body,
+                    content_type=content_type or "application/json",
+                    **kwargs,
+                )
+            elif isinstance(body, io.IOBase):
+                vector_store_file_batch = super().create_vector_store_file_batch(
+                    vector_store_id=vector_store_id,
+                    body=body,
+                    content_type=content_type,
+                    **kwargs,
+                )
+            else:
+                raise ValueError("Invalid type for 'body'. 
Must be a dict (JSON) or file-like (IO[bytes]).") + else: + vector_store_file_batch = super().create_vector_store_file_batch( + vector_store_id=vector_store_id, + file_ids=file_ids, + data_sources=data_sources, + chunking_strategy=chunking_strategy, + **kwargs, + ) + + while vector_store_file_batch.status == "in_progress": + time.sleep(sleep_interval) + vector_store_file_batch = super().get_vector_store_file_batch( + vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id + ) + + return vector_store_file_batch + + @distributed_trace + def get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: + """ + Returns file content as byte stream for given file_id. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: An iterator that yields bytes from the file content. + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. + """ + kwargs["stream"] = True + response = super()._get_file_content(file_id, **kwargs) + return cast(Iterator[bytes], response) + + @distributed_trace + def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: + """ + Synchronously saves file content retrieved using a file identifier to the specified local directory. + + :param file_id: The unique identifier for the file to retrieve. + :type file_id: str + :param file_name: The name of the file to be saved. + :type file_name: str + :param target_dir: The directory where the file should be saved. Defaults to the current working directory. + :type target_dir: Optional[Union[str, Path]] + :raises ValueError: If the target path is not a directory or the file name is invalid. + :raises RuntimeError: If file content retrieval fails or no content is found. + :raises TypeError: If retrieved chunks are not bytes-like objects. + :raises IOError: If writing to the file fails. 
+ """ + try: + # Determine and validate the target directory + path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() + path.mkdir(parents=True, exist_ok=True) + if not path.is_dir(): + raise ValueError(f"The target path '{path}' is not a directory.") + + # Sanitize and validate the file name + sanitized_file_name = Path(file_name).name + if not sanitized_file_name: + raise ValueError("The provided file name is invalid.") + + # Retrieve the file content + file_content_stream = self.get_file_content(file_id) + if not file_content_stream: + raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") + + target_file_path = path / sanitized_file_name + + # Write the file content to disk + with target_file_path.open("wb") as file: + for chunk in file_content_stream: + if isinstance(chunk, (bytes, bytearray)): + file.write(chunk) + else: + raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") + + logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) + + except (ValueError, RuntimeError, TypeError, IOError) as e: + logger.error("An error occurred in save_file: %s", e) + raise + + @overload + def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_and_poll( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. 
+ + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. 
+        :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+            is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        if body is not _Unset:
+            if isinstance(body, dict):
+                vector_store_file = super().create_vector_store_file(
+                    vector_store_id=vector_store_id,
+                    body=body,
+                    content_type=content_type or "application/json",
+                    **kwargs,
+                )
+            elif isinstance(body, io.IOBase):
+                vector_store_file = super().create_vector_store_file(
+                    vector_store_id=vector_store_id,
+                    body=body,
+                    content_type=content_type,
+                    **kwargs,
+                )
+            else:
+                raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).")
+        else:
+            vector_store_file = super().create_vector_store_file(
+                vector_store_id=vector_store_id,
+                file_id=file_id,
+                data_source=data_source,
+                chunking_strategy=chunking_strategy,
+                **kwargs,
+            )
+
+        while vector_store_file.status == "in_progress":
+            time.sleep(sleep_interval)
+            vector_store_file = super().get_vector_store_file(
+                vector_store_id=vector_store_id, file_id=vector_store_file.id
+            )
+
+        return vector_store_file
+
+    @classmethod
+    def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> Self:
+        """
+        Create a synchronous AssistantsClient from a connection string.
+
+        :param str conn_str: The connection string, copied from your AI Foundry project.
+        :param TokenCredential credential: Credential used to authenticate requests to the service.
+        :return: An AssistantsClient instance. 
+ :rtype: AssistantsClient + """ + if not conn_str: + raise ValueError("Connection string is required") + parts = conn_str.split(";") + if len(parts) != 4: + raise ValueError("Invalid connection string format") + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + return cls( + endpoint, + subscription_id, + resource_group_name, + project_name, + credential, + **kwargs, + ) + + def upload_file_to_azure_blob(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: + """Upload a file to the Azure AI Foundry project. + This method required *azure-ai-ml* to be installed. + + :param file_path: The path to the file to upload. + :type file_path: Union[str, Path, PathLike] + :return: The tuple, containing asset id and asset URI of uploaded file. + :rtype: Tuple[str, str] + """ + try: + from azure.ai.ml import MLClient # type: ignore + from azure.ai.ml.constants import AssetTypes # type: ignore + from azure.ai.ml.entities import Data # type: ignore + except ImportError as e: + raise ImportError( + "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" + ) from e + + data = Data( + path=str(file_path), + type=AssetTypes.URI_FILE, + name=str(uuid.uuid4()), # generating random name + is_anonymous=True, + version="1", + ) + # We have to wrap async method get_token of + + ml_client = MLClient( + self._config.credential, + self._config.subscription_id, + self._config.resource_group_name, + self._config.project_name, + ) + + data_asset = ml_client.data.create_or_update(data) + + return data_asset.id, data_asset.path + + @distributed_trace + def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: + """Deletes an assistant. + + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: AssistantDeletionStatus. 
class _SyncCredentialWrapper(TokenCredential):
    """
    Expose an async credential through the synchronous
    :class:`~azure.core.credentials.TokenCredential` interface.

    :param async_credential: The async credential to be synchronized.
    :type async_credential: ~azure.core.credentials_async.AsyncTokenCredential
    """

    def __init__(self, async_credential: "AsyncTokenCredential"):
        self._async_credential = async_credential

    def get_token(
        self,
        *scopes: str,
        claims: Optional[str] = None,
        tenant_id: Optional[str] = None,
        enable_cae: bool = False,
        **kwargs: Any,
    ) -> "AccessToken":
        """Synchronously fetch a token by running the async credential on a worker thread.

        The coroutine is executed via :func:`asyncio.run` on a dedicated thread because
        the calling thread may already have a running event loop, where ``asyncio.run``
        would raise.
        """
        # Use the executor as a context manager so its worker thread is joined and
        # released on every call, instead of leaking one executor per invocation.
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            return executor.submit(
                asyncio.run,
                self._async_credential.get_token(
                    *scopes,
                    claims=claims,
                    tenant_id=tenant_id,
                    enable_cae=enable_cae,
                    **kwargs,
                ),
            ).result()
class AssistantsClient(AssistantsClientGenerated):
    """Asynchronous client for assistants, extending the generated client with
    toolset tracking so tool calls can be auto-executed during runs."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Maps assistant id -> its AsyncToolSet, populated by create/update_assistant.
        self._toolset: Dict[str, _models.AsyncToolSet] = {}
+ :paramtype content_type: str + :keyword name: The name of the new assistant. Default value is None. + :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new assistant. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + AssistantsApiResponseFormat Default value is None. 
+ :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode + or ~azure.ai.projects.models.assistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: assistant. The assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + # pylint: disable=arguments-differ + @overload + async def create_assistant( # pylint: disable=arguments-differ + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.AsyncToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Creates a new assistant. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new assistant. Default value is None. + :paramtype name: str + :keyword description: The description of the new assistant. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new assistant to use. Default value is None. 
+ :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + assistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode + or ~azure.ai.projects.models.assistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: assistant. The assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_assistant(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Assistant: + """Creates a new assistant. 
+ + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: assistant. The assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_assistant( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Creates a new assistant. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: assistant. The assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_assistant( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.AsyncToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.Assistant: + """ + Creates a new assistant with various configurations, delegating to the generated operations. + + :param body: JSON or IO[bytes]. Required if `model` is not provided. + :type body: Union[JSON, IO[bytes]] + :keyword model: The ID of the model to use. Required if `body` is not provided. 
+ :paramtype model: str + :keyword name: The name of the new assistant. + :paramtype name: Optional[str] + :keyword description: A description for the new assistant. + :paramtype description: Optional[str] + :keyword instructions: System instructions for the assistant. + :paramtype instructions: Optional[str] + :keyword tools: List of tools definitions for the assistant. + :paramtype tools: Optional[List[_models.ToolDefinition]] + :keyword tool_resources: Resources used by the assistant's tools. + :paramtype tool_resources: Optional[_models.ToolResources] + :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). + :paramtype toolset: Optional[_models.AsyncToolSet] + :keyword temperature: Sampling temperature for generating assistant responses. + :paramtype temperature: Optional[float] + :keyword top_p: Nucleus sampling parameter. + :paramtype top_p: Optional[float] + :keyword response_format: Response format for tool calls. + :paramtype response_format: Optional["_types.assistantsApiResponseFormatOption"] + :keyword metadata: Key/value pairs for storing additional information. + :paramtype metadata: Optional[Dict[str, str]] + :keyword content_type: Content type of the body. + :paramtype content_type: str + :return: An assistant object. + :rtype: _models.Assistant + :raises: HttpResponseError for HTTP errors. 
+ """ + if body is not _Unset: + if isinstance(body, io.IOBase): + return await super().create_assistant(body=body, content_type=content_type, **kwargs) + return await super().create_assistant(body=body, **kwargs) + + if toolset is not None: + tools = toolset.definitions + tool_resources = toolset.resources + + new_assistant = await super().create_assistant( + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + if toolset is not None: + self._toolset[new_assistant.id] = toolset + return new_assistant + + # pylint: disable=arguments-differ + @overload + async def update_assistant( # pylint: disable=arguments-differ + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. 
+ :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + assistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode + or ~azure.ai.projects.models.assistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. 
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: assistant. The assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + # pylint: disable=arguments-differ + @overload + async def update_assistant( # pylint: disable=arguments-differ + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.AsyncToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default value + is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. 
+ :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + assistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode + or ~azure.ai.projects.models.assistantsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: assistant. The assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_assistant( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: assistant. The assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_assistant( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: assistant. The assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_assistant( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.AsyncToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Assistant: + """Modifies an existing assistant. + + :param assistant_id: The ID of the assistant to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. 
+ :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the assistant to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the assistant to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new assistant to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the assistant. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. 
+ :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this assistant. Is one of + the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + assistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode + or ~azure.ai.projects.models.assistantsApiResponseFormat + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: assistant. The assistant is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.assistant + :raises ~azure.core.exceptions.HttpResponseError: + """ + self._validate_tools_and_tool_resources(tools, tool_resources) + + if body is not _Unset: + if isinstance(body, io.IOBase): + return await super().update_assistant(body=body, content_type=content_type, **kwargs) + return await super().update_assistant(body=body, **kwargs) + + if toolset is not None: + self._toolset[assistant_id] = toolset + tools = toolset.definitions + tool_resources = toolset.resources + + return await super().update_assistant( + assistant_id=assistant_id, + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + def _validate_tools_and_tool_resources( + self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources] + ): + if tool_resources is None: 
+ return + if tools is None: + tools = [] + + if tool_resources.file_search is not None and not any( + isinstance(tool, _models.FileSearchToolDefinition) for tool in tools + ): + raise ValueError( + "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" + ) + if tool_resources.code_interpreter is not None and not any( + isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools + ): + raise ValueError( + "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" + ) + + # pylint: disable=arguments-differ + @overload + async def create_run( # pylint: disable=arguments-differ + self, + thread_id: str, + *, + assistant_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. 
+ Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. 
Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + assistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.projects.models.assistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + assistantsApiResponseFormat Default value is None. 
+        :paramtype response_format: str or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_run(
+        self,
+        thread_id: str,
+        body: JSON,
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_run(
+        self,
+        thread_id: str,
+        body: IO[bytes],
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create_run(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        assistant_id: str = _Unset,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.assistantsApiResponseFormatOption"] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or a IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword model: The overridden model name that the assistant should use to run the thread.
+         Default value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the assistant should use to run
+         the thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
+        :keyword tools: The overridden list of enabled tools that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context windows
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+         AssistantsNamedToolChoice Default value is None.
+        :paramtype tool_choice: str or
+         ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+         ~azure.ai.assistants.models.AssistantsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat Default value is None.
+        :paramtype response_format: str or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.assistants.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        # Dispatch between the three overload shapes: explicit JSON dict body,
+        # keyword-argument form (assistant_id given), or a binary stream body.
+        if isinstance(body, dict):  # Handle overload with JSON body.
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs)
+
+        elif assistant_id is not _Unset:  # Handle overload with keyword arguments.
+            response = super().create_run(
+                thread_id,
+                include=include,
+                assistant_id=assistant_id,
+                model=model,
+                instructions=instructions,
+                additional_instructions=additional_instructions,
+                additional_messages=additional_messages,
+                tools=tools,
+                stream_parameter=False,
+                stream=False,
+                temperature=temperature,
+                top_p=top_p,
+                max_prompt_tokens=max_prompt_tokens,
+                max_completion_tokens=max_completion_tokens,
+                truncation_strategy=truncation_strategy,
+                tool_choice=tool_choice,
+                response_format=response_format,
+                parallel_tool_calls=parallel_tool_calls,
+                metadata=metadata,
+                **kwargs,
+            )
+
+        elif isinstance(body, io.IOBase):  # Handle overload with binary body.
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs)
+
+        else:
+            raise ValueError("Invalid combination of arguments provided.")
+
+        # super().create_run returns a coroutine; await it once here so all
+        # three branches share a single await point.
+        return await response
+
+    # NOTE(review): the forward-ref strings "_types.assistantsApiToolChoiceOption" /
+    # "_types.assistantsApiResponseFormatOption" look case-mangled (compare
+    # "_models.AssistantsApiToolChoiceOptionMode" above) — confirm against _types.py.
+    @distributed_trace_async
+    async def create_and_process_run(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
+        toolset: Optional[_models.AsyncToolSet] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.assistantsApiResponseFormatOption"] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        sleep_interval: int = 1,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an assistant thread and processes the run.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword model: The overridden model name that the assistant should use to run the thread.
+         Default value is None.
+ :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :keyword toolset: The Collection of tools and resources (alternative to `tools` and + `tool_resources`). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. 
+ :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + assistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.projects.models.assistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + assistantsApiResponseFormat Default value is None. + :paramtype response_format: str or str or + ~azure.ai.projects.models.assistantsApiResponseFormatMode or + ~azure.ai.projects.models.assistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. 
+ :paramtype metadata: dict[str, str] + :keyword sleep_interval: The time in seconds to wait between polling the service for run status. + Default value is 1. + :paramtype sleep_interval: int + :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + # Create and initiate the run with additional parameters + run = await self.create_run( + thread_id=thread_id, + assistant_id=assistant_id, + include=include, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=toolset.definitions if toolset else None, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + parallel_tool_calls=parallel_tool_calls, + metadata=metadata, + **kwargs, + ) + + # Monitor and process the run status + while run.status in [ + RunStatus.QUEUED, + RunStatus.IN_PROGRESS, + RunStatus.REQUIRES_ACTION, + ]: + time.sleep(sleep_interval) + run = await self.get_run(thread_id=thread_id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + logging.warning("No tool calls provided - cancelling run") + await self.cancel_run(thread_id=thread_id, run_id=run.id) + break + # We need tool set only if we are executing local function. In case if + # the tool is azure_function we just need to wait when it will be finished. 
+ if any(tool_call.type == "function" for tool_call in tool_calls): + toolset = toolset or self._toolset.get(run.assistant_id) + if toolset: + tool_outputs = await toolset.execute_tool_calls(tool_calls) + else: + raise ValueError("Toolset is not available in the client.") + + logging.info("Tool outputs: %s", tool_outputs) + if tool_outputs: + await self.submit_tool_outputs_to_run( + thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs + ) + + logging.info("Current run status: %s", run.status) + + return run + + @overload + async def create_stream( + self, + thread_id: str, + *, + assistant_id: str, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessageOptions]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + parallel_tool_calls: Optional[bool] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: None = None, + **kwargs: Any, + ) -> _models.AsyncAssistantRunStream[_models.AsyncAssistantEventHandler]: + """Creates a new stream for an assistant thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword include: A list of additional fields to include in the response. 
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword model: The overridden model name that the assistant should use to run the thread.
+         Default value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the assistant should use to run
+         the thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
+        :keyword tools: The overridden list of enabled tools that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context windows
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+         AssistantsNamedToolChoice Default value is None.
+        :paramtype tool_choice: str or
+         ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+         ~azure.ai.assistants.models.AssistantsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat Default value is None.
+        :paramtype response_format: str or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: None
+        :paramtype event_handler: None. _models.AsyncAssistantEventHandler will be applied as default.
+        :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports
+         streaming.
+        :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_stream(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        content_type: str = "application/json",
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.assistantsApiResponseFormatOption"] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: _models.BaseAsyncAssistantEventHandlerT,
+        **kwargs:
+            Any,
+    ) -> _models.AsyncAssistantRunStream[_models.BaseAsyncAssistantEventHandlerT]:
+        """Creates a new stream for an assistant thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the assistant that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword model: The overridden model name that the assistant should use to run the thread.
+         Default value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the assistant should use to run
+         the thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions]
+        :keyword tools: The overridden list of enabled tools that the assistant should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context windows
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"],
+         AssistantsNamedToolChoice Default value is None.
+        :paramtype tool_choice: str or
+         ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or
+         ~azure.ai.assistants.models.AssistantsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"],
+         AssistantsApiResponseFormat Default value is None.
+        :paramtype response_format: str or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or
+         ~azure.ai.assistants.models.AssistantsApiResponseFormat
+        :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use.
+         Default value is None.
+        :paramtype parallel_tool_calls: bool
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: The event handler to use for processing events during the run.
+        :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler
+        :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports
+         streaming.
+        :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_stream(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]],
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        event_handler: None = None,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.AsyncAssistantRunStream[_models.AsyncAssistantEventHandler]:
+        """Creates a new run for an assistant thread.
+
+        Terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON or IO[bytes]
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword event_handler: None
+        :paramtype event_handler: None. _models.AsyncAssistantEventHandler will be applied as default.
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports
+         streaming.
+        :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_stream(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]],
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        event_handler: _models.BaseAsyncAssistantEventHandlerT,
+        content_type: str = "application/json",
+        **kwargs: Any,
+    ) -> _models.AsyncAssistantRunStream[_models.BaseAsyncAssistantEventHandlerT]:
+        """Creates a new run for an assistant thread.
+
+        Terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON or IO[bytes]
+        :keyword include: A list of additional fields to include in the response.
+         Currently the only supported value is
+         ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result
+         content. Default value is None.
+        :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports
+         streaming.
+        :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create_stream(  # pyright: ignore[reportInconsistentOverload]
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None,
+        assistant_id: str = _Unset,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessageOptions]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.assistantsApiResponseFormatOption"] = None,
+        parallel_tool_calls: Optional[bool] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: Optional[_models.BaseAsyncAssistantEventHandlerT] = None,
+        **kwargs: Any,
+    ) -> _models.AsyncAssistantRunStream[_models.BaseAsyncAssistantEventHandlerT]:
+        """Creates a new run for an assistant thread.
+
+        Terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes] + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword assistant_id: The ID of the assistant that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the assistant should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the assistant should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :keyword tools: The overridden list of enabled tools that the assistant should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. 
+ + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], + assistantsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.projects.models.assistantsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], + assistantsApiResponseFormat Default value is None. 
+ :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode + or ~azure.ai.projects.models.assistantsApiResponseFormat + :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. + Default value is None. + :paramtype parallel_tool_calls: bool + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. 
+ response = super().create_run( + thread_id, + assistant_id=assistant_id, + include=include, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=True, + stream=True, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + parallel_tool_calls=parallel_tool_calls, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, include=include, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) + + if not event_handler: + event_handler = cast(_models.BaseAssistantEventHandlerT, _models.AsyncAssistantEventHandler()) + + return _models.AsyncAssistantRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + + # pylint: disable=arguments-differ + @overload + async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. 
+ :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + return await response + + @overload + async def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]], + *, + event_handler: 
_models.BaseAsyncAssistantEventHandler, + content_type: str = "application/json", + **kwargs: Any, + ) -> None: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: _models.BaseAsyncAssistantEventHandler, + **kwargs: Any, + ) -> None: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. + :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload] + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + event_handler: _models.BaseAsyncAssistantEventHandler, + **kwargs: Any, + ) -> None: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword event_handler: The event handler to use for processing events during the run. 
+ :paramtype event_handler: ~azure.ai.projects.models.AsyncAssistantEventHandler + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # Cast the response to Iterator[bytes] for type correctness + response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) + + event_handler.initialize(response_iterator, self._handle_submit_tool_outputs) + + async def _handle_submit_tool_outputs( + self, run: _models.ThreadRun, event_handler: _models.BaseAsyncAssistantEventHandler + ) -> None: + if isinstance(run.required_action, _models.SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + logger.debug("No tool calls to execute.") + return + + # We need tool set only if we are executing local function. In case if + # the tool is azure_function we just need to wait when it will be finished. 
+ if any(tool_call.type == "function" for tool_call in tool_calls): + toolset = self._toolset.get(run.assistant_id) + if toolset: + tool_outputs = await toolset.execute_tool_calls(tool_calls) + else: + logger.debug("Toolset is not available in the client.") + return + + logger.info("Tool outputs: %s", tool_outputs) + if tool_outputs: + await self.submit_tool_outputs_to_stream( + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler + ) + + # pylint: disable=arguments-differ + @overload + async def upload_file( # pylint: disable=arguments-differ + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + # pylint: disable=arguments-differ + @overload + async def upload_file( # pylint: disable=arguments-differ + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. + :paramtype file: ~azure.ai.projects._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def upload_file( + self, + body: Optional[Union[_models.UploadFileRequest, JSON]] = None, + *, + file: Optional[FileType] = None, + file_path: Optional[str] = None, + purpose: Union[str, _models.FilePurpose, None] = None, + filename: Optional[str] = None, + **kwargs: Any, + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. Required if `file` and `purpose` are not provided. + :type body: Optional[JSON] + :keyword file: File content. Required if `body` and `purpose` are not provided. + :paramtype file: Optional[FileType] + :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. + :paramtype file_path: Optional[str] + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :paramtype purpose: Union[str, _models.FilePurpose, None] + :keyword filename: The name of the file. + :paramtype filename: Optional[str] + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: _models.OpenAIFile + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. 
+ """ + if body is not None: + return await super().upload_file(body=body, **kwargs) + + if isinstance(purpose, FilePurpose): + purpose = purpose.value + + if file is not None and purpose is not None: + file_body = _models.UploadFileRequest( + file=file, + purpose=purpose, + filename=filename + ) + return await super().upload_file(body=file_body, **kwargs) + + if file_path is not None and purpose is not None: + if not os.path.isfile(file_path): + raise FileNotFoundError(f"The file path provided does not exist: {file_path}") + + try: + with open(file_path, "rb") as f: + content = f.read() + + # Determine filename and create correct FileType + base_filename = filename or os.path.basename(file_path) + file_content: FileType = (base_filename, content) + file_body = _models.UploadFileRequest( + file=file_content, + purpose=purpose, + filename=filename + ) + + return await super().upload_file(body=file_body, **kwargs) + except IOError as e: + raise IOError(f"Unable to read file: {file_path}.") from e + + raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") + + @overload + async def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file_and_poll( + self, + *, + file: FileType, + purpose: Union[str, _models.FilePurpose], + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. 
+ :paramtype file: ~azure.ai.projects._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file_and_poll( + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def upload_file_and_poll( + self, + body: Optional[JSON] = None, + *, + file: Optional[FileType] = None, + file_path: Optional[str] = None, + purpose: Union[str, _models.FilePurpose, None] = None, + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. 
Required if `file` and `purpose` are not provided. + :type body: Optional[JSON] + :keyword file: File content. Required if `body` and `purpose` are not provided. + :paramtype file: Optional[FileType] + :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. + :paramtype file_path: Optional[str] + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :paramtype purpose: Union[str, _models.FilePurpose, None] + :keyword filename: The name of the file. + :paramtype filename: Optional[str] + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: _models.OpenAIFile + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. + """ + if body is not None: + uploaded_file = await self.upload_file(body=body, **kwargs) + elif file is not None and purpose is not None: + uploaded_file = await self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + elif file_path is not None and purpose is not None: + uploaded_file = await self.upload_file(file_path=file_path, purpose=purpose, **kwargs) + else: + raise ValueError( + "Invalid parameters for upload_file_and_poll. Please provide either 'body', " + "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
+ ) + + while uploaded_file.status in ["uploaded", "pending", "running"]: + time.sleep(sleep_interval) + uploaded_file = await self.get_file(uploaded_file.id) + + return uploaded_file + + @overload + async def create_vector_store_and_poll( + self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_and_poll( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. 
+ :paramtype name: str + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_and_poll( + self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_and_poll( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. 
+ :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if body is not _Unset: + if isinstance(body, dict): + vector_store = await super().create_vector_store( + body=body, content_type=content_type or "application/json", **kwargs + ) + elif isinstance(body, io.IOBase): + vector_store = await super().create_vector_store(body=body, content_type=content_type, **kwargs) + else: + raise ValueError("Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes]).") + else: + store_configuration = None + if data_sources: + store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) + + vector_store = await super().create_vector_store( + file_ids=file_ids, + store_configuration=store_configuration, + name=name, + expires_after=expires_after, + chunking_strategy=chunking_strategy, + metadata=metadata, + **kwargs, + ) + + while vector_store.status == "in_progress": + time.sleep(sleep_interval) + vector_store = await super().get_vector_store(vector_store.id) + + return vector_store + + @overload + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store 
file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. 
Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. 
Required. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword content_type: Body parameter content-type. Defaults to "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if body is not _Unset: + if isinstance(body, dict): + vector_store_file_batch = await super().create_vector_store_file_batch( + vector_store_id=vector_store_id, + body=body, + content_type=content_type or "application/json", + **kwargs, + ) + elif isinstance(body, io.IOBase): + vector_store_file_batch = await super().create_vector_store_file_batch( + vector_store_id=vector_store_id, + body=body, + content_type=content_type, + **kwargs, + ) + else: + raise ValueError("Invalid type for 'body'. 
Must be a dict (JSON) or file-like (IO[bytes]).") + else: + vector_store_file_batch = await super().create_vector_store_file_batch( + vector_store_id=vector_store_id, + file_ids=file_ids, + data_sources=data_sources, + chunking_strategy=chunking_strategy, + **kwargs, + ) + + while vector_store_file_batch.status == "in_progress": + time.sleep(sleep_interval) + vector_store_file_batch = await super().get_vector_store_file_batch( + vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id + ) + + return vector_store_file_batch + + @overload + async def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_and_poll( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. 
+ + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if body is not _Unset: + if isinstance(body, dict): + vector_store_file = await super().create_vector_store_file( + vector_store_id=vector_store_id, + body=body, + content_type=content_type or "application/json", + **kwargs, + ) + elif isinstance(body, io.IOBase): + vector_store_file = await super().create_vector_store_file( + vector_store_id=vector_store_id, + body=body, + content_type=content_type, + **kwargs, + ) + else: + raise ValueError("Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes]).") + else: + vector_store_file = await super().create_vector_store_file( + vector_store_id=vector_store_id, + file_id=file_id, + data_source=data_source, + chunking_strategy=chunking_strategy, + **kwargs, + ) + + while vector_store_file.status == "in_progress": + time.sleep(sleep_interval) + vector_store_file = await super().get_vector_store_file( + vector_store_id=vector_store_id, file_id=vector_store_file.id + ) + + return vector_store_file + + @distributed_trace_async + async def get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[bytes]: + """ + Asynchronously returns file content as a byte stream for the given file_id. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: An async iterator that yields bytes from the file content. + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. 
+ """ + kwargs["stream"] = True + response = await super()._get_file_content(file_id, **kwargs) + return cast(AsyncIterator[bytes], response) + + @distributed_trace_async + async def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: + """ + Asynchronously saves file content retrieved using a file identifier to the specified local directory. + + :param file_id: The unique identifier for the file to retrieve. + :type file_id: str + :param file_name: The name of the file to be saved. + :type file_name: str + :param target_dir: The directory where the file should be saved. Defaults to the current working directory. + :type target_dir: str or Path + :raises ValueError: If the target path is not a directory or the file name is invalid. + :raises RuntimeError: If file content retrieval fails or no content is found. + :raises TypeError: If retrieved chunks are not bytes-like objects. + :raises IOError: If writing to the file fails. + """ + try: + # Determine and validate the target directory + path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() + path.mkdir(parents=True, exist_ok=True) + if not path.is_dir(): + raise ValueError(f"The target path '{path}' is not a directory.") + + # Sanitize and validate the file name + sanitized_file_name = Path(file_name).name + if not sanitized_file_name: + raise ValueError("The provided file name is invalid.") + + # Retrieve the file content + file_content_stream = await self.get_file_content(file_id) + if not file_content_stream: + raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") + + # Collect all chunks asynchronously + chunks = [] + async for chunk in file_content_stream: + if isinstance(chunk, (bytes, bytearray)): + chunks.append(chunk) + else: + raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") + + target_file_path = path / sanitized_file_name + + # Write the collected content to the file synchronously + def 
write_file(collected_chunks: list): + with open(target_file_path, "wb") as file: + for chunk in collected_chunks: + file.write(chunk) + + # Use the event loop to run the synchronous function in a thread executor + loop = asyncio.get_running_loop() + await loop.run_in_executor(None, write_file, chunks) + + logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) + + except (ValueError, RuntimeError, TypeError, IOError) as e: + logger.error("An error occurred in save_file: %s", e) + raise + + @classmethod + def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> Self: + """ + Create an asynchronous AIProjectClient from a connection string. + + :param str conn_str: The connection string, copied from your AI Foundry project. + :param AsyncTokenCredential credential: Credential used to authenticate requests to the service. + :return: An AssistantsClient instance. + :rtype: AssistantsClient + """ + if not conn_str: + raise ValueError("Connection string is required") + parts = conn_str.split(";") + if len(parts) != 4: + raise ValueError("Invalid connection string format") + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + return cls( + endpoint, + subscription_id, + resource_group_name, + project_name, + credential, + **kwargs, + ) + + def upload_file_to_azure_blob(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: + """Upload a file to the Azure AI Foundry project. + This method required *azure-ai-ml* to be installed. + + :param file_path: The path to the file to upload. + :type file_path: Union[str, Path, PathLike] + :return: The tuple, containing asset id and asset URI of uploaded file. 
+ :rtype: Tuple[str, str] + """ + try: + from azure.ai.ml import MLClient # type: ignore + from azure.ai.ml.constants import AssetTypes # type: ignore + from azure.ai.ml.entities import Data # type: ignore + except ImportError as e: + raise ImportError( + "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" + ) from e + + data = Data( + path=str(file_path), + type=AssetTypes.URI_FILE, + name=str(uuid.uuid4()), # generating random name + is_anonymous=True, + version="1", + ) + # We have to wrap async method get_token of + + ml_client = MLClient( + _SyncCredentialWrapper(self._config.credential), + self._config.subscription_id, + self._config.resource_group_name, + self._config.project_name, + ) + + data_asset = ml_client.data.create_or_update(data) + + return data_asset.id, data_asset.path + + @distributed_trace_async + async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: + """Deletes an assistant. + + :param assistant_id: Identifier of the assistant. Required. + :type assistant_id: str + :return: AssistantDeletionStatus. 
The AssistantDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AssistantDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + if assistant_id in self._toolset: + del self._toolset[assistant_id] + return await super().delete_assistant(assistant_id, **kwargs) + + @property + def scope(self) -> Dict[str, str]: + return { + "subscription_id": self._config.subscription_id, + "resource_group_name": self._config.resource_group_name, + "project_name": self._config.project_name, + } + + +__all__: List[str] = ['AssistantsClient'] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py index f7dd32510333..ba79c2794d4e 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py @@ -6,9 +6,1763 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List +import asyncio +import base64 +import datetime +import inspect +import itertools +import json +import logging +import math +import re +from abc import ABC, abstractmethod +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Dict, + Generic, + Iterator, + List, + Mapping, + Optional, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, + get_args, + get_origin, + overload, +) -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +from azure.core.credentials import AccessToken, TokenCredential + +from ._enums import AssistantStreamEvent, MessageRole, AzureAISearchQueryType +from ._models import ( + AISearchIndexResource, + AzureAISearchResource, + AzureAISearchToolDefinition, + AzureFunctionDefinition, + AzureFunctionStorageQueue, + AzureFunctionToolDefinition, + AzureFunctionBinding, 
+ BingGroundingToolDefinition, + CodeInterpreterToolDefinition, + CodeInterpreterToolResource, + FileSearchToolDefinition, + FileSearchToolResource, + FunctionDefinition, + FunctionToolDefinition, + MessageImageFileContent, + MessageTextContent, + MessageTextFileCitationAnnotation, + MessageTextUrlCitationAnnotation, + MessageTextFilePathAnnotation, + MicrosoftFabricToolDefinition, + OpenApiAuthDetails, + OpenApiToolDefinition, + OpenApiFunctionDefinition, + RequiredFunctionToolCall, + RunStep, + RunStepDeltaChunk, + SharepointToolDefinition, + SubmitToolOutputsAction, + ThreadRun, + ToolConnection, + ToolConnectionList, + ToolDefinition, + ToolResources, + MessageDeltaTextContent, +) + +from ._models import MessageDeltaChunk as MessageDeltaChunkGenerated +from ._models import ThreadMessage as ThreadMessageGenerated +from ._models import OpenAIPageableListOfThreadMessage as OpenAIPageableListOfThreadMessageGenerated +from ._models import MessageAttachment as MessageAttachmentGenerated + +from .. import _types + +logger = logging.getLogger(__name__) + +StreamEventData = Union["MessageDeltaChunk", "ThreadMessage", ThreadRun, RunStep, str] + + +def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]: + """ + Remove the parameters, non present in class public fields; return shallow copy of a dictionary. + + **Note:** Classes inherited from the model check that the parameters are present + in the list of attributes and if they are not, the error is being raised. This check may not + be relevant for classes, not inherited from azure.ai.projects._model_base.Model. + :param Type model_class: The class of model to be used. + :param parameters: The parsed dictionary with parameters. + :type parameters: Union[str, Dict[str, Any]] + :return: The dictionary with all invalid parameters removed. 
    :rtype: Dict[str, Any]
    """
    new_params = {}
    # A parameter is "valid" when the class declares a public attribute of the same name
    # that carries the generated-model "_type" marker.
    valid_parameters = set(
        filter(
            lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys()
        )
    )
    for k in filter(lambda x: x in valid_parameters, parameters.keys()):
        new_params[k] = parameters[k]
    return new_params


def _safe_instantiate(
    model_class: Type, parameters: Union[str, Dict[str, Any]], *, generated_class: Optional[Type] = None
) -> StreamEventData:
    """
    Instantiate class with the set of parameters from the server.

    :param Type model_class: The class of model to be used.
    :param parameters: The parsed dictionary with parameters.
    :type parameters: Union[str, Dict[str, Any]]
    :keyword Optional[Type] generated_class: The optional generated type whose declared
     fields are used to filter the incoming parameters (defaults to ``model_class``).
    :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise.
    :rtype: Any
    """
    if not generated_class:
        generated_class = model_class
    # Non-dict payloads (raw strings) are passed through untouched.
    if not isinstance(parameters, dict):
        return parameters
    return cast(StreamEventData, model_class(**_filter_parameters(generated_class, parameters)))


def _parse_event(event_data_str: str) -> Tuple[str, StreamEventData]:
    """Parse a single server-sent event block into its event type and a typed payload.

    :param str event_data_str: Raw SSE text containing ``event:`` and ``data:`` lines.
    :return: The event type string and the payload, deserialized into the matching
     model class when the event type is recognized, otherwise as a plain string.
    :rtype: Tuple[str, StreamEventData]
    :raises ValueError: If the event block carries no ``event:`` line.
    """
    event_lines = event_data_str.strip().split("\n")
    event_type: Optional[str] = None
    event_data = ""
    event_obj: StreamEventData
    for line in event_lines:
        if line.startswith("event:"):
            event_type = line.split(":", 1)[1].strip()
        elif line.startswith("data:"):
            event_data = line.split(":", 1)[1].strip()

    if not event_type:
        raise ValueError("Event type not specified in the event data.")

    try:
        parsed_data: Union[str, Dict[str, StreamEventData]] = cast(Dict[str, StreamEventData], json.loads(event_data))
    except json.JSONDecodeError:
        # Non-JSON payloads are kept as plain strings (e.g. the "[DONE]" sentinel).
        parsed_data = event_data

    # Workaround for service bug: Rename 'expires_at' to 'expired_at'
    if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data:
        parsed_data["expired_at"] = parsed_data.pop("expires_at")

    # Map to the appropriate class instance
    if event_type in {
        AssistantStreamEvent.THREAD_RUN_CREATED.value,
        AssistantStreamEvent.THREAD_RUN_QUEUED.value,
        AssistantStreamEvent.THREAD_RUN_INCOMPLETE.value,
        AssistantStreamEvent.THREAD_RUN_IN_PROGRESS.value,
        AssistantStreamEvent.THREAD_RUN_REQUIRES_ACTION.value,
        AssistantStreamEvent.THREAD_RUN_COMPLETED.value,
        AssistantStreamEvent.THREAD_RUN_FAILED.value,
        AssistantStreamEvent.THREAD_RUN_CANCELLING.value,
        AssistantStreamEvent.THREAD_RUN_CANCELLED.value,
        AssistantStreamEvent.THREAD_RUN_EXPIRED.value,
    }:
        event_obj = _safe_instantiate(ThreadRun, parsed_data)
    elif event_type in {
        AssistantStreamEvent.THREAD_RUN_STEP_CREATED.value,
        AssistantStreamEvent.THREAD_RUN_STEP_IN_PROGRESS.value,
        AssistantStreamEvent.THREAD_RUN_STEP_COMPLETED.value,
        AssistantStreamEvent.THREAD_RUN_STEP_FAILED.value,
        AssistantStreamEvent.THREAD_RUN_STEP_CANCELLED.value,
        AssistantStreamEvent.THREAD_RUN_STEP_EXPIRED.value,
    }:
        event_obj = _safe_instantiate(RunStep, parsed_data)
    elif event_type in {
        AssistantStreamEvent.THREAD_MESSAGE_CREATED.value,
        AssistantStreamEvent.THREAD_MESSAGE_IN_PROGRESS.value,
        AssistantStreamEvent.THREAD_MESSAGE_COMPLETED.value,
        AssistantStreamEvent.THREAD_MESSAGE_INCOMPLETE.value,
    }:
        # The enriched ThreadMessage subclass is instantiated, but fields are filtered
        # against the generated base class, which declares the typed attributes.
        event_obj = _safe_instantiate(ThreadMessage, parsed_data, generated_class=ThreadMessageGenerated)
    elif event_type == AssistantStreamEvent.THREAD_MESSAGE_DELTA.value:
        event_obj = _safe_instantiate(MessageDeltaChunk, parsed_data, generated_class=MessageDeltaChunkGenerated)

    elif event_type == AssistantStreamEvent.THREAD_RUN_STEP_DELTA.value:
        event_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data)
    else:
        # Unrecognized event types fall back to the raw payload string.
        event_obj = str(parsed_data)

    return event_type, event_obj


# TODO: Look into adding an async version of this class
class SASTokenCredential(TokenCredential):
    def __init__(
        self,
        *,
        sas_token: str,
credential: TokenCredential, + subscription_id: str, + resource_group_name: str, + project_name: str, + connection_name: str, + ): + self._sas_token = sas_token + self._credential = credential + self._subscription_id = subscription_id + self._resource_group_name = resource_group_name + self._project_name = project_name + self._connection_name = connection_name + self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) + logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on) + + @classmethod + def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime: + payload = jwt_token.split(".")[1] + padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary + decoded_bytes = base64.urlsafe_b64decode(padded_payload) + decoded_str = decoded_bytes.decode("utf-8") + decoded_payload = json.loads(decoded_str) + expiration_date = decoded_payload.get("exp") + return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) + + def _refresh_token(self) -> None: + logger.debug("[SASTokenCredential._refresh_token] Enter") + from azure.ai.assistants import AssistantsClient + + project_client = AssistantsClient( + credential=self._credential, + # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. + # http://management.azure.com is hard coded in the SDK. 
+ endpoint="not-needed", + subscription_id=self._subscription_id, + resource_group_name=self._resource_group_name, + project_name=self._project_name, + ) + + connection = project_client.connections.get(connection_name=self._connection_name, include_credentials=True) + + self._sas_token = "" + if connection is not None and connection.token_credential is not None: + sas_credential = cast(SASTokenCredential, connection.token_credential) + self._sas_token = sas_credential._sas_token # pylint: disable=protected-access + self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) + logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on) + + def get_token( + self, + *scopes: str, + claims: Optional[str] = None, + tenant_id: Optional[str] = None, + enable_cae: bool = False, + **kwargs: Any, + ) -> AccessToken: + """Request an access token for `scopes`. + + :param str scopes: The type of access needed. + + :keyword str claims: Additional claims required in the token, such as those returned in a resource + provider's claims challenge following an authorization failure. + :keyword str tenant_id: Optional tenant to include in the token request. + :keyword bool enable_cae: Indicates whether to enable Continuous Access Evaluation (CAE) for the requested + token. Defaults to False. + + :rtype: AccessToken + :return: An AccessToken instance containing the token string and its expiration time in Unix time. 
+ """ + logger.debug("SASTokenCredential.get_token] Enter") + if self._expires_on < datetime.datetime.now(datetime.timezone.utc): + self._refresh_token() + return AccessToken(self._sas_token, math.floor(self._expires_on.timestamp())) + + +# Define type_map to translate Python type annotations to JSON Schema types +type_map = { + "str": "string", + "int": "integer", + "float": "number", + "bool": "boolean", + "NoneType": "null", + "list": "array", + "dict": "object", +} + + +def _map_type(annotation) -> Dict[str, Any]: # pylint: disable=too-many-return-statements + if annotation == inspect.Parameter.empty: + return {"type": "string"} # Default type if annotation is missing + + origin = get_origin(annotation) + + if origin in {list, List}: + args = get_args(annotation) + item_type = args[0] if args else str + return {"type": "array", "items": _map_type(item_type)} + if origin in {dict, Dict}: + return {"type": "object"} + if origin is Union: + args = get_args(annotation) + # If Union contains None, it is an optional parameter + if type(None) in args: + # If Union contains only one non-None type, it is a nullable parameter + non_none_args = [arg for arg in args if arg is not type(None)] + if len(non_none_args) == 1: + schema = _map_type(non_none_args[0]) + if "type" in schema: + if isinstance(schema["type"], str): + schema["type"] = [schema["type"], "null"] + elif "null" not in schema["type"]: + schema["type"].append("null") + else: + schema["type"] = ["null"] + return schema + # If Union contains multiple types, it is a oneOf parameter + return {"oneOf": [_map_type(arg) for arg in args]} + if isinstance(annotation, type): + schema_type = type_map.get(annotation.__name__, "string") + return {"type": schema_type} + + return {"type": "string"} # Fallback to "string" if type is unrecognized + + +def is_optional(annotation) -> bool: + origin = get_origin(annotation) + if origin is Union: + args = get_args(annotation) + return type(None) in args + return False + + +class 
class MessageDeltaChunk(MessageDeltaChunkGenerated):
    """Generated MessageDeltaChunk extended with a convenience text accessor."""

    @property
    def text(self) -> str:
        """Concatenated text of every text content part in this delta chunk.

        :rtype: str
        """
        delta = self.delta
        if not delta or not delta.content:
            return ""
        fragments: List[str] = []
        for part in delta.content:
            # Only text parts contribute; a part may carry an empty payload.
            if isinstance(part, MessageDeltaTextContent) and part.text:
                fragments.append(part.text.value or "")
        return "".join(fragments)
class MessageAttachment(MessageAttachmentGenerated):
    """Generated MessageAttachment with typed ``__init__`` overloads.

    The overloads only narrow the accepted element types of ``tools``
    (file search, code interpreter, or a mixed list); construction is fully
    delegated to the generated base class.
    """

    # tools restricted to file-search tool definitions.
    @overload
    def __init__(
        self,
        *,
        tools: List["FileSearchToolDefinition"],
        file_id: Optional[str] = None,
        data_source: Optional["VectorStoreDataSource"] = None,
    ) -> None: ...
    # tools restricted to code-interpreter tool definitions.
    @overload
    def __init__(
        self,
        *,
        tools: List["CodeInterpreterToolDefinition"],
        file_id: Optional[str] = None,
        data_source: Optional["VectorStoreDataSource"] = None,
    ) -> None: ...
    # Mixed list of any supported attachment tool definition.
    @overload
    def __init__(
        self,
        *,
        tools: List["_types.MessageAttachmentToolDefinition"],
        file_id: Optional[str] = None,
        data_source: Optional["VectorStoreDataSource"] = None,
    ) -> None: ...

    @overload
    def __init__(self, mapping: Mapping[str, Any]) -> None:
        """
        :param mapping: raw JSON to initialize the model.
        :type mapping: Mapping[str, Any]
        """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Overloads above are typing-only; the generated model does the work.
        super().__init__(*args, **kwargs)
    def __init__(self, functions: Set[Callable[..., Any]]):
        """
        Initialize FunctionTool with a set of functions.

        :param functions: A set of function objects.
        :type functions: Set[Callable[..., Any]]
        """
        # Map of function name -> callable, used to resolve tool calls by name.
        self._functions = self._create_function_dict(functions)
        # JSON-schema tool definitions derived from signatures and docstrings.
        self._definitions = self._build_function_definitions(self._functions)
+ :type extra_functions: Set[Callable[..., Any]] + """ + # Convert the existing dictionary of { name: function } back into a set + existing_functions = set(self._functions.values()) + # Merge old + new + combined = existing_functions.union(extra_functions) + # Rebuild state + self._functions = self._create_function_dict(combined) + self._definitions = self._build_function_definitions(self._functions) + + def _create_function_dict(self, functions: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: + return {func.__name__: func for func in functions} + + def _build_function_definitions(self, functions: Dict[str, Any]) -> List[FunctionToolDefinition]: + specs: List[FunctionToolDefinition] = [] + # Flexible regex to capture ':param : ' + param_pattern = re.compile( + r""" + ^\s* # Optional leading whitespace + :param # Literal ':param' + \s+ # At least one whitespace character + (?P[^:\s\(\)]+) # Parameter name (no spaces, colons, or parentheses) + (?:\s*\(\s*(?P[^)]+?)\s*\))? # Optional type in parentheses, allowing internal spaces + \s*:\s* # Colon ':' surrounded by optional whitespace + (?P.+) # Description (rest of the line) + """, + re.VERBOSE, + ) + + for name, func in functions.items(): + sig = inspect.signature(func) + params = sig.parameters + docstring = inspect.getdoc(func) or "" + description = docstring.split("\n", maxsplit=1)[0] if docstring else "No description" + + param_descriptions = {} + for line in docstring.splitlines(): + line = line.strip() + match = param_pattern.match(line) + if match: + groups = match.groupdict() + param_name = groups.get("name") + param_desc = groups.get("description") + param_desc = param_desc.strip() if param_desc else "No description" + param_descriptions[param_name] = param_desc.strip() + + properties = {} + required = [] + for param_name, param in params.items(): + param_type_info = _map_type(param.annotation) + param_description = param_descriptions.get(param_name, "No description") + + properties[param_name] = 
{**param_type_info, "description": param_description} + + # If the parameter has no default value and is not optional, add it to the required list + if param.default is inspect.Parameter.empty and not is_optional(param.annotation): + required.append(param_name) + + function_def = FunctionDefinition( + name=name, + description=description, + parameters={"type": "object", "properties": properties, "required": required}, + ) + tool_def = FunctionToolDefinition(function=function_def) + specs.append(tool_def) + + return specs + + def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]: + function_name = tool_call.function.name + arguments = tool_call.function.arguments + + if function_name not in self._functions: + logging.error("Function '%s' not found.", function_name) + raise ValueError(f"Function '{function_name}' not found.") + + function = self._functions[function_name] + + try: + parsed_arguments = json.loads(arguments) + except json.JSONDecodeError as e: + logging.error("Invalid JSON arguments for function '%s': %s", function_name, e) + raise ValueError(f"Invalid JSON arguments: {e}") from e + + if not isinstance(parsed_arguments, dict): + logging.error("Arguments must be a JSON object for function '%s'.", function_name) + raise TypeError("Arguments must be a JSON object.") + + return function, parsed_arguments + + @property + def definitions(self) -> List[FunctionToolDefinition]: + """ + Get the function definitions. + + :return: A list of function definitions. + :rtype: List[ToolDefinition] + """ + return self._definitions + + @property + def resources(self) -> ToolResources: + """ + Get the tool resources for the assistant. + + :return: An empty ToolResources as FunctionTool doesn't have specific resources. 
class FunctionTool(BaseFunctionTool):
    """Synchronous function tool: executes user-provided Python callables."""

    def execute(self, tool_call: RequiredFunctionToolCall) -> Any:
        """Run the function referenced by ``tool_call`` with its JSON arguments.

        :param RequiredFunctionToolCall tool_call: The tool call to execute.
        :return: Whatever the user function returns, or a JSON error string.
        :rtype: Any
        """
        target, call_kwargs = self._get_func_and_args(tool_call)
        try:
            if not call_kwargs:
                return target()
            return target(**call_kwargs)
        except TypeError as exc:
            error_message = f"Error executing function '{tool_call.function.name}': {exc}"
            logging.error(error_message)
            # Hand the error back to the assistant as JSON so the model can
            # self-correct the function call on a retry.
            return json.dumps({"error": error_message})
class OpenApiTool(Tool[OpenApiToolDefinition]):
    """
    A tool that retrieves information using OpenAPI specs.

    Constructed with one primary API definition (name, description, spec, auth);
    additional API definitions can be added or removed dynamically afterwards.
    """

    def __init__(self, name: str, description: str, spec: Any, auth: OpenApiAuthDetails):
        """
        Constructor initializes the tool with a primary API definition.

        :param name: The name of the API.
        :param description: The API description.
        :param spec: The API specification.
        :param auth: Authentication details for the API.
        :type auth: OpenApiAuthDetails
        """
        # Fallback auth used when add_definition() is called without one.
        self._default_auth = auth
        primary = OpenApiFunctionDefinition(name=name, description=description, spec=spec, auth=auth)
        self._definitions: List[OpenApiToolDefinition] = [OpenApiToolDefinition(openapi=primary)]

    @property
    def definitions(self) -> List[OpenApiToolDefinition]:
        """
        Get the list of all API definitions for the tool.

        :return: A list of OpenAPI tool definitions.
        :rtype: List[ToolDefinition]
        """
        return self._definitions

    def add_definition(self, name: str, description: str, spec: Any, auth: Optional[OpenApiAuthDetails] = None) -> None:
        """
        Adds a new API definition dynamically.

        :param name: The name of the API.
        :type name: str
        :param description: The description of the API.
        :type description: str
        :param spec: The API specification.
        :type spec: Any
        :param auth: Optional authentication details for this particular API definition.
            If not provided, the tool's default authentication details will be used.
        :type auth: Optional[OpenApiAuthDetails]
        :raises ValueError: If a definition with the same name exists.
        """
        # Names must be unique across all registered definitions.
        if any(existing.openapi.name == name for existing in self._definitions):
            raise ValueError(f"Definition '{name}' already exists and cannot be added again.")

        effective_auth = self._default_auth if auth is None else auth
        self._definitions.append(
            OpenApiToolDefinition(
                openapi=OpenApiFunctionDefinition(name=name, description=description, spec=spec, auth=effective_auth)
            )
        )

    def remove_definition(self, name: str) -> None:
        """
        Removes an API definition based on its name.

        :param name: The name of the API definition to remove.
        :type name: str
        :raises ValueError: If the definition with the specified name does not exist.
        """
        target = next((item for item in self._definitions if item.openapi.name == name), None)
        if target is None:
            raise ValueError(f"Definition with the name '{name}' does not exist.")
        self._definitions.remove(target)
        logging.info("Definition '%s' removed. Total definitions: %d.", name, len(self._definitions))

    @property
    def resources(self) -> ToolResources:
        """
        Get the tool resources for the assistant.

        :return: An empty ToolResources as OpenApiTool doesn't have specific resources.
        :rtype: ToolResources
        """
        return ToolResources()

    def execute(self, tool_call: Any) -> None:
        """
        OpenApiTool does not execute client-side.

        :param Any tool_call: The tool call to execute.
        :type tool_call: Any
        """
class ConnectionTool(Tool[ToolDefinitionT]):
    """
    A tool that requires connection ids.
    Used as base class for Bing Grounding, Sharepoint, and Microsoft Fabric.
    """

    def __init__(self, connection_id: str):
        """
        Initialize ConnectionTool with a connection_id.

        :param connection_id: Connection ID used by tool. All connection tools allow only one connection.
        """
        # Exactly one connection is supported; stored as a one-element list
        # because the service contract expects a connection list.
        self.connection_ids = [ToolConnection(connection_id=connection_id)]

    @property
    def resources(self) -> ToolResources:
        """
        Get the connection tool resources.

        :rtype: ToolResources
        """
        # Connection tools carry their configuration in the definition, not in resources.
        return ToolResources()

    def execute(self, tool_call: Any) -> Any:
        """Connection tools do not execute client-side."""
class FileSearchTool(Tool[FileSearchToolDefinition]):
    """
    A tool that searches for uploaded file information from the created vector stores.

    :param vector_store_ids: A list of vector store IDs to search for files.
    :type vector_store_ids: list[str]
    """

    def __init__(self, vector_store_ids: Optional[List[str]] = None):
        # Stored as a set so duplicate IDs collapse to a single entry.
        self.vector_store_ids = set() if vector_store_ids is None else set(vector_store_ids)

    def add_vector_store(self, store_id: str) -> None:
        """
        Add a vector store ID to the list of vector stores to search for files.

        :param store_id: The ID of the vector store to search for files.
        :type store_id: str
        """
        self.vector_store_ids.add(store_id)

    def remove_vector_store(self, store_id: str) -> None:
        """
        Remove a vector store ID from the list of vector stores to search for files.

        :param store_id: The ID of the vector store to remove.
        :type store_id: str
        """
        self.vector_store_ids.remove(store_id)

    @property
    def definitions(self) -> List[FileSearchToolDefinition]:
        """
        Get the file search tool definitions.

        :rtype: List[ToolDefinition]
        """
        return [FileSearchToolDefinition()]

    @property
    def resources(self) -> ToolResources:
        """
        Get the file search resources.

        :rtype: ToolResources
        """
        store_ids = list(self.vector_store_ids)
        return ToolResources(file_search=FileSearchToolResource(vector_store_ids=store_ids))

    def execute(self, tool_call: Any) -> Any:
        """File search does not execute client-side."""
class BaseToolSet:
    """
    Abstract collection of tools that can be used by an assistant.

    Concrete subclasses restrict which tool types are allowed and implement
    tool-call execution.
    """

    def __init__(self) -> None:
        # At most one tool instance per tool type (enforced by add()).
        self._tools: List[Tool] = []

    def validate_tool_type(self, tool: Tool) -> None:
        """
        Hook for subclasses to reject unsupported tool types; no-op by default.

        :param Tool tool: The tool to validate.
        """

    def add(self, tool: Tool):
        """
        Add a tool to the tool set.

        :param Tool tool: The tool to add.
        :raises ValueError: If a tool of the same type already exists.
        """
        self.validate_tool_type(tool)

        if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools):
            # Fix: this message was a plain string literal missing the f-prefix,
            # so the "{type(tool).__name__}" placeholder was emitted verbatim.
            raise ValueError(f"Tool of type {type(tool).__name__} already exists in the ToolSet.")
        self._tools.append(tool)

    def remove(self, tool_type: Type[Tool]) -> None:
        """
        Remove a tool of the specified type from the tool set.

        :param Type[Tool] tool_type: The type of tool to remove.
        :raises ValueError: If a tool of the specified type is not found.
        """
        for i, tool in enumerate(self._tools):
            if isinstance(tool, tool_type):
                del self._tools[i]
                logging.info("Tool of type %s removed from the ToolSet.", tool_type.__name__)
                return
        raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.")

    @property
    def definitions(self) -> List[ToolDefinition]:
        """
        Get the definitions for all tools in the tool set.

        :rtype: List[ToolDefinition]
        """
        tools = []
        for tool in self._tools:
            tools.extend(tool.definitions)
        return tools

    @property
    def resources(self) -> ToolResources:
        """
        Get the merged resources for all tools in the tool set.

        :rtype: ToolResources
        """
        tool_resources: Dict[str, Any] = {}
        for tool in self._tools:
            resources = tool.resources
            for key, value in resources.items():
                if key in tool_resources:
                    # Merge dict-valued resources from multiple tools under one key.
                    if isinstance(tool_resources[key], dict) and isinstance(value, dict):
                        tool_resources[key].update(value)
                else:
                    # First tool contributing this resource key.
                    tool_resources[key] = value
        return self._create_tool_resources_from_dict(tool_resources)

    def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources:
        """
        Safely converts a dictionary into a ToolResources instance.

        :param resources: A dictionary of tool resources. Should be a mapping
         accepted by ToolResources (keyword arguments of its constructor).
        :type resources: Dict[str, Any]
        :return: A ToolResources instance.
        :rtype: ToolResources
        :raises ValueError: If the dictionary is not a valid ToolResources mapping.
        """
        try:
            return ToolResources(**resources)
        except TypeError as e:
            logging.error("Error creating ToolResources: %s", e)
            raise ValueError("Invalid resources for ToolResources.") from e

    def get_definitions_and_resources(self) -> Dict[str, Any]:
        """
        Get the definitions and resources for all tools in the tool set.

        :return: A dictionary containing the tool resources and definitions.
        :rtype: Dict[str, Any]
        """
        return {
            "tool_resources": self.resources,
            "tools": self.definitions,
        }

    def get_tool(self, tool_type: Type[ToolT]) -> ToolT:
        """
        Get a tool of the specified type from the tool set.

        :param Type[Tool] tool_type: The type of tool to get.
        :return: The tool of the specified type.
        :rtype: Tool
        :raises ValueError: If a tool of the specified type is not found.
        """
        for tool in self._tools:
            if isinstance(tool, tool_type):
                return cast(ToolT, tool)
        raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.")
EventFunctionReturnT = TypeVar("EventFunctionReturnT")
T = TypeVar("T")
BaseAsyncAssistantEventHandlerT = TypeVar("BaseAsyncAssistantEventHandlerT", bound="BaseAsyncAssistantEventHandler")
BaseAssistantEventHandlerT = TypeVar("BaseAssistantEventHandlerT", bound="BaseAssistantEventHandler")


async def async_chain(*iterators: AsyncIterator[T]) -> AsyncIterator[T]:
    """Asynchronous analogue of itertools.chain: drain each iterator in order."""
    for source in iterators:
        async for element in source:
            yield element
submit_tool_outputs: Callable[[ThreadRun, "BaseAsyncAssistantEventHandler[T]"], Awaitable[None]], + ): + self.response_iterator = ( + async_chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator + ) + self.submit_tool_outputs = submit_tool_outputs + + # cspell:disable-next-line + async def __anext__(self) -> T: + # cspell:disable-next-line + event_bytes = await self.__anext_impl__() + return await self._process_event(event_bytes.decode("utf-8")) + + # cspell:disable-next-line + async def __anext_impl__(self) -> bytes: + self.buffer = b"" if self.buffer is None else self.buffer + if self.response_iterator is None: + raise ValueError("The response handler was not initialized.") + + if not b"\n\n" in self.buffer: + async for chunk in self.response_iterator: + self.buffer += chunk + if b"\n\n" in self.buffer: + break + + if self.buffer == b"": + raise StopAsyncIteration() + + event_bytes = b"" + if b"\n\n" in self.buffer: + event_end_index = self.buffer.index(b"\n\n") + event_bytes = self.buffer[:event_end_index] + self.buffer = self.buffer[event_end_index:].lstrip() + else: + event_bytes = self.buffer + self.buffer = b"" + + return event_bytes + + async def _process_event(self, event_data_str: str) -> T: + raise NotImplementedError("This method needs to be implemented.") + + async def until_done(self) -> None: + """ + Iterates through all events until the stream is marked as done. + Calls the provided callback function with each event data. 
+ """ + try: + async for _ in self: + pass + except StopAsyncIteration: + pass + + +class BaseAssistantEventHandler(Iterator[T]): + + def __init__(self) -> None: + self.response_iterator: Optional[Iterator[bytes]] = None + self.submit_tool_outputs: Optional[Callable[[ThreadRun, "BaseAssistantEventHandler[T]"], None]] = None + self.buffer: Optional[bytes] = None + + def initialize( + self, + response_iterator: Iterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, "BaseAssistantEventHandler[T]"], None], + ) -> None: + self.response_iterator = ( + itertools.chain(self.response_iterator, response_iterator) if self.response_iterator else response_iterator + ) + self.submit_tool_outputs = submit_tool_outputs + + def __next__(self) -> T: + event_bytes = self.__next_impl__() + return self._process_event(event_bytes.decode("utf-8")) + + def __next_impl__(self) -> bytes: + self.buffer = b"" if self.buffer is None else self.buffer + if self.response_iterator is None: + raise ValueError("The response handler was not initialized.") + + if not b"\n\n" in self.buffer: + for chunk in self.response_iterator: + self.buffer += chunk + if b"\n\n" in self.buffer: + break + + if self.buffer == b"": + raise StopIteration() + + event_bytes = b"" + if b"\n\n" in self.buffer: + event_end_index = self.buffer.index(b"\n\n") + event_bytes = self.buffer[:event_end_index] + self.buffer = self.buffer[event_end_index:].lstrip() + else: + event_bytes = self.buffer + self.buffer = b"" + + return event_bytes + + def _process_event(self, event_data_str: str) -> T: + raise NotImplementedError("This method needs to be implemented.") + + def until_done(self) -> None: + """ + Iterates through all events until the stream is marked as done. + Calls the provided callback function with each event data. 
+ """ + try: + for _ in self: + pass + except StopIteration: + pass + + +class AsyncAssistantEventHandler(BaseAsyncAssistantEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]): + + async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: + event_type, event_data_obj = _parse_event(event_data_str) + if ( + isinstance(event_data_obj, ThreadRun) + and event_data_obj.status == "requires_action" + and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) + ): + await cast(Callable[[ThreadRun, "BaseAsyncAssistantEventHandler"], Awaitable[None]], self.submit_tool_outputs)( + event_data_obj, self + ) + + func_rt: Optional[EventFunctionReturnT] = None + try: + if isinstance(event_data_obj, MessageDeltaChunk): + func_rt = await self.on_message_delta(event_data_obj) + elif isinstance(event_data_obj, ThreadMessage): + func_rt = await self.on_thread_message(event_data_obj) + elif isinstance(event_data_obj, ThreadRun): + func_rt = await self.on_thread_run(event_data_obj) + elif isinstance(event_data_obj, RunStep): + func_rt = await self.on_run_step(event_data_obj) + elif isinstance(event_data_obj, RunStepDeltaChunk): + func_rt = await self.on_run_step_delta(event_data_obj) + elif event_type == AssistantStreamEvent.ERROR: + func_rt = await self.on_error(event_data_obj) + elif event_type == AssistantStreamEvent.DONE: + func_rt = await self.on_done() + else: + func_rt = await self.on_unhandled_event( + event_type, event_data_obj + ) # pylint: disable=assignment-from-none + except Exception as e: # pylint: disable=broad-exception-caught + logging.error("Error in event handler for event '%s': %s", event_type, e) + return event_type, event_data_obj, func_rt + + async def on_message_delta( + self, delta: "MessageDeltaChunk" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle message delta events. + + :param MessageDeltaChunk delta: The message delta. 
+ :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_thread_message( + self, message: "ThreadMessage" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle thread message events. + + :param ThreadMessage message: The thread message. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_thread_run( + self, run: "ThreadRun" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle thread run events. + + :param ThreadRun run: The thread run. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle run step events. + + :param RunStep step: The run step. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_run_step_delta( + self, delta: "RunStepDeltaChunk" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle run step delta events. + + :param RunStepDeltaChunk delta: The run step delta. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_error(self, data: str) -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle error events. + + :param str data: The error event's data. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_done( + self, + ) -> Optional[EventFunctionReturnT]: + """Handle the completion of the stream. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + async def on_unhandled_event( + self, event_type: str, event_data: str # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle any unhandled event types. + + :param str event_type: The event type. + :param Any event_data: The event's data. 
+ :rtype: Optional[EventFunctionReturnT] + """ + return None + + +class AssistantEventHandler(BaseAssistantEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]): + + def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: + + event_type, event_data_obj = _parse_event(event_data_str) + if ( + isinstance(event_data_obj, ThreadRun) + and event_data_obj.status == "requires_action" + and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) + ): + cast(Callable[[ThreadRun, "BaseAssistantEventHandler"], Awaitable[None]], self.submit_tool_outputs)( + event_data_obj, self + ) + + func_rt: Optional[EventFunctionReturnT] = None + try: + if isinstance(event_data_obj, MessageDeltaChunk): + func_rt = self.on_message_delta(event_data_obj) # pylint: disable=assignment-from-none + elif isinstance(event_data_obj, ThreadMessage): + func_rt = self.on_thread_message(event_data_obj) # pylint: disable=assignment-from-none + elif isinstance(event_data_obj, ThreadRun): + func_rt = self.on_thread_run(event_data_obj) # pylint: disable=assignment-from-none + elif isinstance(event_data_obj, RunStep): + func_rt = self.on_run_step(event_data_obj) # pylint: disable=assignment-from-none + elif isinstance(event_data_obj, RunStepDeltaChunk): + func_rt = self.on_run_step_delta(event_data_obj) # pylint: disable=assignment-from-none + elif event_type == AssistantStreamEvent.ERROR: + func_rt = self.on_error(event_data_obj) # pylint: disable=assignment-from-none + elif event_type == AssistantStreamEvent.DONE: + func_rt = self.on_done() # pylint: disable=assignment-from-none + else: + func_rt = self.on_unhandled_event(event_type, event_data_obj) # pylint: disable=assignment-from-none + except Exception as e: # pylint: disable=broad-exception-caught + logging.error("Error in event handler for event '%s': %s", event_type, e) + return event_type, event_data_obj, func_rt + + def on_message_delta( + self, delta: 
"MessageDeltaChunk" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle message delta events. + + :param MessageDeltaChunk delta: The message delta. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_thread_message( + self, message: "ThreadMessage" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle thread message events. + + :param ThreadMessage message: The thread message. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_thread_run(self, run: "ThreadRun") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle thread run events. + + :param ThreadRun run: The thread run. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_run_step(self, step: "RunStep") -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle run step events. + + :param RunStep step: The run step. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_run_step_delta( + self, delta: "RunStepDeltaChunk" # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle run step delta events. + + :param RunStepDeltaChunk delta: The run step delta. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_error(self, data: str) -> Optional[EventFunctionReturnT]: # pylint: disable=unused-argument + """Handle error events. + + :param str data: The error event's data. + :rtype: Optional[EventFunctionReturnT] + """ + return None + + def on_done( + self, + ) -> Optional[EventFunctionReturnT]: + """Handle the completion of the stream.""" + return None + + def on_unhandled_event( + self, event_type: str, event_data: str # pylint: disable=unused-argument + ) -> Optional[EventFunctionReturnT]: + """Handle any unhandled event types. + + :param str event_type: The event type. + :param Any event_data: The event's data. 
+ """ + return None + + +class AsyncAssistantRunStream(Generic[BaseAsyncAssistantEventHandlerT]): + def __init__( + self, + response_iterator: AsyncIterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, BaseAsyncAssistantEventHandlerT], Awaitable[None]], + event_handler: BaseAsyncAssistantEventHandlerT, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.submit_tool_outputs = submit_tool_outputs + self.event_handler.initialize( + self.response_iterator, + cast(Callable[[ThreadRun, BaseAsyncAssistantEventHandler], Awaitable[None]], submit_tool_outputs), + ) + + async def __aenter__(self): + return self.event_handler + + async def __aexit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + result = close_method() + if asyncio.iscoroutine(result): + await result + + +class AssistantRunStream(Generic[BaseAssistantEventHandlerT]): + def __init__( + self, + response_iterator: Iterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, BaseAssistantEventHandlerT], None], + event_handler: BaseAssistantEventHandlerT, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.submit_tool_outputs = submit_tool_outputs + self.event_handler.initialize( + self.response_iterator, + cast(Callable[[ThreadRun, BaseAssistantEventHandler], None], submit_tool_outputs), + ) + + def __enter__(self): + return self.event_handler + + def __exit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + close_method() + + +class OpenAIPageableListOfThreadMessage(OpenAIPageableListOfThreadMessageGenerated): + + @property + def text_messages(self) -> List[MessageTextContent]: + """Returns all text message contents in the messages. 
+ + :rtype: List[MessageTextContent] + """ + texts = [content for msg in self.data for content in msg.text_messages] + return texts + + @property + def image_contents(self) -> List[MessageImageFileContent]: + """Returns all image file contents from image message contents in the messages. + + :rtype: List[MessageImageFileContent] + """ + return [content for msg in self.data for content in msg.image_contents] + + @property + def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: + """Returns all file citation annotations from text message annotations in the messages. + + :rtype: List[MessageTextFileCitationAnnotation] + """ + annotations = [annotation for msg in self.data for annotation in msg.file_citation_annotations] + return annotations + + @property + def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: + """Returns all file path annotations from text message annotations in the messages. + + :rtype: List[MessageTextFilePathAnnotation] + """ + annotations = [annotation for msg in self.data for annotation in msg.file_path_annotations] + return annotations + + def get_last_message_by_role(self, role: MessageRole) -> Optional[ThreadMessage]: + """Returns the last message from a sender in the specified role. + + :param role: The role of the sender. + :type role: MessageRole + + :return: The last message from a sender in the specified role. + :rtype: ~azure.ai.projects.models.ThreadMessage + """ + for msg in self.data: + if msg.role == role: + return msg + return None + + def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTextContent]: + """Returns the last text message from a sender in the specified role. + + :param role: The role of the sender. + :type role: MessageRole + + :return: The last text message from a sender in the specified role. 
+ :rtype: ~azure.ai.projects.models.MessageTextContent + """ + for msg in self.data: + if msg.role == role: + for content in msg.content: + if isinstance(content, MessageTextContent): + return content + return None + + +__all__: List[str] = [ + "AssistantEventHandler", + "AssistantRunStream", + "AsyncAssistantRunStream", + "AsyncFunctionTool", + "AsyncToolSet", + "AzureAISearchTool", + "AzureFunctionTool", + "BaseAsyncAssistantEventHandler", + "BaseAssistantEventHandler", + "CodeInterpreterTool", + "AsyncAssistantEventHandler", + "OpenAIPageableListOfThreadMessage", + "FileSearchTool", + "FunctionTool", + "OpenApiTool", + "BingGroundingTool", + "StreamEventData", + "SharepointTool", + "FabricTool", + "AzureAISearchTool", + "SASTokenCredential", + "Tool", + "ToolSet", + "BaseAsyncAssistantEventHandlerT", + "BaseAssistantEventHandlerT", + "ThreadMessage", + "MessageTextFileCitationAnnotation", + "MessageDeltaChunk", + "MessageAttachment", +] def patch_sdk(): diff --git a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env new file mode 100644 index 000000000000..faaf292ebf44 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env @@ -0,0 +1,25 @@ +# +# Environment variables required for running tests. +# +# All values should be empty by default. Fill them in locally before running live tests on your dev box, +# but do not commit these changes to the repository. +# + + +######################################################################################################################## +# Telemetry tests +# +# To run telemetry tests you need an AI Foundry project with a connected Application Insights resource. 
+# +AZURE_AI_ASSISTANTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_ASSISTANTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} + +######################################################################################################################## +# Agents tests +# +AZURE_AI_ASSISTANTS_AGENTS_TESTS_PROJECT_CONNECTION_STRING= +AZURE_AI_ASSISTANTS_AGENTS_TESTS_DATA_PATH= +AZURE_AI_ASSISTANTS_AGENTS_TESTS_STORAGE_QUEUE= +AZURE_AI_ASSISTANTS_AGENTS_TESTS_SEARCH_INDEX_NAME= +AZURE_AI_ASSISTANTS_AGENTS_TESTS_SEARCH_CONNECTION_NAME= + + diff --git a/sdk/ai/azure-ai-assistants/samples/assistant-5szzLs73bsbQ2k75xUGKv8_image_file.png b/sdk/ai/azure-ai-assistants/samples/assistant-5szzLs73bsbQ2k75xUGKv8_image_file.png new file mode 100644 index 0000000000000000000000000000000000000000..928fec58517aee4c8bc063b37949f8400224d95d GIT binary patch literal 162061 zcmd43hdY<;{|Bt1L5r-E5>jOEQA*jXY?;}c>`f_38JSU}viB%tN5hQFgh+{u5X#o` zy6)d|Jje0<51x+WKJM;reLmN9Ug!CKzt;N-y`n0&kDP&=goI?Dg1odk3CXT15)#s+ z-8=CoYbw)j_?MWQjINu8lck%diHik^iiz8GdnY%08&f6^3l~=#Cr1HpVQv9VCTlmh z>#pKFJP!Zg4{$rVSn+iHsr`cw*>hd~sw)Y}u?FIwZH^z^B}ulCkSIt?X?i8ieDT!P zm{>nQ@7eErdN(--2l?zaN9W5!zU2j#r4jobe#zOD+D_;BYUgN8=V;xpn&sjk=Wsko zO?|aQCm?X1#aMMG>jTn@!-dmRTCSoKVwa;On#DRc_eSY*(%!oJKfmPf&{KPo{m&oW zy{q}oT!iv}eoY=_s|x*}pOT;8{j4JWKflE9G&?f?&#x{qWsgJu^FdMVCMA#m=Y#IL z?T`8LKc6F==9)0W_CFuQoc!khf0tyI_Ulk>;J;(bb0c!suk$v%dGmW~(>Ez| zRd%1-Oh2Xn`rqKp%*+s5`Tt#=N@-Tw`|lo|7Cm!@qA`N;O_{^sL3;Yrp9`iYrJ19~ zM*d9ozM(VryZPjtZ^OrrR~+0=GIQ)xS64qsMHM9CG|a@!z2C;hhF3@^I6t3@LpyKx z<;#~79zUk@6v`%D{QH;oY_02rDqrvIC7FvCw{NXZ93QInKhlwMUJn1MuAy-dZU!4W7?hF-vXZ`5l;Na1wD3(`&|0MDF9z1w(BP?uh^N8=#&zocv zwA99A?iXxrIi5)R-@AKv_uIE`nS_LB-oJleR9t*BJbd5KmoI9?rZV;CNYxbP|Nhcn zn(9?r&|O;(H~%fvdffOp^E3Rd+qZ8s2@BJvNcvy&@DRp2n^;%`FZ^gty%+PJp5 z-I&F1ZE4Sm6DM{bI6z!-S9iCGrR7dvUti4}%{|AC9pmNWW1qE+j<_Eh8fD;mij9q# zg=NS1`1tkf*Dw5Ni5s1r-Os|3f0;h_5Y?eWzgK>nRr@aQC%#Wnk-V*~?V6+GemOZg z&8#ciXwGQg&d#=R)``2DrKz@F7_H0s{@JBFMYg>hw}XQf%6^o*e0e)F^Hh#b;eIPC 
zt5|l8o%o)E_ut8@sJv^8JWhG|aHv(slM}40qzVcO(Pqcbdfl+;&KP}Y98jNk^~J}! zz`*1>c|5**SjHq#=j~ftTdzFlj+K;@P|w;~Ti;##GyUe%C%W3&+Ujj=T}&2-iZU`X zib_g?YHH4%J$Fv6+|khO=f`ax9v+YKukE@1cjoHGQupN`yP7Tz35g>|L>zCYq#Q4K z@#21!xcEeO=4H`<4bk>wi92@=93NlT-$l~b*GKH#N{?UngdGN6x3wuN37uqScF-@n zwk_iL`GPpt=IJXRUszqn5*55Q3l9&+(;&06v(wDiqj~b=iFJ3zKHPl4U!U25QVP4i zJW6|450Ceqsd6UQu3h=*k}jb;hMQ?1{=m#E)g^IreOb=hI&19r#_D1xJ~urwaC1#) zKco0Gk)<$i()koUa66?TIgpQ1LZ_KKRTrKP2{?tD(>^JglU zmP?O%%J0UvxsTyTM4mf;f6wu7d9maCcl>;Gc3J3~H zm)iDT$y8+&bQ(U2zZDV}7x#WzL0a=sw$NwP35SjOHwSj_-@o%g%ouH`^!&oY#d60l zA4@Ehda^a-CP_BzdnunOyL6B; zyA>s<*zyB|w2TaUX`r???d)*P{VLBn)5Y-*D+{eWouzg<`#wd#9Bl}u*u7^@)LF;D zGOn{{&)zGRP0M@vQZOw&eH6>=wY4FJ4H6M|MyCNA6kF>~X{o!jjv9;O(5ssjt`j+D zzena=ym;~YXaiZZ*?|KGUOEhFr%uFL5#O`9JW?89WccvWBl7d-&u^?wq{yqPYOlSz zoFX}jqyEZom7kM~i_gBFH;kTddwhI6Lmn4p)WYmg#jP7RNSe)fczDX}VqFd&K72Pk zT*ls>v!S7Z_#*p(mwQU>KE0p${6ekJ=v-;C((vwoO-=GBa8_&HjICG%;&RYt(?d&!{y%6(sI$y@9ekl-zkqB zi#VfGa1FPRrC-inyRp#f7gKl7+{)_v*ZN!i)!qX5p*V3iF0Oq#g+}jsb9L0R)ld2@ zeh{(l%S&vT(AFNFV5FqHY4@qXWc}~2EcJA9e1_6X+g?Sp32i#;iHcBBMQo>O6;mHej zQIhgXN^hRa++V0UD9p=y1Lq4B^~dl=;L7|qEYh2FrHAu%|2892huwyPDdM_Q!mAu-vH_Ev&vwF^=Pz!(iEIor0umYv)BoMDqLcS$(lJYd8Pu*IV;=6Xo32 z_Fh^VtGT6R%f>l&_C5OLj_>Mk?JBnW#El!b%W098R;1Xy`@!MfrkzHc)JKno;l!Fu z^<=Niyy)mrA8vif^J7aTCbX(IN5R;PN;v?cKExD>LtUdPx!y>yW_y4#h;X0rSKovFtT zq1pP<)U+EXO8)ZYj&hcplI>gX>`pMNi7)1`L1wBclHs_ZujL~F-+hLq8G34bJ$v%?bu4`W7oIPI8=03=$hT-| zXo3$5SYDfGPfq8MqQf5p(a0Zr$fZw6=t_VSdiJg~g+u6~I{`4>>E=dr zF)@({ShdF<{@`%!+KqTF{pS{~@z%IJ)LYi%(zm93#1~Oh-^QjJt^2oiNU-VM`}fR} zlC_%t@ptc@&ScR~7I%*w5RsHThR#WCe*}+(Ln|k(=XZ-+gI#Y<+)z|r1xr=RS??h1 zKo%C3OAF;QGxi&Q2c3$_%XyZT-c&3+cGSmK>FLq5AE{*=@mmZZXpI;4oHa<4E-x(= z!jj!0`s|l4hVw@c9Xdn_#P;>;HG>KlGGI&|b<=G1bVcljgtotVd7N+2V}5^ceC59` z+||`Z8i<|CXz$?AS?)Bly@T?|k^6fNu{F6F@bdCr$kopOSmA2rZ9dW9mo7|GVjxJakw>9;Q{>|uBmGKi?08+vGF^0)hqAC4CRYB z@cuw-^(()>pB(h_^V6K7FLs+$uQ!uf)((y`-W7%0OFHLE>%X?7=Cy&Y)`_o9Z>OQ9 z4LRrgcXye}Bf&joWo3$tg|qAFMMb>I3A|F4(j}pxd(gS>s3wa)Gpg~yirqn-(bHoX 
zuJylvpN{9-+t5Qn+3hb%N{W2`*b>)?hpDNwDeO43`}pw`s;b+Qi2_z*Y?WAsa9sN{ z{1fCG3E%`MHj3@Pd-v|w@9xw6Mjdz1P6S}nuoL!ajy_4O=X+mrhQo(V=D*g{^IjwQ zJNqTK)0L7kj@q=C-gTlqPQgt=Ow6P`No@Z2?40KFA*)9v)VVB=ueYo3C6_iBsg&~@E zL@AD9k$dj9y1>AGoPK2;@Uz=LQ6My~w{Z2d*t#O-{ES!s*9qTCl{V#0}ZLLl7)c<9a z@OTZpQBY7oazn4wHe2vVsauJDqVE|C1xtW3#0$ok69KZryCy1KCnB&g*f3dF zo&|MTWGNXK9L-Qo=7{-Opi_7(U}bcdX12P_IK76327W`Pm7`g2mEs>#r&* z^BHoqazjASm<0q5eJV6=G!EPhMiIA59gvnzf28+OO-_(V=l&UwJC?;oFC z0$dHQ%vF?@*0kQgf4^>}8@sss6l?zzkUG?Rn#v)ihunLj9}5c$(+gTZxOUuo+M7;h zxU&MNi(xKd1?xh73^0cgJ4G|$XMCpp=Ms88Q__f|VnH`|?8>PK5j%SHXb}#A+phtw zqH|naDwf_yY|d!sy-t;jBpsaU%fFv2;mJ``R#jNY<1y2JLcnJYmnDi@QMaHDLAk8m zTC+=JUlkA#Ai8yUa(U61`@zE^j!9pguU&h3O*^OSTj0O-<0v3!`uoZp^l=3;j?eWU z-MqOi3jNU2+h-9snIPol17|ou6{7lkdc7(ePAXW$yF{t zT6F6IYRsD-8sHIJxOg#0(IVS+W92tLqnFdrt7o0Av>|T0C_(M~{(iQ~GOQ}QcHZ5S zirtamRlwAwgA78pw@)g?XH~P>;L?6i7uJy%ydcUoHqynD*>Yj@CsvI1Qc$#ddWU>- znd~}Q{~b)|X~5=MF!$vc0n>w&lN5x=Jp{xMfz#BaOqbvO*6_}L#;=o;?A}RnalzOLx%nH_ z=tC!8^p$a>p1ywzjH}J3T}@4mT;gb+aosV~V%-H@d_KN4c$RZ=b~d8etdYD7kf9~) zfsP0^P1}4%-i|+_!!O==3sO32?@nreLOTWQYSxM{g5>=F%hLJ=Gx=zZ1%0? 
zx_>m4-{oIeelXAQ0qb>t7Tp6m;?lpZO|ro<$1nGmmXE` z_YaWV)a$@Y6SGSh1g&qN?a^cp6&O~1L+1?=dJWDL&QrIc{-g7ocRGsg=$kuUf2L@t zsHm>l*zAFj(Nkz#C;y{m5rPY$tzZZJ{{6eCsOUl>zr__uQLid0vi}%VNcykc-gV#@ z)7i5}0Zw`O`7Z*PcLMnPMG}QBSEukG1H-*nuSCH^-V6;LMTfuc>?{R(4uqW(B~C|I ztQ+>=!NF!cs9cR}nQdGdI5tGEOe} zI4UYCnB&aJlVkuY(z?3z#MJ}DfedhzzBdH>Iwt9N7i$1WRR8uZ>EFM9vviA@ZTs@B z{!F)95dNw*TJ5(=#bZ=`0b~$>7-GW?;t~oB_03pfp+L+NG7o+PkE*GqB?!WpxZ6(> zU*C1+hjkz}^=o%Y=6{usC6#*ko{ac9vIAxS5>HIP>clK`0X`m8PPj zqk~?*zHmDvgc(QH($X?ZGn}2Z;I@)FWU&=5yy5QIyfrw&QD0ce5)zb}Pf4!L+L?+Y03YXCZK-?8K5 zryr;sfN65KcJAw0^L-Ycz?qM(#`d!(AwE6?s!N>Q@3r-HiU7GYS~;wTE&74!>tFVx zP$h^uKOYeP3LL1wOVJ8k$p;=(%JYSpM=gg!V7w<+=d$HX4|8)?T*m2sT2LRY-^rOc|Z<;th&2w-*L(6rn*TG{Wm~Z4-yhie=dV2=l4lM z#~hWF=wlBc$S`HzYJ#-K3ptIsS1nmGVB@rx!O)0IXJ;pL7g>Azys@d};D?)4RaLTe`mr>i`KK;FA(YHLd-g!FyNJ{Fb#}I( z*iI7X1W?cCy%`4t**xjPX9-@{or|rU{sw-AUdB3fJf`{`943%>)T%FrzQ3W50a4EyHy$n)e+?!2t-IKv||m(L8wK&>q1 zxAaqu5L|H%MBRQKgrt7*#eUra+@{IW&rV!SVT(rkHZi+(s8RcHJ(sZNL;i#e!c)6) zu_R-YXeq7R{C>ZqOW-vLYMJQUIcc$T>|7&(%E2t68^aldqTAXJf}?Qhmwg5Oiy5Sl ztTSK6Qz!;6A|)eZKU#my&hCC+zW(?7Jhi9iGq~z>$(X$Md;jp4tau-JoM)vSdSHz5 zkYZrOqBxZCT6cEiTE8ec357uJG=)zhI#Rz05ozOh<8e{vZ_>5N$8)sv@1d?<`}+DO z$H{v%{#)RDzt`vLz5|g3*&K7NsHkA~Q*CK!Ym2>lz67^p4IL(`g)0xGjv$a=!5X65 z677c|rq>O{AhB z!SOG$>1L~_(;&HKWp#%THgGA5r)p^YJ2Epe2rUfkBcGgM^7<>igQzQz9Q9?fMy#m~{z-wk}`<(ZsI zMMJQw*^fmL^_piQG*<{E977!tTh93`y+-*mN=x&D_=`gm{8+?keEq7hkdO=n>yMCf zo9*{h2>NryK6-SI(68hokI6uOZ`g#;_jP*uKBS_iO-60$&Kfl^aq!~%J9h17;^0sm z7j<=Ar23~Xqsd_HaO@rWI!MT2=yBSwW{tq22x5%u{D#FRG(aG&d0feJJa&GK+dz7^ zAe&Jf)4zK48TN-QdlghVe!4210t2;*Brr#_k58rg%bik&q(vTdkFrC#0pEH9hC?A# zxrjX|GdEmQ4Ee_I(3cCUJncZ`&jSB#d2Or|w@kFyfymN>L310|COI{T7XqD{Ha$4e zU+XUs6cj{$mK$~tzKPH;im%mguL#|5c#w|n4l1Q7us1nAvi-wXKnq^qzxIhXPbjF+ zfY72%P|b+{sk$o|jlvYlfnuH0$>61lOL0&7|7|QDKs^jXjm#1JqOYk*EgN|(92zWy z-(9P0V(943GyQx^b0f!bbsB3&HAT^6WZ){zV_(pTw*14>)~#?kxwbrK{4@KCD;XIX zaTWZ4vt}K9^>aKPp9x?p7RigFX69V#}D{r4p85Jhdbw^<Z6I`#MS98| zW>nI*34CVP0bGMWaexGhdd$#6dS;{N%4P@9)qP>PZ|7e68z8pS^p7Ktoohfhn14rS 
zJhm>8r~{dgAfs(h&K-fku5Me|x3c18@ap=u^BeQG8gAPv97Mx}1VQlb)mje|6O)y# z^|@l}PUhq)$EIm?T0)-27LS`-UjUX^m4{ka7bi-^Z+*rt{RqiD&!B>TgxK}Z zgv(!CFp*paYD*AvWs?nO2*xv@SrH@76R0a+;FjOY>T0%MZ3Fr4-50hg(m=9x$o;g2Jd&K ze2_um5HPv^R+iQru&}iJhAkE30N8b9(Crh1Z2r`|MUM>;Q&0$-Sdt%Hte37 z&mhjjS0a?abf=y87-9SV8@RVt2VD_C!P{`{h8AUHWK2LQU%5}mM|a3ux5jf3^yg{R zMRc(D@7YG8oL7ZD>g(#h?(5SYy*mz=U;Ue2uxv4ZXL6Ha<>iVOU|vdIdx8B<^KZf|OO4P01see`1TeI3JMrxD4vWC@*7&lG^=p`G1msHQ*(!H(B_ zU>@N6g1|#|mpy*`IJf4Rx=)MhQ?I_S1~ooH_+IYv9TA`_4N(U>o}7CFU`l!Ps6p1^ z&Rx6AAnyh(_XAsj8@U8p!5F+*ShuCOkT_W5E25pJdqTb)=Y~Bj91q$A96C714s_+e zMZRHfV)AT$U<`!HSpMa#rI_hj~VS!^t=sfpt)k`G1f#Go-dYXL);zuy7{o{nKNgCQ#uLQ1lXdO za!H>ro9D^-KyEL4Sh@>M5|eM;;fq+vKClsY{*bVQ-k*FFs74`x;a_i7?YgM}rj8YoO^%;TsGV~etUP|>Nwx0^Pyfy$v z^Necr&Mdzr=mUg?A6xMZ^z={{WoMlNKq!Ht$zZspcl??8ETC{l&)a40Dz%}Rb?%PW zs1Ljj1A@W7q(QS>teEbFhrc7dm@fA#Msp)|jS#9&uI3rwuyBOapJM*=aX@f<+qP{a zE)4qc4LBE<6B6i;p7lBi9T)N*;h_))38D=fbAkUZSzDhDx^>GLnBHx6=&F7%8#9MC z0vEA1M+;zR>va&`w-i4sNFI252X{gojcs8dbU02K6B0~x&$Q=u4zGoxQ(s`hMoA(oBx4jI4 zy8-8eU?`vrUA%P3d8&sUcSVt%oh^F(>y5EL9FzP40v47mS1b)bWM!S^gkF20UBwDV zfyf-dc%p+!3y1t}SlC-QUZjobX=&F$f%rv2aKZK1=ABq%U0vM^oU|+uc&yr!>LH4g zrM?4!A_v5SYA>5%lb&+_wqu9dH{?MuUU?vfUxr_1l4Zr0tp3xJXIC6U##i8XFr!nvi0pIsst-_3u4I0Mg_vmm`WmfoH7g zx#R%|$J>+dtKaiX<#Xcnrm$MUuH3)|k#q`co=YL25 z5MDZj`kWrZ!TkuI4_X@)qCHu9r8(CWM{qGGL`CTdX%gG+Wb-7{2*Q{FEYkMQe~nWp zZ+VHZun2Ri*59|;HwB^}D#0}%tDuohU=LIW23Q+0iW3?k=xoa)fswe_J!K-cJ-fl9 z?>v0?a7-W}IXN7wxU#-Z2I>pe&O@8BSuo=F3?P%x+qFf97e+DZZ7ERC8+ zmiiC`{2-D7Kvkpf!;if3TP=}|*_08&gKM7Ck6}~cHE%iy7e%3VE;Tik*t?+8TG#zY zCnip<&g6nwM2?kKyZpHKb*%~>3O|d}qWZW9A087-1R=C`(kS`cIAp{l35qB|;EYK1 zY^{7hAk|<-_&mU|HI=JYc0xkaU=ac+4n zDnm4u=L}`oR35ReaPNVIhHZ3DP%FTQ2(%1WS1?URCna%S4R{JyFw3~EcHp7rX%6pX zh4jK->4m{0ABL7EROX=)(h`n#<`JHpq3OtK@Ls){X@-gDS*EV1QafI~dbPU@osR&S z{dIwL=7W$@d)5*=g}qsB^d7cCp~SNdil8P77jN$X<|MKP&F-?ZC8`=3%Iq$N_j7Or z_(y7>37sM|9^$w`SzriVMyRf3$XHxl+!R>Zx;KXg8MBikHvwT@18@^dS`pCE($avC z0@RlXXxL-E1*k5-k6Pp6g{JTB5xA1@S#YYGoxFiVK$;0Vn)u{oXN5zbE+QJ&c;Ng7 
zQ}<{cwg|!x$AKAlfeo5lSP;=Js8#iVuB?oQ@Z^iko2hWH6yo`>^mUy7$63*yrXPIc=Ok;YH7XarRK|!RGFwkujFcTRnR>Fse^aEQ<|CbeFWou7<4Cjqz zt}k%?mk7=bpyn`&v$^b3$US>eW@99}dU|_lh@uOKPB5V6C2xpBD<~;idJ?n9tdO{H z=U@w+S$u?Z-|*psisL2g8lOKO@sPJ4@q}+TIyH3<$~h5X35t?NKBWPVY)trqm#?o< zU;18haw@^+`29o`+l!1b;XZaf-+q{SFx6yn0el`ax>7Fggy_vym7W<)6)K$|U=^W! zXHS$oDH0qoi?<=}1__ZF{`I}H=`YF%0~=|JAcvN`&?Mg-b~ZMl2<9~%Wx?Xr`gw;j z2r2$&aPn*LMjjQT!j~K}co8biOq3WxJ?hIpLSRc#N4SqfMvH(XQ%}mdvE;Nz(NLzD z3s;kLhC9;GKbSrnR=8NeK)v8F5uFL96x7%iPTBJ0A6!V?qH8yqIvw!!uvJtnWsxFe zX`UX+P&v|;a^5(4LrFoQzS?{7>%>H6*WI?E_aBKM1AHI;K!8#iEO#t7&-t!8cSlFZ z)pqHpjzDx^gz4AxD_njQnY`JVIOmB1uW+f2Xf@Nu0fbdg$MYT#GH0p6^-u%Cdb*{y zamqXHqf-!80NRQPx>iv3F3-8)a6}QXAqg0;0wvpA{CGlQ;xqU}M8Hq---c_mr#0eU zL=qL6*BL{dum1uT{r&ui4V$Re7ZDLb&u_k)NCLt<-6K9rG*n14P33C60|N|jEiM)s z)hsypm6w&t;=exTT@~1SRP-P$TWNsU8%h1=+^1q&>TXrkr6SAL0Nv6&BU2Df3`G&l zMUT_q)s-TTB|;4bLhTufvRpd+pu0I6&m|alPr3@hUWuBGDs!wVT!U2_9VZ`)ykzszdWTo;YDC%C666q%ZG8`4z<0G zii;yaDf-QuzCKz!UKz;$tSMqz;?J`6f##pGFq4Rii9JKv3UXMOU8hi#&Q@HjJzvg} zd*{v_y^bAY)pK1+v{?*;kDlA_qGZbKb{sbMgdYJDi)C?ZlH8$`=ZA_%Bhnt&&%uft zj}j9Jfi>vr0Fsh-pnta*_FZ^9Q0|1l-hspj4nl?sBBQ`+5+Py&I00YEet0BAc1w_) zOz=vlRNfKd;@wO-hHyi~9 zB%~hr7FyykVZ%OCOO;*ucAw|^L_1@m#r|FhR*8wBbG7TL4}J>s^EaT7hT$_P{t5{S z#wOJvRs_nNVJ=&UluX-WkpsK-AH8Bj`(ns<9JgQ!;KeV-gNPZle5v#aY3bn1(%A?g z_gxRVvv@8KBZjfFlt|XTd9wp9-{{gKc=Bq)orM)WUmkGGjT`$63 z^tfyC3=u^|MULxV(xGbZW)KM3&v*|yBTavnEu>Wl2t;}!+5t*Klti*#0MZr3$g#x9 z9R>`(lpeykW5*6co(_7yIus&j$bbFog^JMG>ZIgkKG$*O`Z8d)i7HiZNNYqQyS@y8 z7CKRZ3-I864?5Mw^~-wMps76w>uQ5fs|qP-3R~d>)m!doo3dMb`-fR_f$GAz1TVV?Wlb~Typaje#q}4(FT3_ zlt^dt!&|p@5LlvLyWd^)%a<>NToBwHxJ zlBHTM&wNh3&aql8&`N*!aP~{Tgs3RWO1H^qr?b@M(1V*)+$OYYeB2X1ySbTgUGpA? 
zmV;~6L8s^}G(KNn7L+m3YpERX0kdZXX)ZDa6DBJ?1MkqOjj>-|J4hg2YKdxuyItfF zan4i)f}`5dhW08yO0_3g4eh(_k&x2p$ahG9KU!!AbsUFF4lYG!U0@)mQ8hgsx4{*g z3U2q69}kVqEz@lW%ktX-7A%{tct#sBE^m^p!l^@E?f=qK4|$A_p@3;mSCA`J-nxsP z)X-rFm-R}itrp_P9ZB3#j>av9IWoyTj8=r>^Kuds39)y5RRIejnX=obv?8&Jc?IG~ z9}igB26N~FsvXhnX9mkJJL(%tA@$6eAeD!mv@)5QLY|Zin7tRxhPI9MFLW2bhgFr= zM}yj^C~>n5h_%Pg#=-&(hwOBw)X60Ie4vr#o>bJ=Cf3SyA;%$lM9Qe_?}vqP_Do1D z`*Yd6xOZ@pCEuUiRqphU1t!hU3p{qTFxC?xZ)O6ryvvSB#c2ej}2?v>5@UJ<3X;;WHmMlS(j{ZYir#pJ#v?e zV}W4jpdsa?d-j5=zJAx|fdNHJOLlpH67y&#^bCzx|9~|~_dQ2SgI>ZTPkn;GrlB?l z&6h7+AbB5ln5Db_$M^3n0(yFsNSSRv@@f&r@PnrUp{1pzU8()V`Gl8KUq(U4&BV{I zKDij<2a}4wxsr&Wp;Ia{o=1!+G2t`RY(;k>2Uzs|UZs48`O^>V+#p|MssC;!u?Ei8 z$Vm|(M1U(frx1lbzY_xV_T@7eXc)S^0^$?*NVOIqH)y{VjxHhtlP8 zi1i+k4(IKbp4E(B7;{+aFnnC~^17?5hR@P{|s%;dH$ubqhHf=f!cA`mzm6Ls01m zWjStGD_CTX#Z4l5Q(9mqfd%5cS|UBU{te?Q8wj$kOyw4S%-0to0y`g@RSv;zy$dm- z#mxZL#XA@Y+%E!$Nwv8hhL)9#Z~ZtWD5&XZKQ~x@K)=kMh{ckDy(Ze}A|=d;1$^sv zFZz=dYuzWk%1e=Tb#>Z(7Ja0X9>-KRhUrTE6&^uhRknJG#f`%u0n7*JI5To`Y(Y_8 zo+vFuNDj$9MzE_=bK&|Rgq&4*S#GAMp9KBQbqnf!`&K$5e;fdUe0ik8HS?=83~^q> z!_(W-RbXd5{OXKK7Wbq$jW9Vr@#PFR5-MNPzRk3CU%R!jD(pk1GGx=l4af=HMkhAy}0!R85TU~}uC zk^U=y8AYL@iC!>TBG4cGu~i~HoacANQ+aoOcJlR`)=zIQ{vLEoYk zbg2#4)EliF=&}p-z-&XhF5#!P`BV?Mq1{#_i(}^J_vo8lq@<*~W&3_`=$CQUu8to` z@6hW{;e({(x;x`Y{SGt@4)v{wdlHZ3 zM}%8(Sy{aDj^l?8-D+xT@_U%jc@m^bL)_aMhAAAVNabCar6n9P$e|KSdnGg?q z#hD!%ZtoF=NN-O42UYhvG=Ri(qkGQ*Y^=M|_7GeiBQ2T>jM@+S`*m?j$Ow+vm9C^^ z#KPD#X?~qJnZF>;-GvQFoK!^1D?V-EAV*hu*g1b$Rw{IUoPr^roU?-&7=@|*x3x|s z@T`wr%X~ zFU+6uLik{g%iT|)3*-)o5F{4v%$HR@pbS_egcr(d*+w6B{JiAIbR4SBp4vG$MtKWP zA^855c&;;(O`b4SF)PvP_fJk%RE@%fxziW@1$mfyNIYZ0+YL|-7v`X@w4ReJr%T9D*w4&t$mTR<;qJK> zrSpVCL$chaTQ+?Wuxvlr2o(dcAxg6>BQ!(CIbt@xP1oI1DWb}_Z|vU zFfMER-X-hY>a-*BP$M#3Zpom%Mrfe;^9HdTBXIf znK$KBi!2)S4sPATC0IWkfpWx9$Fpb8PgMklqlx4*}(FY@|rnmCblU$aISKT643 z)w^12dy8Y_6!aZLN-&R-Z9WLnJ!a%-Edk#S4g^roP9C1Zi;OAd-$@>H+Y~?b4MAhw zBdce}>^uSBjiU3P^yUC(TQ*ZQS##|P*IlHyh&cy^7`Fcewt!7>Vu*=gT%rqq0f`9l 
zfN+Qw{AF}?6WSIIlko`(zJ-}V&0U}Nv(`8{wd%*0<;MvLS;zna`4qt=j?4V<=_QU5 z#cUnp(WB&Ho5-Hj4-V?{v)DL_RVK{DM+m6wJ$=S@J^Cym>hqdxuU(&&Xf6Z6CgRYS z5L39PStgE&8YC6S3@hF0f#{uof0Kc(xeNLx=XFhA&~GB=2eNKbA4EpJjIoo0XSFT0 zPkUp)ij~pzJR*1q3%4rw^7wP>>knQmeP=TOZ;x!`540hU4=%gL+V)$3ZNTwqE<*?{ zJ?6_mwH4d*ARDgFza}R`iF6n*??LSK8;};qoV+t*wO-cRNK2EF6qWC#oTTxtF{KdT zvI-cB%T#6-eOS_cM(KEr$Y(C6wJGfKMEA?E19DxQTrTvRox=bjh_K>0Id|4d0^4q^ zIO8Gj#C%fxY!@GNVo7kRz)y^DLq2xlP)pzd56`!oq zLelLXo~a1{1V_yrD6&qr)C~R=N<2J1Ig--gw=+jGdpn|-QqfzmA{9?oadB~J zJt{Nw>8Z*Ysp|SfbFbEuhP}tg|Ry zr*sQA5VyqH?rqAuibqTcn1u1gG$FYF3D9d{Y!9)sF>OK-Z`Ty}RjUWhzv^y+r_XCKDg=c8&xxpJWECpvgbu+pUQhNpBjHsxMY zRt|frwvPnn>$8Q5vCzm!ot(^!oP8l(1j{BScU)3ZIHF@>+?hY;Pl;D`v0SEhCw<_U zR8mxQTJw#vLQ+UiPY*Q~r0B(qTbT4EQtL#TNifcfTkE^lntN$+F}voia5v^>S-(|F z3ZLaPKQchE3oE?1k4AF^Fo+0M0kQWbuDfWBe0mOuACuETb7f0Ud5|x=wRYtj z1u>CR#D&%k%Z-GF%>l z!$oNAK^C{moJI;3)TM(>933Mdr=;R!5Mnw%KmYZu4Q~S0v315`ngBB!yJf>@utgj)p4*Va+mW#TeE;UPf3Q5sXjL(yGG%Xf+BS)unJSdm zri-;LNlc^U7s6YFgC43PN;ts~ARxs41Q#G-f{2gfa;L3}=|cNn{LIpW$)gXFlexP0 zzk=XU+M_nS`E02omhnfP!_=$D7|$e*Pi+5R;r#-ZPq;~hHH=*O57lzMfDNyTMrdw_ zV2T_>k%MoNfk_KZ)26B{kgRMyC)!0-!vv)`n=@Jt*1OD)EdMaPmKz;QyvxwzO>CLFLaxmZ+L5<7NKzmh3-sEful+Gz_tr(0^0qb;`C!NDUK46DatpG@9OPz%U7*Dz70D3pXE{2hM|A)oEn zjietU0jodg!cWcOOb0`$(&zUPgkJ?M@AX|umL8ucLG(wC5ao*~wM4iB1M(N=mw~G2 za5dU%RWMd$`%5hD>rJ1YtNjOIIrx!`3Z=I|F133FIpj8kah}4b$ik=(!KlDa5pUgl zq1}lXK8J^GG&Aj?h~(?S7tY0eb3X|D#X8_*yb^BmmOFWh1;!Sqx7sk5@qa!U-xGGK)(bPr92WNK{@7 zrjXLT?$RKqiF5OYZS7+9`J=G+v~$k=3`c60FL*lxTc?AmX%Mzd1J)h}&Yjd$mc0CY z^j(gakCWi+08yEp;!{+UrDNnrj(DG!lh$&uk5kbwzjt* zE5a#EgW5gx%4;fMT?H$px)~-uq%xfFVg~wC$`uLp`E6R`u#t*11qEba&!_BAFKJ=7LG{_m|Iarv2T&J?FtE zskAJGVIV2^HW~dfD>)D7tS&z}tEOL~59gniHSM)4?K@<02o0$DJMM_(^y%G*2&hea z3+6*TBakLUu~Yp4pizk(HEe@?f?tGXrPEaOpzGfN^&#{U`{C-tgvAc*dj;3BH1qio zh;)pvva<4C*yW^5R;=6bij=g=Vd3FwcnQEzU7&HvudU5>M8Py#Pd~!gz0_wCqO`}! 
zcAq?XvYC~Ig89p&!tl$h3uW-DWGrjCqOB3k2x`Q@#cRx@#Xi~6>dn?sl9+t*HP}#J z`SCQ#^!CY)gi0eF!vvSQG!30LrQ+{aLF}|LC&&80 z1`%k@{)&WDF>+HF$5zN*gwmL0P#&$gCX{1XCA_h*@zTW+BSSjEo$Id(L@Xd=UUV zw?^`j;t*2%M~P^%n>mrbK{kPil$1V^BTy1?MB2pbKuOvA%sqq2X}*q)-3ISzF00n~ zK-UY}Yo0Oh*kAN)prs=hsf+|+hey}4Z-F^}gC6#?EB(G~`_+TqjM6=$AhQW)y-NmK zdt)*D_8Lmh$EaV(lG{FZK)^tETu<$b^EiZ0NZPw4k3&j$XmhKR47Wvxz4KNCm&g|=n^ zzeUSe>|_iOG@=H-{X}>=_)KPNjrb)& zM1+vAcY4)ptEW9x-X^;3x&@&69rtc~J~HDf;)gJ`i&TdHvhq%GS?nq&!AS#D@pD{E^xn(y)G2m?PZ zvN;!-9sc$Hfq|AK#{+xz+>D7qwxp{JtaSHqE==YY!y`&j#C()nNbH-gE-EN?7ZJra zAH;fS>6sz4d}(%VX)O=$G(hwx_U&U`PVOE-4|ru%XSF?kNvJEY3B^9~P(KVz5&ehC zwRydOYY&PsOrueU$R}h?p%zF4gg8OFetP4%-mcg z!q*%}!_;+&ql43Ykdbj8(*(o2|Mz7a)84Tsoid7|d3|+}(XyEBK<3@}tNgzppqFpx zA(Zwnii@S7n%~!S41gm*U^>6eKcAukzwBv-lI@?=_4ND3EKN)I}Rd|x}J|m&UEga zVS3`Jcc#T{o5nUZkt|)i^o|p+)5>vnak)q+HG_}@?|_6-XAj|{z7f)>1DvoR@OH%? zV>R_x-eU_}Fr?9Zd95@eIIbcetN9-T$YGm6S}<=x3_OA?Qpv2uz=!K2Xj8V^1~fSG zb2M%?7LJbGF>cYvu3m+DMNKHRMft4xgE_e=;eag^$DqEouNYvWMh4iW45c(jaCm!x$yBi+&*j5zSgxsxo|9dvf`y zC^0}bE`nfp41a4xHrRwAKZ^jCt1#Z8bD)g;}Y9V*Cw zko~Z;?Rk|!AiZ&McWgG7%nt2$5CSYL_i4T~&lXq4(Ar172|^k(&WtDR8j%CpkE=>? 
z8%$seyj27HB@$wYkW1}zBZ^+U!Q$W!i2WSphk>zlECp+pYD6g~BTV73HMiI|1e^c9!ZDk7o@VT z=>tIs4QE}YCQ1_!JJ}#ozQhzYL;!>vAQ=-4i$4&UffDcKIvI)KUqXpaZ&k&l{9&&2 zW*e#dg8)gC2M<17+Cn<-;KqI=G?2Zbby?&4IrMyUX9N@{lXu{+cU@lK@I>2e=v!I z?)u;?zWW^*xu8oyP}8)*%;>1vUZE%p{Ay-@RXDg<7ZfSH?% zlg?r8=~tnHT48WrUY_{?Rr=gbiV|ee>%XH~m{3d* zaXLmoW=syKTaI8goHYkU6gSQs(C>pLn#WcnK4{`wAy&|fmfs;?hLP;QM7<6tO>QZvSoF!qiB#(Qs5sEkJ!HO2nV0`InQ#;vrwGnE zyY})P;QDa@Spa2%KNW6bZ(5Kx1KjZ$)@<%HQ^U3Af z!Nx|p*)4cjmvAD;1_5Msen#+K*m5vKr>*$<8xNy*SW1EM)5WgkSDv#&^|;U6o?_$@ z;uAj?yc}tyjPuaD>`|y+zMn`Q;#5E?*;lN~xqA5+CYobmxnS6v2phL_lT%T~(Ly4R zJIr?&rVesE29w!7#4Ex&B-fS4Lkgj1^8Y`aeFt36>-+vEgp+kpA`&UmB%y6ZKFMl` zG$^61ElrWgNFNQQK~gFj(q2d^qqO(lLsO-a{@0UL=leb9{J#HQ$C1Woyx-4rKlgoI z*L~gDrixI}vZ8nFL)=S(E+AoNf#VHF1>7nae9CpOv>le;B|T4w`P?^k5U|i1(<>j zKxiDVb;1`4+HeKI&?F8Q9X@_&;9&aVd7=VH=bTO^XcVo81=#7=&5+#-DHiQCvan#b zwnMEr6L(qQ4a@onP_hm)DF`$;qFB(~J-pHaB)Zr9KwzBt*Ye%~IV9dDtGuQ)-YCZz z7h36B=8O({O&nUSHEDJi3%XG1sC+!@g#ZJDVu`SC19AZ^(Q`3|HEKbJlUr#y{UZ&iG0uT*oPkWZOtzV2ogQR9n$30P?yc3G_Sb~$))iMR;8N_`wYjrEs z_}WAyjFO9gYshCbN6dvBln5+BDiO{Y-fr5uRr*tTjJu>-4b`PG5#66MHAWdb(COiV zNUzm(4FJ9)%H-?@)S_Mk=fup~6&g+AbW09IriQFtYyoJHG)6ZXLBdKr&?F_%>`wZ0 zaScfi9x;)8)IClV*kpB%9;PTSYkK00lW_?u)<5;EdTQwi421p3t@s8JY$M1-ulZfX zA`?BYxbd&7v+9ct*?IwAUx^B_6NC>064!k9# z2K66GB$?aW1E$8>|`H^x-p^zz5K4S?ft5%oRBTY5P^(-CG^H)!NZE)`u zl?C}=0tK4LKW@z^vu2aGZ(D9fp?1}b~;i3HgwnBzuAr*#Rc0B$S;;5P&(t1?v0g3-xW3q zF}s57$pHdNEX4^gE+~P1dvRqyx%Wq-${~T{v9giHH@V?Fb_s7`g{bLl{5a{TRDOPT zb;4|yt5=Dv9&OBaS(OyD7Cn7owtqR?KS*07`Uz?JI5)_h%>xflXi5Mg3qK#o&C1%2 zo-t(iSMW9oLu*`60M+kKiGy0mkse@)mPVg(IaPst{ZF`<;X*$|0YM9t!7C)rg#-)` zbdE&D4jTixwzW@Dm_~w_=U9o1iyRKH5L^PxR#09fDIo#mT+-u%iXC{(7XgIVgEsKc zBi?VM9S9lX0^}=F==XF5gNxWz50;eXx}I$Dj}I=L@D5Ebuy2bdHyvtQI3?cEL*M3d zA}%LJND%hX?%u?8zz;;iYR>EMHqukwcK&eXkA{RD4pz_^qJ7~a5G|l>^fflf5ui0b zZls_{%xg%yCODjZEXIo$Eh5}F0ZDMyZijs<%rWheYbZxbS zxH$2@B8c?{hc-lfVh%?-l}Sa0Lzj5nacn^&AAniXhL5f^CekK;;gok+7!U5eBck}R zXP-dN6Fo1^VG}vSt 
zp2IC6a2^h&EK|9aTl9{mcDxxvXE{n2+G{I(7cj;}^xjW{L>OG-8hfA1xi~nC&^<=2 zmj+RTduJRxR-^^5A5tXG=y=73b%tFj4Iqz@-T)wHi|P_nLvVq_t1w(W?G| zEP4_(c#p@P(zBk!+JjAKB^p=#o7p8hTp;#qJeQ3xU&h&@)3K z34ueas#tMP9J=*!ETN(cx~ns4N7^FJ0yHG&(~05$>~EPpxx_i=^bGaG(vQKMeVAa! zI|gwvTJb_$-+7$I?SlCC1ggZ{-0MhWb79LwV>gtc)7aS90E)9=3tYT(>C%Z~HwXs{ zkPuw;D;&S4XbrgE1z9G145(%Z;k0y{ZF23%SG;Kgo4{Uel5rbN_a$&Fa!cVI!T6lH z5w#5~@iNqxWTJ=|B@zcxlQQ% zSz5aLh|`ckl_+6*&V3*SJOOJ_TeKF2EC!J^2mN*6s{?_`)T^MBraScpmu{K9)kS}Xn{e}a196SLmb@1h9=MkghVhg0Xg@j=j2O!Mj$)TR$VQ5 zCM9}6cS25qf6m^X!~BKDxsP}uP(%t08a+FSEh^rjD>zx8_8U>#Uw8*!N1VW!#4Dqq z;T!NKV%=x&b7clPD8Xtqiv`5v>VH9CWje4X{01^7(w{&yJlG)|H<^(vz?M--xJVnz z+U=Vp8#XW)lqmEDWcO3i7EHQKba#2(z54=Bg_ej6^M`kIQ6iviF8PF1IfCsuSkx^A zy9LQUC^5l-=g*&$TxcV-pYAzlPM^MlC7_A@MAIkTcEJ29$6((4;7+yy1#KAf!Gw~A zr0iJma3rk-4&!Nc1JUeAxXSv1FyU^lB{ru)_o4sd1db1fw!8i2dw5&>E5nooyh3th z2uIw2jFQ%U3-OIm%w4<-aL)xcW*e+JQ+yM!y(T2hw3#)IjGzB^$mq8~g@$E}k zbC1X|khJXNJABv&V#i20A`lkc;YuD}5-`+b8?^32m=EY2q8*>^Ma&R(3d+B3E&?ow z)cTJ-I%u5L08~WmP5_UcKws^-wi#IJkP3>|n7GY^E8{%U*hajjotzqQ%FVdlKZ@M= z(K*vIsDd_H_BC3+8|>+M0XzaN_;lH^C};57EAT#?93AN)?QS|yA`C#9!ey3i0NE|> za1g=2HRb^2#b-v0 z0fHfapuaTFc^Ay1-fwn8osDF_F?Os`3vv-Guf}`UIYoeSh~Et0i|D}jSTQdXZsLxj zX8=}N>}LEX=<5Kox&ig!4zV4`JxBpax&7P_K3E|_@AmPVsmxJ?LmgyPugAX)ZKGN_ z9_95gb$+YQrug|DX%PWo7l25{UE+ULX3104JmkQnixfvCY4+jBdpZ|43?~)(KutdE zXV15SsTy(017XFrtcimUI3o5DTq6K-(H4sDrkYL{S>sv)MksoV0pu~2ukY^Vc!l1< zUKG_~Liy*Q96Oq1&IkLA#%~h1N)Sp7S4#ob=TYHN@P*GJIsDxO)YX~|fp#D?c!mT+ zQePaQuHL#QjC8a=={?S}jDP2Ze^17%vw^jOQ8Z zc9zTqZAh3Z0vNbwUD>&+CFl!aO@TfORikByF&@0f*e&!kaDh2EII{FBpTJrQ?VXpQ zl-7Ret)m3#Jc8>iz(enbqW#b~jQs%5JapcpjgAR9JO4R}VYKShhDD^(mh&K- zU~!0j)?he;&8JryNry9fiQYk^cBLEO#3bF4_hM7Ou0Y*KgOvdJfz>-xng|_lV;|>7`W-xnl z=g#7G6n3?n&)`_W2jkjApsX9w9svd~L8@i}?vp020%_^i)6;A0x>FOgih|R2A@D>(!%N-~blH zBM8^ob94yN79OrG_gZ7o#a|lhp@gi1JU(=CZ^x_*Mm+*LUOS{l6q)fAB42>;|47%E zn!)Jf9k;Royd$Qp zsC485-z>p{l9K^d^tJWaVBtP-}{=nb)Pds;(Kq2uO z84a5WHP6`^2ea#;m?YMk$Le-D$)^$WBV)Gu^~kkf{@#*`x`(KEGTKx8S`M*EXN3WG}@Chkg%87IZ}rX9lF?Y 
zz#FR>u=rBpbE~|CwB=e`q+XEH0ZbI>)dc$U!-4R&x3_ZroweT1RGZWK`Zv*-M^IRr zKEMxBXKU5(@Li)}QnD&Xj6Dw|Fs@8tR`+q7!Fixo9HRx-%VAsuS>yoR;*Faah!_s% z>`UGa!$u}1yHNM!VHfzu+tM3%Spo68f_$* z1ZNQJuwASQ)`#eS+XY17dWy+YkPB!8L6M&b{R)$(u*|s)$7-RFW6Mr*=^i>d~Og6 zJ17)!N+B_I0!;yhG->|@HXHgVF6{H?5&(n?#Sb7i1qa5-{1|!`)Q(aB2AOV22|R%f z0G!oUL)L>HV<<1KAaw)wU5mmAy}n)xTQXJ;ZMKBe8qpSpk1%AkQoy!HhaK<2d4Gg+JR`ON5yb-sBtM6zBewuO>9xwrs(6M!VWx$N3%EV2zuRNNRz?b-QC)NM zObB0Ne0Y%cKn#0+sEZYF=4~2n+MGFa$mA3tJ1|aGzSe3=W+b4*UIPjPg^&X}aftse z0G#v>AD+gCNlUTZk2z(5*LNGrcvzU7fLE78+WI@4elSvkW{`qM6bBWvToFTtL>Oeu z7rRTL-)WI-Kc3?jj<*db_er_AIM9BLCCb>Z+yAL4eADBz-boIyjigZUj1OZ)@t$36 zZ~NuHuc)|pZPM%`*5H^^all)M@?ZS?{l2K|>}dgcD>l0XK$tC>Qw@ zq)2<{B0SiS52nRWIg`Kv(o<&7o0oz8rzmQUWC>=YbQH83KTa{gYY1FUKrvLVE08nn zm6d&OQG;@fSdqU$MMqzO^f3>;4EvAV`8{sw`HQ{B*RjCjoS3MfU?K-3N%c+|8miYC z0S0>mN{6oTNrcFW7nJ=NYjgFgOe7X7ai+P1P>FoQ5grNm;oU(TVxi|DLK9RTI9vnWsTe1R`_779ow0Oi4WIh?9;RbpZdfX`Qr%QicuZ0V>9U+#Z^b{Q2MjRIq zbauc;2Z_LRsE?K|8&ls*EOx++tr8Vwp{!bRosI&^pUl{KeoPg2gqu(<(~+|!y znVXo&(QI%9_in?HxqtvZh`UB8z%e#yDe#5efXPsP5q1Qx63UPOaB$?5M>;WGNvWhh zoUE@OA0b|Qd3}!+)3Bj6Z~($JL=eTe);9x`L$Fakf!B{ABF8os-u`{J$!9X58vD#! 
zFr(xZ_A!|^glyv_&;b-Az&Exa;c$-D`FWeZ|E;hS?E)z{F?<#~OQETr1G2LiBjul0 z;QQa)j_?sPH#2kmJ`4n8O2ke1NlO?n`|x_L`%f1`PeY-g0(ovG^Or|>gh$|43SVLG zqy2vBY_Y4(JAti0jIfJ$Ndxega#7Bo*Wvr$?s8a9SnX^SqVHRq{0D5bFX5;|wRfaa zarKlZVhuzvz#%~<*Ff;D30&gp_3H)2At5tBuS=lE2~)d9>%$q^E+Th;Fl~v&=Bq-^ zMh8ahC;f4Vn)2_&_j~WH!wzL4z5*Dl1i=8AM<4}O?WC@5*aw|IUHw9^ON^8*?rm7w z61ZT}AAzMEQ~v!<3SH(;ZZ4h&y9BZ}jnH<7&4iorUV8Gv7z=E$yCxzp zk=AT!X}KEVA2b1yOs=kW*yH{FaJ`jRp4A1YX&-By8Wl-AKtvCIQ(7#oo*7*v68}drcGWM4uuBIU`2YZ>^5bRNbN({3PJvO`|IJr`MX$-|H7F^rX4Fp`w*s*XjNSPXrgbPs0Q$iv9m`Katr&Var65vLN2cXxV}o zibs4cQZzEL2Z#-M7MS7#7bOCVk4Ivr?%l^T^5J)e@VFbN1LZ!BHW+VOrCu|joHI6l z1^#0rjur|zIskwM_a-iCTjY9moauATo0}rzOcoNRP1`1KUsW9CL%4ht3Nob$7aCD> zoF2#;W>5%zSfw~AIFnyweTZK`l!SH8M;w3DdgWvIP0~4o!x%;cLJGSkKR!-lCd3k0 zUQDN~U}c?SW{t2-M*(W16E<~n;lj4HYxI4yV7$S$&Gt3Y90oIML~si6jPosGXP1by zG#RT8pXU0eC?Z!lINxQ`=K0T!vnB~P`?Em|107zyem(V(CIo>-pUOAmZUZqmylXSZ z)R(FH0AUQ(FJ?qisdu;8rOIUT1N%arN3lmW5SpYTrSMkqDT`C6VuN!MixdkJJWb)O zR3ARmbnDCpaB~9O%Qq6D9sS*N zD8%^sLo@*$uMu;?z3DCJ~CV1h_p@r@n_B((O;jFuL(YV zTJ{OedWhr=KtXV1yn=|HbhSP=tXTF(g?hh zvtgLUH2GuVpqfWPKEPR>X14%VcZ9Bh$31FqGA5#O3om2_**`DxifKhxh;GxgQ|D%%h;3#-82{IQV)PgH+%;4b2bENeogf zXHLKQw-v;Zle8grCK@c4Q%-1VUP4$xy>oPV@S{a<{}TD6gN^E^%WeJim3#6j!F02@ zxbqvFB~V%&N;21nH|V5ydG+e!z2ta49{Z$rTuQ3EzKQ7%frzY$#k70R9(F;R(Sb+H z8+<;~{rE$#>HF(*e9hDg`55|~DE2^A^nGbpXH#lFvCG4>oeNg%W_ju=N2W^!66LS@rq z{_H>dNaG8kn=BfdrS^9cEO2E?E-P)sD1iu-;L1A~67-J8`=+h1c1< z`0?lE(chorYpQD(;bHWBvWkin`I@l4p)T3Or**iu7c>4~k?-T{3pw4P%A!1N%aPlE zhB%trGEKgZ&UOpaia3M~oAUAB2FJ1-kG2BL^t0x9fnM7tL1HEeL~S~S zjQ@j1C#KkS@|(SS)%4CDS`{&4MotVLJOYTx`UpoIYQm_8FvV8I&wyF z>XP3(OPixC1+CZ=dsG1Mucg4`dZ)W4ib5e2hCxhPz$09v4B|T>j38u>;uY!VO{D!1QxRy7Euk_TevY%_2mijCJYK*%Si5z^)BH z2@s~PwRJXy6sF2=IoQWGo;@^s<~#-rNBvV<0T*iL&ZR91iT`wVgxIk3SFV4L;b)JE zjd7CQMDa{SpgMe>S7o+YjTTOvhmV2;b3#6A`rMH`@%>Ngi6_54Ctu^2AI<6lX+XJs zmu%b0m~}}QX-A=unMizF^&L4T-Ufq9a)Q=n%$k2x2%-Pv@8o7sZts!+JG*1s+awz5xfmKh#`y10nkw?2=9 zQh_%-1wQrkJwklAwD%$6Lww;Z_Y42Bwve)|1SAE+QW$>EhWUbOU>~#ymvD%FKOBGn 
zK7I%ntH~dM>4kosv6#JzLYqPXnFQ`l@Ed5To$h8B4}mOT8NP`13PFN6Fu6h$%K6h5 zubrXs>hg<;AP(C#_6@iHEr^$wbk4LVN)X~Lk3|IP)EeD*Lr072`PSE+)qaugs zQYbJ<*#Tz^VxO{fS<|?OI_6D)uGO@h~YdfaQBN#JTY0nY@lbp?6)+OYMc zA|ZoI$x{KJZ0zcXu@0fI@zUrh#1tAQz)pbhja_KV(V+%ZfDl4!yASv^4qy?_7465s2XdQWZqcZ#D;;W{a^j%01;X}o)A~`0+2dcm&nJ+j4~owBg!f$ zG+raq-UdpI7@PnScnvg`xXwVjuqzC;AEB3_H4E90H;)31-BFCHglKJjSuCiFgwZ|H z(wf-+w9Gk6c>m-`q(h_K3{;!D0LoGtImSGBkI@VzP>ymB;}xcCG^`f)Dhb7*N6Mha zUhsu~E?)dF@;wxNtDwuI)S^_!`OdJYib675DE8%_sSRPp!*3(Q`mcA19K%7eozo6k>INY&)B^Xny09YgHgyfhtX> zU|l*A{Y59Su_1m@6c85nSV&wx&8oah(law>;q6SLzzdWOP@HF(0MLJ|dryue=mDG{ zQRKGnf)aZ(_Uv>@Yh}VF+wn0A$I-V9O-+tK%u$A~S+#2WBTeew9m0oK4i1J;>`RYE zYlL4j;7V1e=XNeE^by{=NfYTp#y_N!LA|b3!%>(J9vvNhU~MAZeUM5tyLD(9t!hEp zM6+AKBDD7;COjN#5$`_R3(Af0XpoD`!PE|~>r(8*n5iquQWd7gv{oD*psXTZ310MR zp>+Ur27!}fVHZ%(xLo&*)K>Qk=r~MX`bh4d3gIV5?^Gprp)jP`B_$?e0xt%_;~u+< z=NQiGQsGm6e*3GKg|DGD7(pIWFM{hTD|^3}w9IX4YND5yFR*^62htlFWV%N2TojIs zM;8RtYZ&@-FRR+Gp!|rSEhiRDL0kR*BTE{ z`JaNTquFh<8kFslgwd~OQX0Eu?mcuc0W4pVAgigV2`lus5`K>ZmkDvrxHQv|k)Vg% z0HIf}zG1>$H3??=4~Xe*=Mi}QFc5(n%0z-JA;!<3C_s{ejxG1HafExCotUsN*JgUD z>h;+$870)#%NjI+aTeU1-Y4z056Wz2*#v%pjlOZ=!~3s?KMt2QpMqPsZdt};K`-+R zeTokNIuCah0FHTbJnleOVgq=t?~iYG01`+q!M?Sj{T?AQ38<9raOB|W`kscFvi9+8 z@=O+VcLni;OrtsJAX;h}6t_%C&)n5;8EI(9wuK`qjJ%H5NrLjA+3{=&Jzi~V`Ho_b zFmFAi6r|E77#J54CaXoX76@cH7Hl{oz4WmYX5GGXYQemD?#H(xYp00F=Rmfc-Khyb z8>HORDR7%=THERC<5Sz&Id|0=mIao&Xp>c0hT--Udmy(7xi4YZi8Tl6PEnX??@9lH z%YTf=u*SM#^xX7oP z?u(^IHZ(K7YWzxq4mOhT`DdJN(;%i2;wL%sUhAo8623<@5aH0e2x4*;n-YE8+Q{ePkL& zU$-bcZ*dC~k_gWKBH1g#G$z8iZrvJA&fU${iC=&JfLQ#Vk%YFsU%!4QS-51NMS-Pe zon&x;#5zl(@Qz&SO37f_fKQ=gc9%6=U{9jPppe!BL^U$n4(x{%QUkaQ-ayu)2|YRJ zH1GGlmNinu1U|>B!k#ArbIx413TVCikmMimra=Y5Af5G-_#+fsd*umSty1a!aWj$< zA~Odjy(^ zOgrDZGsLwN%4b4zQz*m_77`z>FUZxzK#x!;5P9r@#~cH%3BoseZKqT26O~DSKTpqH z6`)7hG*Ylg?t#L)IY}URW3K;Rh04ub!^YAQEw@eteeG4+v9)Qi^WCRM(h4m9RLvNj zvRI8UOH9(CD}RF=k@I14uCLfEoNL*(psul=#rlq2NK4kD*h541{@YuQq<#+`)jeIY 
zm8UFDwsS|wYC%G>!D~NDCdbyc>1tPLKT96+_kWZr(|Ucr_h5dL=pYt;Z!63#rB_kmNXlIunrwX zme!yUUe@%7H;@M@yxN9bE;|ilZw|%k9eY{Bx?%+Y6CTSID_5$W@PZtM)Ju&<)!O+@ zMVk#--JXUe3tp3P29O9-QI&~G1Qi*iR7#aatMa-22^CnIbe+qvUIZC(PaHDFmoHYe zaDI2=;g3uPW-9Yd6>71B(Z>C}v*_tV6oQaC;6PPwMC!Mxw=sKhLgr$3AsnUfy8Ad$ zWjJDxqLACaR2j?eRPAPDV&WW*`>arn5P$)hu8qBjDRX#<3mnZZ4zoDBBypZ_nR-=M zS8u=^&a!pg3Dc%cyY1)aXXr(QtWSFNkqIuXJ1{B71I?<9$hHGc3=o&_Sn?X0n&2$r z90%ZNm)Zn5Wi5h}v5TTYGphSuEE(%}AFQe|0XVBF7^xYC4A4GB5_{-H!!sKG(Q zG$w7E5~&=UpYP$iujFt)_b7~MJ)3WSHI z5QCjgBUB>-Hu0@MMH30x2%2|f4ZNU=+yujU3I$t)4A!U|Bc9+eVX6h)g&~)(AXtus z@pi%}-q~_UBW6&L{E(qj#zUL7YYV;0ThTXd~Zh35h867t&Vs6fwISV$vU5Qzw6zCa)e zNPxL4OIBc(+;wz`tmicW3r%$1gscYvg#`SMw6VeE1RsV*0!@eVk%L3s%g00;?`gOg!zbTXALA^XuIL75q$T3?r<$Ab!n_!EW69F zqIiK^6GdGH7(l4f=4D&8bap;DCna*2x@fsB_)%b1n>PI$Ji%^sGCGHw0hR!eQE?WS zBK!N<$dHnfaze^@N8dhPW=DU2Hez)5XebU)0{C>|!?Bcwr8&tEq&V>jB^tjvNun@? zDl+Jxe|X_Ax>G<^UqV6-UE{7gTil*EXw751rQm6f?l>|84CDjD5cv1`C_M!%e2uj* z5MvVIsj$p#SNhnty$iHklu)St`gnX6p!GMn$3~)U7GNo)x#S}D^z?ju6QbA$)d3Nz zK^>KjErzvw@$%&v6p?Iw6pMwBK*Pd^G|mECROB84MO25cixd2%1Nn zy;0tTskC5(2XR}AH*H%)rqrVtCGP8JS|Z9f^n5)v_99#nmZc*S=~ZwS0IK{v@crOMJ=n?m9K#)ie9{}Pg7v^(6Bx@eF;7%2N zf1mRV%8VJpAZtyhBavP&D7Y@i*LPWq!i9f2=a!xAkisBiU^{2ao^dXQvy5VO_2Hhu zTig@KABNWj1pFKF2i8dcL13ydatr z04oo6Iq7tKmI};NOOE8};{H<`@c4JA{h#viAN+e&ywP+@56&QBu1551(A$!>T8P=m z`HYtEkZ1TjC_9w4Wa@J4YHtWlm3-!)nqXd)8kod6efkp=o zpd1?e*V_>O3a?;>i>itWbk_Of1Q#ohx_fo?l40U8<|vSK8Phjknr>vAN- zvHJgU2Zd}Vgl53G&*rvvYuNKS{s*L6_t_{Sz2nI8DYt&1EBF8Rl`c#qSDI?tEZZ~z zw=0If|5x%5Iaxb5?{@`BJNJS02nV3&^V>y2S^GR~X*e$5AiapfOdoXng~jNxbNjB9ojrr1SFMsZ!AM=a2u3PR^4}P#m|LAIH=Uk~ z_8ZdE34kyFtPP38kX||>pfDy@;{PX*zL77YvyL=HaBy_=r#YiRxZ@6}AhDnQdz0V~7FV&nb#G()=}!uADITL zV2tR#il9ikWT3p+jQL+7Z8}p~+Prbi?$q=!u7YtFyS29uP#<5Mv1Wgy+%%b(Y1{!v z_fjqv?Ov#0x|{cCNq0b4z$$0<#3c&ebgBy5XYj7#bfYZ%lRbrF#k9+m)sC0nce{Kw z-m==!P2M;sS-r&loyZwK%jT82AMRvVp7HBlb>-Lw>G*p-qGmEsSkeLk+@X^s)3@%$ zZnVRmiY{XW1q+_Z!l@x|ZhF;11-l0ij?n@aubWo6;@X*W-G=lE=&`f)n>FKhOOINk 
z$P;^UC#KsikqpMHN4EQ=Gh~?@rL&xWu=%rCl0YB;KgM`EVW!;%N9ooDL{95l@$+Zs z6Xx@*$~{T{{Q=hX8(B~&wR(7}!K5TQLv)g9jn7~{Bg0kpi$B%)`?o;3)HQF$IrhJO&kZ>Szg`Q)KGUeox9Xtea3Dtr(z3*k@FebJ z!0~nwWEj(xlV{;8aBKd8-@j11L*+>4t(!O9PcCJcKR;X{N^JG&m({Z}COu~4mY|KZ ze*eb#6`oaPs)|9{GPJqr=S(;#;vcRprt6wdcP`PM(zl}8<@;hG3t>n2&&7h$c=5r<_{@-4cd~qYBJe}^gw#kMpA}_#TlmydRnYw@ICmFNaC3#b6kc}j zw`-}+E(=UdIPCjN^@c51F4^OkZspqY^Hy+YnUSUaW7&#($vameW998FD-Y7Rj+}wk>qIOIVr=QsDE6VCB6}7zjQhIgI-$L5{t%mHBHzuObVp1S<{j1xct)(4$9ztCtN3OO^T9y5 z-rO?~Ni;1G=WmjZ=kM=P1e%^{3_)0-5);OaX%zQ~9WWp~{hCx&pqQd2%{PE$JVIsl z80Q38T7`Hy3_}>Ld)fvzW6IH|Z(?p1FI6W$@ERs$g`y-6g?^T)YZH`RP#xrqXt#R@A#PfrV-{19HyV`m%3T#%U1#*1A^HP#sv>xt)@CuY2t#3 zpgb#hYUd|eIK_+2c+mzrtCDqEt?7>tbpz^M`f5ZrvQ^Z!ypP59Q~HL?c3qTu0gONRu9gDxhn@zWmAQrTS7+lujjlG10Dcl zui|fH_)2?qN)p=Rp91~VuTCyY?aJRLP=wix7B7>c;`A$mlDZQ2VQ!}@$1sfgAXg;! z4gxfEgK#%BH_On-@8Y-)12VvTW5Aj*iPh=4_Bo-6_@}V4rRAJZPA&tHieVG5Y!qxyd4+Jex{{mx1PS|6ur84 z;z!R2%0UE)ysc-EJM$#8-djip^ zXK4#|89Z|&sl&Nv2v-~xl6w!u9NJP_-z+y&0IdY#jm#`M3j+|&!9Nsc@f{DM)@!gN zAIJAN4(zR~`f*^_eqC|o<=vmLk{sOY`V-FGao)JU;G2ALV(3@leb{HDElzGum1EU~ zj;HOg6d4;JCIcEtm~6`>B65tT02@Jx-q(vC4T_+VdSXxlMC1?TMgbu`hI;9g-Po%+ z0*pMS%<;W%mKR|zEj1rV@#i8Xb@#+^2r6EsiW@g($@%x9Zq=0d{BhS{2|x{jn1jCq z+qs!e)n%&1pX+n{E>yCDoeV@6puY95PU67Yx??kprYUfQZB^UB!HP>?u^F&2Q`M^ zB+g%Y2AA($(V2^oNsxF5vRNQMs3(;KM0A}9j_P5nB4CCi9W9Rwo|O5#P+TE-Gr=8k zC_S|jnh%YRgU~|3*SAz%-6(^JxwHQ*PsDS93cW497}9t29@dRVX{r!Hl@54?YZXed zx&0OHh3kI5Y~ky>x;sHwd$^5@cR&sx_N+r^-TL(*SydnDb2Iq$0442K!)!hE$eR8N zG8hf23xSv#q}hF!!!V-+EBE>9cn&fx+r2&d1;Ed@G4pEYFW(|$ZZX@wV6%a&U;5~e zWBA3e$PSx%W?hTs_sd3&A-qo*T`+rgBy-Z7q19C6>%WdCZU@W5(ZpN^FkW3l4-Go5ZrfO`>)IOUT)up{)PGZ|-T1dfa|Tg+#V+WB zpkjCG*+XRL7BWRzAbw<#3z_=LFnji)BS$|+k*NFN@d3VgxB7>rdfaY$6uirWYPfzgo>4=Xlkj0T(f7uS6)Y5ElRMDkHU)DnZ^9=SA}}uQL6)fD-8Wys0kqeo zrGcR1pn9rZ2>CWzGaia{#~C|b=fozYQ^-XfmbZm|DnVrw?7eTw*t-UgQl|xyzInURiW>gX7M!XN z(BGy;Bbmb3*Zy4Q%AoDv3l-!gfoK!(DD%*M|L*kCu3PsF~Nvm&_lA7fs$hU(KdpeM#6d}h*ZhkNb znNA*Tem^;OAqV;3BNAP>GmZm(wiTW{#w{jwv5up+QFPvyt2ly4QWGZY-;$}QhO?Y8 
z`gc91aCg`BDw5RU@z+u>paX9Jk(c?Hp?460$DxH#y1;hjWGk+);vdnkpqJYWOB=+M zgL2<|`jJg0oLIe0NT@Isw34=#mX>OdWhwUcV2jzljNluy=gxhen<7Ak8jP=GBcp** zz7V6MCSQD5a;VL;UvbSo`itx2mJ&HaWew~F_?B#vJ!TlDS{Ef!S{q9-9txl}dI=4>iYMPa?W z>svLN8tCJo$GUDK!pr;QwRS#E2z~WxfFbmw&&%|7cfI0Iw$U0D-&Txu|NEKG7_|yD z(@&(=NRD<_!^h?J$)$;yaJ3nN;WM4Z4?v2U!Y@r1!~m@?c)>Ufi_D4?l@uTQV_&qT zjgLieXhWWcOIp&cT@?A|fmsHHe=ECly1$N&t=p0TNS+ zkHEykved6gV$fk5T^Nh|tPJCku>rogN;!=r@CR9$>&72ez^n?kdhc$m-m)e3^?3$e z+!0r?Vh}gSPy0KTs9$J%H23o3VuZLnKZ(^{^QWH+h10a-<;(G;O#l0|#^hL(7_56S zh#2LTSrI=^6OQc*pYx;#kf-jY5wHh_25sfK5lJ;NDgbtO z5K&QI_vLOZr)!^Z%O4*%Oey*{5>ukaGjOaKV(t>4W}H;m{Cxz>CQ~(zyLLw14}I{i zA-QufGdT|jxlHAxx>x0;Rz?>JFm8VS1Ek;&Ge34RNLvS|zHWPG6{7D8jzjGHUGY6%E}fr5v2PQ!ZmhZI`guk2F@JgTm<7ns0^SqI&0 zAd*xQV(mV~l*m}y!BkoqSrcK_Ac1Pb#L!~0T0oM>&>&HJjIy(=VgY1Ym{qFPMS|-E zgwaoqpCJ+5LyU5;e}bSmh6Bg9d?@ESKMACKmLpXLcl1FJGd5p)Gf9POiUV99BU*_2 zE2vTeoB_6l&|48M7_J;7UmYLLwNr>*tNnZPy@;t)djv1XIE=9h1=;y1I5-T8npCsl z@%0QbdS&^KqE^7*EUN>#n+`ITrs~w^8gU#cV}li;m=1{m$X_6YG(DA_-hc`K1J7Bq zPas7hd~DuI@uNZ7?f&CmgK=AcD+NJ~$D~{GP$yJad9W6lxu)t7+ zfRK4l>v5HdZ(uwUG^Dgh%PFak=jcOpoC~DECq2E zhBhlvGd6he9fF<7it;n(&&T^+Xb)k}of#2|w(csB@+2~{7EexjodKYbaW_}yE(-u+ zF=~YaOeZhEk#Jw2zO_!DCRX?5@M`Sq>*Ju}k@Il}!k~6&LpExvmG%3@+UsO(5HPih zxzyO)+!yL7D@(mM(E*0h2kb#pa3X<+_g~n^o7q} zc7O;w?tKV?=|M(jW|G(nmm?RKwe#@^3GJhiMD5<1-nrq!PbtzEYn$6Yfy7<^4n?{2 z_s?ngnyewbZAj%t+fS$q#p`)NHl3DRoNL!|Z*&!RvRLy2ef=-#J3=WiEh>#ap^+w|uO6woW83fBN9iy=7FAJFqC|58 zj~em`Rm}0gNkyH#S;CXjs>qYOyn;fIc`NFSI*<_GniVioZKkx(7U;T1}_(d3>T6W4ZnunmJ8);XkZJ0dr=EC6W>?y7{Tk z%Jf8$)FUu6GBO6D-I#}$_r2^N^@~>@gsHO$Vhn!+CC3gPQrcN z?!9xa^0)50HaGnc(wFOuiIX1VF&DP&V-y**Km^G6ze2cTNid-eM6uh{*hs#)iYjWv z{-SC%4-ILI{ds;m*Y7DgZ~zpWW$+)q`FE@2!}nG4dh#lH7qrpk z+BH{>PcS2g_Qs;fVA6|MBijH;9m%e-7F{F;??Nrs5~?~qG9J}rK!6^}%ee1-SbOj0 zzin9q>cm+D^sfzFx$OnPE^4sw#@V(XC}a5C>Gm0>?WR&d=#9vFL3S!7eGBo!;P2t$ zBGaN+JAgJ$=1p+Sf6%O)ywSXm+&`Z^D_+#6&0T_^GT1| zV~j^d2T8n+66MhAt}3LgAX%^`NlMN7_iOS>_Sd>|m!Q@Gyo!~zUQ)8Us_wkdO3ek* 
z4RNS`{;~;aVo3x^#g2yQj4Cq9V$?c;NLUgyN+!KTfyU<=76u04lEaOQ$6B_HAC{4k zi5eg$IsnSgFQzZZDk&*Bc9LR$VW3=qsvOIk|G|}#6K!W_>nFX3_M1rjFDU)6@WTSw zMi9f<3&`(G;bVqS>QI(H$HQfXNI;32)qXPT9p(9^S7`i}d*`98QzkgE_1%#HA=f1Q z;6I!gAU%-u^^qRX`wgo`?WeWKQV`V~XcPbIvEnsx7PX{SlYK+n$k{%;FvkS&gNX4sEEpCn5Qx?XS_aye5dsC= zVlh!h%54t)(vPJiB9z2${Fl1ydKDa7XdIgXVW@n9pFlVWp;2>6EpUvJ6U#Aj!z9ix z2PE!aQVgeqn&>n;MwKHdSotVWv9d`;jQt#=^HIwwp_no&mGPP*krZNptlaPJ^-E9s z2SBh#`#s(L#6RYJFedn+mX>QvkL}-YYojw$m|x*fIbBH8v{6+v>v?*3oL28DkLjSV zSbpUJQZXe?q2E{3-pNEcN(|cqvyPi|beCS?AfZMpcGLcX>LtH_?1U;-*u9{(8A7$j*P{I&r&6G;l)92|N2o%tr3^` zr1dI(>!f#3Ti2=+5|Gj0Ca1BoYagV z_ds0Vo{rA0sZj@ylLtI97}OD|Et$Fi-ujY^ip*0+vBrb0dcyoUJLg08R*1uZVaUtd zI~0)s;_JPnr1sYKYEj*jDXwsaO(uTkANP-%;8m7XSiP(#DkFHRTG z$xoX4zu@D0qDI9bdM*a*3v_R$tiVNW3m*)uWhVhEaY5+;G2P1JWdmIg{SUEGsu z5L1+Zt=>xcQ~d<0P?Cy*6b>d_I_l&gWR!~Gk_cR(NS8=Pw!516l4ATd8A)l=m%WaZ zS%s+Gj58U2FN<3{@p6EEn4W(iwr-vD0`VSu941>z#R!L_dWjsV&QO4K8%v za>L{Y&{W@CwyD`+EOnsxxHsHy$IQTMnxWEshpP?HEy`~38BC}U1+GtQFp8cW8>+=K$`!^FH)Vh)5Tg%gDDc%uu^aHqxg?e?I%1ECbJDDjuDI_@1vxv1X4Mgf>1 z5CA5_qr3$z7ixhYNlxG4b;i#X5cXIKNK;*4G2j{Uf$U(?j+@~1R&d%EzSKDZIE56d z6?m2cxk#pvTipsL6%Iseae$9qlt$% zF54am0v_UAIv(;3_(u?YwM)N^pvqn^*XqH6Mp{B;CpO#?0ZHI3@=*zCkIxP4)CfuS97gv zQ-B>y>Rm$rI}LX1#g2*nu4CtQPnC`WO6W>ekUG|j#H~BE7%FW&B}r`=pnBPN>^W=^rs2&!l&|o zKD-i4iAdzrHqI1_Z@G@;D8QxsBe*G zp*>{a{ifWr*jM4AKE`E0#|UiFRC7`_U$wj?sqZb%r+4UF3?)_?1!lKKE zu+b<)p3B&bK%jsK0uUCfk|XuE zHKEtLu1*~IVfh6G1IX{%0a39?TE`=qx=#Wkc-=IBrVp`0A0v(?cBhVh&JpWAOTZ@3 z^q7M96x(MPz-qFoK6ggvw&Pa_Jjao~;gomK#2^CnQ7Age^P^`gR$%MdkK5_!=s?o+ zxHY0_1tsEn*8#{DqYrN7rveON?%cFt!y&Zk#HI18l=81HeipP_fREt$q1UTZhQc5U zn*ghEPUw!?B7Hz*MD%#m{!6+AZ5Db1pV)nEU=xKIIhlR~U=-I12BU2V3WrJlXw@wt z0m=cb$6k;|uv34?C4?{z3{jKpC6ECYgAd%u5R%S!^H_wqO17Y_n73HEZl_tDBu~B} zc5tU-zk^|`+mepqjc%Z+X_LuQ;NkA=zf%SKAs{GZ5(t5|2m=!AcOi6V1xkrK-XbPJ zgxmqg!d|g$r1jxQD(j*81n?L#ks8{C{aOPsv;oXC@RDDLwSA#$IUK$Weh(6_>?BBchDljh2COh4b zKnEMFRi-6N)>6sI54~g57Bun=hDg8&NJjh$evu1(#@HeFHgD8^Ef?*^tnrw#6fvoH 
z9!+fW5o)7F&$N0q%Of8|)i?CJtY5qK@Ph{#F4|x`M%^6NNPGM(g?gmkcRA2At}R=} z@1ovCRq>^z@v3HG=d%V;)5jiTFG@g9R(}D9t0)#k>#Hnq$zHx~9R>zwD#-a+jcjzktQo47 z*~rCz?~OIOC4ok(CYl~b-N*f0AA;W^*coHdK+nN=ObLCDkUt%USZ@9LY}eIF$Lbd3 z<>p4HR4}T%;0sx>ew^L#B-q+>N%{OMnR^BX%=Y^K4n4KJglOe4H$~%52gFY-7+{O3 za);_Z`CB$G!&B`;*1$KY*F~}s0HYo_XAvG69^bx>zWM+h)Ew)_hZ~TyCDm>r6fG!J zMXDTt+2!;A^UpO@ieEr9mHfr2;u&49 z#{6oeUs};;biIA8Ea%@;*U+$Qm>^rGEI=`80(v0PKVqmSK9#Y(A&*% zHlWLG5&{SDQ-e!$T^>BMnO3y>W%LNe@JNp=(~EomP~SZ}W3FydWd_RZy>0!qbu!7j zk~k1?FW~deTsQvK;7A-V?>5ZctXKNch)!%fY!g1cLrCQ*VJ&d<7hAb#Q2=rKJ63}f z8}lbizKx)4L!8A)2?P^yR($^jYLv%NS&QRel`cTuFnh`$4)PpS!pjk~6YdTk2_KN~ z6e4FLy3&b~e6%YmV(&dzpr6GtPYdLUNY3xQsI&vGrK#c0cv`8@2aQ}7fou@mo$sK- z=a1T7_o@$h8c*RFY@b5JEAAn$d-uXHKFBRz;!u^;?lMLutqE`w$W56ZM6t{N9_7J) zOeVY*6cV59#)s^%F=3!qKqwd}%}BmQuxMysuzJnV8A#-RlKW$|y=j3s5n_)XHbXof zY{EvWCPx)cC;(OuaSE=Y5}X8(!Q0BU{QNnoV6ui5RhV`Z?*~FWD2XgvwM!DC%{c7r z#x?W-e1_xvQ>Q^79f5?TJk9424SInJ|6W!EV9*@y%cj@jFIHEMj7+Blkwq^oZ>`AA z?VkV+zEc$c{Ul>b;?#b9bzvX$fH>|?!&ed&gL0y&c2q1qXtBQj{sNO+iC{PzGwPPW zuAhkOXr{oAMA!GdO=|Ku-btXBQo%U|unOGWWuAsF$$T;a!9oi}VMH{oI@s(XW9Z5& z_e7@CjCYBm4?~Lb&~s-BUBMV41tGu_Mvx=~V`#UG{+!sB-H>p9{*FQzMl;4nxV zGavO$57J&T>e_d`60~{&`a1XVn(oWjkE;qo4oG16)B8P(yWVk%ik4&GF_IuaONu%b zcH^Tpu;%sQSkKKJTBu!yCW#1OMEht!N`r}NpiuJ6`@0XY)Ic{w%GBE0x~KvWP&=3cC|`$*(=Td||e{e2d;kR2`;Y zzZj8o0T%wC`U^m#hT~3=6pD4Vs4Qj|K~J|p8#x;-5b72xGTMA>hm(ddX?z%0ePJJ( zOXJcY;>dL`>y9^S!1RJu)O;dTMKEq{3-d%vA zE5bE3gSRLVw-JJK7Mt~U#fHC{E?Gdf#s#D8D#?v~=?4=}VQ?4Cg)b!(OB z9kFz-(~ztIY}j2*MswSUK%|Z3${$-rB)#?hbN$ZO|M65kO$`LB9OO_dZNHo(Jn7Fm z6x*cS-)R-~pu~V1fM4a)H*k#$w|(nfkb>P!3tS-iWjiL}+zd}0(Sf2%7kj%rO4I58 zk@hFxRK8vNIKGi4B^7C)B4kQP8H+@wWGu=Q8c;HZ219cx$&OScAyd&HQ^rV&N~R`5 z8e}L#k~06!r9N*x&+~qtIeUJW8TzqIvK>rdR`{wZts0*Il{OI5^S_68^%zQcZ_H4X2JLvq|LDi8bFL z1{pvL>#4je=gyhf?I#)m#I=g)HCxbKz@(uD`|J(EeDOA1WGkPDjTtCc~CuJk7pG5!m^15ai;qXebwT} zyl$Y6##o0n@%r;H4A}-cogE=!)hJ}d|58_bs-C4jmcqJj3#|F*9>7Ae;F)OQTtiDp zX7sBL1fbMUh?l^%2L~^H7NjoE2&eovXAL7V)y@9DP&`%w 
z+uHus|M9;NzN=E-7$L0_COH@MH-BGI8LkNMOu3wX3Z}InT~B?pgdvJ#90c%K(FEbU zun6df|HAsj#h#Y?T-}z%`0>)?& z#JJx36BanYQI0CG>t26#+jI*qw@QzNep#2nJBi-`#zjn_A38ZG};~& ztTZb0+5brx{A(lDzX8rpN_Wy=Bqw+?2cnYpdpOaFY$!hL+3zs#535-_Fstk9fe zS7E>vnw2X4tc&;0YqqQ;MIO`fQrKp?=HGwR_A1_eMGEnCD>)0T?Ck5t+DqQt6{vVO zj7wxcjbi)Ze~Nykvm55_hX2~X>M}kBB(O0w5sF-=J@inR`$4g3{%Q0{Jp4_f2j>7Y z-vNZ$7z-??m=P(Cq;%6JO7=dW)$S|b#zLR}=wmVZGQR1%o!tzcnde%^G*qZr?F50G zKlI_0z@wm>`R_6bPT%d`n}AzO=zy(Chi=v8T?Y(6VvufXGXaS4&jBI-&!58LEbIbE z@NwU8f)p5+bOOQr92>j}MD?_PX$MmXj(AuvrwyOO`?!6N(*gDWx?+#*f6~RJa`TeB+t^dJ8_qIjZ z(p_dBhdE@XtU)tZ;_j?>$C8h*7arR8qj9MY(2X%YLFLGB|K4Yl#B%5Szn{3`wO0+8 zjA_?}>;a8m5C&=^t1j3AO)LQuxEQCpj1=@IY)p5kESmG~uTY_K8hvYsmlA!};QoY? zpd*_Lwb?SSzjD`G6Bn3u-ne6O;V%Nm@IrOeYu|i`YO=vN|M|1D zY4QEL;%Caa?2>om|5p;I$r&V8)`Slbm3-y<=ULJxg{E$4H;2QM~4Y`Ol- z#6Q$C?Op%lO*e&AF)KW zqPPe$r=|bpi9tn%8sL(&?V-HFr;_bx!8A!&#Drd@%j=`IUzCUXWS9T4#iW*a`2)`< z()!Cy3^s*`3aBFfMw24J(qM|;oNB@e4QP}O+--?}zEObFlhO^!1o^{y6ng(`SU#;2$?4N!g#y!Ks&;H#rr`jp)pbF2HGj8 zH}Kh%nE||S0G^GdnIL)5y2m|Z#(RoocK^?_;10A+m17hH!A(8+0DOEEmn}2FAt{xS z0y#5eJ>QWU!`6Pov#HzxFE>GT&>#>UsM~*rJ8)rzM3GzrKa2QbELE;KI?%qz{AZMu z%1IlOb_F)nT4iPVg622QoW{v{UI-=lVvWDfh;AXgGzVHDCA3D8yX?ndiefz|?>!$2 zhlgej<30%MAO(5GF(bgNy;j11sEKoi&$8cX;^dLecNX8fx1Vl_3ceU#cHDBQ>UQfM z%gCI<|7obcqnrbE_mbbz&n{}#>1#$94$taX;@^G6 z3(<}afO$+oL&sG%r$kdd`|o~iA6YCALfFRuPcN^G;bBtvkh!4ar2%x5^+JTz4o;@; zJMojmC7wICNC(7H#C7%G+N3S3kKH%e>$hr1dbG5`&8G;djVmYK_GV6}l1>%ImeOD~ z!OuXQoozs-Ws;yg38e^WbKoIz6%`#?3Za)q(Viu~_0K|5Ti#5N&)()R;^C+P5$8K~ z!u9Qe2M$~nOfY^Tww_z~&)1ajaJ)UJ4z4R1Vrg;ozoC;N1VS*BP!IVZNMV-P4K&SyWY#Ggy|Wt3 zD>nqYNSQz5iRwB7sFV=}k=RL;uc~PA*2mKbh$-Uf`cT{kifY$X`h3SuA() z=hRUTY|5RO-N}r@!lnMld(a7Y5R&&D)a6d}O)}%4E=sY^+l=%+<+I%e^7qrk1> zXn>9w@|k@h=o>+#F~BhHY(nOBuzE=vTK*TOxWySND40>2Q?DLI7>c2^q<50`9q=4< z_w>9gg%-~lbyNXC!G5P83gbQzCol8QH+1|<1^TDSo3`6O-UL`Ey6c20-Zjide>#{z zsSBU(!LJJCi8zg-#f-VI;XRn5d$9wufa}l-L)7kFT2i98d{wkK^+wIT)cmiF{eK%J zK+75$2I)Z#;?6lF=KMrAK-TD4D`mt22*wb*Fm6aW6L62s_CY8qA+vjJ!z&f~C$0yi 
z;rD2WB`|j2dD%|ki2QBP_096KcMnd^>}!Uy z|NPDfJkHnQ)m2sE>2H5`%k}@cU`9($w`DD-oYxm~bOe14l5c!Y5(vy4Zg4B*zXzUg zLxG5t10t55Cj;|XChV}N;uzuu1Cjgm%qsb7v85`^d9px)^ee1E18 zB+%$&)H`6Z-~BS;rXX%*oQ{2odb##)%@) z#se45NctiZ#MfW20*4W-pN8z)Yj+kvu;#Z84T&2M92k*-6iHm}?oXK|R&$e9?pGi2 z9~EAJ2sH1q!t`jUSV|s*9$2@)H`IR}JRXv~yWW?Q0^80WG>~{1nkfSP*Pl^{SK9Lo zy%|qN-po6rCZ;=&AAMM3oh0F7wTc(=u?OT0=eRgkwN(+2}E{qJ( zqoce7K8*C6)BGJTCosqatbH)8umVD1W{K>zftn%Fg=` z?elCPa>k70m*!I0oo~-2ET~2VKh;q^#sDQ$ozdwzEu-$uXo9j{1g)1nWXV(m5_S?Jt$t?OnF z3|77K5PvJv&*Recwbax-w{gXJJ>%+l$eWBE645c~9ud5%Y}Dn{EaR@ml9yK$1WF!$ zTB@a~1Iu!kwEdZPcWoMRziAC6V@Vwl_ma{UC=EWAdgfo#yS8$FPdLZh*+%BOZY(>K zSvhior8 zbEwW66KUh{GTd{`GIfIZ)8(}y4AHV{)faLSQ_tvPwqwT9@E=kgMIDn}Y^IO3_r3FE zuinqSu9n4T(eE?i7m#>;;Y%p^vv+)O_wE06U_?=>`K!`MJn~%qTM!>wge~h}kNYZ| ze`Ud{KWAoWNofO1^gf48tt@=a!*=SJfb5$M;)64I(w`UU0w-X`Az`iy_x|z6!|s4kkI)kW_&g8WLjbziN#m^bP44_Br?&IhOMBzdl*?$+ikWoRFQu z*5U?OSOnhnAV1x_wa;_&@!i1dKSK`#UgP0I0Fucyebh3J0zkM~^=3rH)Y=WW)`7kb z37yRaetGcsX#aGAQTo51)xYkOLCbkNZ+u?ytel7d>*;Fu`xv5C&n@%kxm$3zt}bL~~&}3Ioee zUU>RpblSIv0yBRXXt?Av%DPPd`B=E77-%WCrI4`_O=|;asZ9l<{$^wjjqb+xGlBl8 zVGc%M{PTwc>H=j1_i3s_x9~K#U5W?1LFw`^8uIT!QdVA%X8in~uaBpFM<(U1nRUJv z4rzF7?GQSIhx)e+)BS)`{E|&~G;s5nB;F^n+y?Y|G|Z9u=YtrGQ|_Q;sUH;9^5{n& z>B}r=qb9x|86Z$hP;{rmd6c?PP8z8*kv?2w=a|)scBg7cOA>H80q9sq1QSWD(JAGG z$4YG)IcxqL-CK+(;jy}B%ETzsB`cv@=4l*Z>Pb~%y>(Os9`oq$?-x^2+6!vH?;zTl4W%Iuk3_GH6r$)e0h0!+S0dPbI+UHYqGn!Mf73Z33-Vfi-aQUZW@E0rkZ+=n#G}xeO2+sh{1<80aa=GS4FARa# z)z^pitR1qSnXyN%{~R17i6%f5L*zIAVkg8)m$+sC*%OHYE+LtOdWq~x;589rS%<1H z4_Py_=SrO~G;>Nax>;qY0sv$GStHXv-8uPVKgt~`_}|Lu4`Yq(gYyWf>OQSNM@21) zt<=y^ZWMD6ALxf$x}Q8H_}9%hH6tO!b+7X%2hbeThEj}4LOxU-YhWJy0@<=yv9ugK zOW>m<1gNQk4%7D<&HD-+cVa4;;qK&@f?s(gRiTT%5c)d!vy<$piO$Pf_5}#7) zEp@L|F#N$^XiH+FrS+Hdad2os&ZTd2;OkLT9vhuLb`E#!VoXS%E4$%zpVubrYKW7*k z{hgsQ<|o7}d&8?Z6`w$jnSc<@5HN`pMWCY2J5wx7{AcqsuD{-<_AD+AHa*XwU4R;d zb=!^|;cuSd6d;-w{`t?(AC<5iv!fh^<$2$p zDQj8>&sp|RA3YN@`T3n~R!ms~!{gnj$Ge_EXrHy$N}-Fz1z`J_Hx&`u@xyo zfshVF03-^N%KvF+c4SInIgFhjNlzL4jIDG}W^SS4 
zss;dBw9f&ub%p~q;*gB~*< ztbAFz6;>Jf-5^8~tw=AhMwiv%wUY~Ve`-#3G3!1mQ&@*!3`mLmb=y2mrbn>BX9V?f7nPsml+C(6$!_%AQmL zdEG@Ex&{VIJtDBCTS*Q@$RVN-aKP6iUv~fXX?w=H5MLRA3l?(35lCn%E|E^J@0fuA zAQ&oH80?S|h5Ww|=AEDTgJ% zj1IB0KK1&#HnMt4RQOm|UHibyP%OA?JA15&65A|=Hh`te!P;8S=4ZM}isA+-4VvG5 z7`5h_r3VkGCYKGaGUPWEl3yBHE}Y_BqUqSAKKt-5HVI^DYP5DK7Pr-2kK8SmEA-Q9 zoXe`myDlf-PLWdrrUMaeQyHck_r~*9JbhYQbd-bexN14H30#Ldk$bNOrNY!KbNTX; zV(E`9EeY7|$q<E1=3rO9i^!qIH z%g8b&Fm@@Ykkns;f?y*q!3>c`*SP>js&rBhK>ol3tN?+|7|EBX?E|%v%ZeUr?H`^7 z#Bd4{@kD~DkW2}G8h|2aIoi-kb^(~CCMB~m8St|Nq;^w7*qTd!MPD8|GV>>v)oHel8@K>dN} zL*N*VE|MH+yJ&Y{P#p-|E?RJq)B;i)iK*f+sZp}>dcEz^%KaGt<-PBB{B}YiyE34) zY1Ehk)q2)KyXg0zsfb5LjzU@qJb1B}HBk`#-!7eBJ=FiLHcn?ICAiSzNu8LT7>QiK z0WHC3%6<;4t6cIl_bYUOkX!?nQw;~cJtZlwC6}0?BJrOn=wzTWfhxj?TmT5&g}TGa zHG-ZFMbc+5(p!AxfHCf@+v+&AX7Yi88wM@D@9^-#FajgwVqjaSYqJ?E0@f2cpeEey z;?!z?A6hOgA3CAn5dTzhU~Guv^g6s3YZ78(idb4>A{ele@7Cfq-Y%7L#?XR zasv&MOYYaT*;i~GXCH$_;#4f3^6hDkh>`2ha3|){r%`bqp`ARL5JSud9xG73AcMWn9|+v>pA5{yEv#uL8GEnv6@^L9}nH2ZPxvSqIW#s?)_ z!=y@tY$SK5RDd1?O!OV`pGxi@Pbe3$G~GuYiTqRCvQ^6PRs+yF z{yDRn43T}Djnh@N$Ol8hj6=epBq>r@IWqLk01+4Q%G7BPe^WY)h0A$niYwo*I3j^5 zhxuh30NRey4bSUC(<4Ty&ieJ|$yCUUPD2#nUv6Ds09iiHdd><^fcI?opK&6)`b?%qpGrg2gS z{5&?tEr?bUe^ibB2- z&mC(+&m}Z$Wb9YqH;kup9VZOF>0of2;7t(9(J&d=j6~A}2O2dyf8k>BxdoLD?mpJ| z#-Gy?0i%M`(_sRwjO5~PnvEcM9!EXUXT-G)SDnef4Uh-%z`!|SaInD)zIph4yl^aA z0Bh0#yMg1xr4hgaj9jM*IXeos`(EtBj9MccgeYXNanjNJKiN44crnEL3&aS@Q}TMcR(H1vcR4 zVM@7Ytxv!jiUOz<@1s!`jviD68^1HGyf#sJP)R zK%G?pA40>!&FGoZeV}#Hbl2_&qI@rOoV*wJNg_r-RUA%3714?}jLc(mw)-_QxFCB3 z4BQ~k7xeB6k?9I-F)PtxluFxB!jv&zQbgoo{t$>$CkP_ZbhgAJf?_%3gw^O_GqO`v z+w(26U|1h<;j9ojnXEZBr4l=~tbRMb{ai@=$l7$Q@t;7Hgwn~TFRZ*9w%PcRgco-> zbZ5@!$d3X-M^56Ajb$SE9`*1?W%jt#%!cP^cEa99#At-YZT`N9OiIG53a1~NwjdRi zneLe5AZ|<>qEHpYUV6K(piqLfVcUzX zK-G1GFfhGfQ}xh2T#L;2%h4})wr<_(fax97@`jdp#KIw%3E{@H1Cwy`%wnk2qlbu` zN>ID&7=t$REamqWg+PxClK>%X-yY9oGVcOnBam1Z+uHFGZZFAN3Fneuz;2i!|Mq|h z&a2xk*+Nzz4beCWFjc=}Zxl*kta0+*ts32KIA`TP&ybm>(f4HZO^Ds5X=P%gdcjad 
zzC_52#cts#|Ax|#z%ybBeN|kjuMfwD;OP2*_G7;i{W_BQ(dQ$f!1qC0KlQJ-!j3`g z>%Ce;Gz4lD0=dw=hm_l?KI=BM2F8!mOz)#Fjz#wZMcJ2@dc+=6XhKg}xll*b@F%uv zxec*vr_wpqUYB_@BA8W_pAyQ?OK!0yaOV&l_sfWxtdm|uDtd5Z_|Q8)6vuJM0(JsM zLvD#C12%RsMhV?`B=MVbyi(v0GCD&_@BZ_bI7Xggn&AEW_fseGBr@?Wtk8Rv-r}-p zuXWZ_v@G=Etj!Wy8!dij{>tRmRtIGJ*VX;i5G=q{J0Y{iu@y3N(KtS$#gXOKQ5ZmY zPkS3@boK*8aN_Qj01}J)lx;cOS7OId;{!~AIx>D9?4ct8);VIxq@Ije>)7vmr}u=E z!!VIaS>-!0_PM{4;!F=8GLv4N+29oc8w3Fg?gI*SO25|Tr(jQi%QAF| zl37;*D)wxlwT08EXhLqFjS^f}M@KB(3aUP`>Is@HqmJ+EVT&{VpOocOx7`o={>T&| zj3k)^6wwA@gOQM8W8a`-g2K;d$f3Q|;uyJ=*qLL70pvr1v@r~Q!6>m{3}8-ok`$a| zz?PcRX}3?n=PtgNBQ_)fvlzg+yW$%mlR%Wr5L58)65hurnZChz9yz%F5_9W6(w~4# zY^O~N3W9Q}oOJ+$s^Qy*b^{LiwUbSMM-}<<$B&J`sZ;iZtY{7LhLSfwfBtMuzMeYf zV+3^hQAI_?)DYG)w>?_c18Yft<>Lp*a3T((b#5HNIR38VyvPkHKGgtOZUXhs0n8PJ zf4G46I+*Py;?k+c1B)!@KQfd&fqI04-JtOqEV+wUW+{VU2(Uv(S1d%|X|KXl$(MKj zWp>8=BY03nD2oqYI@9?aE!WahKu;*{a@vA(4gX&oR(&lnFaD~;JBgYm%nK_P<)G zyPiO=u}*)>S=2XfM>ADnw*~ovunzGATO9(5e;sLzZcf~duu%V$xGlSGo`46J1M(b1 zK`a6cOWoDsSt{~{M&75`x;d#-feCmLHDemrVfcX!{R@gD ze!e+P3Rt2f`iJY8c~S>hQOZAXxJ}R+ukZ}aD>bH)NZh#2ADv2MrL}I|>BEcI_>TYD z43jbG0Z7L(s$9a}_~8tMKI0-Gf@AN1sI&*FD|o!c7}q6SyVi#O5)#1t^9}$n%nxD( zK;3i}LnMS1@4h`Wf)2l~=YA||%yreC-V+%C;?I=(?Wxhg3Ak>071pU*8{tm_B z$PO3lkOLeU5PgIO`#H*95wu)k^Lig=QyAO4SkmJ-uV{$?S)>dD>+UMfZ6`odmt?mE z!vE>h44yzkauGxlGkT! 
z(YWM~kcEluX~6d>v$Qc^9fgr-|6-%B7RFuQY{W!jjd$62wgvs#;MXHShDa3*xoaJr zWIBmXJe3LJpCBgMoa=L&Cb*&qAV(QxAvGpeq*ePpJRc*F6{2RU(A$%75{v`k%>wSC z^WJAZ9uY%l>8RLyv@6P_FL#^v!yb)FQoOQnW`xD=Y|UvadkwQkwpeBEzPxJWr#&Te zeH{^Ujxn$l`_?hNum}q~aAnTT z96BK+FjHI`o&0N&-G~O^Rnka4_&B{#lo1$`>W5_k96uRqmchN{97}VfL6|Bkp!Vil z8O6J+F3Zh+iPgQ(&)JEyelM5}ibKzKi}@=Z#t^%91obNPiXi8hwbc3sF7!GuKeENf zQi_FM;kT0(R4~+Kf_fR0!#q2^NnCA z^rNHB46Z;a=LE-#pZOEV_+EsQm zrhx0vGIoP<1(+TtJenG9898*Bzpw+o6oNX$JfT^46Cm>%41LU!lQ_K?i8=IeuVW^3Kr4UC@$KOK) z6$8$$Lq}0W?5_YyI67;BfjuXqmj7-4LDa$BUYNiDIvH;Vrqq@0E|5!?9~2y6Lm#KW zdUMAmk(QQjC%Q?gnT0cd{x5zy|ATb@|Kp9r`8UZGP*vnL-^80H={9oU;K$JR`p5v@f_^yxGI>xB9+&Y9BA)z+{wKfX%9==-E-vBtB>@de+ z>lQsT;&&!(B$_Sxkjim+iJxpIe}fFbuy>&?`=gNL!Tnd8I(|RkFzQv1iTqm1>;L?! z(xhqATE@*d4^^PcKk#X!ePNyA=QC!}_XxP6DqWKRPvAfvWaflEo)_dz)pBD;E^F7g1*Z(i$f&Q&4 zQPTxW_#j*h=};kyCUf+>o1u>og{)2$*h#**zpNAj7R#=yhNO8MLt*p(rb-FWLs7E; zEwV{e`$5vtiloR8d}vNFtCRzJY3IsrJ&{^4vd{ZCtAhR*HS&*(zM0P;y!6NVJ#1&w zu9_)04wFhQ{9@+v9=*LvE;EEQpwG$s_=M0l2svMZFG>OOa8H>#`i%m9$ycSIHRe=3 zKc}G-SX-YUw1AB1^%0=LE?h`F^zP}HKrC9j4)}s24n1ZZx=@`816ZVjc|~u6`)UW% z#1taL8=$u7$qsA3{%yV^{VgSp1!%}z?pP&+?&jhoH`WbChT~`g0={J&7zWI8sc*1l z;lxtf2oNX%QXR5qrPB@r6tgpf&YbbG+X|+R4=#P=r6!bQcGH?j*i-3`%gT(A6cb3f z%L>&zRsYK!o)@;=W(@=u*?=*~l!1hWp7%()BVk4?h(NSh7IT})y8doafqD*Ud9(EySU6AU$Q%^7WG71FCET^rb@o5 zYTJfTqblM3@R1PT;3M7MYY-oE%$7TcGYx3rO7iAF*=;pl%5Y3<4Mt(P|HuJY!#csZ zk17ik$@Tnmo45!uM9GN&G=J~MkQt{Mkmx~*dYJ4}x2n8N-x)rpn^7I@4~FqVN7=1| zliwu^FoJP+mB^5jY!4iNIEAw@5Y%)uR+|!D1eoFlwPk;N6k?n?4`>?QZa^Q7@YMk` z63!7jzRU&Th#7~K(~1;bDr42sr4@c5PcSwKLI=thNLY@pAYdUx2hmXujJ3Jy)xjQe z79a;=lrTDFOWVn96-c=uI9EEe;ktZ*@7OienYzpiGq`ITaeCZ<5tKQ!LcBvW#0{;0 zAKv>|uF0KaWaa$o)@tLCy;>;HPo=10Y7+S+je5Jo5;++_9CdtvoTDrf(q~d#U-NUE z=bHIjW0hWjnaTykUtsiz1u4ko;CtTyj`zT8?)e}+{L+?&ugGbG>Nf`NX8Ti*A2}il zLJ#+B1KN)<2X0P^Ww!AcJ~Ss$8r^g%n}0VxL-e3mcBS1F=^a5@WH<=cJ+H=@w9Pb7 z08|m=Q3+rrcH*CylP?MG2jqJOWezqo5mCPJhj0TdVVTW@i4Wa2fNvgBLHwd* z?4nE9A!c>`qo<;$DReD9o(i)Ur3?ITpu4L9V9Pzw(PBUE8zZIuV&ez5I1i)=ZAfwC 
z+&^kVH}LxHP9RlMzyvT2N!Db7imqn<4Wp{dDZHBvzK*MX4QLfB48VxR`*l8$5o`d^ zh@=?(w##jc#tq(sIch6%xG*7t9r0kCebEp-=5dB0LNK{`?Q-cch*gYhybIT3ptjn8 z?Q5`1ef(rps?ZQD+9ShcSiQ)}+n#06^=6c@Si|UTIIaBmWtW!^^0TxApmbBU@HZdFMBB zgH{5>z9NDop~3z!!YR)R$ErAv3>_@lP%)D)HqU7Y2F??-H~sd2LLYZsh0VWl$jQ{f zz=WV*&$7V1S$DAKEpTnYHRn-MMA@r1zLxLThLV3Tucw1k&D`s=1#ekgxfHl;Fod(m9IyT4@>Hh zQ7f^kw4bn`aYwFVL7zegTW|&dNF8IOw!Qc$oOo2cpdb1Lss%oRQeg6kQ3zb(I`=~@ zsK=-;XfFg0DGVom|uNqW4 zwG@xRfag4TJ&oZ#d)fANC%wNQ_FzBF%2ufcF z;qpmkLftdCdJCL;FK{Zie|mKA4nNxSnYzB5zW1lt;Z5#Aw{+^pP8`Dk`R<}2eelUS zwLKqyakvl)3P89DrvTITivsK=#W;tQ6l4qC$oMNvCCCo@6=xs88=%s0tjC&196I;< zkf1=5BTZR>otgvKeK=GxQ?ozK)#)D<0fU8#$q<6PbX7nd95iQB_35Cx>D7+ zw95dVebzAOfynZ4kQRGr%9gd45GjmXkG<5@)2oJZ4L=bhejF8wXxj57&2JTd!uX;c zspVZ4|Mw3j@oG*nwY>lKJo_?UZZ8;Qz@Or!-^zF)7<7e)j_z_PtfrHj>|V+3kr*2~ z`{1iJoU4KcEK&!Me;AZ|#65phN=`Xc>OU|z7@p+^m>n?$OEQ}p>~{Iy-zCSc=@G5; z9y1*0r!>7++jhljlWXY)&8U*`S(~v?$w&=e{C1x)0Q@Fg68GPp*S#H(IJGk&WuYKK zvYu919y7yv4%Gli@u9{|IH@F21>fnf@X!3 za0b{e0&Rw6Bm?AW0tXT)WB@=|QYJfFkTF(Qqm|xyNfMoidd0q>o@^Lp#L#tN{cDay z4S`nrQcicQ`$^X!W!~C%n4@tX} zySit9y(ffvOiE40TqH-pD-bbBw(Q}3PYO_-oNMWx48V>oHK~maJBu3Kkki~T5H>?& z4{%^@Ov(uv&ZBV>5YSX1+IM(|GdgF?pP!0RU^vTBE)+T<_Mp)bXI59IAtON%u(?nY zOdUbBlYqeVJ%5_o9yg@tdQ9l&dSM+1^D+}=FC^kS$;N)l8bm3H?|VzoFbQGrJ@{B! 
zamzHRVBqJNDJh70h;w$1k;cU%-aEjhg}lT;Iz|%$5^iX?%0?bwa3CzXeD>@Xe^tyv z!Cegz8ni#5dj}$JD5AT@Crj*hVQH!)`xjh`%^LtAoS49IwQUBklx1Q~O^vMoMYh`e z_>SLEc}d_sC>{{eReRIMh8P~ee#lb5@J7u@IIavF3keSXfzldzjcKM8U zmKyffg+*3~^peKP3_t)JBK@skNG#NYF6S zHuxo3WEI@T=x%kw4XM^VK7XIK@yPcw@QpcN9T;sWiO6dQ&lri-m?8~d;W-j{Q0Q-d z&NNUldY?gg&hRFx8eKO$Qaeh<*}*R`TZRO@Zfysts*@R2%6wY{%l+hx$B> z$rQw4U+0|JMzR3g8nqGtog2|dT1&ht(EGtb1g08DIQ545uA$=?-%hCcN|rSQ8B(Ws z6K~+9jtE~AI(?HL9O7`I{DSfx>;!m^CRmmffnjdNl%-l2>RuGbP_MM@`bZSd54@UG zI1l6#slRK;PMD;I0A4rTw9cc(L-JL@I=$^bsZAZ3>t((rhKdlX00~PYSvRrAs3Vtn zTL;gRUk?+ezMRB>_Aj_yFQ{pQ26qA2Gy&iODOxgCM%&57QKCKDs#)o^j&8$4LqK|u zfJ7n(V9H=|SiATaG`UogULLA~N+k1eFp7{Jq_I|%y5U%GLO3$(`j=fGpGhX zADG*Psi36NCgA&I9m>i#;bb4CZ}e~5q?`X zHg&o(5-|Kx)VNpTLVHFG)C;P-a}sycJS6IP0NKop!`adfA-&nf>irljW&(VpN7YjFILEtV+E*^Mg;G$DR-nmR{T zBNK81oa5p(KWH~%v-fz5CI=l#>c`jKS&;hbrcHryg$;=)F`N&xqsS8SiE)5a5DJ8w zx4?80#g7S$%sAPli9A#GWBy_#?rrKRse6xvWmp-yW`=xX2Y7$rG;k?c^t?2o$KY*0 z#vvqO+l8K*;+liyZU*S!IXQ$Og~}d-s)Jivc=IF<$;_*5pz;d*FZbhb*HiE5LX-;= zx+GRMHuB~A_;D*8$QXRc9Qom5i+}_%P^5+3gt8C4ZPk6vfSYn;z+OHZRW>ED*cdj! 
zuXIKw zgO&|9V#v!Ap-Bv>A`DZ{T@N4wtrFxY2q|JKzB=iMutkVn*wb16T3~UC+8*-jq6QdT z`j$_uKBut@KBL~RMQk#^mXmd`T@pHe2+K*(gVR`*L^++DlNg2+$q4yF^5K6!1E5Y7 zEfu6P_ZG@VJE3f)Z$;}A_r`-b9ZUEUAmbj}#=n?YMxqe;6q$Pb1_FB$ffDA;h|YR7(6O!MhXSD?TL(l z$vDP&N_P2#fk`t*az=Pa&Kj1;cpSmZI52|{#|9V4=h6^1x!v^&I3pKF`NXK#p_9W5 z1tZL`C`}_zSmnjq(SZcD8$Jbce>M21U5&nL-H~Q%rg*Oy#(a zfKF-wI?PGNs#Ar9^~jv_CYc2d_8{exxZ44=h^+T?s-R&KLslOHUPoF$PnCBjEJ#psD zS|=xQ6ik8jsfZ9x>oiT3*gFI_)L*`4z4XG&FkVC34!K168F%HZXEVVY=cz=S66^&j_ z>$OZ}q59-fk47|QpFeH#WaIp(70z=35lmx9FI!d&mrYw2muWgoEWWg3Er{p#V4RI4Vq^tOk z@Ii+<_G%38J2)px zoxbA&)OW19ymCKcL2NAB;>8IEO0frn0Fk`M>MF4o3{SjamA8)v4{O7$HGvkbB2`|J zUe*3O#>O+S>kmRIz$Q9$Y5sx*!BCUiqxf|7^n3tt4A0K!ywSP^jK7FXvEu%(2LlGxY8z80Rt6Y-|b&KkzQK zKElO{&Aznz-L0@N9x2PLXvs@69b>`io}RyQE>uls`3cvrpF(C+j5U1kUaqsSp!iZ$ zOVa37Qo+p&8uak>^_@LujsTAQy}_{xi$z5z;#;m!Q)6&6#?|ui^TSO2AnJ=n^76c4 z;v1?wg#^PqyuB-a+|XbOp|TK!(dpZEyDq^aTW&|(eF4Pj78r*IrKR@RE|AxT#xI(l z6q%pD3cF(AxyQOS)|KnEw6wx*pD+zHn0hkNGRO5DE3&ll0^@{q4uynF2hPK0VPP># zE3rsgR`$?35tB0y4%DJd#@?GEaKyd8=jEJv^Vk?4-Lp4)%m{l3Z0IC5#hQsMGiS~` z0RCaNjNOr*AK@oXlm!zbk!>l+=xwj%0Ia+gmt^bcn542)MrJxTPa)QUYj3L&qY&VV zH0Dz6LigjM*Czw*ptUI59}Yap9zepSGa7NT*!4x5+VxCBko+z{1ze0o7n!MQ;q&KQ zNQ-MdB?ZHhH>S+pDmw>okV#BmpjUnV(Ytf5t7({~?%ls%!p6I;iXFg3*$+lgq9AV5 z`1vY60jRX|+^}5i%U&Mk$>G^=fb=(M@xqYF<0edC^HKcv0o{0?@>4W4nyHj@Lu2nm z0W6{bBoNVPq+pM-NZGu+x9jZ!Y)UOdLoW2|9RlUNZ2?y6(ht2FnkG=rN?K%0frPjS z%XZbZ_R*pZ^oP`zuP3_ox($(20kk)_UgV=JU0hraAnw4nWZUgN*-rQo zU8jVOyo04E{YwdO3k>MXU+RSm7k&cy< zuFckq67nlng&I?6qQG-N6|&lTrm~V=GmWy(U$`()U>wl&C+hMW%(31c;xuMq0E{h0 z8;R}i-JA;+EU3@%o~Aw#fYG(W<|7b>yS6;z#~Wd_$gW>+>WMqWFuXDSp+@tjb?c_& z=H}x3>3-oX()<0p3^w$-h!sI;nu(T%Q`W+scU5F$WMHiqBy8q)wobz2g}5)rwdbLj z847x@ZE49z;>H_(=6nKzRh)r#QSaY8YaU1vbaQv7b;5;!2e8Q=hMTxcN3yf`UUiOA z^*@7}@dGXw2M2mhXRkWoGbdg|eEzH)?Biw2muuSD2}(*z%JCo7c1FyZpscKHdCwDP z-Xsw2Yx6&-PsAmr6lS0%I$Pe1Hvx{Z?der??5`tl1W z5{J_6+$odyIhiO}_3~vYh*cR-Ka4_Tw3R>vrtm9qLr%IH1^1R+yQW-RcMQfiD1XLd zRO&&*oIV-!6$vhFGl-is_W1gc1q;Z?giI?=EP^6!5y|5$5#!$uDk&+A9a=6fHd}H~ 
zj*Hr0^4o@#IVW{vK`LZ&QqWeIJ_XdG+fM9~Kv%)uRjA3%kIDUDC zk>aP0j;UC?*bV}igZ*eQW_p+w)|i6($BCFo9Snd0DCz`Q1&e#`?ZKh4?vmUj{9IZ_ z=3r)~_v`T@;>YmRiYqIpyJzzulr^tPF%>|V+j-8!H=$REx*<%Cg&S)Oe(Zrr%>lX2;U zcDZGa*HlDowOzX;k?4m+L~s}w80eP@3dRU3?dug)QBgsH^Zl7eN=nLYu+a(%3ZbiI zW2$WF2u(*soHARQf&8Hql$$ibF?}uk<_rw{Sw==X=9@Pcy?n{LV#Nvy^~Qm#_o+|3 zyX*A~pe0C#_V37nu|QUK_B&uCT?f=_{eO~A9C$^cn51697al_|7gY&JGF#Vo`Z6^(Fwu$8X+TiZbN;GcRaz zUH9xM!2u+2q_FCmo{am)N3z=M@I}5s%5uN7bV6aoU=^?Fm+=9pY6TZAWMIcA_>ZVi zF(k6-C28TFA=7en^+RYi|!k8NktF2YbVGvh^N^FqTi zM$pKzf{)`Z;}Z=wO_BXGKUcu<0Vs=*KHlkTry42&FF_k-&Xh>$8_J3X6-`8XFsrHtxsvjR9fu=+UD>qzJd&o6GK;F)-bQMqkQz zal*oDjM-(ry8JuM134=+2`F+A;#j#XJ-B1VaVHb&B@!nVxv7{ML6bHMC*;Ow2UsUx zVHq6i_wa0e&Voz>F<>e{OA|`3_1!u?Esqozhl=)5v-_j1cV?uU-KEJs+mLUirKJ@k zbL>XeLX{FzNMP~e3ErLc_rZttTpny+UH}}*Jl%0WSVR`3Fnfgx%uqfZ}U?ghH50=-v47aWtQ6L-FM9% zc?l4dA4z{P^n<96P6MUl#=L`VJjgk%i|yp6hwZdndtyWaHNOnXZ+gyZm4OFm&6OILBau} z#6{4c1%fjz#;(GtdbF_si7MLy-`?q9=NXtpBfDPLREaWM)JLvJaSpHX58c@PdWUZ$f@_}4YIt4Vqls`!O8jAPh*qy9Cdn=r`Zy%3?Tg7i^HYUxB-fch- za6{6BT>m}R{%ko1!%dnZf-x`0;?AY;@Ng>sPJ+?=fFCm$s3%U(mSM%=dLJ1MqT^Nd zIz?q=dxSqm0Kk{i3sm#h^~{)F8G&TzG=!-OSFM_l`Bp`Q8Gp<`q?C_R*~^Mt30+8Z z_wBcDKZb_8Es$zwVGIESwVC@RiDbI=hXo#&O62Xk_ikua*R7Cq zs+q`WR8+o&=q-(G2v7iy(F(jeWIXObab4X!hBS0=w8Aim__1;wI)Hd^sU{;|-oKAY zJ)z-L4YB*I^d$9*7|uL0GU(D=0WNc`uI^-LQ3Q?z%2y^p7HC zPm~d^%n{2_kldA1QFLbd25zNGoN5|~+%51!$Q2mIoeVwN8H47PIYK%`h#NK2gHN0Y zeDv%YClc6=oi;eJIg^UPU&LQLoAO#nSiA{nKumo#R8vR{QE-mK(8-pDNN!zWo zwCY|h0l@)-96U=Pdklebl1r9+MV&LYy5Kb46N&vxSwa?WFt+J2bP%g+X-zobU=7P7q#nq4Fb4lW$egd zZuGH`i8#7Dxm6)8;skOE2vm&8K zV$cMnQ7ocRL)Z{hF6v^5u1$f9=nkb?9z^wiX}O87f*?<}|7gDZa?X`2^XxL^mn~ys z#Kp%aE6p{!4T(W`>ue*<`xgn}XejVs0I9;OyN)wq@O2PzN3_}tb4%d`!1PMtWROBE{mt$!jQhv_}e`>e{(&yU5oxcl3;!#IB8RavK3`dd@6 z7L9O;b^uZL9E5`pP{kb<()o#OG8D;XEcP-AQU{v(Q{t2Z<_+K(?!S2`A))wu}h5yHUtgDA`^fAQoB)d-A zR2D&>p_~8m;yMvF!=AhsC(1UjT{|8f@h3qcy+a}4Gcu$IxHa(Zm-AeQ^d5!0SSBa; z0JZ%g(0!C(psV;kqGAZf$f+dYP;zL@RfJ;y5V%>Ai~ws3y@Dp@{3|vMZ+pJmUo(pI 
zU3Rj}*wa()2$#!~R=@OD_|+hc(<`X-LgGoK@N^!Yqe$kX@hscV=$Q(prly+TwqyV3 zu1E=^+|Cy?t-*H(CHz}(;sYdn@?;9oqNRuLY6zQL)8zqhW!YaJm&Gm7Dt3N&F%mK0 zBPYa07v^cOh^(ZH&_M=uvhwWOgF};k>)y0I=z5 znYo73ZQ#|kn}GQ;vTky3*o7}$3UD?|H6boeP2}ADSFdIRQk%POZLb$#zW%^{H)mb5 zQQbI~&@kirgxpm#yK9AAx5>@{RIz!>AU;-5R+eaYLyNUBRcMmpL~?kfxjcN4fB*}` z8V_&DZM$YVUPnQI{S|{Om|aS1x06$upnTZ8 zz@09uk2-I#4A15hbzFu6Aub_7qQF8t_nx~sqQupb-6f~HARkZPCS-gR2T~KzO$FB` z2G;&T%#GLra-n~`7nX+PTfdUP0+vNx($`i9N4KA0>%Q95oFgP)AjlD zDHK?!?lJ6bdAyJ~|Ni~^CkjZz<5kl97N)BJA4#>meORI`B*Sj3ia}Pk?xL(_cKz9; zBr$SCg zJ$;J20xYMYy>3hulqN+$vli;i5I0Lb4R)`nwwA}*+Pb^1?-&;=4p8Jws7)Ut+qt#p z^F}CL*^up+=lZxEOj~zCt^@nVyufcIu$!YemzxJs*seibkcPI7b8U?K4x}N%N^4Dx z&=EOv+JaRY00Q%W%#(A7KH%Hm)2x0<1do@|-7_c|vSOxs6S(su2?>igrdpnaHj5Ep zDP_i~Ykb2q+Azsj_y7o=q5W#jMbKb%Lvg~w0OZ83s#+lEEZ~hnc0*8VFc>(#(Uh0C zd~ry;8_9sqjB=}9AKXU*ySFgytb*m7OP8N5Qknx#DOz1;IK&sn@!ydD_TF>C==pKtF9K`Pur zwh5n?dC1M4WkxE7WB>;q`HU5aAI#O?g3h_|4uNwg`*AJ3e@V*}_-8Y*mvJ{eG1%~o_5 zf-Rew9HbA0-S_^Bt@!jtI6{tvglY+QdaeS}dw@(R@z%$rj(IDFUw{Mv4SO&`WHs&2M zTx`y4VhYg}Y~Q~9g34a^O(ly2V~nYs#t``c$WYaP1w}^p$Y59Tix;y{a;`yQzx`R{ zNiZK`X8g)1M3EhZ4bJgs78WVm^|$)xMr0d==H@pSBR`_VR(5M+a_mcXBlX@^)(lZ&kWoV zd~>($>Nw{k557-0k;s>pnW+Ul19H8?5WR8dkS&@k>Nk{Y?|@f1X=}$RK|XaK0++X< zm$)-_#J~6T_V%VCeA`TB;t@Ek;D|6GegTG-!9Bjfa++m7VG)bFmQTvIJOhp1LSbQc z+{XLZ;AOH(mGhtw07*PEJmTm#O?uo;*2=x`TsPstMUX3M8ET&w-jj z@kDs}yIu{AUMZplQ1zh7$NdjGoXFCIfI>(}PopKm_WASYA3b^U|50|{aW(h<|377v ztV-FdlyS0UujHWY>LeqgWL}}H5Jg307Req7m65E3h7qn@M%hJWM3ew-q(_L${o zwoo$FJ4*le`t@t~u+Zoz9{aE}(-V#yv4DUdjVOWV8e?K&N5$7mVPH42&$6#I6cFvr zwnQgI?v89`>d_6CMB9nyjkxYMnYD2VouIqlXH?PN?b{pB5ni+I9$uYzbA{qQxjwmN z=g(Ip=w}>h@y>-w7;8AT2Zot7MyTk~AUGtQ$h)3Dk|2uZET&Gd+{ee2F$iAZghHXZd3d$msnomxxGh>7?A-JCTF|2 zWXBCM2h^imATR@jxd);JeLg-p3O+YW5@z13$`iY{KpkmKUzU*i&D?fXLPGb8Nl9y; z9d<1X$F{V!bCb{?zbOcQYv|Ukt3Q4EB)9=Xvd$~r3}V9L<2y-Q&C^FcV7*H7Z*nMI z`8R0jHRyyH+F?issm?it-IuGj5A*+Tf7>l|5}0Y|T?RryzrWS`b|tc=q;__Q4w z;rr!tbxrgMeEaIV28csB!fwI<7nf%2-Wi!u)xaWkot&KLYHh3h8+6}agXM{_;AyTG 
zJv(Ue07PRsPd;)6lz!D}j8-G7vtjr{z-Vq`PUOT_{T#;vZ1z}EY{p9S=ibkch-M#0 zvMardUsVGQ@+U^_%9AJKp0sb*uI9&&AAw@t(`H&-uTO?EPjPvPsxx}rsY6292Dm=B;52T+gn_y% z*@&*7yA5pyMXk`Z!s><;NmfKCQXJ8EGh4aN@NFHnBz#oIU5^{$I&?x2;ae}UjYC!X z>kIB7NTcPKYC$;^%{8o^mwn_}vLbXoc{T7#YwJPZ-Z|}U)iAT?@8VKc`}&!Xe#=BXWyZS6KwASTLnAxkA-G0%B0Vp}Mg4EVwQ3>UTn+J@p zd%Sw@!jkzxr=LM;XQ1Z+td`=0hx7=(_Do*a?aE%a-?xnPn#+!;-+8JoBLQpJhRp-I zZrp2ut^!I?K`l?gD-_I=!9_aB^i4o#;e);E7(0S*AhEsNz*oJWkhG=%QYw}aaP8!| zbL0AS!x^-zho|*8MIuE*a?*O^M_<5_lIQzQ_xxBMA*DCEt&Lh#AzYx28_3 zk_t#v2eu{}b#y#hb6iVj==Y{1K~N|w>ifJ0Av-|0!YQ@8C{MWS;pYK(>iL2k2-3s{0$i*~eY%{;5sS&{iEf)TWxa#yQ;y>8#-Fw{PDrcyg-M zf(1RCHl<@h>96Rpw}YXh+WN(&TBflIF2<@{u3QU8Z+Jj%Rq4>7!^gdY$Iqp#7kC2^ zj~|TK>|V7(vtGS=MW#+YaO}BlpLB@0`TXC1t3e_HPa-dP5F1EAR~M00+neW9fy$4a zJ)u{B7t<)y+&68q>1venE>8JjPj;A@>?1goHS5=pi0NV&bLCx183f?Fhc$(+Ci!gN zk(-I;>P~;aj5x|uDMPrvXhBDDsP%u*xAOz*{jqe zrN%%)gL~>SI;@W9goX3xG57;29(rkDkWq2?35)g?T7i1p6?lbX#W6m6)b`F*K(eHArc^w-IzaVy;q%KR zL06v4l?URJSiC&#%*0WnYV$=`uU%W63hVQ=G0v(iS*^e^R0w3oZKL>V^ycaLh1H=c ztd@fr+qbU5T|gRxR4co2rv?R`cGl2FZEfv?r}e33QoMe6OH5RJGFQ)s$$Ah6rl zkI%%c1gqRHx06X_B54G}mFAe6Eo1c1$jfa)o^ew>#wHV=O+Vk5i^e}V*f!a?c2Ig3 zh;-pPdDYiI5}MnFd0A`kP1tnSYvIB{G)DWg?U5ZRxD8QqQ9p1GS3-2Q5e81W;vS3M z*ALJa>sUv|Cf4-Pj$C~2!2*S8WaQ|7{_>#Kpa5GV;&Jm|6bf}9r8s@ycuzOHekjA& zmg|82I-+~i8uhNiCsUe<>Nvf512jx_8TA`B6l8;?gYZcUmMip}NdfJ4#-ftCj7&&tgSjBbk-$qoCoAk%dF zp6F-cmX{tzO#rmR@vOa3hjd_*yLRvHgl5TySUxTl zq~X!tbQW@zgChG_bZhf*A-dZlG6megS-$61l&?WUG|(wx<9nM^jT<+f2VeqT`<$rq zW{h&ERlWM!#b<{B22rPlMnveCROUCOH*XjELr`a8%;|PU&oMYWws^bEPV<&6wH3Id z?DC3$EVLB>4|z-uEF8-*lyTN$g7dj~D<{}*8uM~@7XzTRHQI>ywpq?JWMi$7Uf%xT zPzY$1D)z)*Pfm9KYH}75M`S$i?p6L2BHg;xWst(Tr6o#5b8d6_?wZ?gb<4;)1{o9J ze*}qw(z6*hY#?w|XqG1({2kDxdAtO_dg(Gh<5@4u@##;TXR{7LI1%Y(m!C9F_zyt?vtyEoZw z<1YO)tGNZVHh{iea>cBb7r2(VYBE%PB29@w(Dt(~w2+e9q%tWwlk)bpBnzF#Ghm!$ ztIWZgS08z9`xdim5HeSRUgH^=4Rt;2%HONQa zPrkWsf%LmytlOUX6a{h83zLcF9JNN<|0XHx`bu0iwj}wRw}x1Um1Ra5|rW;W!u| zpsP3cU3ozm2QRNxr%uh^KWt!~nbmpsyjYS@FM7U*gs>Soa<#s7qEG2N_Li2yE7AE_ 
z&)}h&4#Np)V(6w#evDmHbQrP+B)m2b0V{TA*G3zlJ^q4M`;BwTj)HZ_N%W%1s>w>< zARYeWgt^U7YsDig2Io4r&c2R;-2fz49f`xxi0{g_4sb5ttkmX2;|061IM4Z%p5#(0 zTttDBhsHqY1!{-6rwg;rSP(`O0yZXfyh6Wz{Tqm8pHeo-O+JM#$ouPiI5cZCJn}BM zncnpN_F#4*B@O~Ly&*%cpWHB9Z3?I;M<8AceIug)P-;b~Z`_FC!v(1WNjre9ru6K= z=9$wlT8L(mEqtsy6xpKTrRPt$E{1UQix@QD)FVwuXp1pp#(Zb@3;ra^Tx(skmMt~h z3#P~K9sSBT)6zhz2r}Q;!&aRm@+ZaQ88d>Iky>}jT0Mv&9!-5z6eR|$0yb(;(>j;(MWUAK;rXI6h1vZbyQ8D!ghDX&lkN%>T?ZIIuSt}e23q6| zh(4Eue*M-m^EB&HmNkEP5Sm@TpH=1n{|Y1~_5P!{W*mu@#^KRdF?a>jX5{e+c#oZh z$ji%bE-9zbT8cBk?iiW6vuzPX*Pi2?)4=}947gW46rsEfaCa7cT&9!h(N05GexS+ila zMNUG=MoN!^gSDxtGK4Q9e4e!~ZfFH7&i~Aru1K5afix=oVA_j+(h!PbA*G_cH*b%m z`uJ(L8X|?9@fL~LJ@nN;+iS_A)xP0UAGdK!DIugTKm6?4zHXRXNsn@T! z$}KO6BQn;SRsx@y=1@Y3MdZb+N^Y4;AtDgO(8^B>&Vq8D_@p+>;|2yq6qlTIzd6~% zb@YAQb#mpAGS%UvNh3;(~Aj@~*VjpYY=GGCW|w$0p;ZmzB!nLC4) zb^s`px)s`FVUVdhZA2o_+D1i5{|{1G1mKSstvb!%tKt2XkLWdEgQXnF z51R+Iag{wGHN^jrElIBr926+$z0hc#pqDB$KSX2z&p2q2=dR>4wvHIsR>|t&edzW z1d)~5YaKqxh#_Xm zP#zU(%o<4B_LQ(|mcH!6r6_GGMu6|~hS=bUZ3ecFcuAeeA>2w5q*jiy`Z5R1;2H&% zT#JyXO>ahY=-`zNIirPmnmy7|ZhNRkqzksghE@9Hb?E*Pj>qEN4v)(t&YnGcZh2WZ z(W)kIQXQ0N=@CP)z8X*7zq5Rply?YYa-j#_cZOje?och#_%XnlLYnPQ2YeN!+PXMPJcw9E9&!= zA837#JoHh${R%X}ZEXtJ$r3to;)E47hh>ws(N@CipWQyXo~Eg_**z9iPwyS!56nRD z(CGKs)(G$Tf1;I-=g}qHu4|Ub)DoOb`zQi`%cVhvTM(L+_IHq)C-rN+jt8}ebUBzv zMo(^J8`eAk>h(H@Xih2MFMrK?`m|;Qn%^9-SiPUnu*wN@bFZQL{G60~e&Ou=g)4ZI zA~MMx+v^J_x)kmK`q3Q9+ud6#k8wMK;5VmrhYec((ivM)4EjwyTII8@1?4Fj5X(ZD@_|9%~n)2v>Gq3KVu zvs?U@(Ol=phA&%;Jb_!TmZ(&vT*FLUBX{|?CUCHuLPJ+l=0OmSdNPXL+qJ|XDC9kT z)wZ7VTM{N+Yu~4`Fq%q^CR2W-R5@ZXu&_ANdH7KE30}_ZUfQ8qVo73$PyTn+hVa1y zei?>$O`EkuFRs=r?_XnQ&ePVKX<<@S&BP$Q(!+-jDXzp&bYk*=_-xTnqbIsV{ZlQk zVvb5oR!9;+=Z%dIzJ07x7BM8?Ip-%jt1Da-ALL?2E=^U%a-|$-e1kgYq)+Xuv@St_XGv$0%xuwDHRPd< zDM-#yWwEApWQU+s$!YEN4#4ZT#`H#d`qBe3a^nO-1fQE_bqqq6(nedjq?G?GeBgHQ z(wmT|nQoWI$_?>0Oo-PVQBj3kxhdR9^v|BCPC!epB2NIDtX{8PJ+50% z^IFPBh{``P4Iz)WW;1;FKa0V<7Kuj!z%7uIMLXN+$e5aRV^jCG6oJNF@JUIb^#q^u$MFT(tWZ$JUEy~cP`L9~Ml4xj8d(vRaBG%h{yHbq5zDeK 
zj=8<$V8{l(X0&tb5C430>WjjXtmZ1mT~hYUZO~#vs+k9#j2;XV+7I{+bGIz?>#mIm zFQ8eAz8AT{;LjU_mas0ZzF$g8e_=%+-`eCltvem3jxm%D1UljXF3{gtqUo7LGGf)~%O&yGq*RNZ57yoe$@F#G+my`TJWG_>g(PywoL+&OThsfJ^oB=`j2NEaBTOQifwor(|?%TA9RXnR-0Fh2BmHv zj4hKvC;q6Ch<8ey8m(c;*X-W?tan9ClgflM?cY<|3jJg_rv%6TWf~Jei1dVur(P^| zp><&eP7!7_>-MUkAhisE5oue$?|GFPGphCR2byqydP6~uIXE2$T5CRRMXlcm<}TL; z6_47K2c#@Ly6;;~;IPs<1~WHay{h7@k-rI$t8_z~;~&i19oIJqorO6|AC#HrEP8sx z;^+*dTEHwM-YpISWh5AXepw>#HL$70dZ#idN>lr;6G!N9??*0Pmi_qjstulP8-*_x zu`?)yh~mg+@#!)#cTjklok2L3#@H!9A@#e@X^c-LKx!9|5cn_qN!f)@6AwJ!qEc0O z@Y}4Bas?+uj6RKj{}3dsaW;okc*;g=UGB}g-bXDb6{4OFvU$KkPoL?9VsXa?EA=$? zb^ydjO%{5)qJ1^%UWkdDK}jYovS{!50%=?TU7LXLDume0kd<-B$T{#b(4 zK50$Hj!@g-O%c%of0311ZNzfhM0#S>*{2OR1lgSW^EZokeL>~O&f>iRsj>=-GVDLr zA?Qd$>8|o&v_Aek-+?VR8(%o)I=hFdhhvsU8;UM^ zb8YGtD&d&c#-X!3@4yQ3c+PPV3Z;{7JxL_~umenH*vHGb`=H&A`nR}cj##t|9iRW| zl{D*uxu6w8kfQZ!ucIUIdyNL2D>Kn6Q$5^NPG%h!%>`0&Ek3h#G@ri9{MW;|t;Jr) z*}zq@Y&wEjUC}V%US0*xy|wK)`>M9VK?NSTWVKvea_ws>^Oskj*~RhcUJLA``)LpA zcl)b8@K9CC*YGdH)Pi!dL_@F1CCvE_R|1Wx}BVzMre~zmLD4mAz81mk~ufZw*_{o#}WuF*v zwo)DvuZpifWX_!SqL4=*BQ&-DUSK1-p;aLAN}VDyU~~vHm7e{dTes{Iz4A@Jr8$<` zOo(|~?mWP*N8H2<-QajubL_{S_Kge^At&--25w+)JbnLHPmC zPo}!VZsq||dw(e~08HKT3*Ge;c*6p^jVCKw|JMDhqVfrs7aOWHeyR_6?L^s5#Sqvf z%4W8ZOPoET3lrD>zdcm53~&^}$HDL9(W0b(bNMxN8q&Dw)2G*H)U5_JafWI*6vUd# zTPrPJemC=7|9Zv|#jQ-0jY0-IqL95#%QJ;AGHmd(Og8`Bk>Wu5FS)UZs;F{_D0qrq{O-TZ#JZVePd$32D5(iUcGj7IUi#!V`Y+`EZ2hzzsj{lHCY4G z@(9tNxq3f(8v1DC{NvROLccC4dQk<%1~wW@DEpVAap|$3Kp!(oaOZR~#*%BMJpTUf08B%Ea3JzAK=h(H=ioU}Spc@0&Lrl3^&X(LS?z)^Y87 zS{WMH_4LB-LGx$dJFS~V?5DGldZ8`1@7yVxVDJ`=6(1z+D&^n)0|)%!uj-cW_{-fr zkpIQp)AICwf<-I1Q!=;I6ja8yK}3zP>2pzk1OP|_LFUfnG?RP$pS}H?31j~0 zC$+OEfqIap<5e{{Jlt3UP)@EJp>8YPElrnrQ3xiwbosJ1M6`M&b}1?Ve_;r*oN_(G zzu5O=r=tNCgQ@28J{`5Ey!!hO9~?W^AEmnza>m%q)IN!YC|?TAPlh~?C%}vf`6HYJ zOX9N_gw&XRDUH3rIZ?|!zL9sL*End#PX!q*@FR{mxIH*zt;0oblhaRuvT9IXyBCdW zxmobuZ9P0qF)DuE-N?uYIC+gv;73q2LHo>eKJScfD}X0IoH|Lgx0X%T*tENR`SJ1t~**JFxji 
z5z)(m4YvHcC_}asew5H*)cjhS5+Yp-a7&IW(f%eU_*Sf-sLVq_E{-lln*ypx;zF3& zrK?vf)0_5Y_~)(Vn{3}zGdVHyUQ0`6WQtmW8-UMs@ZYg2CE}2-tQBP!(azE?-X#=O z%(W1o*J!4?Ho=1s!q?OTjXrVq?2323?DYX832gZgppVATsp%WT*HO11vjp;?N|;>! zvlKc$sxFOU6~$C{_weW>*JY|f{o@#QT1mm_w54N{&{?{{SKb9h@9GVlNthk1mmdy= zdWp{{>O;40ANPKdY6tN=y?&BvlD<>T4iKjlUd42~&+<}HOOIyw;fTTJ%CmU!+KxZ1 z%M&C{f#e%MXacxf`zUuKjbfp#e-tg~ChB2Vc75rzA#IN5ve5=p6$u`S&C?oBh(o8~ zAe%H0fD@?nk2)tB(s4sCueosH!XY<`PqWdK$>nYTn<3)L%5U_#@^Q#`-A0mG2`2}t0 zn{*ZCS*8cf5L!&9_+ zBC5bV!TH$$CvAl%j9FoyR)83k5u8MK@7){7&CeXwI&f%pOMAD`S-j~ir>c?g*(Q}m z^iAJgH_>DAlSCZi>Sh=^~u7g_7lQ5>jYXc#^T!bKtgFpmn7 zT6+9L$TA-D+tl9(v3bt0O*RiC?ZXET(Kl&*-}Q7<1NI1vOLcdGumXzK@%KUt`u~%a zb$+aWw7KnzeY0ani^&PxU`*nzfQX2Cj1T>Jdl66W3d{`%gqBHVPF5io@{6FZ#j4CD zU67m7Um{I!ZyfMtjD}b2MlKcWK@^h%sY>Frg+oNpFU%j|B=?abmB4KT2&uy9cHkegD!P03ZRN2Qzz%ii3;tz#*+vSOOFU(~ zs{<(5g|q_9L#|pGh!&pOYQ~J{;-4g-x?#&M9TWPL){bcxaIivJ3h&^*H>~{Kg04;c zAXzy%4*&F@U{KE8fJD6M>QcJtj3QYA3COk_rj7| zk;TXP9thVv3!=qCl5$nIMhz81#EN%8_B}`2^qn3&e8PDBnd#ZblP-m@#{KL==sUMb3s_Fjx(EVz}VA!sQCxj`IOH%2_oC-9wgKRfIvbU?VjEr$Gk zng6KL(!^DET+*PaGq2Gr6tQEBFZ?2;ZwY7!Yd`q$cd`EM;%#wba2g_DqC$=p1! 
z%OqP1es;IqJ8_d1i{0ts9*03)-LBOT5kS?dRmou3bLgeM!LK)cx!VL+wz9Gc&6cVP zlK7>nVdm0ZPXI7>96#P3hT^x7+T3G^daLwhFxaA&4>J>N>N^RGc^^OSnX5hsbqib* z1YaRH7SeaoM9K-zgMQ9ug=5%LwSP6c=SIt{hO;NO3Vq_m+0o zj0+P}Ry0v~&mE6qS+|yJ*t&g$#T}tCNhg$i(ka}82(9C*)~RFM zL~@gkj~hF-dCQiEt`2M5&-;@VNcY(vvgpdHjHfu|szmL)eE%DO^XmooW9o=pmGY-1 zWv9q@L_V|=$gU-8K+q)dL@t{4eLW%T8Kr5@B3A8mF z92^!7Hn;DM8kGx6PEyKg+%bxM7vGqeMi$Rz1OLX7HH2Fe5g=!vvd694+IOU_tq8Bs zbEizVPdxbfqXV^=6AB3=4DDT#*AZ)MbmRj@nt3SEbxp)U5Z%TkbQYN{v`8|t=vd&K zKK`BF(8`>= zMm^^3t4d>>ae*sQ1kY_3I3!-vY|9c@YzU?SG_Q5rI|l(}aKFPENcx3I<)vQ=_G6De z2z|JU&&g?+IQ9b$t{RlS65k-UHnFsUWO(fVjBC=r_H zmc`AnMLKSmr>8@&wFaB9G!p`K;Afe2YiQI^{;l-U1Uh@Un=4AE&C7oBL=Ir4r9F=& zwPO6f9yd$gpo^t>=a-_q@W&+TD1hV*!-f=WN zw{`Z^>C@e)P(*B}kc0~EfObc2sM6{JsiMdYKewFY(UUbLUVP>7K~dxRglU*&IvFH6(S}(#&%w#~aUN?2CJGp?=Jc;VlPp zTuGK2M>cJ)`DM+w1XBFYJlanHZ|M0@*SF}=qdxV?V6<{}*nqhbzP>b&>z369L~uajeF=Lx*80TBBrau|)gp1x8*ObM=avR_^kg>zl_=>UMaT zx;{+b8fGtlFy2}@zXAdR^0=_6{nFk3$Rfm;E88PIeW=JHHV6>=O_cRdL3y}3SLti> zh+9-stm_#0%ov{ap7{Wh z%!eytLn_$Nh*|;ZfKm9EA>{j#FKXiJIH$eSp29gQhmQF`%LPIYnM2l)Y{DjSh5tJhY1{zvgu3 zI!VsK*KiP0s1{yaY}feCty^g{81m@;+^Fsb;nAvqs~5aXW3A|NYdC!9mzGrO3M-nOj3Z7R!b`+MPVuB;ttL_;K7zje}LWIgbT1MvrA*k9t|53HiAav|;9_ z&HBc-FJ00C`bJ!4jUZvz{UIz!3=7=d$6qWUCWTXE9Yj{o9&wy>^1G-62;thm8D1|@ z{#i^X6c7dmk_rcA)w*yZeN@g%Ss?&==#_CMLpo^K5{bo z``vA)9m#|d?b4So3p7nz&-sUnAM;m2#|HuWT06Cd?d2%F+P~tb!{hE*l4`MBCq1j-E7~(uG72*^I?F-zB&{V;`Sgqa>~jb<<*f9h%SNVy!)7KF(5w_ z{v8SNL?@M$F-CVKywpp{K{#?w<^BR39GU+W@FQ*&Ex&C0O<$|muC1d$52Nt3*hyjv@MP}SX4Sw@=v z7ijk_E~;W3aM2&`npRLS)TZ}nWLYr(!C-4L2FTfrvp*U|fc7m@^qH(di zt?hHh#y-)@{g1K^k<6>MFx89TbVnj-KBru$BrZ*k12`f~(J=e&zd zky_&-9aq;50|$3M`i#pcU~;}M_f2~I3!#G)2`plPWvFh*3bKnQY)0EESm)n1S#5rp zXjHPH50@=$=47HuHg`~;x&H3mFT8fIndBok=q0)l5>cX_*0~F1K^F&BU7c{>7tl>TU-79?`Px>doNoSu}~Fh58{4_1>;FsjvVC* z(2xdX3^QAYL|FU{lTINN4i=u3&x;dDA}i$}z-(6&mg9z^G$FOdYFdY+S{J zh|Hxgt2%fdwbXkHU1s?P4qjRX>|D+TJ*jrD@}n6OR*C9(A3U)8woTDfB`b{YEwVM? 
z`Olm=BaxiLd0Pwz6%9N+g0}amRQgj7DFlec!gBGtZ;$W^$lN3VL^Lge(9+RJK$;h# zdTZoZ$UFWBB=I#SdG6s!puv#fNO_~5N=ni=-_d(XCe?f7`g948Qd>1wHlz!X$gK3e zjk?YX;8J~w!5&ej*ZvQ(evnsC2~sg_OL-G@hGSPBOAfL&)B=Ve85UmIY)CoUbhXe- zX`&OF2xX}ZNoW_rRUK0fp8r8?+&+{b-b|YbJ^mb{!d2{&=l*IO-UyAba1zWWq(0hOQ;byC7Dd}wd7KofuQkU3G?wSFKxx= zMDI{*`0iegi6L5cgUAqw^O-ny|Apq@+7eb`Ke@jut0h@Se3i-yj$k(wKRs6b>>&zM zoZ1)9cmOEwkBh6VaBdfNdORJ8mI4dccLa)Pp2kEi0^<=74^l>~OUuci63n*Kp3GtdzTZ5Cm>4k-bxxS$n0Uk=ctE-6jH80o}L9_nA! zZa-!0mfCeuDZ|Af052k@B@)^}d}C5-viobcGGCe?(lWx(26%aO_WSAYAS!S6lPnvK zBTb}?t6})Ih=@nM@BB-C9BVm91l06#^pSlTJ*4g6aL+W(`O}YnCsbM1@k6CHU+et; z%B`3TQXR~fDu9ZpvXj#z(}V|)9*KxsvW9?5G-0FSb)@3;{_&-aTxKYxMHmnL@3-Oh z&d*cKZ7;)pp1>d`#uIB@aHpilzSV=$8KI|&D#YAAl?AzkN)f(G;Fen&m&u=;Oo(X3lGv9)wXC#nio{hSRc3MEu)9l2K7 zP6y~w8jc*UKF3bChPf31P5fEZoPyPDmxiQT_#J`M{}a*WGK2FM&zQYhj$rABul z0D$Vl2b})2{>LW|9RGp3Na2T`UGTZIIc)Of&WT=CXzL{UvKMNK?*u?}#FHuB4$i6i zT>X^!(z^O=u1TV=Y8XFAw7n&7{YCJlIl-a9R5DSMn+JbJ7L+j+X=VzA#tjlcGpy1g_! zqJ)(`1*+DKlH_sHvhMUDM|{qaDi{oVFD@IhL5z`OZh-57J-5E{Z*ciWeQm<{@TqkJ zO**ja$uG#inIt>Kd|I)p{2ecHHP(JM@L`px|7h%Gh{Ow2Setk3(96qDT4F0nOMb`O z-)tN8i|mFF#G-O^e5X|qsH(uKZ<>_{)>VY&DeE>H8t>f{();}u-sSzvSy|&fPW2{N zS<_R^`gd>iX$Bdr=j{`$SP#OZ?IIabDv*mvyMRo@i0A$i*zxh1+4V$W#QM)K3rVTX zQlW|JEumy12AOFqgjf+ofI}nsn{mn!X$N7qq?p{cb7xA%B+p!M%0?WOhlh8@Taoc0 zH>H`#odoxWczT?bRX6bi7q@sD6cQ;>B$g+OmUc0%lh-g1s6OFs(FF9r!16&xIlG-wT15E^)-VG}5^Mkab`*)2&m5%^*Jq1G?& z8r2S4JHLHsHv+b+`WY4$F%7J%B5#kM`s0(VtjolM2+YXsFMNjvjI?xx&tEz07D^XJ zde;&Spuk-Sq+2^Z9{qG@z?wBJX!H92`cg+M!?4iuTse6k&J*oY2zd2nWaqRf!Uu}= zgk6|?Y9h^upwYHrSYK1EyU*;3xmhwUun@IblufD64+c2k);?lFn1LWe9K;0-3=qe9 zWdZG`T)lc#oRSoNL(sC|P7K|=*&*tzwJtq(bWf9vY)atxjw33@(S9LwyNZzzRSYz` zA~p27dJ16Hu?gM9X-p=?cV1>ZFNHwWr310MOD54z?j2cULZ{P*jv4o5>&<*Q?GWV- zA2C8+G+TLgkFE&Cg-+{886&%fn@PSbFu$I2W;gC^s)fsZZ4uAl2Fc?`b||~6N)w`x z7n8^O6EO|sEa5~5;{nx2=Ool|RBza@f!nvGCUN=xg2s{Bo-`iHZ`=rSw@XEiwfymi z#EV_11eqpUpE6j&2B=JM3$1>t%7M{?~Z3VOPd>9VH4SJd7pb%jiGh;@oPUVK!Q=2-X6fF5-=o!*Tff#e(5n&^uiV(bs< 
zEm*jaCsIT&q!`(-CDI))63qjU&`h$df?fIAyz~{P=ZzWL{!kLb^ z+QMEOJA7&Gr$Bn27M7M@j#VlGR<8o=PE3OK^wY|m*;r=jKzi2MWciV60zj4V7cpq| zBg4`m){sSL{O!xCOuXXKmBFBLx>(Quqxs$HTE9lCt&T2&g=j0KH3 zcMl5K#(GNdO52dIev@xWx35g2xdPypZfa^5{CB<=(0_d#tz7Thybk3ZNx2EQo`!8f zz9M^JHtzZDi4a~$I#;rX-b^rzsD00-ksznQxWuA~Lx*oxy>EB6H<-dDe%I%A-~O64 zOAic|66E!Sw{#N?#DlT<4ruD2`LvWGOAEk? zCG2@uShzRt?k^e~iNuw)qmYPJO1ag9K2uC^ddh4J&h*$qd^dLS>;y41)2zsW4Z5(2 z(&dk91yw@FjvenV5OG({-)s{%IL{bIyr^zW?L#S+BpOtlX>z-Q_mH%10M3EhNXnAG zJUlYEkboZi%MREjO2+Wq2inBtasRksxgl%y!Gb=f>oXx*Nc_jlR79Gvn1gy58ekEi zih8I-@#6h1kp@mNsn)bu5GrotJ#$??Qh;5?TFqrC1)ZWfppnap(t}GcQ?mp^`P%ku zPUh+9W}1(VQ9$Dc_k6jUi`XDkubRnSrXtFzOK!b7mOgmiqD8BbMsP`psK)}`lpAt^ z9UBG&z>rsxxY2!aaV`Z*C@yIw9c!ghg*hi%e>*}(_p!$R6ute&Qv<;$8))`7|F*Ft z2GgUPT3Q-0MDDK0fJ`H`oAmMCbZQ_|E<&LV;kGC%lS(e~mQ2x)rQXz9Hg`htQ*<{Z zj9%e3@MM%g_-k|f1lX__NPW(&_|;2}>aAPh0qcN2w04^G7Do1vn<2QMZw<}&6&)XS z!*x)bPD~9J^jAgzGVFp+(4DyqDs?iU1eH>IC+wi$9G%S5?gat-<>6;O_&S<~mvo#5 z*W0LDYFDZx+_H%h$K-qwbVD><$;xU7TqfftZgw=W`bi3Rj^>Ao`KaFANF7wrKlB}vKJ z1Ux{b#CLVq_Z`%btT+hQQ*Ikvy|UNzT^ zoGX)nLvH#rH5Kef1hf+2!M+bDuD#T5@KhkZX|P<{-aR|7;B>>q(AKU$H>s!_+0|m% zLNbrpAfMr=dO#GO=xWdc4uDJR(4lZQin8e-a^)pLLJ(t% z0BkQ4LqFYKq47;p8^=~37Ue1#79gr8@4i6?$^7SPXgLC9M)ze9NvVJdiQw6YcpnPf zp+U=5%3Z@-UO`JPtcXq6mbi}52M+k7{So3)q_xn3)a2qD0rgYf*=(kX;3Q-pvgDzm z?_P8JdXD|;P1oA}=OVv(1{dPpkcN%3~lc9YJHrniFwP`7EZhvQ6uYK^4wiM5(jOk$7E3w$o zj~O4oQX0d;IB{|CAP#V9&GirmES9NAcsJ3=Br~20TpjdHY}~Nt16X=DUk}tzN~1uv zY_ihEjmI46C0JnkBk>u+Z(D9#ZPOReB1jl-XPEkZKdwrdpZ43BcL6`ULI$$nAv`$r z$6s_i)R-Hfe)F@tY_`c}?b2kmAW%bbmuD*g43wxwVY|eOKBAyth7|VjK@`osJQpwa zeV4f(27El8LwSNgp;bteM6Mv|XkgyfG@Q`qhZ=7}C239plfQd8P{@SR^Q}*%23zLS z3}C&Wx<}vHVax=o5wX3KKqv90B1rI-k$ygXX(6fd4n1BDV&F!(C<+WIJ{28Wy$}d2 z75swHCYGz^oPT6A?D^SQ4?+Uxm_~Y8c|M!-@aXoTWlW3jiMV(JMSsj`7gNU{w3i5Q zYSP|vG0RXffQz8uU}@{M6lA}YW(iz|_WLT>1DmO;$h-KWnJOO+%_v;wdC11n-z^4) zM?SG-S(0*^F7m`C@L?-vyTF@6AUKr&^FnuK7qlzVwUc9GJ}Yjd!u> ze+SeMq~_q;x%{V?@W2?wGs=Z62_J)I;we72z*3_@Lwv8W${$oGGDOuWJK$zcST{%; 
z(om-*DBJv4R`eopXc>kiKsGY^`<8Rz>^oBSG5$L8BRWe3f72S;n#l~V6$W;(8ry3H zT{=ac4hp^eU#DM`j}J4GU?xhe@2vdXClNT;fCCj5owpz2E_hpgq_!X!Mu-2i^7svP0ZB?c2)2F|r z##W@7pRuuEm5N*(s7d*uJDJJ~X+Xoj4vQKZl3VsD!sJm#X-bs@@Cl3xp9W_mFRcy9 zcLjgAfhG_nyb(a4ApV>s!WYqSb2qq>uTgq?vzcvAKga(X{gH6;^y#ZC&!JC~$+S}r z9~d=><1qkwSk5KXLk%gg)W?s1>qeJ>_F@RpR}@}Xm(|VG<6gkrJ<5%T6aHmf($a;( zd!ngGFGr!=MRUA-kO+3xc@eW-1hnDMqppm!U$U@v$|G7Cpi!4_>@}=Z2?#_tL(c=m zXXiLP(vTd`hRh@I{QP=E_}69pr>;pz*Ovj&N11T1r2u>RoHTdp)F_`{6bjZltx#kv z6NE=bl%PUUo+jW%GJ8p)VLaXAH{TcXzlK2hZ=zcS0hr9u=iVS3otGPDC-r8Yd{`Jm zk(yAs>LB=3p1#F#d1CSGW|>=>AMT>WgKN@Uz@@L@rn#GF3mxr>y*5L5p$9re%2^Kzsh~9r_owYXRG=ZxrjI9(JHJt7O$BBa zoL#0GHWPs?FGDi0S$NTR7ePTjh6hqKrwZj%?XdYGS;Q?zjk?o)&tWb$N>o(8uhxGy z&UFgZ2lt^u8yH&N1fyWv)*%_O?H4)4*jGudFuwPnYyVbGPQToU4x0GO{R#b477WEi zh!DGBi25)7dheHgkxp=bu=`Bg4Qd}Cf+LibJQN8bwMg+Zm?`9(NTAul5)8$WwCcbC zx8Sng#8skvP+atxmw|7`age!vBD+kI5;+$C{+7&}10{hIjmPOBAmH5h=5%69>FH6h zqiEUU{#T?cYB;_BF8HSzx6)l>(= z!^2^Tf!Ni&@BV;qnJc(syk*bp4T%4?_r+R!+v%x>Gk3VgoO5dP_1}*u!9|Di5GPGK zmJgCSud-{U(&iG-(H8Y7Ef|{KHWhUhnz9D(e9qrkK$grcN4(Rz`m;d~Ky%PF z*Ww!Z6N$sUjbonLgHufA8zNaFkPI@p~eUu2YT7BN7YQfGby^Kyn17 zel~5F(2O#gf_n<=WBRHPsKey%FV=Rp8Kw4;SiwDenz9+==jQffF59>z@6i_xVRQ(* zf^g1Mc6A>Q!6(@ft>wv(;p)FkOiV~Vx|1uVP=eS371*jr9!vyvlV?oZRZQL%wMxzz zBU8IUXs!q6@7q95r??KU1P2Yh&M*!EI;CpnW`eDH4=o~=8orhUfr~|pri2b+HQGYJ zIFY(BE(p@8()Sd{KERoxGrPhyx4GHcg58oa2zK?Hk}I13PIi;?0hmT|VhLw5ew@sH z8+&0MkgVpQZy$5IUKJ?P!Oo{q`Z)&=EepF`%fnGm0oGS zQl=Ln4~TO>ZMX>=jvN@(5G&8)Q`2ihVFcl@S9p)R$iacParpiZMwlBFn`j6$LH;bq z9|P;eNPehZ-)XaFuVsD7pfssHPtKspT`oW~_f3Z@Q7ZGzEMA3_jCRWyc$QZYk^?`W ziWNMXQ+^x*4h%MbJ`+_P0Mg3cyLSsCq6QS^junaQ6j~MNj>?Ms%vW&^M(4F;dM~uX z6$F9%4*nwCfiE!yL|8)%+m`6{K0;=_$djL_qPjN`%UzSuDfxsqf z?Ce0`e<6b|^LwfO`;#{s(zuqUS&JpYdFmr*Blx5s$s`7?Mqazqa@u zZP&J~XzF2XlxG3qO~jBss*~NIspS8`L!CQ!u5~U2fWxFKqBN8nnmK7&;6hA@ACY~RN)U~3_k$5%8qq>j=a~sD2;YhkHXmx!7vOqjv zz&1BZ{`GZ2{rdHrTUu_pxzy0Wu_ITf0EraRSUtgKYE-XY-I9?jKmj{ArQ1rqz$F37 z7$VInKSLOv|Cm#zZ_UgQrAzPsGzJ0jy=14PW3@)*Sa?ix$=U@EyN# 
zDejO{8JWOOKpYbAD~j{*z(W;MnFNznpYI(L@?{gcGT3aCt{ zwP0al$9mzlaZCLUWI!%O+!~WLqq~y7M^@Sn zx@?s(D#J6yCGRSQPU00-sX336=_H{9TGS&n&&IP0mzq=-ojNeUe2bo$vouh81T2Yw z4G8n*{)~T!)B-vcZ4)Sdc;PmEZIRk?yN&(S@))E-U!N}-L$Hz#qFj?ir~Uj*SUr7>iR|2%sdqze~CX^OX3OtMzbO;D_h*goNY8rAz2VC0&us()@3Xxulyvfd`_duU_P3 zO)NzYz?=-u&2RROu)Zmv$Rb1G91VX%h^35ymS8#ttHt`H5~{nHzTDBFC^*NqXtE5l zf^xCDT|PVObQ@7&($X6*S#VIa zQYJ%>0HPmbUXAfh?(cYtR-|Q>Zn)&F_+=Ns#DdqicUMQ<()kw8hNUm#H-T74bP43A zAl0cCb4*{G5ig_XTl3FMen>ch z_80P0X}`tyA`s(g@CR3})RKa&{8w4ZsfqtJH(1w1CcG@5x~XAU$Jk}SvV(25mbPu* zzS$5}R8K_85|Kt1M$5SJwRm_X3O9jpn51)Xw9OCVqUg#FsQn)!SFk zpKGBRgBF~AyRnG6pghS{e13TE3X~1Nb#16&qw)BeL!dByugkQ5QhCy}0r1j!uU@HN zoyFm}t+Trx*$WF71|B|q_{S>4aP4nHTOj-q*CA#JV{2WR7>TSXjppIg?M19{$2so& z5HO04?&(`PBT`ukY)@mml8r%8-L=V@=&OGM5L`bjEA5rD7bi#P`I3L{>5z^zm<$~B z32Clp!p*9&(?r=Pf=Ql2*MnxtCYqoP<$A7ig%#4NwT=Z<{jfiZ1{U*uD1nO}miXL% z$8e!4+E-k{Q5K@+x3PWP=z7wZ?dEbm1t7YWPefa8*~C9uEje$E7t~Z>v|R~P%IREd z{`~psAb zWVhjeg-3B}c9%6FLd?2JYljt$P6Q~4T{kFdjQC-SiUvroGVwiy-&`muzz>J;14M(1 z=V5loHg*|FG=jfFAMv4yt9cAUEujfy;2m-$;|ocW(8_e3lkxEbC_;$e@}py=V~@Mx z0flDJu%VtwWmu#8+rpxejcBkU2r)K0&wC#Dg6o+iu41Hbh@-&J9&C{);DUo-j%dI0 z*QLI&f10pmYFyXIlP$?jU^*3|Vq+QQL82ngX(F17Q=qs}oVbCQh&5E^7tuG)?A0ry zcbjS5JQr%m7Rca9x}Phgd~*(z9)F%z3eawOm`$SaqkfQ}gh9!63bSz&QLUot%uHT|B;*5(-BXu#MTb^9|+}cN=GeePn0UUSmp}~F7|9ThM_T7 z=V4&30>hDM$p-Ai!~&Us*3iV$yNN-#8}J|ptd2<4Yz-N*rbio2DJBHg++BMaV2}NV z!7k4^q!oT3w$A}#e?|_@C%~VWiCV0s4b&F-x@D1II!^O@L7FzgLdqYxMB^=nd`gNh zx4TJ_1-|A$^vU8y<-lw0&+*s7){y%T>Y%Dfg{_e*3I~IRur4J#ViSnxUV=f=6iHMM zIR26qR8N%7(f@9=R=1|{1r(7SFEa7`K&wP4{8ZEhq#{sbNF~Kwz=6m?flEl)l|Tb& z?$D`h*_`rQW+<827KL~4rLk#$D-8=jV48^sY%Q;U)j5=;F&aPm~x~%!^_;-t{Hn=IZ~!Io}tZZ z%iKkfuoDRhcbqC){*N?1G}+aud;=*|SA2i2gOGdVJ?{5eY*n$q|EG#s9W|~w{IDqX zu)PQ9i%y-dce6PY6~H>hAH;`V1?K@2PvVeGK5gMj-o(p__{F()b`uL9rGC@;chCSM zgDsD%Xf3NW;XP~2C_K8;wXpgQmZdE-wncZ!FsicN&h5Hcw-fta3#$yWv%B=Ty5pvt z)hlxQ`%GV9*rw7y9V>mjGyZ7TfBL>TF}+>mHD$XZnOQ`A7)i(}>T8NSaa6}8|InjW z2ze8rA9s3L;?x 
z69vmdL#HJ8p?XO4+GC{t=fmsh^!f~N;w)9|-SVmF7${9R<$L`Eail3^ZpY_N^8)V%ETj1Ya% z{~Z_$RcGt+Q?DORP(Kj+zdRT4cCKj2W5Wc;IMaIc5^+Y|^z`&(2#ln=fd&E{UZoPM zf*P@AvR-5ukHNUq$L@kr>wzo$p8yZZ*#{A?+_KTeIHatvlX=|>w)aqUuOjhVqS)Z8 zTFopb%~E?{1-59~0b4zAi22z^SZ=9#MF+wOCHfKc$GvE$Vlu-|sbbAK41SZJ zFY0SS1vIg2Lpc2e$VAqQia+=D%Hi)UUv1;U_ONhwhNeKZV66bUMem>_o>G@?WN(58WcHa=t z=$cGwvmoW7p;_hJs43xbBp&%0 z$*M*W8V>6;Mwmm7R)XO%?MqBE(qmHlsVL7hw^2}mu@tBVGZVQM72XFwO~;h$z3X>2 zuhEkfaZRGGWa%A%hcG@3#w?9EO}30A2@d=i!Hp+g0y(q9VgN*V`Ps83ihZ2G5;Vqf zP)Pxtu~i;Y*JbaHB}iN$(S1hp7+8~7Z~X1c29an8mIoX!+H}^Wg-JVujCbvjzFd7L zmKx0U63Q+|q5KXQURfm2OBPASDaVc-Q@)TCCM-!#(JA`c0taFX;*gXGUdUAAgMUi( zEVQi}KFQizu3DIW(HcB((j}42rE4orO6ncNacI0Q-2DtdpzgEc$CsJ$X>*pO!0eO+ zn#Jy8%~VoIkc-;xl{?`#R$Pmo3}S)8cd&b3)sIvZ$$FORMAYttMyS-M$^FU`*p+VO zZo*s_DM6UY#pUaJ(1WrP!dSwfa3ePX8kbZcWq(V9jM>pJ2Se)}#gf5GCfq%~^RF5v zl__0s)}feGPe#ErH06=SKzIQ8b6owzQXM_%8LX5VIkqkq`t=a&l*I@BS&q}^U*GSl z*lE+JuYoy)ncIK&Gi8cnA506K!QMtstIh)!y3uI>NB z*qaB`yuSP2A9FJ7Mz#!FDH@EKhfT7gGM2Rp5h9c^W9CpOV_4>?RBD-ah$P7nTWk@T zLMfT2vdu%!>stB#zUMrDJkM|aan3#twLZgr-`Dk?uG3Th!6XVU-yOWS(m)`Z-QQ~! zvNxbW79S>W8Md{mXvtZJA~EUIX-M&xp0KE3RUJFl0n20tswHcOm_3F|xv2lom$a4Ou1H7V3#0jWN#S~;hTc7{OQ1!%)9Xd2@ z+Vqvy$`^w})V_Ty7B4O@rH+Ii5CZ5K{|~&MxyyMd0`-^)(T;9FoFP;D4w-XexJRXl zkToS$8rknblw{wankDlG20c!S`203mrF{*Y@7}-G{1Y7{AvyT;rfy9pHwty?*RLjs z-T6xd5@^rgy0!7`ag{3PKAofLTR6FN=@F>-dI%; zF%YV=y_+|Dl^u|#=kKg68GM(Tn#z}63``zaVq+p5mL?Z+Ad&fC8Gnpr|GLQWe;qmv z-I){hEq$r|K?trpy%u%0l(%MP>L+RItnI$CmS1XLmN+BZb_sJdv4eGo@iZrd+* zamub$v}4>5b+>*)Y$iT_`qcZ`w@tI!e14l0F<|E2idHjhv&K#)?R1DJrRY5T&W2Vc zCZ#9V=64Tf};&i8;6^rt9T-}P(=+V|PDq402bTFKC=V(PE{`8HE zcQgoJs4?*{|A@p={!Q8R1lizaSV4WPE(i}K)KT_z6g0@`qYLh_8OfMh0A{n?;p?o? 
zTo%%59NPcAads`CZMeT+6>tME_XR_dg!OS9+x<0nna!O>sGBsDlAxFnizST^pkqw#;A0tnSu|9cBpux$d-qB)7 zylN=MHO)O<1*k*jRNz^ne-BJx>kz#VPqEtk_=+Kt-~+WQXFY1F6|ZE(I?8{PheQ5h zXGT(4MQ`^4u$62iOw0o(>|*JN^A;(jSC{rvAOGzMzT=vt!&tj+9*TLLapFS)X6heU?DAUr| zVc@~_9Zm}v-Kq+EPTbHRML!-FrtXJmk)xYie0j@i*S3&A(e?*uhzMPlad9HX^{6DL zH>JX$V-xB~a1Uxx&u{G2BT$uZ?l^5sWar;gQ!_e7Hb!@(5D(@;CxF64=3kL2roN#k z!7}&rSnIZ%kdB@}GPQ$!4HnT`6t3^zwL8RB+s&XWk2x=t-S;a6%FDkrYBHbYJ~jFc;Q4j-uEKGKBW?#1^R(GIM%uS;xMB_^YT!`v$zRWRhxQ* z-+aOuA!@r~Lh(Fv(~2+Dm2@4Fs|HlP1QSR#GM9?CJ&^9ysb{G~+$WTtF8%Qvct(YH zFP?7Qw9C|41nh)|Y`=FHHTUzl{<{u#IR1_By7O$u`=;kn*GRd?{wrhY;P;|lvTdoj zk>ZkrzSi_=;%VV6Z+;v{vM?;@KQK1CaD1{gRbi5oq_1p20D@Y<0H2qnWXV(5v}teh z!M%IsZ;08S^>~Kbkbxuz&YV}N+JIwcRR6qba>d%3&o!}u)26-StjqaPUtlo;_8 z=F2C(FCZH=a|QkUs)IYp-R~FP3ABBh65mw)F);mwMW;{bH zSZvZUiQfxQE`7zPckrW90$B9xuMPspJWG9QR$5uoYpE4v>IN;8NN~rDY5c3xXS$DF z*O%}I%mQWN!?1IQ7Q4=_RH;((!qTp^J7}E`exC1jfz}qRIDR^%_F_2nP@`lHcd*xp ziQh_elYm6zSm6z6dQ*JdNXRZCrf;Zc8DxUXjI@eqj53&JA=yT@Z|Ymrtt&Z%Vxget z{f)ZyS0_rPByImB!`S_tzJ>B+B1C`B@O|;upQ8|o~A8U87QK}&!81Q<#}ae zX_KK|?BKcx7xhSD!Zs74MXg|V`bOsWO+QLD1_6d-KuqTs6nIt*NAWU=@?p@CF{}Jm zlz>j3JAeMMb|%}Tc!FinS7WsGS6*-c9d#I zGhxSgjz`{=Xu)dDx5W?T21~%MXvl!0{NJ<)9nfcvRrUs+p45Y6b$5@_JS?@*1CwTB z=brv>%ZD%2H*#VYRu5w4Agbg_69@2GwvqQk#P$4Z!v~$4UcLOOVU_B?hjc^Bp*@l& zR5dG2NoV%wfCBd*s~Lsuo~YDC<{G8x%Pu=h%r1uCEMi#-_5;jfmgTmtA}$euKV?EA zOdApf(X|7!h#8zTj}~nLL>J4JExv!)s5Zr(%k3p?hk8>OgrhSS29Q`3%~<5vlZ>Bz6DItQI6EB6Vp2G&lJ12gx6(?-8CE zrms|jE3DMjMfeL)qY7F-h;R$C4QOHU)eTJeF_{jZt&aRM=9p##BSH^NOIu)*T0T0$ z4S1iYO18lo>fw#!OH907BjYX|e?0-QIjltQ3FGO;XoO`n4UYT(8yjoST|807ru6Ca z7m`oM4?oEJ63hv$j>t#9@9#gvfh*?M_we%CSa1SNPdu}nhVdRtj_1a|SbON^v7<+u zChnN$_=m#Qwwp@B=GZKPIs_^(H62*Y*q0a+c7+Vix**(@L&-S<)|4Rbo;@q`n@0o$ zgoEry@h_!frN7$u{BM=}qrH>a$K-BoV|5f#>KHkvqtJnWXGrq$>>q@V%A|I&lL&MO zOC@p~=A%|);d_mm8Xdt5p;~gTQp70!d9h1eeg2+o(VlU&2_F#^pJkGo*^lMA!=$GZ z`uF0+kXHvQXMnVICrV@r8ASbtS{?W(TQDFGL!Qytfc;6@f~XOPKJIL*Jy!UD?`HlQ zL}&Qjs~=BO%v8wF80|%5bpsh%MRDKlP3>kw)N#(AF5?zAItD(v0dYH*a zU*leHxyMk_wc 
z{EUB&fEer>eTDzogAIbT#)#IpmLHA$zVfz!&m4*HkzD#sbn;U6v3*PDMQ&L1^gIy} z@Ut-#zsTv0F3J9Hyn~|b9(-QiOqK)PlA>JM`KNwnDpau9ZBr8ygclLc`%-#4jMy6& zR{#Qt3xJb6@G>lZEGiIy7KzbqQ$NggqLlD~jac|BHpFE8!^iqsP^*)yC;KE5TYSPv zn6e32wbor{xtZ0Md*-hO^(Ri`SUg4Myd8 zkXDLqORA*M+xMA;D$>Da%c{^|0J{m+hSdm12lWMXwdY0N;`r$KpaeHdtpH;O@z;A| zN|(`El8PzfTTL}dP-m@&a~5SB8MZ-Fc(&Oy{5UqXj7=Vm(9_5{e%`kB$Y({? z&d8?NLX!kfntPFMkM{Qu=wuj~DT9Gez387d#GZ*d=VzAGy{NjE$rBT4#DZ_H+oS8i z-7oB=d>0O%kz>bJp%P~gw&D@+TH{o+mNWBo-mdu{>okd7=T(;)LHyX9xS}MX+F_P_ z^i;{d6g%LYf5!nu$W$31eD%tr7GhKDNy-oqxvfuYKrXOd+VU$a^;vNe%SiO+I*n*X*MJ2 zWaBpi-cn?WiDVIz)*sb#mG{&4;7^jnG3e+xV<50{iiq0LxQ`e;TAgq={O3Lp1IjW% zdXOw>+EI~DW9(!FQ1veJzpDLtE3$b;*8!w?E+Zg6Qb+dD+`GJQ%gu~)=X$*Ewj@59 z7i<|g_Tb_ZC#~VD&LfIz1&!qH+EbkpA0O|T3$`tFo(oDfCVi~{RM`ECq?%K^<{Lk! zd)DBdG+5`5n@WHdNCxLe18T*PlPBj!ej5C9JnbCn&Nd;nePR}()Ok^ZvEnxC7RKE@ zy|A0KAzbDjP+A!dLPP`ybHcy#gBDD()Xyx{1VBqkmq$~_`A*~QK`rk2EjBLB54cz$ zfKVeG8JgVlap4S7FQQk(nej~;8XMci@-Bs?ZL+Q^jUNi3%WZ0owGg<) z9=Y$-Q_T{8**gY*InT{ z>~ZUrrJZjKTChs)f+HtYK$#djthjZJQ#JKV_#dv2o`HVe)_dXXZa7#q7p!z^VI$a< z#&@dh5xc&2(%}23qknz;A&*p=tpp*`+hw-xK{BQAF#7t3yc3VRiGTMxv=8u5uY9s` zadFXPW+f&i9oVo-gt>e)wWTSFJ&}@901AKzQJS82jE!=5FpI_&zZW+PcuOsog$E|Hp-HXNL01z{^AI49gck&@?#CagS91~PdM)mva6 ziXN3ZmG76K>f--yvt_nEF<#pQi4c+&jvCwfH~Rj(b(;vE&tJZr;bH7B7VQ|9yj4QR zMI%H-`|_nv+^9q$RPg)acoRfLB7^lm|6f5CM6E9IB|;!EJXibX@~EY*+n$f>q!@d+ zumZ_p`VC))_k_KJ?rlQP$e11yap-aRXfIKRlv8L0{OzY#5NH*JiK3up(9CWpPcB-f z1B>8BWINm;%O*{FC476$DL@&dL)6^>Xg`75 z?%%&pi&T|9kx7eIbAF`GYdy$dQF?m1)gjIKg%|ZBv67JN^XL)sY&dZU<(tp(3o!g5 z;nG=R%r^EIiCpUf-Ez$5hX8rb2E6#ZL@-DEhm2YDxLM1YA1O%p)`-5p`1-`-$1k!2 zo99t12?PVb0?&7Qgrk*J1YY`A>BV`YnmoR39~~bgcV69$vsPhf#i^+uHho&ZriXfm zxtW<$uVvd-m>37R^QMTWNof&rzxT$b!j%_8ADsi7(Z3vu6KJPIVoRe#$tr+3+i6Qu z()(sK@igofVC0cBX40gZ6dDq|a#ZD(>#Lpdt?8t_NNiMF-<3q{Em`}?;c~|s9zqEy+&%xxWx5q5lG4qRhhX-iGX5I!cPE=96Gl3xI4vP z|BN+;U-~>c?Au7!oMH+%f^R0~W5-X!+2k?p6NklNn8TogKX)>yNOUYYw>WVyzz6nI zsawfpF;sIPOqX+pKRm>1pwERK7}w_vMK05fwoU!ef*lzMF^E9qoXU+z2#x&X!x$Pu 
zWW@FlCUd|u)9Ng5sNfjPLjQwm*3-LBwBg9IL^S&_WGE*N8ecDc)Vy*`UrrIwe01rG z!dV=;XHTKSYZ zNLbXuWpJ-jbqQSo4zl}A zva0Cp#4F+P?o=+)I9-EQ!;%}@XFhV@mD2T6>POU_F8ih`W;+F-AB>7FJC!U)jxH4cmDeYfqQvZyqKamB+Hj`AcT;;;+so8YBJgU5VTs zE`$;nz-IcEuXBQfsc$RsVUJ#d_8LMEedUE{V}Hg8r!vM;i_%Im-I4zx7s zD@wP|{qRB)mo_==OI$&>4}cr{I@Dnkhz$Ib@e>M(-3RYBR4EO*1c#Ju8}d#eL-f)O z2k1{6PElD&p#qVU>&43cYD=la0C%dntz*8(GBy?&pF%oMPvGRJV;s5sH-FMvf}k29 zxd1RDR2f9;`Zb4!_74Q4qflc`reInOoV5+|PAxt7Y-8<|xfl-zM`8doVB=AIU#-hsLl{km~(|n>bW*-fHztXoF2VVzVZ~#|TELB^w{;~F4#`f;ep#SEU-D#sa z@FL6)^Jc0zr!|hvBRerl^*4&*vmCyPf;fawIq@9`V2XNyTgJ$(As!7kyd5FK$#%|m z8^Q!9Sp<}#@Nanj+e@y0KJdVtqMyBGAu;Ay;P~_v&YviQ1fc7yYGM_=NjxNkghgDs zF%|ny#6{#WVJA<1bC~}J8I`EA!5IMP-^8nmM5Cw)D0w{)P>Y?V>yiUapZ4b&&-wE0 zn;xQJEhSErcm*s+XRcnYnK$o!4JacoX9=K-6E>pc43zRFu^E96nx4 z4JO*DCo#AvawgEbdR^L#6PZ~4p6q3l?k7jBYZZRe1r>y}ON=-37+3c_l9n#>J`X7V zu`o;bhvR4i`t8`>M| z7s92igH2!RCUur!KL}j&eqv zcNiO8uEl~wL?=U09sGAti&CF)xGH26ns&6c&frU}C)@;R1Y~yNYM(HnsVH?0b1O2K z03s)>L{nfM7I+Ul{Ls9mty;E}s(|2>%<>I29nqBw|1R{IAV~xcgn*!6Mq`_sOM{2F zdE3%~G-^Y|vx0!7QIjTBh=^`F-Rv0)Oyqw`I!bcpbPSO}Lqu4J37}q~Q0#Wy0ivjI2Mibx0@dOCn*TPcSFgJA zS7PEK=F~MVFtK~hNcA(U70b{N_90@^fg&Wal`O|UxEj%0@Iy$|i2!i4pJe-%z;s+t zpXqVh{%~Aw7P0NGs4J?~o1Q}{6FwA-BuPf&{f=_Ys)0usn_PJX#BfhGfYE_!3MZZC z^JLYFn|QtGaw|}_Gs5;9BoT0TIhGsUcojMSp2rwPXUZW-H!h(k!5?4M`API6)J`Fl zUeoD}9_>g#wT#?E(T;t0P?XGX?N~$9bCpE zqbgzT>f>3mvb-l8iC&qS+9hKPSbV#{K5j7t*3CpfODz5ylIOSa z@IL`Y9`)A7%^9Q-#RLKjWPigqbb>ZVq)!PpHZt^RKk{1?B0QsS9)ld98TAMp+M~xg z=R#AB4b+w1&@;*?A&h}W(jLruIJ*i589_C%1s2sLwY*uwhP&^K$Hz1dXk2hiv(!y_@1O61*raN3J!bB=}@HC?&(7G!Z9u2^C^ zV!bQAU>YMNB&@2$-3D7x6(z_+`s3&ThE1AbXw|Lu%qA#Xvvmr;*;Zs`ibU~VE zKYq8cI9oPk03RN8UmNBXi;9g`fF>-A@?8|d1?4MRO+4G91{YVi>%3$?mzO*&L49Mt z4&w$5u=EGzV=73DJ(Ydd%kZGW5$wF(M4 z{bEEn+5`zZHoxd$`>j-oY%Icfh7IxPSEB*YZ2o@1+u3=s`^8mj)>PSjKY}PN-8`&@ z>43$w>(yZ3zO!`X_0^cds`uiOuQR>8R!V~VBXs~|fEbb(h#~q+RzKU~1(rlB(gZ&?wdpP+~VacR7DxRNp;z06!ptR~`iib8fOH>Wa8*8Kky|K`~c&xRkeva&AHDfOO!_=`Z}umv@xad9gH;e>Pmp_CE6 
zT$T*4qheP9wvQmP=@(82`3lTd{@uc-_24%QXfm#ko)_&Yv99SWUU%8l%qPxvy2E7- z6mHTvZby#s8dn)fMEL&jH`;k63s5WLok6fKsRKyWmePJoLkdD`-rzm8vTNVIVMfk2 zg~pntayvm(B(-$KM2ViH=QW(1T3i{koOHHoKg^?-nmE|aRo}Y&c>VUw8~X9V+T>Lo zjWx6RO_*4zwCXFVpL;OttqTE@fQT26ECq4SaXPpL)d)`faEmU%%qPzzz~PJT4e;2e z@lQ@^wdP9BBE=|>Z%W+o%BP;t__nPN^?UObt)G;B zM&;=@rfvBudJiNSGAo18sQl2_ooIITU|3zS6ez4v)JD6!}Z^wLeD?OAg{34?>tYA9b7Pf2Brc(kA;CZ+kDG8i!UL zS(K+HQKFSoB$?^+1MndqWO0SB*TqRq!j1#EQu`5bWy6wZ58l z^TNinah&*?molST`ilBSbW&8*WG;os$;pKZ`ig0Gkl!wukqj&@wr~q{7xW_OckUz$ z3DO@KjHEd5f;c1<9jJHQ&}L#APJ-kUC(0@KePrIR@Wxb^Dvr7E`R_|grZ8km{)~Sk zGEnNqY15YM-8+ZAT8tmgM@7zA!QuhwwpzLWZ2Rgv4s3e4F_^2>7H z$xORZ6d}Q9Tdq8DqNAW=^lr>lUds5EGg^*V)sU-1c~Q@c<_216#F#O0)AJ;AkVVKo z%(g-;p>74iq=Z0571AL{v^|blaS^k&&OniX57DB_*gwt#{OvV392p42-yemuKlbN# zFk{`fSR-=>@C|TA`bl`{XtnxhRp zMf;AnwlX_X2Hj9O1;$14SAX$YAaXcuh9<^NBfnGfeQ2;Uj2%kC>q%|S*=2U}o>9QQ z|4$yB0(_%|2|_dDQxXqssX9@*al*}bW3;;On6Y8rq>(3~DB??i?E4`PiX(j_!OdXh zt37`J%?V!QkoHH*5g{p7CYsIj9IkC*3=CBVJ78+IukY{1&JC%TrQd$>6bN@K!lvdt z1~vj*Zc=I}MrLL)<8F}7lu?MMrT~^eAAgJcT5usb6|XUADRXr{HQRZhC>SLZO{%;* zw>mt4HMOI4-lVb%UW2R9+r!hg$h^{N}OR3SVWi7cc3R(E>CBSQODId+A$YOCbYBc!dM zNSIpeg5hwOk~}PQs2=XyO_Je@)$VG`*ok@jUnON9Thcgt0}rr5aL}#d%j+ZEJ(+Qa zGHLh1(x0eB&H*!FYS3NXtn&Aw9}odIUM6R#L$j>fAAWwzd&5AU%ke?Dxxo*w|O^2l%y&3ddy$%W_+OIKqj*gL;5>$Np$H%%4 z=YJZ8ghl3ENbLJVC_`r-gZe0!Nfk;aj7Elp;t^F^Ty^$6JUSdJyIq)1aP~_g8_dYA z2)I$1ITG%lgd0R@vX~dOQKwE83l=Q+e&2b>C9?9+0m>v8jvvI?LMh^arx+8=lh~44 zoHN%D|0SoSM6)4nMQ=KWgH9#J&29^Q5B|irs|-HhTD6gPOV{cD&{IH|j!nA@bmk-x6;=WTJvYpSI3CQ5@(;^mWBkdn*rPrMu9b|*HRfC{kf74??6<_g=RQinmr$@~=2f^(5||0NAuK#$|cmqG7K{2Lf@ zJ!#pAr9(g-g+NF3Z|SA1Mpv&~Y5H&K_g7vDu6U2cA4Rzj8{60<5R4zl^y>8M*Nw^c zby~0>jTxT_8DIod)r!LZt|EfZ;5E(r`T1FiR9TY#6fxg%km~+)@R*HCueu`h(GLEE za#2D^W6}CUD;%B+fy-#;@(MhWL@fkyStj44-ew1P!QsNKz$-_rjeVo$9w(;MoyNbi z|C_Hu5Dr8Ur4L!m}oBzE3zs?P~l5w%(cr$^d+)xDnNy@8!bgc8M^ zXvUj?Hx(n05|=IXpxAtYsb!Fw%pLo$u93P2C}Aa_fE8P^-aQTDz5l+8Dmt@aUS!<< zeJMqce_-!cZ)D5Qya>S!=-UG|6tu}ZMiWMZ7KlU!#@c+B51phiiL|H=U$+uj0dBgr 
zTK|}e`U7LRs{@a_5Df()8W+|i648<5T+?4dhRb}Dre8g@1q7$kG~|P7hV7h8&&wtS zsIZ9MT&P>1o`9%!ve{7lfOeoX^S{v8-V}Z8`HT#^nT`#02c;zA{}@RxNQMig0xMg1 z`xO&fbPF_4L}mrpdGv6!iMBT&g%R_dp=73hShH$XCFSQr*q%z_JmAcf0o5v=Iy-6@+sJS-K28A@&5)Vd$sa{d%|Ge^1jVM1UsyQs)$b&v z#mAgi`M1Z5LY9}Mt*rr#GF6mJhVsJGDAy3drkYRFA5lMme<)TUw!?G3^yp-Wgc@yK z-6zIO(r6I1v~6n+;BkiWOdH;u1Os7YJ+Zemh`Lrbkbo{+h<>aswRVSDZ|X_s$b-n} zH-|<-&=qXh<**C&y=dcU0Y1s4qCM$tUw0p8iICVig3t(DKOho*<;<&~g>S=YKFqh|&sFqefc!=yfulhU-UxIorQ?78T1#T3b=80LXoHCmB%2RiTRa z_8J;JwHO&2r~5l(5fXaLONMkebbhO0ocQ<;0r{01M9m+Qfg} zx&B`&wEi+G2O9Q`MbDhdWmh!m@!yg5gUeY-^PX&IVw~`hwJ#;OOVml`IEdOr66X+P zq-Lj~i;}1pFt;h_r`V*SQTK4T1sASrZ{P9H$P%J|d|tJseS^)G(WEpuy}(<^jT1Kn zRm@ZEwAApUkrdv-tV)t4awcc=kCN2{2splz1y5$tk|h;nrC^(3oT)_X$b2eUr}S&5 zvSru^AsJ_Q_7VtTS&??afNUxP6WA_qSJhvc#wj4Tqp4;FZCRN)mn!Chn2<+}D)h^J z39d>0C2a&&fI=B0hI&>=O*)K2srjlvTG=X4=G_Sg%glgER-}FttbohS;7ySfSlOj` z?m|%oHa*zJ67e7r@0C!9a-bMfDTu~XKo9!;#ivf4@_QP~mLLdndg+hX_t?mp?yXv1 z%uf2O?HSoMA~z14`7#K?25%`PqO>*62AEhjB}c4v_0h+Qi)U2w5YMB-L=PyLM08G; zG!e+8{1Uevy#Yd*^`huVOSKZ;WGeKnyrzp^?_O>)1S8iE%G3bzZU>O$Y8km*OyI?l6f6M zfpL23glg=0k(~^}duJ?aW*Lcj?oGRDKa*~p+{9q)`F!7kP0U)MNqXR+b_`VNdySZtX<+{2IC)KVzm-RHJ z8NE_53>G2*0C@?4gLboj-+^T(T@mJha^kyj;{Tzkzsa>3>yTr*rYP>2mu!1%7(yT! z#tC-iWIs$Pw9{ z1(skpru?f7e4q!=>#vF*MElQcXFn?v6QozGnvusG0pW3X5Tl`&DJENN(`}<=C?6aY znn|L1rl@^h^?WaCABuhuxe(H884>0G?8OTiB*2V3X#}~~;9hGG!7#k%Y-Xk*MrZGN zjbWiO+cHHoDeoz+yDnXZ9zd6sst4hEY$=#Axc$ETC$-c&5Y{54G#7h|K-XXZ(d^1H zqAmvcbt5)RCe}&y?|pi~d|@5z*#Fr0;FFvfL~!#$Zmti($!f{Em3O=hL?H;sH>y}- ztiZxY&})JH_n;we;^d(nHGcf%=yoWG3+7LnvqSILT)b=7uDC`<8aL!xlJ_Y<3m$JI z?w^fOME{#;QuZs2&I+&5221Ll39@5b?<@4JP>p;Usm7ulmKCUjHQmR)TE*12EU zPR=t6vXUZESNdH*J@UTqcHdcgazN63QU2%-ORxZ}99wh`E-FGMYSL>-D3{a`i0Af9 z&y(rwty&oa`2F5(L8`u>rn{3KT!PU&JUoV)yQpG{9`D-q65%wPk9Or<0AJxb*!Bsr zOL4zI^%{LUm-Pe^<+_K50c!l@aqjOK85sc`s(KVdMXteiTqwpNQIb;Cu(OfFdGt5#BokY-? 
z3(_pgVbQQ~Is8;s_985HP_WXdfB}#1)L5F_F~7bads}4%MbWJ(Cea1hi>h)9q}hIl zXRrXI<19PH^+UtEN!`fMkN@h4&tl4~0tGP?nCkqla|cejuICgGS5N`X ztAxy<5F8*dm&k17-0l_A4%3lzg{UrMjyHHF4W{_#+_mf0mCw(}oTOQ|X4CfV`?G_> zZ`z>Z4=1h5ANR=3l6MNipe@DzREclBoNB3%Gn62E;(qH713y2nYhYzJPeY>TF(}=s7D_lL0v`3Tz0N0-?!CDwTQeQ8OZxO#Ob#E-D=k6=LYF%^>0`9RQ`?atl?j3gd@Qu#$lj_ zeAv~wD{j1uPL3Wvxbeg(m#1aV6g>~6RrKdHP)+%!_`Th?-;qotdOy07M`UgaXzW0f zlt?z7#vcFcLPhwh|{hnJsXj)IF)f143r2|hWQy2HEnk+i%MS1Ndm{LvmW^{ zb@sE|<1L!Zqft}iz>wVj932_0ZFEl7~lMKq$ixlW)R>Labll?yeC8gk74yc&^tU^VMz%;mdvdn-8|# z)3ia2UiVgJkRK6k?{wFB7A1#x8vqG*gO3W-Em;gtQ0v50=iFr<^gPK%DlePZs&iG;^k@{>0s zT@E)>(>*W7sr+xzn#%IyJi0@St5kzPr<>-cvc*WXtSD?Q6qLO*9^vXY2xC!2@&KsZ z{a^S%Z~^&<3rO zTs!mH%YJ2c54qU*IZ|XPrl#u+KeTVza=_rm(Ge0G$eCHyvLc1gJ7yG0wg=#?m#&oi zAUPz=d^M+x0SE))fSl1x!x$8Kdd;TegLbT2=Q&Q|ZLDTI!ZgW8XU3rvmUcWTsoBRN zE~#CZW20$KzA23_sj5*!%Y-@c!%iN<%c9*TxIN+D2z&%YVo8BU$beBAnClerZ6-gg z2T~Tw$Y>fZ@f%AX6q)2~2@drf(-$(|8MgBV>o3~>^AeyeJU_bl7p z)m1(7sau7aW|V|m;#{JIT>9Oa`=#|Nge78h`qqE9B6j;a)5*RZ2~o;<`NnlzCW;K zM~}O0dCgFkr9+N^yix;+k5PQ9aO)vEcGM^HpCGqzeh_@_IgEorK|v924f!Wdl%dkhJueqNA3?1Fp$!O5~ zY4f3s*zH7OAro@xWzBQfk3|>8Y)LrFhy%MxIBq%ZNgq7#<%2y2SZ8}leaI9%f(QuS z3u6}-4GWK8F*AM&TDt-FLP|<{E_2`p1({C7CA*oB4H!v}Eh5=1Q?}{niq*5y%9G?I zlflUudHZb4JNy2edyL{=hBQ$u**CIh4LiXLL0?rNLQg8s^nIP**7^N+iBH(NyqDS@ zB+d1u=E6r?6y#sO>V7;mGQS8ziY-IVf`9G-;D~IE7-l@oGPXo`MwAZK%$4CYSts*8 z5bIN;Mi}Ww(h=|=0-u>1+4WjyExQjyx4qizIu>kN^iU%KkjZq2>YBbHydK<~cIOs6 z-;Lc&{zrh$&5tb2i26p!hHIC*&V}e<2h~q|U(-NSFY&|FO9KoRQ4$yKnDy`sdk#K~ zzI`)n^a8M$504#ub6mQ%-s0lKj^?=+=;C*jd)KaC?>=J0ZolY(ZW)slmgVoAXExb+;UlFaXY&KTRzkc09lI?EwR*F(JO;ue@TPuqhYu07h|gAQml8f^;Mv zt+Gh7^z3XCN$sZ4AW0?pWI4K(lY}gjJ1awn*+9z$e>gIm9isSZ=eHwx;(sM2<*52Z zJBpI`Xanz5){n=Ge9|&#??Dck`sh?VFCb%5e#VIaabk4uUQXOn(7SPm{Y*8kkk_)H z+0q};tREW6fZeX*i=o9n^YEc>g{q>m zty`|wqN^=PS;Idk>$9`AZx`)~`Q6jB z&IaTZfH&sUUgx?B9li`LeNNUipPXADqJljjF1A9J>2&?+lr^}(P z9u8`Wz5pYZ89hwy(k2ja1%(^v-b>vICnGa&$h~*?_xt6H(?D2eKCk-q?OW;HLWlE= 
zje-9NseSq_=UvljrD#WBEz0FTMxj4(J*SYg+FQ1mZ{3eDY3p&ckN6fIQHv&V%l&-M<+#O3i#X&uEeww=Kh zF(WzrJ4}CO#F0sMYg<%mweC&C)A`#kC=;vsVq83D@O&-_r)HciqlH6_!rsK*BtQ-b ze+dKE2&Xu3X1eM{h8~Kll}Y9jMpMSCtl5aA*ter@xq8Qh4bw6;eU=8l?S~| zPo4c9+tuLk=i!{$egWUlP1e%|3OnFG`QoA*$AcG|w7peg(b1>+JDH6*7KGEh98ga5@M z)e;tP9xVbX9rd#E<8F)68UXhRYLL%QwoL|2J>q?kn$rNZp)+5ShO<0Jprl1eM;A4& ziJJ*1Kjp8yQLpZwc&|NMHL~5{(+f^Tuk{8Emz)$1-0Ct_`}sJPeIebn6x59yR|cpE zqpnqCLcruxA8JAJEMZ+%=eGf_E(P2qZJ6L&L)L9Z2Z`&8^OY`1GwOdtFDx9jv0XX0 zhs|k6Nh$bEa&o~-&SYf7uJbgdp2dBLwUeD3Yp23Dm$|p>yjxdeO%=E!X&o3M3O7VJ z)iF|2-a7pf;-DM}pnJBz%ihxpD0aVrrO(3M`mkEiO7=KM@s^h-`oR@C_x=)dZ9~2DVmYgaoUr7PB*ky8L$TL$TP3i zQ6^Xta_ZpV=n;GQ(xq|=*v9w{RSB{Cm9KELVE*sH#h+rheG((jLP@`JMbf@Wx$MPB z*Z^e_N({-{x-Q`n|2tq|xo>ZI5jh*hN0A!*+i%sYt9XUQ4@o7KPO=l*aY5i@-Aww? z=s-H;IwUE|Q%0gQ>|iVYv@Zkm(xqEw>vAb~a^$(CHW7HCy2b3sn^WapTuZMtUI0R$WdE)w-l z)Oa+idUCPP7SCr(i!aNPMY6O$_v^rLR7nA-ZRtrbC8mzwMXs_;qNIv3F+ zd9zzR7xti&H3dnPG1Q?(5yg~$e_vt(fp#>QA1^>UyOtDK90hi@E{IG6C5q%zNDIwP zJIDPYM$n>P-xv!$VpjXVb>WP(;U2kDPNn{69T^9>Ur4n??M=4Q!k=H7aKU?%7f3ok z1t6!Y7(k^>f`r3(V%EtN&w6z<7NSO!bYTG^6jlX}<$z{iRIzXY0Ai7ISdZ~{#&fr1 z@P!nwA|m4{>t`bG8Gd>JSWz#V#x}Yx;1N<{>}@|y%V;HakNfO&YNZO29#v8|@FfyAl;d^^m%!r>&TKR{R{S6AQ;?m6~dWiCbp zCWusIF_2NU1-jZe(u7L?UFNhBpXx7XOlQIlpt)e1+t+$+p|VS1G>}&b4V@y`AuLg(Fn=lYo4>blt{-!nNlmo z&EfAR6KOmp14R+-2ivK7ULMcMH(G zgPg3|T40B#*f%1wi`0j}PRM8N+|0LSP~`6>;tlmiZSmj9h+JAcdsnKE*x4t3|C6XL z?aD70RBRjYbN7wz0|d|kC#J~cg!QC4XayuH))a<;{UvczA-MZLr1V{r*QA0+Zt9=7*Ry{+{>TB5dVF(hUdaNa8l zek?+QhS`jwpRNx8iZHQImV z@T0Xj=GC#BZi-XX#LMNJdOa;|caVc}aa7bJhxv8e28JRzY6!PXTY;SB!lkhT(!PK5 z*^qtERHG4KmPf&`iB(^-2PP)owikIjRizkj?`~G< zq9bw`DWvYmIf5&4`gC$_(%dh!f$;&37j5D1yI^0vu}X+2{K~Fwf1rq1#s&it8wNI< zG4>k>W{thp_6_vk|58>B+-i1ItL+!8U%%c1Z>H5Z5l3@^v$sXzbz`QcX33v0q^rh;SFb>WAw$Zk$8&-4ceL0#k!r!_Y^NLYHway+oMAQxl&;%WHM|?YtDxFuj^}{)XZ-a0g9%G zmPJ>AH7HdR8Xa|0{v_Vr;Z5~;&JT&#XLX2us2EGqnaoVxfNivJmdM@#_Tar7N11A$ zvFp71__6uNIj*kD3xeEUM+EkdaU&Hzo9ReNd{O(`NhR)m`h>z}z&t1lE=|dL1xEE6 
zFktts2a#;aH`}$Nh%}awESexrTSu@<}>5J}%NN>yEOQNDeRN5J8GSaLsQ^c_xY@ z06_ZlbK)|2{5ypRFX;aLf2(2(X8yzK@)F*U+vE8(*EGQgtWIP-P~z^z^}6K&-Sdg; zBn_BmD~Vh(d<8Lw%)7YO7Vj}~hzg>z<%>lAI9XO&nX=7jjW@-sTy`|;PZPD0b%GIc zIGjI@$tsvRu~QySXBc@H4S*?Y*86TS`wrRnZBya--RObBqiwR+?EVfVUuZrnQSAKKm7;K|P&SobS zr0dIF&GoE_mDPb+#PW!!T<}r!6vVj-s`~Gn2X$$jemb0-%ZWcG`Q*t)>}=EvY%qi^ zE`_&IZl|vL_9tc%i&VZ(>=3fs3Wkp45@llBorh?}!8evzyM-M)uRY|{iZOA} z&mi;r)2^xPNsXzXU8y&CB)h{3waW175 z3k~hE$KXosPQM2hTZxKAf*$1)9k_a#zy1ctc3k)~wV)W)uY1$!7cX4!qudFLxGxMd z=8_SF%kh)-6IW%Zh~x`V{;1zpYpB~@VwSRE>$C1L2RK6H-V}W6EI*RBNhfd9kQOpo z{!@#|v%ckxH}|;LE`@h7NJ_JdEFt^E11w1dw9(f-UGMeBNYc_H-j>U!`3Lv20y-Cx zP{gw2v<>V%GdhB_O0{l>wVSj>w9|i4K#6=14u))YAIB=G+{VkwdrV@^W@Dln28B?OXMe^}zdtpA( zj!Zm5cV2fU=ePvR%?oJ}^uw{-C)9@f8fTKtkfQ+;e5m&jC;*$9S?eYtdXRa$WrCq; z8BGJ)qR?U>ffcS}i3Q^R{g2;*4Z-#0-Fx?Hot$$r2K7PAd8T~UEKW5f3Kn~VBHk$TwB(c9%70pX(Dgr1TDZ~CbOYOu zeAH#Bgd%-MttA347($A$fhli!E)q1$K#P3ZK|~Cmqdf5?htLI&A|P2-Cn}+q2=)X9 zhrHm&2|vMjkLvjux=GB5^}NV5UMo~y4MlajK4o}$j}!9jpC6++m1V_|#xb%t_#us+ zuy?(PF(`Z0?3eMX>9T=JaGh2`?zg7Fu(&kfD{;8<3P@TP7>mvhnc^kV0`jr(pTkQB zSI7plgd1)a*hrkcIDXHtsE5BG+vttD%bS}C>oZ%MCS*dynUB5h@UrOr3S$3-kdWBl zj7$34!vD}UuHUyxLo1!Z>_z8k%>hO12j;cUn`cVxsDyyn>!SzE$I=RmUDn#o{$Cpa zvA|pV!?96pOnG?$NM{PpG5wsAE41;aN`F4w^wB2N>B!kl)}F+t`MIgOtH=EFPPCpN zCr{o~Dtnn!susdn>arh?J=zHYUC&jAPsO5iJ;N9rwCoUbWdm9>@u$mJLFwB0b6h-L zW+%RJtirm;NWj?DqLmW0h9cAk`Hk_NJ9(Vo-rGHp!If|oOJI;t3+HU6%g}$~UzlWH zzC4cW-wbys(n6dH&^PweO8KcG;i9M);0w8#u}1**Q7Y?DBTi}C8||_vS&(HZ@1i@z zlk<3RGcs}$nxv%Cm15=jF}AD*@yNM=iAWtwYV&4OOYJJ7^4z}W z-ot5j%=dDJ`HO?%%Ghh0%;kK(XGvz7aG2s4!X8J2oA754G~~uOF?T_hjBwcMSe$Xy zKqbKL9lT5F9TX?V>1gUmU9vp>-oU3lgI+X`F(sQ=rW=9v1B-Smnv=E3n&| z>;KCx-Qvc_B#Jii4;hs&Gfc}PQ=bBgcG08y|45B3Z^w+O`ZB_$tj0#fOeH9qMM|_0 zgxR})dO+=oEe4bTRt`^~e64YC`M`MIx&v^W&<#1;m%DbiT*n)P4}6cX@EaNg$s$uiqM|Gb0gu2Exbfaw_84*Q^=XW_UAziG zK_0Vc4y@*^+CThs1AWQ}8&DKa`OwiV*Bk}`RvK?HB+)OdpqT6d zhPqx_IH#)RhEBgt<6R*net6h>IVrT(D7KVq2?Luvdp6~i8GjO>AM%vhx_x`;Q9-Pa~8zpZj{> zp|rCI+r&4@`y?lUE&^Cm^{=6*^pPJilLa9jANTVCvsLP5y 
zcWEe`Aw3gN#aCjgql&!nl)uhHHy9&L{uKCzLY{%&Gqr3 zInsFnfGjX=6}c%5F|`ko1G3Q%EgoomVih@p|pOW8h23 zQIC5Tju}1L%mSQbx$7`miQ#r*danzG1w#F>DL`6k{vSKaPMB~P- z@$g{c%8L{0Y>#}EyB%*Ozs|k|%Y#$OdC;KE*$b!FsasdZ4@qhW8&(kAFJ4kzM@dzU z`(G^ND#zN@!i@ZV1Ds<5A>kzFo;nquAe1J=fmW;4gDQ+&m4>EQL%H{cvbPt_XYzrb z?=5JWfJ^TjVloNd16*41x;+sj`+ZMYC{u7g%S~z1l(I#TNty{|!~7hB$YWw%kZDR# zPV}#&^(UMiMYf0dwLu&v=6Y4K)4wQB%klttDc#Fjb8c!rYIuq@!b{m0J4+_8LU2Wk z#L1MS!eD~?SV0@^wSpZJ=HVWSJCH}Q$#P%Kh&WaBg?Z3 zL*aeGE|!(s05@eRe|^)Y`sj0aYz-e|PR0C0n=F}|GtToDZ4R(|L*w$5IMM|pm#MD$ zsS4MZXYPkKLD1BWr1Jk5gvFckDOmU!kXi~el&)g&PWg+g)PFRa%QkZr!)bv#gM#dC zw|h)$QAvTc-KTdSocTZW7Qp+aut~bZ=hM+P-sUseV@U~l3TfD4lb`*{bIdAopYYmqie zT@Aq6^>cm&8G8m`v1rAL-b82LPhKR$8c1=$QKAp=mU;BrwL5uFpJ`}uouRqJY6#r? zYwQEC7`D2l#Mq!f-T$0&MfLC2vImW_Rw0XL(uuEV_K48#+gGEva|zj~NX{3@Se`X_ zs3I7lEbmDf1Q`l+lSmsOy8-Qk3)_-VyT8}1+lWQR;V!B76ar>XZ(sJF?lXAsNXDZ> z!rXj(S+=#?13;K-weVeunA|fr<;01R@E3SyX4qKED9VQbRzWxP3N|3C?*y>s`jkXNFiW6oNQe3#;{l91px)8la^GCP%YJV8|YOIYy? zif3(~8qJrHgiq{DX1hGxMSUo@7@tA?SuvDi(eC2F&;+p0{nFVpo>~)^(+`(GIvXXW z3f=Uxc z)g*-Fnyz0BQ1l7EFX4R{mj2&LEg2UgN-ZRQE%oUE2kFA4mQh+XE=+=*!GzKX#UgWY z*qufYB_NaAc}tK1$m}i}j(whv-1_S;-CQ$Yb?c#uiDkn;0QQbmA)0oC5zq2YcT?ow zvRxpL<2gli1ud2KTolrl6)BiPrQJD^LJe=9B^gQp2~zw(3-v^1Es4LcWtMo^coHxN zJ%s{1K0?wX(C1N){8vru`Hi|BdQu8eMlvm^TmfTW$b}g+c(CL(i!57iA4FW=UNCu^HXMCq)V}Nh(RC43 zK)-&?XC-zb39}Wv8ThZh*orVJPc705Zli7eFbJl6gkeC}`n|byI7dVz1L?(Kg7~>@ zpzXrC9q}d6V{*zMWJUJO-;_WQJ^|%`ddf~`M#~)fzfz9s-3Nq<@QE3T^+zoPO~Y@}^vbZ*3mv15T0(7Q@7T^{A*FT zw+1u$U@iod#5R#~txrMe0!%aL*Zv^G@C^B)GD7M3GwMp)nGbEBKare7(6k2Nd9neE zToq@6VmAAb%g;&*FN*jWWuBNskS>q+l$21-Y>v&dlwTrDG9v*%82hm1o7r`y*E2gB7FVOT&AcHQ7U;0EZkusN6gOcIlT_uN#q<39tnh(~I7qZ6=_~47 zlGcazFKX{z@8@5_86d!2l%a9lv@^U|*HdRtD{EC{(LtGBJT-cGynAlS3n2x1Wq07DfvOy(^%_;*q;cTRsr_EQyQYz!9u3h^} zdJ>3W&3FjoU!UxnZS_OV1%y`=*rcKtZ?TedY*@8!Jy6THh+he89&bCT!hkLtt*cUH z3Ozal5Y=#%yzj)2nSy9X{d6`!|7W*)t?WCVwe{p4<$nWu?#sjOul+N8Gvn7FS8BgXq{pg{hAl`uHkgu|6@iv}rvibM-J@=Hn4xQD4HB7m#d;8z3rdT3`- 
z*$BPlY#_PBG%4OGZ>gzUB8Na+f(=??=vSe6DEsJ^Zt|Xa5t9HDgFQYICk%u9DE!!xcQ6HY`H7Cjb#Jmt|Y=?wMB5|HU`qE!%&HhEd1w?S)RG4KCCmiPRrRV8MI<2sB6?i@0_n_Wfgg}Gev z0uUyQgmQ`)&ub(PT2BO~N0>Xv{HC^P<+!LR90%Q_WE4K_YzsUE}P)eMbJuHjV7a2}o#$1{N`weMNL%rt%8wBpDN}DD~#l53{CES5fcMh4`MhMXW2o z2^(kPVtvP}Z|B9ruP+AdOW612E)k~X-oFn!xdc~ZHt_^1@CbaD? zJwNnbvBPbQ9p%2~88VU7DmtP}Rj|CxB&sJRgz{T5J6sMbcHtC*RQDp(HfE?UZD&#m zdV+Eaf5>(tCSGC~Wz65g?}-K9Fcz7m?A*0$4M-H>zeED$RATK%-xh@qpO~N$%>x{9 zy&zFD@8@M^8Y*jG?@^ueIcO8Xssz z`G`9p<%u(Wgz44&u$J&9#k#pXyg5U|$Sotd63IwKslfIWT8=d+%GGt%pDVNY#TO0#BH?Q=zg(xgveiVZo9KKt|H5M9Ro<{F1a^-VqkHOEW^>M0;$eV4DzWsn_tAiXfJ zbtZMTsO5+0jnRO0BRbpHXrTQU4j$ZVKnFdx;f!irvSv0>U{_h9-^XUqQRp1`xl3j4 z0W|eySyuI23GA4~(&|A7;XI(7E)TRUF#;ee(6E=h=a^3PqF1cOslBU*$3?KDgIs2kV}Dj^FF=_n>udm9^=K|j^1hJyY@fzrf7Zd(bKzr zfos>r#vQ*ONlUMezkT1NmHAg5t$e;2*M4x9KgJci?J*u#e@oe@#Hy+r2l{R?|KaA% zF7Y2PMrCh!XKj~f^xHqXhAs-R(HTD6ntdLqH~ox1HluardWGWo*-={?Qvn8maAR5W z2>a)MC{IEYC`jRkDQE=l&367_1ndL_oteFW*1#v z8mtpj@BS7NH4Aqz`o&K5d-uRze*jim*k+s0PWwlAT-qXfAX0PhJmM_>9H)lG=tms7 zj3MNUpC}YROorp5O~N_Ngy8Gf15xlaJhmQ5BVCQGx6SkCA&x%SLN4s6OobrD8L!5T zt!959OS_StK8-62w2TPc-?Bbkz}-UIyH2F=&O28|Qr>ZRMf&V<@Ze(I7}RJsv2Sk= z@yt|*v(lP%9BdBbGZ(dR>>y^F*ZwVq#*H`Gf#l zTWTp*TCtYPXlH8$9MfmwRN~#~WK89`2j&zYtwwa_P?kN^jBxS@6tWBUpE!V<_B0GG>Ng#nm4QYq{~Y*%L*f; zYCE|oHy7Qjx{t};k3}()Df~-vxD*l+qSPHQ;KF~*>MM%UnD`nXoGty-%qd>?wA(gs zX5*p>IL`F=s`6gxIK4p&>T_z}L~edKSODvA0=0~8yLZn3VU2s+^y-aw4phJz&pY|* z>$xpKYP4IoJ`I;WG{MI6vAXo}BP%s7xSI-&CqB$o;W~=l-_ej-dKjEcS_y?Sv}x=1 z?fTujk3lR_`tIFyu)K8IBlNdT`TOCjdOWh23=*^c#EeXV&9LwJwt@elpk9yH^_=>m z(X@EE`EQx75Q^yA9zHL6NY)|ew22MXXd&DoG zSKRw6r59Q4l|@gSKYv&wk7C=rm&eu@>4u+CxLOUQW{B);Jhmq`xDvGOTo(66artN^ zLFZ~JaoTP>0i0a$d-LI@p!Cm+ik7#{DJ%>k9tb4xJluQUW_s842WZ3d6auc?C7;Ws zDB>DzOB5yz5|=9m{1b5cn0ov6?MjM`CbnaKpyR9iCzusDo|}=%jw@$1jZd68fTGfo zmDvtNDxR?y!5wC<{->Uz^6*a!w*7kBD*$RXE8`P;z^aHdE1uq#yQ-%4M!9KIb^T=G zs(U=d{%AXWeSB^_c`^t0FuH9^o)&v|nWXmQW`^TE=$bQ|3}iapX3AcL-O=ma`)=q! 
zbFjAljOcPHZu_7A%r#ohkN3W+*@)+U5);F|&p2}k@Xx-(zDIFc+zK@uIyrv$!kg-o zJHO@0KWKK@QF^_BK=hf|Hz{NSkBI?OwCsr>K`gyA0|Ew*(Bo?F=LH2O51J_k^n6?0 zdEv%4yjgMiHQ?-FWdK>C#qCFQDRJMYR@-|O78RXk6D9VWxa|J96E8=xT>jwe>}t#G zUep(9^wtAzQ6=gOf>u4vdMTjtV;Wai8hdBa@eO{s*h78+sB|rg8Y<3B1eR8N6mLJu z2P(9@PMuodnK`iNLB~#=jul?w1mo4x`?TL*{IzOCBfCbdk91;C4AdAuTPCQPZJT!^ zHTA9Mu@{s&RrI5wkV^e~gH77?T@A;pRQKS}l&Wcg=I8C%2#U+^_V=1&%|l?@-?7i; zJK0O)YVNFlLM8a2S~+^`*wz+fcC}*E^A%p-d_}i_neGFj#>aMYQa;L}ZC%Gf zD~;7Oj-C=evYx^?U@W&?;W|IJxcGR^k`8{h6pqX$yvbblIJ0-tW^MaCpOhnu%h#G) zC>slaRxha%201Y)X*?#Z@i*VxMOGcm{I{91y@>L3^z_`CpTZT+A6awqMTcJ)9=)?0 zA1p2WBNpF#Hfg?`bWa*pjLC=kjwN0n?DGB`p^Q>@fqoQ`=3=t5i?mz;Q}M>(20?@% zmQCXpP%~$gv;hopsqb3OVbbL4rN^75e8oSP|ISGA3l69r_swf_kxo=i%F4FSYlEIg z4Jjh3b;1UJy!+_yTk~?AH3VzvKB|^Xjk2Y&?cqK)Zn>5WyF|GZ0H2c=HGiK+NtX-& zL|bgnHSM+vg@>8PW8QjhFi;Y900+5*^Jch|*1Yg#irAe?TQ%hwNQB&3|oSKIcLqCU$ zzqKlh=D}s4LyLw`#jzsU%V;XTcl2S5ibAIuB(F`IZOVMCTM7w;x!;6C_3vx~rYxr) z&W+wp8#ZdxdfEhD>iH9v#I<}`R@4-hbJ+AVGrRQa71ANWz%qOs^v%%;skh!0lAC8B z>e?}|AMaS%I_Zo|+J_ zAR{PUYlRA1aV+9Tc3-P@?ewsId3gtN$_W=N+!J=T z-DeYj86YIcrLt7*1Hi9sBe%@kJYY_IocKUeobl>sIXUHw>|KI*Z2BaY!=B$l+h3j? 
z0GeG|QIRQS(=@Osi~>KQk+r3?-ulU}fg;PGXU|D$1K;vwgx&foJ*x|62Csg(C)e^& z9~&@>zF0P_(h2DI?dqGR{)D#0CFDt@^6`0;Et0+_w8AL0r;4ja5R^zu6 zrhus7pc4|osI@wDKnCDkV$AwG*O<+{)|U!6^cBmgC%Po+Cbn=8G(O=Fo;n2`K{pj3 z=wX^N(|97-tBt}W5-U7TO{4U3bT(GEfuyDbs~zx|vQVO$gvuJf9Y}@tz3mOT118ZR zw01;0^4)xPzTN^57fAcV>xnn90=7RtXy$DWpBo_~+@|@P{$+o=chgv+VA)OD`v3}T zDI52UjXFK?h#ypPq3;3f6bXir>0=#H^U~+^r$<>NqAy9~)Y6fxwPuTWhnJR=+yjwG z54IYpGDCNZtvHEXaVqiN^hsC;lP25(Uullkf9_=kGd?YEvpR_bC*YvMSPHI6gn_nT zh};j!0l3XGCQR5VQKVH!>^n<$hs-k;3yhWncGP^?A5`|MR-Iq^;{jV>Jxh8MYfr65 zy0%3U8DM^MNc}Wa@x#z0m_S0k$wI19o}Zc!y4oq)KG&f7`&OMgorPm;t@>4QIauNi zv~vbvqlf%#E7RFMh8Cg9q=F8ICZtLtqqI45zvS-E&9&DqmEDxakkm_6Z;w~|GaW|V z=U6G5R_D9#GQLwN8lERJuQ4s3b2?XxIQX|0Teodn!p$Y&^itl$)ruq*{GlAHxCFEb zx4Q>V5=b7jrI>B;AHv#`)ZO`EPp6_*3S{pBpLrI($K@xrd_tCE)an@*qF6rrMd0{` z>F~K+^VD9kWopc+*)IY7DYVZmtIq)q6=$T8n%p7$4o$&^Pdhuy=!p^YBTRviv|F_3 zn%Zw#w7rcXp%KRYxF*MQesx~9G)bm6(Q&KtNAg*G#S%;)cYf{K86ev$Y>UvbMzG|^ zTMQ@Wz2eT3o=cQ)x%<%48$XKu#b)@Ui^v;Ng$Za!hjEILY!3V3S|XrFC00tJiTguc zrSca-kx+x*1u@VZuIk;x1Poxc;cG~`dOSJ5>2vu3RN+BTgG7w7GuaP!rmr4v#y#Y; zn9r$S1+e344pVbY-iP8srJmQu$$bc=5UknLbS_q<;Ppb-f=LMpJrYWQc+@^{2Ce2> z*1cJw)n5}KZq~u9(sj4grT#~f-*!XSo%4-a&SYn{vl2HqD-W7)ocjK~dI}RyjJ_?1 zADx^IvTe0HcMd5lD|5+X_dp%aA17(P$0UMOH)Nr2;(edLQ((D_=Xpz(C=rdf_YWQM z@U%*pLhcYY>&f{A+sEu|a{?ur%8Zb$j3RAf?jP@bJT_?6D0Qj9-}MyN7V#L{4RJg} zi)cHe1C_fM4?J76>g|X#ZMStv?e}(eJqe~R2S<~WQG@1<45zw-Ti9o|heYr({KdwVmAw1c(V^9cJmYBoCk`2rzs z5~=Vj{p!iCcujV5u{iFdJ_#p=AfePbC za4Q4sL{TITml9#mpQuh06MoV0b)W>`^rQNDThl2nE|+AyU`IN8@`C9u67^nw{12v> zhqha3sZe zdEL|B8ijBC>@0DXSYDz>r9en1UiN_Bo@K|BGlH4RO_gaV_>?g`>y+4vZ2i=JYoIg) zyMPi>_ZhxwD3-vb;@#H@tzeSb@oH4U&x?!acpFiBdYI`S;P~aF7&pma@rKiUP4>I> z5gck|rd8C+6QFt6ovQMXP5Bhi-&%79)QLj-lf?QA?VRS!kl%7zaWRU}_woq(_bak` zUR`)fTQNWuDHU*fbK+b)-%^*8#$vE@8ujMafjymg_-i-t&TAfR6nl=aaTx_MOYj^b zE5keqrwF1aZ=0t>Tk(YW%$#S>Bz)@Dy?a4$WOfR>en;QInk&pt4>}-21{-VZy`-Yd z^Z3;jpDKr901~2sFjA=k&?jsq{+}d$^lrbzQ+Uj4>!28rl$4}zbG!}~zxPI7N^ zFFubxdi1EZOUY=Grqc(fHc;5xvv5>xt<17+P*q{?h3GS97Of+g&@`QOkC7YQT7z76 
zyM25_FU>wo?J#1co(frq(J>1B^Td!-c$s(cS_d&K^4V*B>0fM%mggbGNAv*-CI!%; z^B|u<@U#_!*n^MAbkEcOi zY;3Hd^hLiJ@hWb0zS^hy^C>P@;rb}=*L;z;7C4tdxA{`)0pi(1*d`R41bK~p>rCHU8`M#W$@ zM#B+2oPA4w?OwVS!GG|I#!YHcj1%}H%Q5w{U$P}5*%MAX-#Us{`0PU zMPh03PD`xm%0*Pk4<~?u$lwj|8c8LDmJVJYK_~J>x-+kh=j&~}(yejr`B7u5`*?O4 zL3o{e>G$7%r|CggRb5Ifi6)hRtgJ?YH+(a0e1FtIR-I+y;_y>a1y++Ha!WcCNuV-;>c4pv`3RCeTBf z9=cNvGQm&ND7wL#41IL$H=-PK)GidGpLyoils9EX&>ih01st=s&0GA}=7xBueYp?W zF9JCx-FZRowvRTkEn2ojiLA?#!?+j?9=wF0CXm{q{n-ShI?Xf*v1wOSu6kH$3;=QB zO?yM~P#N4Mi}A=Nmo+2^%whwx=rv?WtDdsd8eeTWdFTmL=WD1qM`gIt(+-i1X1j*; z-BEn&c=~IYJQSZvdJ}p1=&hlcWyJuQXY_y0u@xSA1QTh};XJ;&>XAghbQGF^G4rzt z>dK-z`HKJ?nj^}P?BBEbDK2p~bU=nXkKN?^|COUp{kitl{O8{seG*fb%@9GTeP{rg zmS|i18m&f}cWbr(A6P@QJ0g%EI%(*r$n@@1#K{#R>G|PmZv$URhB;hg+G`e(iC1-1 zMZDmoO|R;hJ}^}I*|u6Gmh)**AD&y+>UIr3J9XkmM{rWjI~B&d{T5tr+IHI%pE?WXvW+9G=GaS zfb5Oz#pS|JWPcwLMpT`^B`-9prgX)QB_g>vQ1^~*y)>sWRxN0=t%zqw?3VCw3cp+a zE#KqFI~q3@ke}iBod4O{+P(usatCC@!xQYVAL&rM{V)w0$dUJ$o;*5ygC)Zi1SvSS z@q;Z}(_#ol&^YeZnT`rY(chFDbXVz3_1lS?P`}2UWtJn=B4#A&B6tx*#~H$nB4F~> z2wDu%y3Rl9RLvclnNCMBqV8^*6m$)Z-X7p5vE=t6`@K*o4h&z}^y*#OHUx4d8TP?Yy&zv6N?R=65+d)CE#w9qI`w3BJ^p+# z*t+0sTw3W?QET$_GdFazu6I zZrJ!UUSOI(G>`|>s&nUF0wyeV&xCl!fANNXX1_VE#O1aOqYWDp*z z8MrWIJrBbc?S`;9jmNR@G#Ww4FVdtFB-KHDB}qZRq|S7d5NbZi+tS1H+NgcP!^4+m z0yImm&Lab&$Q|^AtWW*Ed0)*3xVERGL?Yn=woNj40prQKCjr?q>_^sb0a#iaEQ z*-jHu!yU=eOcRAKuoX+?ao(#k87{%&*s-3f=ptQc4L#i)-4&YiD;8#+9NlE%@!_-K z9Pd)a7r^|;nE+v=qQq`|m;KNQMg&u752p~Y>gLRSjSuKi6kA~}TL_WZ3~)vSeTJFL z2edN+-CX1TmN{_-s?xGDgNNPdQb zBne!JlY-h1PH?kJACKzl5afnc(Hg6iZ`)8>Q-20AlNCs;xUkLFNe5VFK_FRkQw5(z z1~v;<^MQDn?!{k2J+gzQk}tpwN<8S>JNh2=bigoOh(JiO^?e$-ru@0-8`)7y$J^V7 zG8H?-)m6b9+^JM{sH-Z%Ht-P2AwS>tX3gk0%CqOTruRe36Z`qyBPdDCZR2LmB0eQ# z46DdG#X~Skh?Z}c8%1z&P2E~y62pgD4iOXOTK{3p+7P*;Y*3uL$%i?Ay{5N|AX^}8 z>2NtZs6y?=;DdZU=9MmP<{ERE-6s9xgdY-8#YbKYYv~yox%NT{Foo<76l1wWAo%J4 zFv)AOP&vGRF(Pf}PW zx7N%eZvU?Ha08XSqodGxJM{529L9g*JY~X@F5L(>eJa_W>+)hmM?!3?4Ij#;pqs)W@F488p1HrN|C|Nn 
z8e5%5@W|R5%^ado%xr(Wvc_uoJ_dU!hf#0+VS}!ufm5JLHTQ)zf5yMlS2tXi5EFX) z^WAmQxl`RBFDFf(RO6ydFQLm@@LKZewIDvm0gh0?D)oT_2lnr)If<66K)bR05cQ)h zx9cDk4zaCTw?2?p-W~!?hfo2hd5?-Tpbf#GOn!~hd(=h)53+uU#dQn(-WLe1R zTk%(xuWhcQK?bF(1yNX`d*7OC;Vs_l-Ymp$?n_;?y6CyXkjcZ!wMN%EZ@&62*;;*t zLm2kg1{w#Fm!klOJR1IFeVc7vP@xLF4~JEd9UB95F!Pr`|Gg6Pnb$kGPE4Bh(52@7 z2S()7U8TbN;C%~jYL@Qsum*|?cbIPlzLp6KxWcZAEjumNc@4#er}hBtX%3wL&B^Fi zqE_xVw8}_SDHIvkw;!+ht7{5T6sCsux=LC855XQFse`@bF4r8_ezUn%?|wK|=-S_Z z`y)uudaN*Y<8F>?Q5|3V8lBMsx`dMwR;dka6Bbvv&jiB_*rS0R*P1@myGt`;N7;rq>GxefF((2xSQjCtpa zO%*fwcgx8iKT z9+p{H^G?OeAH{t1I^9rjW_W$Y(j-doyV<;osf&&q3dZMmzHE>ptK)}?;uV^WC~sZb zbbVdusrk|UXhiK7)|_}{5DSohk-l!K3P9$=Sn>tZaDxF!zyfcNE^4!lj?b1&Ddp50 zGixK?osgo10QantnB`7qZ4dWpdg>dqx@G+*D=urlq#_^@V9R<)lt4%&ZXq4XefDhr z>vam(4jn*4_<1TH1|zlB<=6AG_wcYZN6~PML{U53l_i6U>2VN(6VNp@G)uSH0uzbW9%Sqk;P`D0dxX?@n z;cnyM!8DW|ul_VvT}C3h1h@+iO2-q67Zj=H3uXbcYJOp7z^J^dlSZW_o)6q;m0psO}ylwp19GJOVXP-ei#jKkD7}V$}=0 z<_v%bClxfq+G#X+rbdS#A7T9MD3->HT^)?fV8+RKbvL2@5@-o{*F^LXl6#Q7%8wvT znYh$m+){kWAAfwq+fovXxjriQ_zkxUC3(>0w*c2=fIfk&Zi0k#nvS~l?pE`ST-6%H z-zvs2Sez_7Avf0-r)rCXK*@B%Cb|?oh(AHOGa|wWy2Fk?WY6SIFq+%#O6n=9=TH}@ zw0UHC<#y4g5of!1@BU&?Cef#GxVDEasi@p*L+&SsX{g3@A?ygseNXs24RWkWzYzAS z=(-F;bjK<#Uj)BWsz`J#ZUbCdxg$e5T=)rQWV~8p=CAT=PNnepMVylKORIJQPeM_hlUo$7n;9U#M~N_Pq0yWZqZ%fapb&L9x``l8l>)Et_5(SP9Ik57 zO-zh&sSH&H@NFae$77$>RWsBOsyW23duTu+IM^W}aHV z(0m#_U{8WFSX{w=Ze;%4e3%k>Gk0z=R@J#MIMN_9o0&`KDpUlUh&D1&^4Z5G@FZtX zsPnip+iUDHY+qFFdrG_0QTsIMC*4S#>BDUp0=i)R@&RFV-m6z<=tj60lPu9v=q&v? zahG66Pe8#X^-Ep~7|cEXK%2!g-`72&{cXCpe_a3EDjZ5G=Y_CV{uL1n`}F22Qr9p?q1ENMCJ96G zkoE7*NVbeMgef87P@x^4iYYNjdS-obwF**Jz0j!z!>Qb)0f8tqgL1)xYmUpo8ZKCx zOjydjburh{x{Hkn4^H`CcQk7vQNV_0Kq=|C;-Qz| z#n}6jZPPABP>njcf^?UCDPcM_Si3|?1)wk!dQlg4qeTm~%`0dgAtu4TNsOi+9+xw@rdWg+}f5Iuj#si{a1o?4s(Sac{DP1Vpm$~mp! 
zbJRYxpVM-hUVHj(8bpBNA0Kc@z5e~vOSK|V--vJ)y|<Qfxz?!elxW;c-B$f55r>2B*{kJjxm%iL0(!w zIJKWR&t}4sC4r30^9Rq+1GUDhh=(RooqO#$=8>_r2Q*ZL^RqvDEO**SWFCQdlDS9$ z=T6lH2q%Q(m>9K6RmxeG5J+3NEmzsETzMBIXn&hTj}Cc|cV}yuMub;o7hYhzHL8pz zeT5<7aBZPU#F<-#wwc)ik}(`x(5P8EAAaCVfewTZdR9+SWIBKTN#)Jtg!*%pjep~wD)Vs@k^QS7Yt%l3&szWct*u-4 z$lqi$ZeyPWo7)W(9!p7QJ+QJM#fEo~x2K`(Bq{_aoAdsk+7I;@CvIl5?P0ZYNX%v| zYMU{h%ydx)vl`VtusW#j{_%XlJjy)5qLulyD>Wz+hFcDk?nHqkZL|oG(l>8R*_k`~)P7CZ7Pw>HGadS%zeE9mG_-=I$SD(8JcM?QOBW#g zlL6APWhvtYwXo6NJurLS&#b*`9skQx+I^~jt{yxWiypwvl*s>W$9N|FAch+cBuz`g zh9BAy&c&o2#8J(KCM?;#=_UAk#L}@{R6NuvEJZ>k1Op$n7$AJgsVy9b>^c3LZuT35&~+NXsr=v>HrcOdF{E}8D)f01YcX9wN;SnTr*R`m z5R?~xt2wE{W1YsHDHN1Q&8QK(Ydx<0TjRfPKa?HeO8jJ^21;C(tAT|0)zXG6PEm+C zG%o^=l&nm1%+=Q*_(<9;Bh;+5jh~_-PSyNLaqfH87HIKBbjah?lh6W#y;vC4nG@5!ewuh71{g z3PeMW9|YBq1xX-jm>~K*l0h&;<=C-fQFYyT@WANF$2JKQ*;o4|8&qh{P1Lzob9asZ+>^QaXM-`dQQy;FYriI;yarE@SI=F5 z)vA4){OF*;qQCsY)oSz?CV^j`Gt!)*?$1Xw_~D-8s2F~AdYzNAuP|cBs+O3KUk%U9M=_g*=?^(}85ZAXp1s1=o> zWD*WKbeB`DR8Q;WMgp%)c;Y7gf2_F(SM{zQWa&FE4L@=<&rxyN@l!>7bvjXw7!aaS z!uoER=No?hxBNO~Vd412G*)!ou9+$hs$@Z--vlsQ^wlF+5%4VD-Q2%YUaG=D~tS3J7-wL-^9vLgv+Sfu0|0+j#u0=_=i`L*OP1j1#H zR>X@g1Sj0y7?#Sbb`D~p80CBZR2#n(7TX>qK@Zg=;%&ns&#NwF06&^2b1v-3rXEQuZp za9E%k4-JzCHvJinX|%_8j9aYdv`-lPR{@QYOcw2NV-R}1c{qf zZ06~czEa%Sh2w!*U!Bz1BysYK&)kqE<#Dy2Zqidg07D`p!qXe?%rAid6FNMn7lki{ zOY$2=f9-UDmqX;a>&ZbxsJ}ZMemw?taue(ysnFTcq>nI&5Nej58}-_abK3 zj({om{f=I7Iv(rAnR(_&`Ay=m+C|{u!UkOWkmY80wRagg&C5S~96RUO0n7(PQ=ILM zeiNle2WjoaQy(Q`2O9+M76<`#UgI7*HCN!;g2(PUkuhSPN|gAQ1wm`byzrBBHX5e} z!Vr}sZ!ft}6ai2h`=KC+Tu$9@{@>paKx$cj39B^0!1RbE(^O`FRsvc!UV>;jkakh{ zELMy4x_=sq){{!Yo_9a7Lu{!E7QW15&mPD@$l49fIuF0}2eFps&`~w*`*unj#krBf zXCiTs6)bFQ25M#@R4U&Q1dJYyHQBnI@-tUR`&)ITm)1fS&g9N-2VL!Z_w<~#W$N_6-6(POQ;H87Z z-rv5tYXJl^`i1i3flvGi1FxxT_07gN|LXGskP2Q&YK7RoxEA; z+b6W!c7j^UQ4FxGJzP)zqv4cJmwveQ-EpGP+9PU+=1v0>X1Le=PNC4Bi>%|z;hL;T z0lxgKSdP~9-=8kO&}2t-zgYU+|NP@1f#tRQ{x5`Unlrdw!L2_%dyWu$SH~gK32`F4 z`l8qzvvX*_|pY-pQSJ@mCJXc>gFyuPYBA6SxK*s+)=f?R*{U?s6FPkZ@SN~ 
zk!!Q@6cBDkx#qPIY7s(fH30hS$1yV?8zt948zM4*h~ z(dwfyyAB~mY|({AG-+62G|9vyan)HC7Bg2+T**|?8ikAIEv}=`?ma!r41>6h~-s< zxBt5$J$%`d4XC3H_>MkmIaPha3E(G8skP7j=bw467uS7?BC6T{3>2?kjQ#RA zh2jVc@ZTT*4?!jFR68lNq@tpiXULa(;5uXxh@Rz;uN2R(!4eUiRc3xmCRZBrPSinb|&7l-NKzFL& zOSr5+i!eXIyYyxAxDvzkTKp9NW=T2RpQJmE)*;w`AmV}kpj4`lTfwQ z9IjVT30?LhAu>^?F^H4GkS8fw+y#L-aQ3K(d%CTwd$mHRt0wdy1_IjvesZ*WOT)b- z(Zc-K97`L?+G<59rbp}#qA6WzJ?)pe^DDIK)orFpJKGPOgus<3Nv&jx;~IL<5_unB&?41< zR7m?{d!%gQF3+AHYsgbr_|wAUe_=q7D@~Vcc7l2ll`J82H?&XPXi@mdt{4eu8u@s4 z%1WO7@$VxT1&rWUCs14Jvtd-I0(|bX?kHd_KP<%uBRcuwN3-JS>e1a<6NRifpl~i9A9y@_ZctI z+|?YB)M8(r-3tW)ODTg~JsfANYTK}47tq*(UH@e*CbG1OOt7Y;!*WRsB=)BuGO1D+ z56q$LM$!7y4RCUDCLq)zCAl2Th7@4U47{>I^uP*7`ADUaqA{8+7~kpN6B*T>qNF z>B12wTH1q%1)!OdN*m;=PT^zPkFwWSTaXb@nZ4!Z_-)&Fp9J?7`g4 z;L_44o!6(Js`fBmCD@Du%UDW4aXDLtMSWROCc2hg6$7sIZrZI|w|v;t^uBk7p2BxR zI%W&^TYH$^lH6yDSQHi&Ten87#0D13i~pU$rP;DvOeRl`bTqGfn?kF>)k~`y%u?w5 zB{g9Zu%Zq@x&%H#bOEw61WX6lO6sqn1m<7+*Re%u^Q}yLks{QIqUhr#f|S~0 zX;V^gvtk^PTq*wv?)YW(DH{GO80F>Xj5K8db?>C+;)|q3es{kEx9rQ|nk=P*UylFR z{wOspa(Gx%M)WY$W``rLRk&@oGxfeGlspI*0Rw|!TVL|Drcj!6reo{1(FDU7(_Tr@ z0Jn2^hv8ip5~ZXP)O@*lc{2`{XwEzErwOV3f;(#Loase+!4w=>5qD2Cw*LP9^^Ct2td(4IZs9 zU5dz21ka327^gD2>{M|0Sh`|6p%`<$GKsytnfm;r@n7_f4@~)D<$ZQmMcTb)> zc*9w4Kt4s5dw1`ie5``5fuaLfs&JRnjE!A2Ee%AtDQa*jo*~E;V}m-jjEa&)(e&fK zh(N`Mrb1H?6ilam6^S}tV*IW)KaCu82~v}u@LD%1~Om!*kTd&g3G8y1w^OAPMz#zJDqkAj|M}-VQW2WWydM&`0=R~bTfqc-`4Dk~;aMpT6W$6; zf*3Ym%2(;2UOId5Fu+UL=DkW%e^D-}edx)#1j6_OpQzQURd-Npp?48BWw06vdqT4o zfwz9(dRL=FmKs88l|VW(xb8IUo|W?^X~z#Jq5((o+(hw6OwD1G%WwR+doiW8B31HT zFZs7VquNN?DHh$gQE)t1c!&Mzd$8`ea9RYFiT7-W24=pqhDE{ zoBNw=TnN+L!Ha-CJ$w9ym?037sSty3XVsZ`ZIA;7^0Yh)KQHeYL+ygKJaP&(jNG9` zGwniRJ#%fJAHntevt**PbEKDvQ~*Kz@5_ril2k+%VMikmLfYG_E`daD{8TcwKb6Oi zB6x!>Je}HSeNnW`6-*qsh=s;xTA=%oUaZGo|K6zKnw;VoJ_zx=1UvautExRphM|wx zO>d%uIiK3cw`Qi$f#b*LT==PVFKcs(b}7t?aCu8t)&j7?8xH+{=OVCk6A$(2cIV0_ z3N)n|2bntgq^NNS4WAFLV)tcW8IG*TmWWokAR0lGc9?;#azn|r)=RkgaTT(YT`!3Y zki+|i2ovmI<>X9Nj;^2s9bLwTCfQ*_@Trq%^JB&~p{GoKg3smjIaNA 
zNZ3oql`AEZMa5H5$g4~ku9VT+3*PtoQ zhrV&89D8qZX8gL6lTwV!gm#+v2}L8_WN06Q1;;;+S=ZFH!V41j zNX?jRJ2XE4*gcaz0XO?YiiwbiydxM|W&GYFM?}-^Rz!jnND|*L(b81l0|Qv}Uq)<7pI8UP~^RT`x`*!LfjjovYt(2Srf7zT=3V1U6c!l7hMh&4=#E zJ1#R_7PCM^@#0bPn|q^9>x`vZmM^Q_s+Au*TaQ{n7QZtYRX77ZNJpzK#2=95P{V{N zbnGRv%8ddL*}AcsBxI59MYQ7~B)ZbZ!qWi{?VNXXt;meWtGVVN{6-L_+$sAbrb(!E zp6Lk|co5}do_3P^Qq(3T4XU|vzYmgLNTXWW z#oWdI^xYD;?n!QVH``C|OGL9cyz=?JrM?cPrhBCN93cgWM1rTz{X(WPP zkj<&3vCo_h<@@DQ(9Po8hCQi}x^B_UF%Tmo zMzMu-;z)W=jd111kA>*Y_bNBgdkXKl=tAzDZn~^>QTh44q%GkNzez`F47S8({Tyyw zDnz~XED$&H{)8sV((%7i-OE@Wxnj9JKxNV@v<{8Sv!z@H*wG+lec8(sM7zK0Zs``l zBNyw76a3Yvg2k-Q%$YkMQ3Ds!RwbRg5~}rB4H$j0ZQo!0HHe+0j?zy<6el6^Rf8Yd zXCiHpmIqyt(5X@|O9qoE%_7Mz;A%BbqF5SFNK*lj!y`ErIb#IpAWup^J3FSs4pb0; z9Ap`1L^iUmvTkk`D!pu1)Tyl*-SHF{$(F>_L< zC!nf$orbra^f3|hE8iGX3L%B@S?^-e)Anb4HHwOUnWY!y`vQ9U&K-fzM7Zl&96Qy_ zOyqjR1~!l4AJ=FMHASVmIX}0MHic4V_nGyIUPCwM&O1VZrIleDXiC~~=(^qsAsPxjq8O8TKhYQ-WDmhhwWb2}+j!^${ZdQnt#Y}xLY=y`i(c1_L9 z%nV%}4Ji(t7&m8x!|vj&qI{w=S!veAK-AP@E{dy2PI9f+z#yUFgCSOGcVuNrFSJC-N$g;fI;5_c*q$>zzUZh=>ljh#;E0H|%u0@`SoQ@#r{BhYu|I z@JCC&HljzJ8&X-p8l}V!_SV2gpz@~|NSkHPyLrUKtoO`>KG^D9ean7^I`_}~PfQR^ zF>~x}WUst)9`Ap9&CTKobp7$n*=&${EV^H~&(H(;x6?>kToM+#Cw#_CM9%)ehH5+ zymGH$@9r;Xn7FP-wsWCjQp*g{w^7Ty1{3nQbc66=v_M-z;a{bg={O6j3Y7R}v zdcP>*hfNn&d7$BV0G zO)Az$-33FKN`Bd7b^L_r{V4s3;p=cOtySI4Q3ZU2xNg9I9c>Wzt%5MBq)cIK;N~=#~59!k7FMF>?aSsQ^ zteUlvZpR-twa$3I`Np!zxkqv<;&5!Xx)+;WHKm1-G(%j1g^}h6pd+V=9xXT8^qClD zJSV>QlaC|`2J4a1zP;la8|}NZ$0@e?QJbZ!oOtMl#SS>*=;0%eAE987z^IQ+q9_10 zMOic)6u=t#-?MlVe%8j?nWlpk7p@;q-+X4mp5vQco@$n({S_Ed4%e=kq+=*=CYfC? 
zP2HM*eRIY2XG3}obdPa5RIb*c`>t_Lsq>uLnlHnUMahSK5qai1hio36Jlh;Ao_>D8 zSQ5gC63bz~(n+ILDM$zB>q^<;S=!sh-?taVCbq|il3Byle&q=NY)Qo$9$ufH(oKVw zi(ZK=G>{lH+CB1J@yL-`??>*G{*sSczLyI%^+2bm`N#9+24!>_k%0)>4`cPs{pT8e z*Q}Z0T`zd-neMTdE;TkBKP9?ExA0Qd`>GWi=RjD88*H78eHyxBxo%;_4NK`LW<*lB z%01y)rTJPv$(cPXs_0l5BxtwsTavhCP9||gn@+7Bo85dr?1qRra|d`B{tK!8%5J8EB6(j;>MW~T<1oS zwpV5sUhftjoGari=yIS`N$y4&63u)z*-AdBBq10^xODAU5`9gn`Z|B9iliF7@%7Ka zbhtlI{Qg#jg&O~vX8(zs+Sr8P3x{KkMIB^YGoZl%K^xa*xZj)pDkRk_v!lS z1(7gfAhyq)c8k#O@x}fLmEF=Q5+mpK$~#DmmrvETXC^68lKn7;YCGL(8wNi` zk4d?bi!T#tSKwH*PDxxF9 z5Zp}wU6WEnN38K2=teLtz-+Wyt7Xem$Wj(sJe-yI{@msG>dz}A(If{SUABDrL}KUk6F)bi zxx>swMNMXdvEgrT9lj?j3}Wr^diTxVie4X~zEWzqB-POF*zu^%gr!T*h(|G;dCvRq zPo17GVt#^@{QKHL@JtfB2t=N=Z=KEK*hfJhYH#ko4f&zLqkYGX46PnxeMA8uT%S^cL*ZYM zP4<8&7tOPv1!k`C@1N}eOJ2V>?#Y!>bZ7+xvI1yHZK~8G!>6IMyvxikbT4~smO{#f zKE4VwE7TXwzw}j7|JKRn%}5#?j>3USS_z(aibPtKLS?@X&p2Xc_MD9p)L~qIH5$ku zHhtg9)pKI7&Zs8YyF&PeWomkK1Bpx!isCXCW{nBZ4(3ZOFU zEydm|nM;w+j?W}+5j}cMZHyTurP8HS`m0em!GsNhxk3ny3tInNIJb*?inMn}tqDCN z*d9q{A*ak>Q7v!^?GGRY_7{CSh~9Go?ob13kRT_ByA&JSfBn0q-Fo!Ur+6j7CWX!< zvD1^FtQd1&vFl(!Hwk&ZZa6#{u?Ua;gj{4?W!l&C8A) z*t*uyQF{JKDqRNQ7KAf=#&>{%`6?2~4UvjX#&M5X-&L`O&=wA;27!C&6s1X__2#Tj1Lh{cKq z)SAfodDIhZ+O#S5&6Oq^_?y_j&pd+HWI98sHc*80fkHM!g1W*#A{b=tWbl}$v2N$D8{1G?ppauNp{tp?PvQ!~ zWc|~LPsx%Pw>qVoTiv){Hc`Gb$8{!~6x@|sAyfEW(dOmkoMLU769t+R8kT-~Ggc*1 zC>nN~FeE6Ai;@Mmb<38Q>=;ImPL-$+m`TVa#D%|8v$NRZYnn)q!ET>-m@=T6Dvuw6 za9Xpm>id)e7m==?v+r8?)5NJFni8p)bYXLT^H-x_FAYsQci9&%WX}Ptla*T$SW+OU zt;h~JbiS?SR*nCB2a9ImzMY0B5TOS(q=>0mZAk6f!j8HMmfP=2Y5sR-=v%Ce2G}=g zTt{svZj^!Q6tZ3DzI;(w0RfZF3UokUujvzNQghXp&s#-6vVrIE;5Hu842&ssR9Cqq zV^y7M*Tjc-OJr)IK^7p8A7@I_39Qyv|3!GnFW7Td^Q5Tm-y=)!{|lz)|8Jk^Ro!U# Y!GEHrEjP)NTca91>6f#k=5GFf09v5oEC2ui literal 0 HcmV?d00001 diff --git a/sdk/ai/azure-ai-assistants/samples/assistant-6Q824dJfHkRzsy46hPatQA_image_file.png b/sdk/ai/azure-ai-assistants/samples/assistant-6Q824dJfHkRzsy46hPatQA_image_file.png new file mode 100644 index 
0000000000000000000000000000000000000000..02f9bcd170a6abbd4ac83d0be8852797baa3899e GIT binary patch literal 181757 zcmeFZ2UnHX7B)&WcF|Z7P>4!X5m7*-Yp?;*dlwau4$^xx3c&)1NRcK=kuJT1f{K9B z`=*F=HcepD!TT(8#`oPZ?q9fP3{Mh8c=vkOn)7LM`6$R;q2J21m5z>%Uh3*aB|5q- zGIVqsxi@db?+6@`%f&y0>@I28DO(xYIU3wCq?0wUvo^P~GdH>Mr-R`gTN5iwKAy8Y zeB6KDwzIRg6+U&!;{SXBkJTOHQ?;}b8otRE>#Lf!baX6b{rXuOB>aDDEAyDI)%uW*VoqJ=w``*wZ9rcFtIJ=khm_wsI4;4$UfjP{ zKD&>rr-tKK@@R8XlHEu{q@`&|gs8L972ku3c6M0-0RgwtHB>TgNj-S*AVDoftDx75 zS+wh-&Jg$hN4_j#iG3A*)UozVX=&+zw6rwoAnt_Ys)=vpB80bY+csmzCrh0jX(TK4 zU>k$o;y!T<6Yd7*+?V{~I3vE=h zwt4FjXX9`0?h3oizKI^Izrx!5M)CEuYMRA8J@SylDY@Nu@=J1tUZpc`BSGB_*2^lsUUxCyNu6u$S2!oB zr6l5~40pRus$vOevP#BEB*|}HdbGc?r-!EPkfNR2(wwB8()fU;@a0EeMfUCbBPn{t zS~fN|`8E29D#rxO+Y|(Q+%mBUv6r7P4dkU^7wqK^h^3?ZTYYIDSbr8j@i+&^r-a4E z#>Q;hA-VC+eC5V?`BuJK(btw;1^cW|C?4?mx$1c7@@1`f5xe1+OEjwM+e_AJ($Wu= zl$7k&R-DPrCNegtZ3JVb?Nq0wc?fO>I@C;4F!>(KVMy4Tb^tg z!S_hQF4<5ZCx0S&{w!yOA8RZ5+jt-4u>;uMv6H1g^?E#i7Mr%FrqsW_lH|TTx1mXz z&bq6}d4_Cs>g@WRq6)YCnm=0)RPEEvzjJA*CZh5EKbx{|Rql3Q==HXgP^Xl7mg>F= zWJ}RV|Lf@G$6WqlKd^cY{)Z)1lGIM-wnjwUtneMdK5FPLcGcCIXi88PwjT}jSR7!G zYVea-8_tOg+4|jWWo2>Z#W|Z-YH51ovqGBY=E?iHHD2PtCCUWzxU4M>*p8o$ESK^_+aQFqUd4%CkG+KEJ$elZ+`2JE==u4ipQP+lsy92X%Y?bPuk{myZ%p zY0oq^o9N6hnD2HKaGAZ0;!_wa7B3&w^8MqZ=2B13+$ME@tTw+z2fzFBcy0l9dSkV2 ziJK-R$D%REg2Io|M1M$F<@FUmo_9enb`JeTM;Ee3@F%6>0@^12~bCdsTdHBs@kS9ELP%ap2cpRDgIh>Z^Jfzo9`bEy82B0_@Rg`5*Qhoib8a*$j7Pw^DeRZPTSFr z?4(amnAQKZ*g&W5KHuf&rKd^`nB^ZX(O+z{Wy8*zG{i_bPi){yN*Bon-YZo78~-Yjn^t|QloX^h+cParaB*EVF)@*rl1g_yDJG`7je(~r zPBv^76)Zuo*u~PMHCW#xp>VQ9`?ueIvrI_)lZ~Byq>?jA<>JMQIWwfz^71P9`1nZ6 z$W%L&+*{*0bF}6G9UU*juS45|N%@5;$aEv_0qK2)xFCV?)Yv-~^%9U{K=ch+i4Ehb1H=q!@h;lDe_Bx}0rX%R=_OFiuiQhr)rOzS5s-g`a#6nFZ@8 z=9p`u?8{=!)y6xrQ+zgT*|J60VJxI2S@X4OlG+F=M2jJhUeTNQNEhk8PfyT2BBnDc z1J1a(+ca%c$5yO~5Q!Iaox9yr;_lVTqhFHDpyzZU)9C9Nw)MVD0#zdhk@FK>NmyRy zDjlC?G~&Sp7l$ogN}oI;{Qdnc{G<%40&NmbRtKLFob_@jrxzT)yNSix#revzt_Qcq 
z+w-WGCA!WU8X*Oald1S&R4-;rg~|YScEd55pDQzdq^PYbqhy-}V~vx1_Hge1Jhdaw!@Q@Yt5KFK1LcRD5s?b#9IKwx zR(;Pd-Yg@OUXE)aFUc3%?b-HqqN|X;(S2#wW@%#|gT9+0-O1y^(=*+JttjNfOliz#_vmj0c;rXTT9+Wwh|+oMYQ_&5tr?&q{=z>SD=R80 zEIqIhf&Pwhb6>*-Ysst?Pa^AE|LT%fv5S3VheF*@->zM|TyToS%)<|G{>ZeGn;MRu5?m6r)iJ8 zX!Mpn9Jg^UTUyg}@?RN8I$zE-u2p-&Brx6eDXnx(3*G(X+eq`7zT6%UzJu~?2{=?P z=yUsa@BVHb3N-$}+xvv5sLoD_mA5rf5~R+UzQ4EOLER1Dm~fjbJF)j3kRFP&=as>1 znRfx--ywu+;3^8^K|MN3k#nX|ZRB`P&N^#dKouGaVxms|9k1yp%z~re-v7g5`h5A2 zghwauSO2FIeF^gpt4p*c92a^+tndp_=L}S%=N7L5k7k=TH!S?d6|>C`|+T-LW#kOmUO>LoBMv8uh{vp$ok{ITfu~klUzXmPtKo zsEJ*ZZHliikCZG=*oJUDj|j6YkV-3_dl7U>KNX!fzc4gl+C$u9q=9{PNaANYvzHU< zvdb#1^$>@2`_>&hT27=Dy&8&itx<>!3X1!=G9=-3U)a$Ml%-_h`?f-sH#j?%t<*rq zHjV>9Ji1!zH|^m6a+6v8(tA=A2g9tGTnuP4GmRpXMX&KvJ{i0UOsT6=Af41f>?S)K z+i+~KUX=~k_}IeYBBRUTBR{`Z;4hujcwRTW9j}oBd+KPPpG3`B#j2;%1J!0z-`;B# zIi+vhe&E~SelU$ZuUp6H=oI?V6Quc!zVHT;2B92xP4V{aH!}8yJjSk1-LP(6yfiuC`7Bm{$0Bi`%|ro0O}khHA`k2WC*&c zw*Q55>R)?DkDITU$HfY6JYY;l;OL27q^mN>B2mz|Vr>m)zXlBbXxh(9ym8 z^C|ZRI=Y8NBaQJV*x5fS%;6~p7t5~UDQ5AnOjY2nRez}ey~U2&@^xaPf+EwA(?UY+ zyyMb->pq8ac2lfMSJg;W3#4a0dkw$7ICkL>69_n?i!(IQ6f46}m{BxWl6m9f)^dMW z!A|RcNNtG}b7dN|b^Km|2c{p|Qs_9@GWa#DOU+XaR8ioXph?5UwY9Yk89a_Q18$M0j1Ocf4t6Q(zpk>-oG`jjV$=_ ziJ5=~NoU1TT~pu}g2JN5TC}ZHOFdSnb%q|DD!JYIs3Tm+7Cf3gn99U^{cnSXNz|`K znx4m4+i8&BuL}EFSSF%dAR^>>=X+|RHT9F1NZOyI=FH&NW&i-UW(u~#>O!AiN4{-* z_82-=J?Ku~9V=DM*qE5xAY!Ds6CMxFr}{?W0!m!9da*Bl^yQ~dpFVu_sPW??234?j zvY%&rboB|NVC}PEu8YFE++#T$rU2 zs!`|LeGh8!gYY)_KN#8B*+q^*EFg@={`-e5*ws=cB_+r8ON!7qCUk;}oqoKU1JY<~ z)A(3Xk>vE_`*H1@_}^Ef3}{`Be71wdsGPhNvA z4C8U{j4n^0JqV-ypopJhWo3PSN?#94cfx-%V`0IGn$CUfm=th2v<>A)`%iW@XnS+| zIX|!&t4#Xh&IeS>z@y{;@rGi1hM{V#^gaQLj;52#gQS+#O8guKE|tD^Ef9b1vzvYI z?%l7j;?z&fR{Yza+ZN~9jRZJMcBee!ls_&cq>19J_VMAq=2w><9ch;|zz&%kYk$pW zTr-T~Kw4{}O2Vr!tCIEn0&S+~|8e#9iF4;k{5yGk<>kiBFF&0WR+8>#z;eNGI5INFe+BBB?E+FZ`;yznc_VI z07y2&54CsKZ`wZNd*JjD`JrbbgpqByC={30{$LZkH4UeWSJ-z 
zUd&kmgsdam3(Ox|h>GXhx^=4(peMMx#X~A5$WI5DH>97}d3onz#nz$Z@G@{@A&^pSr!gZhpyXrPid+4=tOH;RPQ=#7ncIHRGrYi2mh&id{J-7rZxH! z5)Bz;7%$bK8hmaP?W+ZfP%;DG%#p)~FW_u9CaNacegEKNK9r=DowzhNp0qqsC=YTc z*IaBjqEx&%7}hcZ3O4dYu-l*Q&^hb#=t*@@iwx$SRZ?}@4KkW~%e`sTw9-+$lP1Uy z7P&fPn>1SFcE#ZUXq?o_`kb9&RrJHF>+ADtRwavZ1|sH$U8%_#%jTK3V5?O8rEIxV zni&^?73)EFvY^O(4duIHToZ1SXO8bBU^lEl8>a}$3+~>%XODpG;I)&w1u78eb_|(7 zrot(V15=W!8je0mwGJxxqHn}Y*VS`0czcMWvHq+j@bT0K7o3jd;_`@dHh<~6{dmxl zs+*Lgp4M?PtnpH*fLY7!?jq-oT&t+AXSt!|{LPJ1%-Y@>5KQh%31}=ievA@7k!dL( z2R*cGDgx(-zi_qXtZybzx2CTJPV=3+;&&|v=g*!xYuR~Qu;?;q%S`y#bA3W9v7u8? zYL5vBH1~=U)eabopVasS<%*F4+oiPy;7am6$GZw!doxh_8_^<1-g52+JBadJ(*!oF z$*J%M`5b=0pjQVmtzSjkcFFSGo&JC0u7$n=$rlpk*syV9BJ>%T=}J!31SR(Pmf(ax z{hT%T&81fys_?}+>&^_-s)IE9E*k;b(mw2w9|q~MPFN%52`iulbxkfxJfg2iTwI(8 z$6C3T5XW5heO3rgEio2~@I*5*0k6UDN0hsx_{pPgwGP+6-o~xi?GRNx3a(OV(@w}o zEVIS({A6pENn;iiE$=OF@g|jXExY*d^j{_TZp6i6->mouz4^0PP$Zqyn}p}-1-rSq zZ5NZKjkP!Wa}P{>@Wb{pL%nEeBQ=l+kKPqYD{o~>NF~*#p5m$r_lu8>HNleohhlTP z9JEVXJpnJ@phI|d$DpY`pS+md`TK?qjBzs=Rt#(u+n!{}$}am>U7|WQG&HaovgH9H z>RT>~iV|Y#P;AoWLQNrMsf0Swk`gAq*uSUTx*Ot^y^yAKz|!oL;e z1~$mC?jYSkI4Qx@I|&I19a$#NBAvc(DF!NWDCf~T^}Vl8;Kql| zvLU>ODPhe@pDtB`(+s1c<}wR&3zE(8?(TX49CPzMu`!Q-&oz#6R|6ZdU9#odMp>{P z*z%KXeUykOyu*R1d-@JHZrtEUF?e!FM9p<>tnzH=m8z@0@GuhaobjQ2#=^xrtW;Hc zv?+bhKq!fhHavb6wU-b?yi60E#rE!n;4=ZzWG9gd{({%w9?eLL$XM=>l9|L@>~egR zls?fvO}|+&3UZijHjnL1gJRTwE{k9U+VE>XQ*>Zw%cQ`kD*R4U*HiRM#VL~?AZMoN zz z7>)k7j~KFv6f76D=Qsvp2k0?uQO1!1o}>8RIx-M>?%P%hGYue3?c&Azlu^eIhn5{K z*aTh=v1i?}Ws4#fl)k!u_WFBTS^awL3F+^fH*c2Qv7GpTMcf)HbJ0XqB9PLgogt5VT+GoqY-&x zrt|g9xFS8l-{{_%|MGG!scC7w=%Q}}951vOe)0F;bcK460C?1fuYF|Y(f=LFPoP`~ z9V{rnk$xK_gGs`psBw$({zsR<#Ol%QEL$J>1ls&l0)K5sE5n{WuZc~D#jw5e=rJLZ zsK;OZJHPey5t(+m@S zPSXf@T~1+}I&j#*sPhs4&MfY;_2zrTGad~!HL2i{WTC;d*yon+4|=uIJ<*X}&FkLx zL@?33J>x-#7vQbf+Cra$k9H^?bYv>TdAvj5q)r~Mz7$`S`&=m8nVVeOHg6_IS^44+ zyc$-Ig$qiTl^Sq9#(iZkT)42qN;)n+p88x}uO71KbW;mz<1n7;EZgNT9JLqd0Ac*91{5{v>B*D~*EsIv!k 
z*o{wITu)w%7TC4uD7W=y)a-uLoOIXoADSbjK>thyn_OD-P=P-9wWMe>S9L}#{6J?s zP$P*_IymcCnrZIJC55AvlveVhFTH$wxDacu*xXe{v9-KMuM5m;OFNH&TJqd2eCyg( z=`+6OIrCMJl3jb&7vIjfpgUb>;I@RVv6W_S=bcB&R6D#+G2*6B?MzJ*O#(8R`EOr$ z+;RHmr|sk2#f9%$Bd|!|-g00CHcrJUI2r9MGmemgv8KXB8v`QpZHG*uYP9cQebKlS zQ^wdWcJicbTG8~wlBLlT-aa}YTD5aJ9u>ZaT%a@Cqg{)ph@|~$7e{?{K#D&TgRpB) z51Nu4Rha3l`R!rZQPeD9kCnW}@;$`Y03#2GdHhIT(rddSz)CWM0O@I6+fD09&blZ`hj;BZjQtiD5 z!7@I}ec53Z=YT+i>=gn)iBAAoq0^uk50}yHq=E9;`$-+ls~N(rIYmXGyy-EsV2#Vs zQfw?HQKz?-h2v+bPadGPn?P+Nd+)~0n<>?#lB2;nrV8!{I+>07xl%E4!x5l~G*Wnm ze_@e>!JD8+!Q@H-8sGv)0?)`QRJ^GMJnv8(Z8&cDrwBM5sU9(pnX4dXH=%2$0L8u0 zNWTGt@1d`64)r7l76lJq)b+6y`<@!@&7y=QCk{8bE$PINPm@a4mdwiQ_+)~(6+vNV z(S$m(Pf$yaO9936&2FDZ%|CarJ0AXfzGdc7*%$ov@M_o%**u|~cTT++rw>fdC_E6R zerde;>nMaGhvK%p<%Pg7&?eGOy?3&n4Qx%;REEc>TIevYf`cX1fSs1*Fs}9M-S*%6 z8YW16?I+#<3GI7B!Ow-hov6tAN}uX1u}kU?Id&QxDj*sZF@E@+r*DA^y)Wp*K};p2 z4m~)N$FosZaHQ9pwFUIRNHza8DCo5 z6_^bI=#>Ii-8v9Vq~+yXVOi49H%9QCb83Q7?Q^?bWMDSfFNME+>FX?XZ1|?UUO4nK zn8{g(gVwKKZ%=pzF@0WNev&NZMu;7XIKNf5X!(;$Si=dhZH0Ceui(j;S`>?%Y-rS0 zdGruf?4s@Ee{jNXf{a5b+F7ga@FBM8GV6sY17Z%~CBg!1`un%_c4YGcD^JsC&p+O% zCl85rVDc)JM(N12>Fex1(L>1BEBGW1hN(OFN??Lht+h*ZNUB{vOD?Ddmjr*C@crl~E~1CJ0X+p8xVArBRD z27BD+6YxOLdNZ_J26$>B{U{u#mzS3d`M+3MqLv(mV^gJ$v?9<*J$xVfo@yvm z4ckr?y#%BE6uW8tv`g3Tbaa`&Je|FozL5cm+PSIPt-WtbU~CDwKNXxAtSO%FP72m@ zshoJ_LXL3TO+T{GOlVrs+vaM;m#FF^a1pRa4<6UN*{*PuXqN~u2AZ0 zBpM)`y_kT&4&*6AK4QsPlx~9LcUXC4{bxP|2L52YB^zYgo)Jqa7;yXuagI&uf24X` z?-Ya$m-&elie;fJ(OVDSy#-exzx(YPC=Gq}!otGsv9@%FEslghYG%fH?PN_wz~T0X ze|J6f@kyUZGRie=KF-e0&RBIuf*O8{BNaev1XU$N`x9sq5L^p*#`XaKu`K46Y1G07 ztbk#j!Wgg=Sp+0k$a@2~Gt{sI5i{*7$VR;Bdh1-W1u<>6S7%%GbSI44so&111Pha9=fLpggvy(j=sXES zI3Y*Kf#Ev3bcOoJ5oZRH#_f4A-T%3KsEDmc{X}mR=fiP)iPT7=%d^3!S6J$az%7_1 z4~2)IH!S>!=g;M+Y&X1GM2}Qpl|)#Yp?AbQ4*QMR7hMWI9v&VABXMDNsE}xFaxT3O z7?K5z5XhlHwlPHw5b8d6eE6oG+}5SGzC-{zVsM;hi*15Cw}<1J0(8&u!JUwmkf~~l zmfYkHiQ3CH63$;%#fiKiYR9j+k8dwSz*uek6=EKs;Np0(+U6!zK+L zZj&}TqKofHgS*hd?!L6J@CJa~WhlZ?1qPd>-#(&e1CJ1UFgH6MtJd-pg0w~6g*KuV 
z&ldzZ?`VZGEgODL5e}_j(ZtYDJj6lGXB@H($Oy!u(rM{yBM1e-Apf8%?F6Z1;plZe zZZl7!lYy%pBs~W>s6$+aoga8KGh({j@E%T0eekPbalM8vm_C=~v@~l%Bx$B|)-Sb3 z(-flbEzM7+0XWwvctB~BjTF^{cBzKFj^@9o3oA=fO`|O-)TvxO1DiRw&bfIP{ApK5 znEsFNA5}ArWF>Fp@kyo*e#GOAc{rl#QRwRjkNNK2UT8ORLr_2c#z#A>foX5qgO&_@ zjb3Y|QchHUVQoGdb|RGTa+@P*1RDA*0$XK88sC~zIA>6H-z`_56t&n$Fxh}eb*N6r zRFQBVaUz)nEt9dG697mS<*tSC8TxZh8+(@mV+QUov^3Y&` zj_&-^=h>U?Uz=e0{<;1^6nwjUl`Hu0v*?9G3$r%RVTd?TZcTJw2ZpZ}gjIWO)12?| zEG4g6=TxNk{rkg^9?pu!7D1jUWVw2$51WHj+drl(8D$`z}eyzfxA ziNdby3?v+p+6P){BYa^H<=bJ>4ABAF7rxs5kORA5fE`ngJK%DIU<-A9_W?VRM+L9e zx`l=1Zq0pHIe_IG_j(6&`9Zf+-;;w@O2ii=Kcs8JYSHXPb5RxW&1&sq@HmIjU75g2y}ZFzaBRAg|jK=K^*Z1vKcagRaL07r;29-3?flP zh;~s?u+1!Lb|Vog!I#IqHn57j<#jz72yf#Mk@j`{{PfIj3B;#NxiV$5s?q@PsDST$ zfdmygv&6MLMS|ma1lrzGFv@lvz5xr(Qx9+PeILjtjI#_Q^yHNl-7bBU@oh3e~NO9^0?F$L3YCvGut9pY5+z)eB|o_fDP z@Zmhkzy3PNA#FH-Eeo-%exh&zEkVJ}t*F|kC3*T) zqF&RCDj=0&(Up7!UHQ&m8^K2j*3O9xMIrwi&WOU5qUiV11X{mUMTHG~8F;Gyi zj3|$#%8>!~1pQJE9@@93`ozpPZ~+T>kTbDDjIcl5N_xgK7xAyv_Tu{b z`p`}(3PxUG>8UQG3GsIhmT@S<()#6JYs!h;-!~}kT&$gcC)Q)3cT-_0K-*;!|AEUE zgFV%L=n?jcWw7gYBDeL5C4-&$7I(ID6zsqFy#kMCz7T#B-4lLQVagiPBBXB0&3m9& zsgm#~(HqE4A$~`gpp}ERYewJbV&3?of1ziRNMhsjo>8zQ7vH&YLfv7vxb#20LX<-Y z79ZYrqBNu;TQgL+I`Iwhk@(-IP~9lBwXq4d^0FIYTB_8BsD{z@;&bB_^A& z1%iY?+HTDwJS3X~f)Vb)`uf$YU!g;I1PY|WA+2*R;@T2E!GRldg#Q2=gjW`;Qe zQFx62$}_pd-MI_|J&R}3u(~?dnco;YFj+jW7d%*-#%xtoU$F$w|DG ziH0YCi%jH6V#A-_PG9IeV-)W?cns!mWBHtFidHl4{eVE5yWQ=aIaUQD9OZU{f25}l zBlDx|b1M{jVX^hW7rS{e4K(sZMw-I`Ao5ccSWv;8C2XK=NAkJy%sB zr!^suf79_<_uR%Fn3U6PrZY%C*^f9Gg>^cvEZEGSee?J8W6Ga;6_nadT9QLcR_C;- zc5)Q$kmY_F;b%NwSPicaYH>C~*t#AR) zBf*voET~ysvN*_$!dYwK!w3tIq-L7&CV`NuExD4z55oN;r+OlZSnp<~Y(B{bxJJ0kt;@hg%KoAlsW9P_u>? zM3N4^=|-1*UCtc;*Vd9E%;$Zp#t-L!ikMxl+)w;J3CnfaQ1+ntBmy+55^Na6t@#Qv z%em}M#EEA~91X<<3X5B6=^^tpllHmqLyr;8X|(QaLs9{m;ah?|$lPo%_66*!ANjqx zEDP^ZQlvWYSf7f#cIa^ce=Npk=W?&a`t-Yha;LewW#$&KnHoU8^Ggx#ls|{V! 
zVayow6`8O$aR+=OTEce0i#TRfUd^hr=?PFEcv57l@}AnD!f9CXunOI|sGh20lPKs9 z)i+OcMNbZDBNqn_V#{ikh zrouPXss?$u`Qko{{131B99~~}q4e<$Rn7CQzyZicoMBTeXvkE7FTP#!Mn7QV7k}>O zG$~i(ps&XZC#%BqT$16S76Xu__{W%<+^yh8~;)S^XT%B);xeG&K6!Dy| z_94pZgu#F^HUS=CVnGIVf6(m0v**yE6v%JYz4n{r#K`No6z&;5Po>vGdjzU24I{f{h5PVBa zZ$zV&^HOe_0XSf0#WZ5ajvWprpZl-nR8Yn|GTT^Y@c5=L?pGw6Q+ncLnSuDD{C3fjzmxc=8)_lV>mZoJ#)%)r29DKrLT4t>4eNl8m* zFmJsMDGhZmt(scx^4l_A15!D*rp#SWx$Kd_1Q@wSghuicg1t|sYj7jA@VW78NZFvd zk~{~c_FerS>ya{V2EBuJ+mDNui^eO8@k^M<34%(r51j| zLKapA6VJMD;gymQE<&^4rvJ#wk-RHl+H@6iO_hBP1BfK#4 zcKcv%RgIy}oQmC!G(9{jmqmNV$E7YZmSWLWpbDd{PPD8!(xu_s|?bCg1GKy$275$KQNAK7ZZ8CpJf{Z-D)D5^r0i za$cXK4yIY;!p}ux9}N{GC;hS)L`dNN{L_6*Os4AlSy?Ar>~D>a98rF-bzit%cQdn= zZ;(xs7ZNVz57?!a07P~4f{#MS%A#tPJw=V|Hxom($P`~5z5UpEBBm-^@XAog=0(Fc(vp z0stJJQ>lj86na5UTgp#S`!6TnKv@3p@grj#W~J06twf!l>zWgMLBjJi07|9~Ue7f* z5<>zm#z2%W84m*;9}d#Ctenr6HL#}feYJY*q~$T+8=jND?=oX6&VqM# zZX$m5RyNh!)_{&q$6)QpRg}o-&-EXlplsyZAMzc|EWR7Q zhZ0YTFmSbLsK4~&UeT7mv8m0A&Zdr_ev} zI2B41WtEt;ND#Gvzo$X@Q-+0KJ0rVAea~m}o7`!WetbX1EQ)$TnB4Bf8po}?E#CEw zN)3yjG^5N5iixcjP@jHce31UrdHTOT%Jt^{r?DDsgvJDbk<7iLugS}+L&_$DQzVLz zS~$@F3&l+E+N1W65x(N?eRJZPO}``YgEmhTQKOo0A|WyA^<*0R$?UXU!DNQl%@E8H z0R9ZlPUjSl+~V-aQ^x9$9Ghf&*@JBuN)*aXtzZP4X@ZN3EXG0a;)5iz6}yBnuFg5P zVI;R z?1$?vV5_UZ0VH!e&yTB0BWgQ6FBsVX#h}Ye??{m@U?zQ|bC+Por(J)q90O)2c}L`8 zG^$C*+?lgEvh4Ka&%I6gGIB#FQ8gNg6bAsT?Yc<_%Tn&#*wBFd%w>ztJVj(o>)|H5 z=+aVuYjwU6#a#ufUNJ(AZtgJUqTP z_>d6Bf^-kICaKq+v^OT=;>S;)P(-C#GT^O9l2Qz*e|(N6RDBj>Tf-K@5@K>N0Gh^6 zE7zf07(0C)N2p8{7>44qcE@hVWXZ;rXh@MnTEQ^^b`;Wh`P zdDz)6VUwqe?AY5ca1!aIYiu%vP$q$Qn|9`<8+`T#XC!&;kzSTN82G*MMmqpf-VMj2 ze4IpAtmMv5cVE|0Z!O|mb=IOI=H0vV!7T$@05b)y;p51&y3amsl-zGLeuH&R{^DTv z5QC$HvuAPUlhcmHX|3h{Pd9wk?xqkP?aysLJFJ-7omc5Q?pOx2Y=_y%;3(-FchA+8 z7156Mx)+WOuln8W{+WKLxV?EAVyM~)n^o9>r_R@yag&kzXkLMdkOTw9EvD4_D> zHbvRLDZ7RmU@g-XO0tz0EYW1WP9g)yn2spP8yJ1VBck7qz&H{Jj1qNj!05|d=OXgT z4d^o4Vsw4If-+_K`5zHEi41y~IIgN0IEm5yom=3cREvc9^h-byuMx~JGp=^+iN}Aww(0mTKfY-?E?CiSy1jA~@GirLi 
zmvVZJLLNIFQ0|G@z5WD-1IE712WK9e&woX#+Ar%UG*$eqW(4TpkO=@N+{Ep>KA#V* zLK%RGc9mK2+?Ndb{p?`MNR4GhbI-(i%8@Y?IO@6Npq6)$^=wYnr3go0)&%sRY@BLy z3AAynN_2I33bsy;4BMTde18jFXlJnOLMfxQRhTx#t1OD<=HZcpw|~N)HW2r6_jYz& z$BGibz7E>E{?t`&oDIu2<>aJ8oUoBu4TZCpNR*W-#E%|PGvrIrNy*Z`M5xba~%>-wo+gZ5R(Y<_Imq|3s1~Lf;)p;10t|bHUp7$r^ zck!Sb@Qy?7gpHS{QUPE4kX~MmMQAdN6f;4za6=UkQLLgy^^v1TN$xbbMYYZMpwNwl zWSrUW4SU)!0I=cC(E%4CPm@U_Oft4)U}q-6(35L@`r?J@fdV>4hW9ve$;)B?ndi+J zhBCpULckmb1vqRFt!Py^v%oz`@J+n7{Un3vkd4>YjVEyYmzD;A$__$C`Q5;Lg~R7v zS1Mk;`V*cll9UP{AK`k)SyWnb30^5rxBkMH2=YSw-U#2vk7uu&t;vH^|MI>aC%Yj{ z+@G85v2wRS9dGTjzGxk(385N}Vh$Q>|Q-L{{* z4iZL6Ben?f9j8^)9f*~I%qxliwPfHhwbMUP4>?HV9UIZ}CRQA&_3=?289|t)(jg)| zrea^D72t{n%lWrgAXjb8;!J_=MFzS%-WvRqZH}aF0fuVFcc0$+!s4C*CWBZ!SAK{# z+fRMFa8j?RO}H4dC{S;|7cj2l!A?d#B8klOoLolCn9MkIS(noj>vR1Yt?tUfoK|8j{b3Z@7|IihQC&B6Vo|#0X zn`gujVRF(sBG=6K9K^(%;-AYm&>Xk#{HTKw9LwS6vOwBlrel4gAC$|>4eg0=kz!`* zsPsaG891JdEK|f5Lyy>&teYdrnF>$;yvsz@WWN8nRVn}!$+X7`BTjD?Vu{a~Q(1-E z672u$=T?YzILP~hB`V@^oD(O}9Z2LlIvJrhm+wBDI;m#h+dTG@#UMT99|M8)jYR_g zwg_kgizeVSn4%G4F2PBO=eI?6@OCRDm=EDZlMo(hNhU)SpfCd$)JQpryCH}HrIjot z{`tbbEA!BxEsOy|1skg|$y-}+XW*fI2OJnU@prVBr>mc(D>n5|t`UJ0muFaUAr5mn zGV+g{eay^Bj@`}`y=D-AG35|V!W+8tIDF$?y_qSQT$&iwlM$35E<*V%>lskF)&2 znJ>vO*9h6NU&2R-qg;Un~cF#FdOE86pz)V2i-fa;7u5|ip-$2U`ilMv{eXc zL1G>#v;eg=Lqe;y7bcFMDSBwMIkLkKmfVNnB}j;w!ESA%&8i-dyi%x)#U2kZAW;oB z_jpgtEDC$B6-ApPSh7KQb{{*1cuxwDm7o2aZK`%|9I%Aonc{nK+pn zW0LG6Z_EPzbK9-Yl`E0kps9SStAQF+~hh@RoX{K_nDsDr||M~5_Szi zxRRPWI^iU`alq>{BPNsTP$I{89*RK=a2_oqN<498lo{MG^2P5sp(CjY?wfH)JQ^F# zD?U^icPdn!NXuJk(p1}EZ*W|>g;^(y#rJb%cNDFuDY;B@ zwHSXV!zDgCu zCLwKXH$=~=$_Xxi_3k-KDIAqJ;0eYm1l5PE^~jd_($mvp`EMfX#31!RV3)Iup6k}P zt1MY!-sfvjQqWUo6lB7cLzZK5ZJG+3paqRtF2vzt3Jg~Wkn$;@PfbPU;f0`;CD(~J zAN)X=bPru}ozipmJqh0co7m2d?$3=illdg=(u|P_ z$#(eN7!I6lnkE(UaJ$5xfBte?o5 zNCDN21*Gew8&>bfSeJOkHXZc!dXhI9zEOOuGQgp$l0q-!~|Q-teAEFFVnqf>fWyb zR#N&hihF|*^ufl0@o1Y%17fRi5ResAwesHQ_V=hPvRQOs2HJlamsFt}8r)$UH 
zwV4fDa3JOi2zzI7r4QV%88aJkWZI{fim1)6;cB21Azfo|LTKVQxXd6{%8XAt_0X+7vcA=ydh!zqlMhtgttMY!PBk$%%b?ibxju z5IoTolzzC2k$?U5S7&?wL6`+?mIrD{JV3!q;MBjrOCIZc{|_L7c06Q?ahLehh0yIr z@$&=%W07aazmW8BI8zM|s2*_qD`L7yI^*gkavx2hp5(SNq&9Cm4IdU4FHs1H#Yj;W zrsqfofegMuW*t#lHwDPV4>!AT-Wm0_oky9p*d8g247I?#)nVE%2v$t)1j4rpxv5nD z@2;cUTgAx08*(s(n+|6@3lAp;n!+A$G~jC+jhhjg>LkD8cX0j9@Ils?tLW3@rkNg( z<*y3j*Wdm9*Q&?w7vV9~f2C6~k^hf}hM8r`IQgm%Gr=dld66CO( zjy5!MrHqD#hA%A_d1;a)5?`5;G;0UmB?d9M9RdQq3NfP@DA4=}FeUdZbih%=t#?-TkvEIW zRp96|v)8k+8$pcDlUD_X0m^txim@jbZ>{lHW4JYjU~qy&bl_eq5I1H2DLr$ipR1U_#iyopt$bZ_F<_nfk_ts)37I?^V);}YDctAM z@ATs%1DS*+^KH--6yT;LR~*Jap4?-BDG_1k=@=sYfStGaUnBhVxAhzMPKhXGWg-8`n>C3=BDry5dmvSM zw)0<%ebsNh*_Ir>m((AyGBU|En%%0G`HOj#u1r6&-)ur*YsTDIzG2ib0&zs6F}ruc z==+rmY<~cGsX(=`U-~U#>;oVJG9aYVB0)^JDtSPRS}4MENa@j5Pt$8g<;D*`OJ@CA zZeCU+11P%kxFQW-T#Oby_KMy~QcoMU?QdiE$kV>SeqbYR-$TbGSIA|X<0;~#3v4e#i6cYD zl787_utv-^ha6}S`8NQzVGD`b=+wm_9HHb z7)ILuWFV~__bWAG^aerUGl)N^B6OyVE!l)n!2>TZrrn>&{4&bF37Rt@s$G+T(5nUu zc9Lthh^GZvA;l1rBMnfb?NE@R)v6I59$i6$*E0+S8dre%6NL;lM_9n_4t|1w!~YfpBEaGl`@BA#H?R~)*k(KJ z;^oVUu;rM9?cR{qQ>^9$p$}%+CDfQ4=8ZW(=MLo`*U^2m`0v8M*XaKwg-vdKDUAQ~ zCoUo*BccIkcskK^PLQjyx(^9EP9&q)@lgz>!hhh}1(HrKz~nXNI^RIFm#)e?;EcsE z_tqw{5TsKln}osF#6cy^kT+`?$EA(+I3*;WDWwF3nOu;7aI_HZqTvNHyhD5ta#3DO z2GAop-k2_c#1b1A9&X-R1F#O`#3nzQ|{_-C(D z-}moX*1dmWoUBgR3Y+se=o(&mzf*XI`yHVZxVnLS5M0I9^7QDH03YN)_<_mOmSXGsCv7Y!9;sz#fzy(=Tv0h1aJ%EpxNzAXtHM5zb_EKjd^@v+t#fC zh|#0_X1aHHBE0U^>-)5jj9=p|H$#R?HY7nQ1m}mW*9iXUt5j0zoqX2aPg*JFc8S1h z)=tKe5Jpi(H}$%;r&3IcRq3+)<(w_O8$uUOLlZ*ZYCpOB5wi34QeAdUKL(7^A{A0) zBZkZOK07#R$Wt+TaPOagXrMg@T%HwJ_ueB09@X)Z(QX;J%AF{OB>H6szly>9b!L5$ zvn{zp2v@b`2K~DA4j^ZH`NRA$i+jCbK_S%qe=~^>E}4P5)9z!yDgpGMFm|o%41hOm z)Z0M#UNcFZ!7B3q(8uW9;|!D$*+yHIJ67;ovMan z-c<<|Ng#J*rt>T9y(Hov1c_nC3g}ckf)Td*cKc+@@98LXob!bZYHI5Wl+vm0m*G|cSW(cMJ% zjds9`+-dY2_nh7)5meAW6?i$`GZ0v0;c!>YZM1_@h72z~nMl^hl<(!VKgf_XJU99Q z(6${`Ys(7@!@#1gWW-6SR-DW=0r{z9o2q)J>4O2{(g7j8&D@ZtkEmS-#k_72@! 
z)nhgr8UG*l-UO`1we25X88fr7O_gY&$y_wcY)MITX)qMcqCusUd8}9z8Whblr9mWf zlLnel6sc6wKoY9&cP^oxZFrvdd*1Imj{ouR*!!s6rnS27>%Ok@{7vV=DL1~fC_p`Z z_eK4ad)DB=s9Yp}g2P+Cm;f@J!Q{94Mp}JKgkV~ddl@bsp+B&W*PGWsuYsLsll!35 zh0knGftT~Hw_1w`2^2aSiLy(v%B5sxk=^E|D;FKmvWeTqgkYO^jRSDLjL^2rS`-m; zXyGctw}{Wg)0358iB2_UAk@eC5K|YSd^!}n^?W4y3`3tW6*V&-nxMECcA4Z-gZqm?}1<(8=r8Oig3BjT^gq&vCT)Y z3jPor`DSpV@ImBk_*ox8Vf0`96K`OPe>!b^%4X;%K;0%uH?D_3wzO{c8vKrrAd!fK zxJRdDMVF0&ha9wFgHs!d&p{p)kmp?vvDcC>bFJW>^ILl8GO9$Es-ox zH~ZZP899DDwQ}{S=){L^^ixNaVq%TuIdrbIYCG~_*KUPN+h|wL98_^AMjzNsvfkxU zwtdzsFe&I!)}np0eSD1KW1#4!x|Xb*_%&x5SO7l}4A@G|tS9RH3%F&*P|xXH-gE#v z^A4w}4^SDPOjC!ev@mex2&DcdN6&;|ppb}ZG%HI8ibe(WrW!gzbv(CJR+7kLZT;1T z#8Rr_0gJ5$enUn54(FFK*vToc(FOu;l2x~~&Gs!w$J(a2O>ar-5R`tt4fmQL*x*Fd z!Mx)g`-&I+1vygIp<^WA6Bl|daDEh$$~$)KI9bPg6YmpZ7!3g0QO{oZ!=49PhmJGs zVI0#7dOcGJ&03d~L)ka9)v&>lZ^Af_G+YSUXRRB8;wlBHDg<6oy+=Y8iE`|i(I{q(DIgH4 zMUW@#`ZjIcW?Q&T>TV63{NO|NgdI4(C2>C#i6g-6Ahe%q141{`cP-3OTnznE6w;+= zog&49Aut}BRqou{Amp89O^Ok$U2H`G``b=VzEsGk4>>4G60+P?(20xU9 zLqupoj517iwoxd+T1ZYbapMZXRMteeM(0mR<;U6|nm*#W(SlA=3<=urs=^&~iT>Jn-% z&;Xt=(^Zer)dp8iYf9$4@uSeSlgUQh^i2B%#V?_@s|Dm&{4NwJPj^n#NYKAh&nrJg$=6uk^iWaW(=|H_VryQg z+E}Ggx|-D#L=U$Ugfwuyp(XXifFvl5m}0Haw|$GX!Wn+Kyn5fl8%cOEiVnRP7-$RS zeK2xT5$J^HRRxMJ()A@xo1>2g6$XaxU@QLa5H9@M8d%K3Qbootr`%ZdzEK5OD*5nC zY&<&yeth~nu!h52Lt&*91^o0C9xD>C@-l5nx;1N#qwt$%G_;M_%=*MW?c1sGMff#^F5{&_JJf*2*a{EEft9!#$@=!k7zd6L*vh!60A^C# zL#E|0Io$T|-_KJ66LJ(GbExj0D?bO$frci9-)GRbzpA|9i|jMbe;n8f^RZ(?LWx?G z)?C_Ku_>!!2c(V&33GyA!vVF>5pWvP9<2pHQK%nwa(1Yu_s{vz_2>NVjO)rsR>0{< z)Ii}ExFk5XI9%Jm&F_5svy8zbJ@)s-f9YR@&zQV43fU-+BCwZz0y95`k56BICjEcT zhak^;$`NGPAHjBSinm7FG`fdi!Ylw9uLJJ^!p8>dgv3Mik|?*s2t}eKShOG2wISbs zTJiy!hG3sEs{5yZ{iK}FsbGJ6&sCG3B4U4h{CB?5)MnGS!jGta)I||c2@=8(>ht~G z(d*5Yj~gTN5_BCL`X6lOyDmXU#}pR!=yQkG_px8NY1Et%FGtN8h|l!zx39ZDTa7sQ zqSsHW82k0UeE&@<#?r4kcpWm;LVXzWcc*Tcepg__aD1kJ`MSUE{#?N)_xG`TFKbua z{JbBTrczQms0Rro{MYw1(#vhgIBrPW(%2U7T2xEe=->V z#y2V&YX>k~7ovu4wi(ShjJLJ_H*fJy`licTGS9f}{hOMs0ECLb=ZUJP$p+IsetcB) 
z|LIy9OnDUmNl}sCtRHpE)nTNEtK`UIoPWQ4{q6Iv6Ik&F1hG});5u(PprZBg@|!Y; zSKj2oXZrWs*WW(N{^KV955B?K4w|OpA!_;GzZj5LLb4+J?_7+JyHdCq6^b~nWMTB5 z`Zw47DSTYk{C{))eaHXx>u;aiIQ9CEyTQ=k!`UEzeCOZz#;x*#VGPDL70BJ!F645> z1z+=ri|@Jw5soiwr!z)g<~}>9nfq*Ce5QW~c7NUdIe8%Yw|aau4aV*U6>nu_?cer? zUfdC2vI;^MzOz!ez7Nmc-#4SCPMx>uviAH?cnOZ9Y4J_{~Pme_I4jPO>*Fev7>7smw-5e+G=^Whezmo zM)Ks95H}9^6uZobxB++xrKhDzuBpCC49u zw}^J#Cb-D?(-kV~0BVo}nXqTHNezh>y&l!Ue`Tsj3SvMG#4nL3D}+&y14O3eCdz6> zIf-HkkdqvZ)saI^nr{6NRG_G>to>qvohl_X0wm4{yt~OS2ABrTcmZTU0lsLCPcx|r zB4Gv)AOV*y-$gBCpMn)n6PanuaBAI36QiM-i46-Aq&^Yg53GenAlHZ$Kp8Jtr2l>0 z;ES85R(Zue>~40!o)n=hdTn){cwKyzk6Sd2MNJo3{LocZY3(E~7cT9jE7kz?4=xJ_ zAY6w`Z9>OE9ss4lLZIdz&UJlClYl7R$HEU)HNnE->nQAkXc>}*`cX|uh(BFdS~L_0 z1VMd;mqo9QYA}22efRm7$t6L73Jcb)W|J)tw?-Fgo6BA~e$q*Eytj(){$gL>1>IsX zwA-euPP2n>H(!GkNTT6Lig*i8iY1Y4k5M%1*r=x!oxvC!TqeY#T#xtobJS$9gTN)4 zVNJF_=j(Z)0s>BodaeX=P7VlLM-bldcvDsbC>%C*8x#leB-cm08QjBSx@33SL7Ebf zQ~#cVXn=c4_$I*tKq1ezL`B+dAHry~eX%59mM-^us%FLkrMfAPg01iGcJYHRvJat^ zU*ENM%^J}wtuZ;eTXGOuZEZhnX;hWF^H*Pn5Qkl{^rDZq^c&2aRiu zFBr-rk?*Zb(Llsrf`+hhSkKrNzhSX69b`eDT8}KhDf+v!?6tP=WC%VCJJPT%97udl zNk5-lz%1|ZO9R_b+y!l93`FnO91zE3-mwTV_rPByi=P1gHp^3mmxhzG*wFL(GGvaN z4{i}e^+>rlN^O7$N{oo8$J<=KY#H?z_!)q4ebFa`5CZ8PaJm+G2*q7K2$87&0^lUE z(;vJASc(Wpt=c2vaXV?)6*eCuJv1gCuFe{0f32y>nDxk=cO#$Wpo(V~1923I9{6ArWA(1IvHDXZ)8fYKIaUx|P%6~2!r?AY->C*-NZlB9# z&-eS&Nb)hzFhHU{l``maA3={EQ&Caj6fJ@SfI=`x@eJYJYHV9}5**~N0I0K45wcR2 zo5iUiv(UX2-rrlbQu8Gv3MPQUDnL-@4&P9a3Jwu8y6pQ$#!5(3XaJA&^fkDUl&Xa{1PKj{raCWvhStf=c^~=w6TXDO2 z{iT`Kntr(w=tM2AR(bK%XahRc19pTQXr>8Rb4l~{cpRr0y?^a_ZljMn_8%H`!a92N z=#H~ZgaDXnSfWy-7?5f>Yz{(2!ejxy7@l+ieK1lY)ZjTs9m$(pS!*#-?n_s7duz!= zDMztaU1y}9hx8v(SC8TZ#Y5DdzP#HC-q%hjMf~6O6uX4-kRjvxBy3oGf5Cx6E7$MR z%YsRCWVH6F%Zhp_%fjs}K3hD!k;+a84cXIIKs_<4qsGw7!nE-9^;vqtmVUDamom?_ zS-+ItYmsH?GjH_a`K1P95;OlUy}LkfcB$bckL4SEj=rq9yI^dGMV8v>#BjmgO6IB$ z%R?SiZbIhy$tNLeU_9RBbA7Xqd7kRA+Y6p-@_8ImztIQS_30BQHfWZ#URPY+{Pj=F z8yjpPeAIt*soMJ_A+v!(%Z+^St-K=^!e=eAbno-7^2%BaTu96?VSUfmUNj8DK 
zQW)z5dZ9<)53qlJG~czCbdn^Au`*7D`!Wpc036hr>Ri7_P^MX%E)#}A=pwg+}cZpNt1MZ4+E>L2hIDK+Y5?Dt9ybW3* zHKX%_&BWLZ6CB5OOJ9NoT@QrfiW02acf-#IfcOsv&&py$)7E7|tY1leVR&Fr5R*mk zzX_i2VaQr$jP0z+_N*|%}k;A z8i6sQ?p7H}TSBf$&(G(2HSBlKoUB6CViai8m3+hCpMb|-yI4Pj{Carf7?MHra-e+z zsvd`t4YbSRh|ZhiVKG7-8=ulN1*3-c%yqLhA?__U6xzD$NLIAcRv;Vfq5We)IUc5l z`@Ur$m;K+rdlQ)|?M-mq5rL9|34c)HO-6#X2ChzywKIM+Vb}@K&9NSlrDx$<;Elry zfLkd!PXVY{{3$K49a<*J%;%&C`kHRDwC!^#X^SGKnK9%&HNmw8h2t-q&yS8NTqW^1 z)57pr7uN#$thttprJ~*CaqL+9E?KDB0kT|H0*@eEA_dtN$sFo!utmM84%fSu*xH3{ zUb?6lOC|!@ML#~~jAjS72${Wn1d^deuN?SYRM#2It9c=#J^B=78Jqc`ph@U#1muX* zr|McRYAT9sc+tX$0~UEY#pXy0=P*SX8z%Ut!<1eOv*JFbNguqJ?HvZoT(#ACs7Kd# zyP)A0Kmuu!G~QjK-&u$d(v) zSb~{C{QUC3?B_nP227^A%Q%Y=W*TT$OtViSKt>F&0q=bky46`M(w2k2j*H%j&73So zOd*U->^oqnl>~)ty+^90R_G#9;pDALBvl3Onoj7Up%d5*pL*5B@%-At$0(3i*71El zNCDv0RpvF)09yGRUr*s^aNk9Z3jLat(VYim-Z!x*Xjt9+*OJN#G%~7OW>JHparSkkkO; z`115U06TTa4wk6&u)7A24){>b6O`Ox1H0|+1-y_F7*J`yqj_6lHl)pjA0e>uW^b%{#HB?ev>jk@u1P6@@dU^nmVyy{YhOG%OF?;)rX zLAi#uhsH6!sWyXUkkroP*nT>g|IUX7PZ@wTY+-faH1XZo3}H3j@R@I)+nO#L+KukV zdW1TOteKD-DZrHR2*YAXMZ}trxeh=S5E!>YTurb_s@VCS6hi?_6TKi_G$!>QBulsj zHZN}XhjK@|9&(g(6r_gYo$CErN$t=WK$!8a`_WqgEFIQSw$2sf3y`dZp>CX2;(%v_ zVl4=YC9N_zTOn+5q0cm z%V&}$YD*E%i zd~%SXKTB9x;BVx|$&`*{06F~YS9Hfu(YGGg;wv2Y`)_pP)tt_MG-K7^q0B#Sg&2Xb zc443AGtf=Ltp#wc0Dc&mXFRJALu@mI(N@Vl@W*RraKa#e{5J&V$o>cziADxf=bmyN z9S%N*UjI*zj_Z=bIHTh_iN`sV3#I9w9slRLs>f@Syq%IqdVKsl-*BPvK7n&dY@DNPN)SA_`|Cn z`cL8bp?7*bc_<*i!tur*o4AKf=6k_`;~|?FH3}QbfImB&Mt)hj!s=jI7zre#5aLf zG~}pi!JqMa-K{VOQrrn-L9YRYk)jaNOpLN~ChQ(qf(BXw-I#H`eupvaU~?3OE7CeX z_}nQ0pf?8Z0KGWD!;}a{KpE_Am?d7LltSGM61Z6q-OoLZRCghO6TpR&NQnt#i!&sm zpn;}~h;M(n3;_!hvpjgEL!dILrIb&aLxSPW&~Zqs*O0S={tjn2AUR)13yX4@B(4;l zOMqBPbkFzdIs*{zJ}?|)9j7Sa9dI5I;o18FoIJoM7I2TihAc(ge z79u@=p$e03X8vp@hS!XNSpdrDILkH?Q+CEDlwir-2A)cBx8r-a_8>($UDiy;E)T07FVi_l^d0?rl# zaUct0(C~U5a1^#@Z-BbLHsy0%-IMrGur@PUbINl~l4p<82P5EXE_0Dl zE2U(PP9%jK#9lN5(e53&dC=-!8s4!G48XJP>AX)8i$<}79~?3i8nc||MUUI0-A-7F 
z2;|tD6ya2OBM!YICBC;HiHa~&N}^QCz`0eu{nHjFCeVkB#9R;$>qiHZM+U-k!}T>w zN5~pBO!+zUP%#I535D^Zo6#bouim_5#LY1D3TBNe44D}ir}^o9e@5Qa-w@{L-Y1h2 zyJOMZxes5+&UYS?iT!oXQma|RHo2PQ}%o*x^A6Br4l&kf0as&ixzCLH6k1m!N1oX12Z|@@gN!E( zdjwoPMI^$H4`Rf!0tuz`mwP7l4F2tF4iJ1^1crhUb}zz#b6+hkMPPY-7fqqQ<3KqB zR-JDQ^|rxe3{=&-QHo+C;5(Jj?L{HY5kMFpG9ZJXCN{xxPpOZ&hTU)`i)5=D6KJ~! zVmUe%p|eD4$<-Kv`V)>2caRDEMJu4XdMgKZC112TIdJC~1_a&>iL%Hs(Mo&sbaii8 z%Y$5;v~%J?wS^;BRWcV7!)K6N7qn*Lt_IR%b}e1H)K6zCbb@c{GQ3}(PEgXUiw5v3 z-V8ukE=qU<1cN)brTD5mZBaT^Jb&?J+OO@aDRwMZ0}ai5KqeKcN&*akSUWh9nFW`6twmXYcXitfFO>>9K6!w)9@#&@22M#`XQO#vPAiY~~1g@Z_sd|Fz?KADyPM+;2bL^ekP1){NIr;fAI730N z>p%q%bbRP_poKKV?_fI}2?%yr$+p34CU}yKt`_^@K>_fl6$)8(I8=Q)p-6s-26(L# zil^kI+FE6D2cQOGvbsK(caaz4PCYsN5J%250bB_q+U6Kr*m16T&oes83XMq>7zrm@3+3P8Np79N3RCx&W+bb}zJ;y%o4g_A!M1CFH(AU%)n zk}iG6ZgA4BKS85)ueJNJys~s+EqKovoy$~lFo_J5@_SKVuP|@S_0?EwY(rkt;jUW^ z?Phu>d^TpB|0Y=bnRx95&>-DBa#GQ2vuPr%F(9ymB|z@YIu5$$9?sO{iax%2%UP4l*&it0WrXsNE=k zjfx*7G~)cbQNISRMoKlB1^dvCkF%pdfs7eFdUHp)onQ(A#^ArXEtLv3LbtvYNN8&p z1p5-cp+L|bV&d^_gdkj^Ljb~>3n+G~wQ7I>l2TY`vvmB2OZ|WRXwX&k!;x(h3K~-< zl(#j8g39Z=z*%KnzClhjFag8QM?AqLzdnZt^pw**%>|g9l)6Gq|#}C_( zKWgzrp4%_yjkX$R+pquf31@C?KR01{$5r?9>)fUHiqyU48hg+68voHrbGS_>W-v@| zGPwHi@(1lXae&eKS>V+9{ZSi&>!)>e=#DE1uiIT8T_bL-I;Q4=u(ULsHmw@b)}4)A zow10D=V!GzrlR`U=~hX$8<6O$+4yz%PEaLwAu1_19?7`-+hHj|BEuT1 zE=(5SmXoYg2K|Jm#(Qkp5K6vn^vRQq(cX5=QQ;xX~$`IHvAddU& z92qA%JPPi2#8J4Nhk&K zYoe}KDI}}p*$f+ z#q&C}BaVoBnMTJOg(;(a22n91J5EhmIe@qgI6%X8lwGYk9|?v2I{Yc|-1Qn7AtEu! 
zOQ5+KzT58hpc$`&PARFFn1qu9v8t-7e_CX0c=&CaA`0V1zcyJu4<%3uWX=aOL6ejB zmHXK}HF3m@LDT5Vw;?GCgm6X&gFnwODQS=_{4o5$>Jb3_d&EnZd3+u#G4<|bdS!T0 z@zRH)Mtt7E`D3;p=piOnTM@4|4Npi3XoZ4`W_W%c(WGHiI!f?LM_RwZL&q`A(A%Eu>1L$vizhi=-^U@H)P^oNnr@QhY(K>x(rp4G5=-vVFjs z;TJ^B9ZGEDeBVPCwXckKpwi#6D#RItyVnqm@s%g#)>3RS};5gCSaz>Y`w$Un&JXkRXOV*{guM9p+O&@ zLmUwBYqYQ05>Y;MY@vBI1_w+XSW8RG7_g4y+Cz&1>aN$ ze|it%WQ0tUYoK)^=%$6w?E@)|b7f7hEwSvc7k0fEJ;unvc;TMACijqkeB-1g(0~_A zY3W>qhpnPK>udcOU5kHtHx3$=CN|iO;$Ea5tGPSP0P9W8Kd)g2e`Z`+48saB%RgRE z-sn0RN`HLFYXv}#zeI56%#|i4l9p$<_NV&#@>IRTeed@Qnkwy{_x;;4*qRR&^_0TY zUGmSa4dm=ed*tQ!$lq+3$2ct0Cxri4&&{ZNZ>kvT-5bo@-~T`V#(`ByLl~}B?2{)e z>;^HI)0lXhV}AWMwa~d+j@#2z+3Hx;_PMIr{?)tovRMP!qS+0D88y8h!TV;I1JvV# z^Y-}nuW-wH6t+&Ab77*QM2(D2{`SHwOVML6_MiIr@vkHWkUK&9MR7U4?#;AS?*plg zt0Vb!!Ab0!@yE6BkHztiZ+u-XhMixS^pZkCM}GT~cj7Ec?XGd}pB!5vl2$(3BVE8h zuf|UDgrT;jCPTtQqzBF$Fi*cXM8_}6x$k1TokH$=3zPfblU%Tx))gY12}=6pe2HGH<%x7gcxrj*&WnbKZNmi>aTAzeqrtl z>jLA%Ox3(fjTjqei-;&_YhzpG`ns+D>{b48pZ^=*@IrH2x}5)u{f{G7E-8BtGkeFi zJ~th7djc$;&HtkA=iE2$3$I?evQPEpJP-2x0rLD6cF=0@ajE{|pa?4M&ybgxut0Pr z8k%#=_n|a$ z(6a7xV&1&J%4T8l?(g^SEZ#Mj;ZlEwk;9nzaRD4T(Av*BrG)RDi zg@q$*A0QrwV(}<6XPEbV0fD82F!_m-WET~)D%{Bv#cD3}coxb><#27~-Li<+O08mW zkFG52_zVSgwx6F~^g3bWr~zP5K~NeA_m3YRN^*Mk6~oNG^z2_8aD>vnXhacU~ba9qDc|K4vB>G6HIbqBg*X|O`&Qm2%w}NsvV&pVFy92 z51dyE?Pf8s(klpxpyVz|NlA*gqm(0PwIUa{_EN=LzRXndqzF>7$l6k!jG@yhuh!>TA6h;922s!zj@*@ zD^SlmNBk2|*J+*$Pm#9%=y93rH>A&_PdG0+b|MCav)GRcRCQyRGFAtcTExX=#h+y_ z`I{^0B&QVTRA}8NQ};pTI|@bT^jgeFM6?SnB-=ts~Hjkc1%>*6*)f+*1s)5cxeA>ye+%aifb6M7 z`>2M^gUOjOjR=LlB7JTzb#gj_Kn?GL_DO32~&g3|Ok~EjDiVtX3m)JcgBL(uGPOEiJybh9}P;xh-6mK

`YBf zQ!5coGNvpyC#MKaPNW;sS+`qB#F^oy z``D$s(W+P2LCHJO<^cqc2?!;j;X!!ZRMB$Vz~4ZrQAp%izOPyfH`og4mCsw<+w5g1 zhYI}+pUjONfdwZYXwQwg*c{Q)-1CjQ9&z{H5{`#%H5+?W?T%?xX!;^$6e%WZnABVB zT}$2k)9>G}gVMI7yz|3CBspHlm5hsz-&YEj!y?~d^ytz1(7$`cUcxF8@Q%Mt*`5KL zU9A?n>}KOA0@}aqZ&Vb&T;SW)#1tBy#1pS}+DKb{qD|R* zdE%$SSrx{YEcNJMJTYe6xEgdDo!!OzPb!DwD@?p-Mx$AWCHi37-3Hp)+UHP05bukM zD>F_M=|VTd=Xq#>?0_bGQdt@RR`rW%?VbxicA3!2Ee9rRc&GbD~T%?SHI{VJ@h}Te+oJk`pK0!145|*`CS5J64}q zB#hm^|LoOxY4MmTK-K^?2@46W)6mdx+gzq!@UgjzcfQMRl}q7UzkKvAIeq^8#l3sS z-%CGZTo~qEg8Ht?Y_-yG-uS*+G1~nym=$x+`PQq`_ZEv=o?$b zjSQI?H6jKD=&g==#CBS5QhafB`fiU6icv?zFHbMN>#a4~Y||h9BP7s8acsK8g>~M+ z=N;>Ww7t|DtT7Ad+&S%fmrI{82wfLv@@LPY4k`RXDY)YMK9AG0IXMeq@=_j|85zoi zKRG`~eh5VlfG&PllUG841#I?T{rhlH$_5Pwj^MR^-8CLl2q&uTL|}`V-%>VMZ8M(r zXR?NE63-bFkTQDh1I?0{U3=Y@Gs;f-`_E<(0+j8^pr%OpZNX?v0*mR#y=`pC@;L5f zk42$ffy-s>IT=NpYI>-8Ys!wsrE(Ev!t0T&7Io$_d$48Y#moavbntLb6Qs< zmIhoQ7HQPH;0~F#MX0j;b04puRu?8cHQ)r+zO9e^A?{VA} z?^@HJ(=f&&QO*R3Vz%y8;!YRVEp-u?xcFWHTll=w{`*y5#tv6pe$OrS6z^+>`_Nyv zHf}A3;n?t_pKc}Vf{}I|V&rzWT`Mn=K4hd#PEM2$kOSCc356DZyb__DnVnq#SZJS{ z3e>UQ81lZ^O@%E65R0_VSFc`W2T@42PP`gonIhgSe1AmAGaZ3paZlSk7fG`vM?^W5E%iz#Z1va40 zGp3tCfbVOxI~@cDbfNLvtV&ZynJsVwS?z-N!u`RZp8Mok=2W#AG{t%M=hE`JiJ7BEKN_(P$)9QIekIOb2ByuF$4>CfY|Y3(oirO;fczk+_(mF z7lvDwd@#KRDu|LE*g;?kiljr!8)I9>gps*HVk<}~Ar|uMqO5yspqV%mrJka0qa3wQ z@M!_5?R*R8UrFW+|FW=}=&~@iQnOAPJBsXad8+oZAG!+^y(`|W=D%+H41ejTicV~}_0fG@v5&kYI43yh_i)bsH{wn!PoeBWzDKJLLd%&# z*ea=*Lg|tpR>LXGPM#c9wpLLwdV3jcZevHS>kQ1!&GpKwiL`!>VHp)4w72@HSDtXqGd@8R6#v?E$ZA7@gaXu|F` zGqv+2COFXZF)EN&M4pteSB%Nwih5&t;(Hg z_bJPj>mn@u=BSmVXTX~39 z9HnPlq@Ixb8Ij1q7!{3-jMzbozPuPgu7V;U#}#LxfH6a~9X!7$nDk3vT7pWnwU?)7 z?eY1!`G}I+n}E2i$U3NytRBF^Xp1cBdF^Cu!EV3C09N{oq7did&D=Tttemr>XGu1y z*jj9|B}g7C52(c44XuXwy?gtR8VUvkuk^}?gJ$>!z|2QBOq|>>!Z0f z@HCBkT}pc^4U%O&y4H9j67`M>#u%y~s+Q(Q%`~_nBSC90e^F<%hg8k+qema%dPQMQ zix!z0!03?&jYb0l+l9NP2oB?Y!*swRQ8@LE$8N}@|MB5EiKk&lczvtATYH^MC>;S$ zFlsp!9`o{wN;6ZhhzE^r*^O-_pPW&=zr}o7%vK9oTr&ytUd&62`;L$axOj0Y&Ob5; 
zKTW2j)N@&{V>FUJ6!4QlzrE`J0V7i(pRo z+x@AQhL?#$gH%;r9T66`3YI7p^nkXR)6Tp`HzOCN$&cA425?*)AlVyi>&tL);cVE0 zfHIP-z+Y{E_$4(k2qW4S_2^y^K{xTazeXn74U)mFtXZR^qycgkbKq)UU-v{kRtInV zh#Ty@K^UfmSlCK2%QA`d>V7{iID=94$~aza`OKA8B=MCRIVDJUb3Z`x>c4SUdm!mjR(QJww zrJQT=`zBmB?SUS1z_B$$wfn94HTAM0kYHI;uYdOhi_#Xx@E$KCXf);2LVK+t*MJk^ z3N$C4nYp>e*kZ0=b{t$dBp)CwN=d4gi8S+P4K2Mcwb-bbue}$QTogNsLnFF)JIFp7 zFdB2UdLpI!?jPIhiW{3Q>6~~2Xb|e`S_)7{Ph(yh(|V@7v4o_oXxTDr-W^C$>-AEK zt5*4owqg$ZdQIK!2TDIRgq1F9uHN{bRyHK5oc0hI$=f7bRZj}QQ2!~ z&z~1?gun*Gao83pE;Qi)NC5h1h~-10GkPKLyQ@!1!!|z zK*V~yvga^{dsePH@o{X}qX5j^boq6gy75V(#i5tW03W0vlhRPz%U!a1Jf&pQS(chk zZx=wuRZzwJ*>PefkC;#c%r8$_g8HG!quKAtNY)hE=WBDm&;dnUdq;V z-^5^MIQAAw0ygXjJ+&FLl-yK$1Jofi4Rcg*HlbmbsyeM&hzoQ_>I01U#X_OJQ%e(( zgAlPIVkBdtfsT$lx-<@g11v2o^UnwlEEP-^u*@%`cn!Ae-jiWii2=C9_}qV=C&=&) zhJJv_!U|bw^97QLhzC6ga@@y50(k#$h@G}51-9AFc_kFxexkk1oAPJrCS!>ShkxPq z#ye`>$om;(sxbhWI8hOaUfd^x#~qIx0h6~HfQ{-Qxx{v#`}$<73mBBETq)CY^)T|4 zPvVjT4vvAW#thmo8ahmkxu0#ZHiV+5yAxabr(+>)gvod!mf}B_*_F z%AP|R?%5futY`uK^%u70!M{b_8f)&H%ctFm*qxx&+xvMR&)0j=+y= zf?8ID9W}Uj?_O43o+cmy&%9~1)Ei>ViF{yh>P6u1xRNGyz&iVA2O#w_bsCg8iyKrZ zVF_(@3d9){kEwiS5W{U#VVEdO#nJI7?PfDYB4v&V2|-0L>DRUBlxtfkR8j zVykIr{2ig%YYUWD&d|TJvo1W8*O?If!1V)8-bLD(-kieWC z3Ha!_bHDGA@ZhWiuxWJ-x>axwG;=TjOPVI;k+K~Ny3Q{>PL!g(-L!>!z8}}=g18|V z_W+dOR>x7CHMGXzehhpri34O_cOAE6UmDxQsWSacSNR$J9ZBw18%x1vb3V#O#%&P_ zzy73l)s0^JiK~GKelpaJ!-~Rtz9J*Q8+!sZL2Ca}HJFO}w3XlL{}YV`zs%kAGppF3 z8NZ?QNIQ=oKl;asJ?xhA=E}Zod8*L5F#?F24sD~q1G467A^;tIrl(9$JwyD@seq8p8% z0Ak?i*2C~x9G|99^B6Y;zldzSIrK~RkngniO<9B##<=yAY~&s`KF)&Vh3527c5qg9 z_9=_f2>Vildv-XwDZ~N)C-TyzOLMarjLPE^J%hn+b50E4h*?=!%-_XnyH?!f(VfH* z5`E(*9t!5DQ1U+JF5}=a($;8tcl4kT3zc!AMLqhX0+Kt~%Ph;%MgI3rNgKFXwsjN*vuN6%U7DnRq z5(Fnx6d_YcK;TYoua=C#pV7P}9M4-uoG4LBU=3~Ep@oY?`45O!Ny)vSw>U=r1c89B z_CuNt!BYX2N6d`#_7|TwlhE$@?#~t!k|-@*nyIp3&Nn5;Xs3x2%8WQVj=U$FhkxYr z+ib*lF-zW>0AU2%n| z;9BpEGbogfX%Tn{2B@BQo3b5sCB?r>bqFrN2QrNGaAVXvIc0Tq3| 
zUHNVe&D}|}9(D`+eD635cu&NWN-xE|_9y+@N3)OKdubBQi`xfuklc+?6$s^azw;@7%~qK85H;=0`Z-kbZw?9O9m6bTLf#gZYg3=+$f1MD2WwH(g7qPiC>HAxq1hlI;?% zb}i)`Ov6d+M0!mmPVKB3PEDHmkfL`WEu=JvI-p(qsvl%zr%*(NpgTp_@9(&dx4j zMK}yIz)cn4EP0x`D{0=md3)FH*NHPcG7EOCRHWS_( zkOw*4lf-D3&UgM;q=lHnroJupf0y!puG6dkPI@S+zK;U`sW&3N8eq3(`&)po#B`$L z41M`>6XYt-oZjWokUvxBqGyn218xUj{0U%k4DOHyzP=?%hN_Or$zA!VfoZ7?fDt?s zi`}ffyu36VRu@uuF%snD9t*0^tpREkyQr%@?^Yg}h_Efj7VG%izZ?V+EsOnoDtsw) z#V44kluZzgs7}2XFgZtfFi3sO!cZ4KjIGzM_wD(XU4`&yFyU-Gqi1)HaOcLy*13`^ zLw$Fww(26VZ+jWqxfno9t2m~t3AqjsPe(wE&&k&-k*YzIk2peQDmt;^)(5vo z#Ql@*mhHmuhJB@Epv3hti3cVv#2l!~vzS;S zn(Pb-ir|6;iN$31BRxt#Vw4Idrjr>;%DyRpieGKS*@VUC!iUm7sV2uqv+^wISimrg z^Qo5mQrJBjmO7ph0lIc}zH#xDoZ^6p$53V;t8v|h|Ic=isn0;YZqe4T`V0bJ+`S#( z+rCnOr4c)GX*xH?ld>t#G5NWqs;cTFv8@N7$sk)S<~rzl&h^X#*eB|du3F??)P7BN}j3%1}G7k7_!ORrpqcGOI!b5)Iujjp0~uj+sL1?@U+K$jX0 z6APfbiF5H! zf-~JpH2PR(XJ)<_8|OOZTchxk^K@~1;V2Tt?~8q|fa#4Gs2kyie0y$73_8$(DWx@c z5`yMnbnf5QcaOYhBSzRjs{dD6?3cS?DQbK>UxP+|ijgQQtIZE%umvfuZ9DsTvwLf? zRysBGsSy3mz6l-G&4+>*xra>5VQ=AheFk1HW`QLx-K_>pvKx>$8Horm37J~2Qb8Y( zaCw+td`QY}#c0oTULDP)sOc1Gg;}qu>M9u`kW14u%lt>Dt-KWSwD`2Yzd>+k$fb~_Ah~U> zMAhcYEI1%8e$Rijndv99i2T#Vg=1WlEMvo4oHr+zn~4>BE6L1VWN)sjt=eDJMd=IQ zEpOBH1s7bDEC!oNjR8Zx@{TviXmv~^w1^3GP*iNLx$Av^-FWq)8O4PXW)Twi2gzvH&4GHi4*b&aw*~-LQ!rv3q&8;Im$~EEZIbj} z*tJdI-i5A8X`8eQ?~27k)e?%cHrK0MT^to6+3SQk^;KxS{mNt1-Ybrezku7vVk=3! 
z=tnka^vkwxmDKangl=}0fp!w|*$4Q+S}yk#iRa8(VwfFj{KkEm1+W7tt~;z;zH`>k z|BIY;1H>@DlJ#V~D z+5C&M{-u4PP8vvEize^nQKK$^dyhmJN3Lfq(?D3;wWoaYNdfrk#wRQFSP+_%Ac&?4 zpF)3y-F>L#{H~9t*S+S)(FD*ZFNwH`G}}* zOnxzdx;gjUU18YYD5r~N(gn4&iGh??1qy4+Hx8cYvgh$zJy4OL&{wUQi<1ha$I!Q$ zy9Zt(kII1Z20<3QYTNQB3VTS~GxtvUAM_K27yM)ATe;9F?pp}L(051S$lZC{aBDc(p-^2@;>{#a~maiLI0)}7t# z9^UpC0ITMca-^H4#y-f-$%(dhKbi-9AFprx6ydz}1-|Hg!2895XFPa)@JQEJ?;J=7 znKdbf>+GIx0`F&~IoN8PuU!(lKV9bP>-+{FXH@wFt`IQsYfWk!;$kPtz!*g=Y4nHNEetW*)*}y z7?pq&X6a0_aROR_q{#p*0M;Yu59=+2k$+`mRa)6x_|ZEJRQmv*VC=u@+jw`UrDdrG zVCCra=|Q=y5#u*5k=Y|8t4VGn;@Hd z6xtdqRvg5@HYrapuR4h7)S*S9BDEu}tF4O$BRxYn#MfRMRm6}5BPA~c1uY{^3P>P- z_?On5pjA^q#J#%Xy%k8_tp=*UpEo03VvywsJ{g?6A|A0h^eC00kCnpIqB`IC($dn8 zD*&LE$KraGtQT>O=>dnkcPt|&91&(>lUI)D+;b5`2hSR3?z`=f2SKwI|M;Am1ov0cLAbQ=c zmHynW)PmS563mjivi&XOnkt|-n6*G5J{Q^YAr*nfpV9OzmI^{!$v_1cQp4fuNkHc? zu2GbQC+iHTuX8%-7=Nb?IRTqVgP1T_Mclm}a9J*x$%;WD}`1j8BTm4)EKCqX+kx9B^pT$8h(nitA&5HU71 zD4c@#Azj5{3WBOF91>X4f!koBT6SF2fIQ@^xH#jw(_umbxQS-OcqI&P(j@QpqAn*C z0V>#?WTJGqR=um4aaFGm!*gLrpE-Sh)84>J(V*%HvsRDww=HVg&Bgx$Mc2;xRO=eJ!kA-Jk+-k7F-mb* z+i}iH*dL06c~tV@vtJw@V30if@)$%l!@FD2_Ndd~g^TPdir zW=JowL_U*LShR?VM(Jwz?>cFut&{4QKoi91GNP!f!;oRLj_SI)QMhQPm`}iwD~7*( zq1n~g)v<`ALRr^A8F5?sgQ~xvGn~P55}X3EZ&C}>t5&-L$k{q9E%gDujHuykHf@-6b)oX=1!?&j*d6xGcjb zIH*OlB^9+aZOMR%u0Rljlu%$-s~~$;4 zPwRQb&=T)pDyC@s9rlfML~z(tfpw;&5J z3SfTasJzATV_XA^AQff@ozbtW+%ZkwK08Y|ZE=C3SlFl6ed6vrrCu2m)pe|P;oKr; zJjRF~QugbWTqA!LTNw?kgdP~SXNsgCxs6(AFCPG7`Tn|P2*Jj~4wU0!Z^BVNLmQs7 z3li&45D^6m>&xrQ>3l=(32poi`}Z<%&KBdiefedq$`6Me+_i%9o8DH{?=w0M_2g`DuX|{`7v&hG z6KI$m#7Ra0)j&N*nDJ%D8TTIfWAzw+5~{#e;;-iSTsjOZ9mp2!CyrsP`l(##XHQ9X z^+P4AJ^#YkM!ff06Oj4UR3&lf^9nI>{8f6Tk>~&Uw3B_7drEA{?AVev&rd zCNsO|A7F^>56n?Ps#&;bHk*la8h54mnm3Ah;wK%b`YMdGa|jJdMmM` zHrHGa!qUkOLWz~4JBYz^_4x6A95?wCgo}iH^LGF2eMA37OQN{(%WfCHa!oizrlSXx zw61=Qb^yv33V~BK>LZ`g?!-qwF|!hKqVGzFpFI9V4u4%j5g)Evl#+x7J;2Vp)&|f9 zr-g|1`dv3hJk-Clwe-VAXZ^|9aWrt3Br9LUjksJfv>Ot8h`BiA)jw@>ZO2_D|AmPU 
zgpmY|4fMT8?!cWOEeFBMsn%LvG~5a8VmJc-NS}>S^=0F$@N0qjPe$E``S$4j`v0cz z1N~@<_OytjHe}Xe)j&K(pK^H)YkqW}w*p8_9 zTQRw~7CiPacWn@}XlR2GDZ(ODWH<00fI%d+(A0MbrlGo6DhDGBd)+O?RX7%&BjyKO z5-JMWFHLt+D)tT;Giu$t+Vz-0K%|W~EH-qsVV5NPC_I8TVOhQIZ2nz3QNdt(l2Ifk zt01UuJ_>c&rkj0nT+v|%bZJuLvfeMPg_j2N0*^9kKH7!3;N` z`}a87CDtFFXgbi*yyK#!Jyi`BUbqsT;Dq}qwjsL+W@a|FE!02B$j|tq{toqeTbTWS3Ws+C0^Apy04j0>YC8(jQozk71uG7S}#+x|j~rQejAKY8>? zT!>~L;zCZho3~b~HmKqQq;JZVm!CX&0>JHrkv8V+8s8P>3fS)U9|~UR3P7y2N_fOJ zATmwwMk`4y2B?JFh%hKOWMp2%XoP-g9;h@|A*w2{eM;Yo`{Q@=b!ca4X9a`D@ftU2LO4LN8xPH7F+L(LF7EWX za}&PMO7A}@ky>P2%mFzhrv<0mbSc+`tkF<84&zG$FP{T&89840z){t8bcB%_5?d0f zFgsNZ`8u*u?-U-PBlpAMBy}g9!owa2>jhpSy&_{Ay~28}oV{y2;aU%_;vD zF|%Z`hioBjbv!(@fmcAfaXeR8kHL^4u-Fh$Qe+_wGE)Jp9at<$`JmvdVbZoNd?$ga zDMDnx+~=k{mS*V@7m3X_<@Lp5?;mgW2#2C#n71`d`QCFj?uF|h(&;>m`d|*70X==f zfcJ0$uR!zdG@se?d6M4@$m=u{=v8QOBe|(zTLLen{|L_9eTphxMg(=Gz>f=m#)%cN z+p!kiiUlEn5Ycn24Q_QeL*2fwdKV10$Y*+0u^W1f9%)jyc6#mxVh-m^K5k7+Mn;Ou zE4s?C?}y@{RbZourqC>C-t?h*Ae9JUHspnpA-WdAX=F;7D5v&(!>&GPdDQ(0`=Sjh z(fr9p0MWQ~VW6lX>(=oH*&d zQX&-ODil^VNN!XwMfRL?Mw&9W2eE^IyM;$ZtpS>>F6RP`B(eG|6^uLHl6U@DKs)6z zK&iEwP4n~swB??Z%9C!CLCLj*O$mmnO}B@LyYGT`-WHIWg0M?m5grUv3%OkoGPc5V zu$t{I`$3DqoTF}T(F-{FXK*o1a%wH7WGOrZ9hd6bdW2gXvK5_ps7LIFt936Sl@L^* zDWD2$khD;HaFQkWf+uGwM*%{xHSyrKHUz3+!UC){@EoJ0(m)a6%M6EHu6TMZ>}$Ac z>u+^nXHUJqLWso)$=dNMp<;{dT>*YFj)9GruZ6Ah{G6S7E@@JyvIYc`#xLIZdvY z#&P!pY6n{06^z&5xSy-DXyZZyiYt$_f(X?G+U1a*SxJkFCp>RPw>sEztD#}ol`B`C zl8b9#QmTF|j@f$Oc;u(vnz0tt7lhD(&!rfv?i{RwEtrSnPJ!BO#|}uJf7h@OAPX*+ zSQ&^0!jw#W@j#eCDwXx%K_NC9O611wz(Je9WTg&UJ(dib6#&sw>v@T%@1HaRk$b?g z$R+zx-Fn6H;E=JSG0ww9_=aoGl97YmOTT?Xi08c-n^->(~E756ovb7b??-g z4RO$2H$tMKiMEySJfsN7sD;X!_Upq%QYNY`BgAI$MqmtH)^5T*#atlci13Y!aX^Rq zI>UF^cBGd>C>ES+(DVJfj$ZP?kRzvYY0oxz5Bptx@%n8=h&T}2aMM-U@3}gq%25>F z{9K8pGyH7W;0>ZOGFCxMyA-f+0phc1SUpSF<#qw7Ajv{Ss+a{O(2GTW^_w5esvPcD znV;wD9X|!jLZh*3INa>(8{)gY<`K1iQLv07aqXm9=dxJ1Jxw=*f`@gK8UE#2jJUYd zG$s?k^(rzxkHKI9{8>Wc83eHq_-EDHqUEwJ?)m3G<2y3PRxCDLsMA+V 
z)jleGiK>q3|3loHz*D)d@8gR!sWfUpX;2|4MTUf(q=jV6R-{OSp%ltYwVM=b8Ip($ znJOD4WynsYWGhoD63JXqkuiSPy=WiWo%Z>j-}m?b`@QH=-Sga+{d3#N}Sq`PR3@8w2lG zlhA(sY++`h<7l+L@5nXzy`Znov*nDrcK!N!Pfxx835HiRNZXi%s#HXnICiWzVg|!2 z{<>ciCKq~xxdZ25<`HEP3G+NAL6D1NB@g8O+Or87wDVBf}OBL)y?zKP*I({18)2XbN65+IF<92NQJ@UT z`iLi|<{jkP$8~yuAU|Q&-$J|G^@Z{KBajOoZ;Ow;T@$eRu-68f=|v!o_mXWLtv}8+ zT(@SQ)T)}ePdhdLd@+HzaSN?Vz6{B4Y*r%`k9>I{T|I-+uR7LA$7#GSu72Vqqqyu>wR5? zW~RZKHSaU5NQGDYPS1WOd#(iMYm@iN56Ic|Z1PZQM2?*0Fgw*9+b#y86ksJ1&VuIb zGqCQj!zh~GhcNJXfzK%C8=>bqdc`5k{IA;?yl-uG?-7O;IDkJ=Quz^mBfz*IcFJ6k z4GM7UuCR$jqM;Q@V(=`EeJO$GnL-*+M7e;}3Z+I7&txEa(D}7jG?YYcu#L_Uvt~8Q zYO5R7G@#@tf3dO`qCJu}m06W;Ai@>L8!W*+(M0e1C6ZS}mM!m%0My?Bzvh@zlM9&9 zFvIk#%*1dv8bNX7ykT@p2dccbEosOd*3;9NJFn@3PdB2g9gWJslbH>!(eN^k%~H&* zql_u6e3FMgEx0-Q&&st1_3N5{_c{K#UxxDN7q*y|q;{3a;Y3VvP=$#GTJj*ou8>I&-6v*f%fdz(%?3~k!)fbY2nX;UoA%+hI;csvk-AIb zF>VSKyMZ}NG-M4h??LLneLYY5BrUEI=e5ZyAda{tCFmShW?Gja9b-MI(*>o4+T-9n zAv03KiGyiHe`wc`WKIZp5A3`teQm-etVwnQ0MbCkU|(Yr>oQOe)Poz)elOmQ&fjy} z+PbD=zX4#OBP&Gk=#;SUj!%2aw_o&UT$f%litjvzNk80HJ4(n5! 
ze&f+2kte1)lMK1a*Rmy9CAr$YynLJ=JbiO6Ex`R) zU>U6UW@46u$ie`aN>1npK%b^r;5_D_h-OZ^8El5Chrv<}V3y5tsN_(svjR(mL}p(( zRC{^nkNGVPq)b-Fsl8d&kR6x=A={q~-AtSqH<=~_*s{|kZF8K^9Kj_Rse*gv@kK)h zK_>8^<3Vxhagx|qi16yu*nD?KYN`yA#N!374XNXlWB`QgVb)&gQT0cGf{y{9dRdhn zxH4ptn^8p=3mB-ZNEVAVPtFa;6RWrTH<7{Njv*sJ!(u3tsr1Jdo%JO##hRq;(H_H8 zlm;8w!L}v&SH0f4NsJvo1E+BSQ@M=+_q70siV*^ev8+Z2u7tV<+%I%7hmBKWVmz0} zgPPAXU&OEDkZD?HGG=TRk|egU+7A)O7nU8={b$x}I7v&I&m|aR%&+@;4^mVn$bm0H ze#k|wH^K*@(ilZ-Zi+P*@bEF+&^N>m*J zaJP&{O!z{Ojlar2Bm>(Ms(d4fe?bWoj-zX+Z1q6RXTLjLdX7BcL->fNp)(!l;+7|o zQxA&RIP+VK%G4i<*8T|dUw0wuwp~LwX)U9%o`J%PP00dtfTh>#xr#~rm&4Ua{h;U) zS>Fd)^kU>WS^-1Sqp;DVaU(@ZMwtYg30Z)K#>15No7~B;XYvA#r=*T(<}JIR_`Am4 z>ebK_;KouSp>ca~ON23m2uTTqMLN%cn!wapHDrIT2)V|igYaSP$srFUXu3gA)Zv9b zr)hMuw8=hs$Z}?E%POHBoAdyu1AyW`VEf2;|4|=vXa{PcL8ur=yTYMQqJd3`fc6TC zAH_5j@Z=JX@qZe`-xDx|$rECi}F5hn#qxMy`1->ryCqWCsg zP6uMthvY|3f1~CD&Hfr6d^Nu;V0!RK zhWEz*wCj8tphE)cK^BRM%`EuHo1>5EdesQ*c!;;9Qh33G*3*HkSuYt z_hSn@2fqPBU_H?bXow)`3MDcWnJQq#K#X{)S!QJ^Lc0MD2g&R+&8YibOr2kPkKN_1 zLtbIM8WoUzD-=uGUz|NXq!ZwA`_}^}cQ+1A18}BwOXsnYp z{c7Fc2q7wyhggU+q*YSc+U0M5BJljf)yZx)j^huQ#{}jrb8rmDT_a6R$EiS*%NKJo z9J$8+GL>WbkLa*}lX$Mo^^JlZ06?n+;F}DHk)Vm8i<}*t>dLjuAvY)}6+44w7)XnZ z#v-xPH|1a%tp_$)2rVqK7}oM1Qgw!%v~1lGi+qqOoTnHuT*!o~Om2ICxqhb=A}gvw z-wMaNnKao(b{GJTpFa>Ea;KCgPb&!Ybq8`U*ZnS;I5YZ|tT?UGIx1QqFBSzd(eDzE zxjfE@{6tIaM>j+)EoD!X3EA-6t~$mKo5DXTiy|Ph8H;>99-RgNYuY$!N>JaDAThXP zIvY0z(2yaCk)oy_Hw((8%ZYs^hXHH;2))#-rnX$nLRNo#SoissF%MW7WxR}h%i#X` z;EO}Bk684-k!ILI`i~gc*ymKVLKw;JKCeLxz`?dzCLl`^N z?Bi*5}FH8s;!dX&s z*q?6LXwuhpPAC>DmCEy(=Rb#^5+{`dAtpHM2LJ*M{;InFafb+``X48~ZF7R^N6kZG z1;b$m&j%Qr{SW?mEUuQ#eU13MKuBmE3sA+(kRb|EN)9LNobG~HK^-dGWP7^`a88&k zvkmc_4C(DVbBN5!1oZw5`ePf-{D7`e<&dlgm>NCz7!pUWyBe5)7eaNyxYT-o8k2p% zQLXfshSCp0$k-f{?1ShxA~(jFsR1L2v$A8jxnb5onmH>!$fyog+nTUNU~hmN(DM0A z*shdfPVU{qVDc9P;cy94uKM(Hf%B6YOTx^D`pPa<{0yv`MLK0#{$7O(*v z4qXTbllG=C1YofOAQMdDOK29g5>N%yh30022!*0G+X@6E4r#^T-s35VJS;oZn8a5u 
z4ejgMNi#sAd)*+6PTq!)I1J|ViQjdGba|f884mC$Gf{0whB;vb9|-B9d+jK1BWU$BrE>r6hVy0_g)?T-PI=yB9Z(fbs_M^y9mXGt@8}li{GxFOK!HP-t>>u1}nN}QZ3ul)3Yh372?b|nbU7$ zwxDv;qR)dm%&L+jgZs5Q`aM@F1wQZ>6eB618?EF3mHOk)Z&(_Hc zSn9lvgds%Q-=_YlbEE#ptb$nuM)W~esKM8F7l!}&Buv+|bmN3*0QOXhO)t!JqJ}?Q zQK1b1pD;**#!0Gu?R?_?jIwe(l#zNa>r66v=`Pxl#G5G z?v7dFCj?G8dB5d5!b+f^@(a09;b-~~+Rrt))1EbKJXbq4c?l1at0Q$JENdp4vw+Fc zt7=G%zSc_LDL{UU361$on)$&N(b_pwXLIzf0J$}I9WdYOpSxhIVX*ovdsd%H?W<^SM>q0iaSRbI$%;hV1Nt{<5B4Ey(+tny zZUo0PU@Mja#AmU1`|#`^Y)OId8y1r4Qc0MW7=BaLi*NqpB#W6Bz{Ddkn5`nRp)%#k&ZR?sK3gK}2O5M5Cd^`met7{PycaL{4s7KoLcK$%Lq{FQDb zl%F$}bcHhY_Yz!{27?UM`LRdeyZbW zRsX}iC1D%qzJ!a5%nAG`fro`01*6IUk@4QkCA52-DAApx6Crm<@NLr?R;K0F4xz8 zQj8YgQXci_O6=iS#f2n$f8%Py)9we~gaIV4>_(kOecc<*4+f3(ql?0d1g!#Z`xhC( zU30yr=ALCF9^j^VC3fF4y?v)A&lR{Zx zw}wC9PZ>V>wqjjECWU~i>jRXy)*eW{i&PE^HTdaj7GRuOrNA;FrTSR` z-w*RUj5C>s0mAWnnsHqIl0yxhXR;PA_tvN5agW(!kgk}N;yC_l)w;SN(y$tr3?w&n+Y*vbkYa*&7d~(H}CxpGS7sq8&NHV}q3>5wGH8-I%KK5Czl;rhi zK|?F3Z!efOcW!RqWN7e#tyk)t2WsJwGNe6M_G#2SAcgpu&w|M`WdHt82^VeA;w1k| z%O8e4(0&?A6GG8e5Mm;tpDk^=`Z2X;FwvYLT&(OKl0{ku%i)l=Vu87;GfhTSWEUBUngVL;nwK16KO<9ljNktuAaKm{; zL8X0*H}8Px*VyQ6hS3sem(G|mL-RPiw!jSBk4{(}W+h@sA&|`QAo#U^V#_!zc*BUb zUacU{zUc0OaqUy|<~Leh017}e-DFnXegmmMlBKqPDu9(utC{ z6`84Hs1FFj=z zn1${yt7seJ8i|ib3gMG-bb`ld#FdvN3r#}{_9tCBvj2N7^PV>_G$gH_FGUJ#ngBNX z>uv|M8el%L;p9G~(wXpxP)qMcu1SrBO$bS_tRc~=CQ#=~K&^-ay&NT+8XW&B-sgDn zN5j1)WZ8pRtDNd-fsxA$vyjBh&_!QoeA`ZY-MSN0qu_2k-fZsg;vUldkc_QB?0=HU zhvDpQG&gBr7J84D5Lp`mI+S7`ngbvfnLBqi*wia)6oxDfU5Ju-f{3{uP_)SBuSh9q zu6zC#MaLb{b7>=kS$dWHvO?W^Zwc)Nfgc6C<;nLXSaGMPW=W0b=e_RT7r+g@1Y-h9 zli@6M?}|PPOhE>5X}Nh6zUEMuvI&iso3oti=b$qp0!J>OQ{suQ=vGiYQe3-qaDtY# zMpC_#39Xm52mE_?VZEUkF9G$%kHred3XBKo3xAy>n+T*)=bS?W%o|A2fmFG`F{Aqc zkNS;!_FTK0olOlqNH@gqd~%W36HTQ|8oY<}l@STZ1NSThB8q{~d9-(-6T}vN!>b## z_6_OXWJyS>`0eJj+zmia${J=@DiAW~Adi79Ezm}bs^A?cJCmc*S&uAOvkuDa zT@zLn%w~8n4s!388+`j1jFyoY+sg6@5104!{3J7TM5Pmo-&x~tjSBQ|f!e1yPTOE8 
z)RFWezIUYmdzqMxWprpqYl<`$i`;Km$|zL_NUX;FAN}J;K4AbW1DVKQNTpO9?$rI?+ek)FpBXyf-P~Kne8}_b6nkN_M4|pe%vx-@!ODVY?hBxyC(Gi znFvy;TR@+mXUSJ_SXXfofv9o*z>+^`ca04INPa|hi7#(#gC zk`7p>7R`Z~)GSSLOw|2 zVJgR&)gD8`X&%is6(^LkW#_R!-iLe3T!ws3_Qz_8sLGb!)zLDJ)=&k}wYJf=`lqcn z_=WzP)$pB*tIhDAMUF!&ixWZ@Gbs$oOgb2L_mbF)7e{YR=Tj5s)SSm&Lc-(o`4S#r^U4|*_E z55h>ic^Zt+iA$l9C4D#q9Mtm24WU9*nH-blA5{sR7(Aq@Q){heGjJ;={JWK$_kDy7 zWP7^`AH|t*XysVJEgj7l_R8@e-~2oUmoe~FDfXjJ{_Usqx6SZ?s$t)sUB!?!W&c42 zM}Olp@qA(*Bpqz`aJvaMp+Gntt*!3Or>!QPCPRiXkO$-h{ynH|1@Q-;47 z_pos;`|bYSwu%v^BaC*{MO5-R%Ho_0=%%s%;4Jw52C@%YoLwp*a#8F<59yT}bIvr5 z$+Yv?e{j3Mj{^S-%V@$oJIH=47(IHM=5e^-{`zYm;&aCm${OiUBks_586?{~TW(b9 z2C(EIEQ5TJtO4z0V4kh1%IKKI%aipYY{lFO+WZ{B<1BE9*$*7`rZa|)^IpqZ=Hj*# z*^JlBl_{&uC?z5z2%|FS9QX{RO%&WFO$x{O)dnI1hW&D+7#JCMPP%LN0a8;UrzI+w zkd)FR6BSh>X=r4T+EuQcXK+5Ugj_$bA9H*jNBLrr*rn4pU&peI#z*_N_cTvT|L9EF z{cDH_OZrZf#v&C5g))G2OfbZi0nbz#=mwE)@Kr-r~ z20AkOLR9*T8XkIN@yEXlgD-5i~DgS0&j##eVX^3`-U5UOy$bmFeGa;Y8qPjr8 z-IcQQ_8X_fZ?B`y0H7mMSs~pvB(swjY7zk&f^4=gL%T7?yBia&6?IirRg`A0UcH*= z1vn4YS=45L*(*v7XKtQa#s1S`U@Hash)+{&0xJ|0pBKp$Pr$Lx@Y1<-=~9rqV-j$! 
zO4q)QkJapHVRYcsSu`97W@H=?XVhS*b;OqWCGb;h_nn4rb*u^x@odzhdB_ zpxXhhDR6UFCt{L}Miu&tsyld4z(H45n+-{xdXDah6@|3xM!-pOLyveA-KKIyy@*Zwu zVOe_2^8HI2cI7EAZ&X!<(8T$ZO-N;&mIMwcqRzHEf*GQ@vKEr}#XgF?!c1Biy&$am zKd&WrZ;QgIE8y;9c#?bcXxaBNS?}MxnHx;W&52Qz+)yq}f9!vhSNny|RB&MzQD~t` z(|?lzEs7IBpHXOCboeE&VUol)H#fHsZ+?p%$UYap?A!vKf{Gk~&P(hskYCAdS3}L7 zTL5IBdR>fWtINxMgYO%a8lRHsu9N*pY%|%Bkevt#5$q_^!^^irQ`2h-AD`39P{%hc zCu}nk;pJOlRqy&9EjJ9>o@sXk+^?DT1UNw)^v3Atk#~123X1Y5I(72oIU#^5531HN z4xaK^l6QZFH4d)b>|IBv!rxB>2!D)tf$jc?YHZ zo|CS@&&BNDC@e~PjeqNWAqsIKSE_b9WU^4oDb%ch|FLX@>#zlJCW>VZ6BsczhMO7v zCjp%ZKRgXS`69ZIJuQ)?LB*a%?m!i5FF_g25_`HLswK0tmLEGx4P)%yfV@55*MaI& zkyCSCKkoW_&GO?ypafmq-~H)b`&(B(15A2g(#<8;Iuk?DlM;ybud~e;BjGx+iM;kj zPV%!5H~gM25ECO0=QA)so$vp$g?HAh5610MnA9nLQv|maR5wf(jC!;hB@jN=A`ax{ z=YKiS9dtk=`*`2p3&>xEB_!V6NjT@G@wxT}tWRUu$yBFPlJsja`O8Qu09HIQLn_x+ zv6!E?1{K?UACWn8G(e_t(8zFJ2-?A0iL1u^Fb2G9=6UkulD0QVB00q=?|+%8V3gf0 zKfxRLv+bLMTseBrBzUE|r`{JFm1DQe`;l~xUWr??{{7cAXhn(Hm&LhgEO6IUiOp^v z<=ybsOmxz^oGB+leT#DJ-Z+}+cV-ww#`TCVi94mOebGbq^4!aAnmyi?fw^**eV_0T zs|wV7AhAzFzbX(x)eahOle*(_>Gvn+y!HO}T9b38bl}ytFF3ScLxr=!_D!H@U7+WK z4y6BXHkiVt(VaEbzCtQ%*NQG%MoxEyh?P{mGYcU%)&|VC9yPKdqiU=dbBHNa&hxcnNm-@Eu4fxO!9IiPXN=hbo4T5 zid%$9Jbu`_o+|nv4ZH$q%_jwu>$}dWjfJ- zw&}eSMcyKl`g2GuXPP#RE)Ov8=*dPRlr_`y!|BDhAM^U3;y#`<#r}48(y*C_j=!uI zEMD6ez9L+#c&*>a8;PH^r+qzj`-JYL@6yCT!`#^jrDRNwjV;V1K{U9%pOPAbef79I zH9c82B>mON?lX}z!TUT@{!g8SAK2?xOKZZPT7z#!%b1|3u|?<&>nCh62AIZ*M3k9g zi9L2Fv;8Ac%x^rwl}&SYd!wM{>;}%Ospa@{^QELpx>M!LhMfrz+g(Kkdvj18BUqIx zboyib_iFvAn0X)pH^8fCqEiSU5U;c1vSo$c9SA!$LBlODY+I4DZQC}B=f7`DZ0DD< zh;(XeY=FQuTRCyc=9C0U1Zs*#?VSxNBek$a@(>12EHKFu`bqp*d!~pZEE2euqhM(I z_AOAnJbaj2aRnw7&{r(_uoswEMGp7@;`SFNUkGu-isOEt3~5A+=hp1b8GNr8n&F1I zOF`h4+yC}NyQ71HLt?wKvNEAd&7T2}bzbi_y0t&5eIG_Z)L+ptB46!#0J1N0=FEwn zIZO$XZ%2-ee_cr>G4E@_ zfty5+^$5cAaOTVeWH3y$W zVk_!S4hCLQlV47A=?{;MrAv$~make>T(z$J5QWIa&inu>!mSV=!&Y=nyD`!H(u#2T z7%zXH+w~^JD#6@;2QmeIN%nJ0t@lz|b99)^iWsj^Yl_!0b{=_oLP^<}-4M$PIULNc 
zx81&4HHX|~1Y>Q!&*0{WLgZzMQ$OPP1c4GU3q;_I|&Fk7F#AH>tEYEwP#5kv}CYpEu_urT9 z9L9l@U@RN6{lRIlbf}QQ{{TQ|2BqGk>Lx2$nSbV-?eYvtmfy`)xTo%b}py3exAxnmo;1C;yOnW;>Z;gw`tlV6|V5OARpQV zaSI5X9Q0MmEtWl!;U?+QF@gZp6kC}|S+7u^QlA;1JhyG4@yh)ZZ6?v8>pW4kEkL?& zHd1WE3LDX2@K1ngz25qx;5g_lesHh2Ru0I~nJ0=x&$BsRl|7~pXxYwcn=xevcvRo;=qKJidy1BV0sD5ePWx1F zC(cxXHTZiY_ZQrGh*Pk$-b7zQ@R15#R=jBK+@6Ii)0PS;4ynKd7lDyDN`DO}5NG(B-I9ZEM=;+iqr{jB@iJ z1-OCxPVY3R#aMd+lGiTsoug#DjJRBm42i8-Gm?LF1NEl? z;&UhH-#7AmsLC}9m!PVjr@DXQp%~m$1_0!VCt|z~o%&JBs#0|j{lVv74Y5Ier>)+9 zwF4@h5QwGwVe6uWOq`{%)T{j$qCU8%W(zz)MC7CkSMXYE+`I2rDq+w|i#}H8v_b!* z0emQZ7ngJ%=oTSwj770Slp6AT4f|zSboLNRP_EYw5x?Kfa(GCD9C{AoUz~t+KvM-& z_Gr>rCxJl0-jM3G@31MS@zrcipv^dx)+_w(#a-wsRiJTA{`y|~80B))G4gk2+MGXZeF!Z$k zAbX3ePoH2q;a~0KdEG!dweo?i}SYxDlqK^oMG3CzZrR`nb~HcbB- z=L0JeaFyu7g{nGLdD@_QDb{=e@`TI%q@b8sz^(KGnF+Q2so63M&C3B!z~BazKz}=B z@N43QoCV48hnt5-;e8;i3G%ufaAW-^4H*%tTqLOuY8}SzT+6|9-0!u!F!E%(#L(e@ zrLP*I#~)qC?-*ZvBM=E?i+gq=eb&xkF?faXK@6P>w&A zs=fDKQ@~_mY;0^}`K`|)H#?z_rJfEghn38u>o=jYiF)wt@_K$I9vN7ylw^Pj#!rIKkdK z)&N2--MjaDU*?uB2>_&P5xjv@ZkCXU5`-Go)LFVnuhHLlD-jia96}>NW@o+6p6h2<5t<_0`R>+pjR7m3{Vx6k9X9Q* znwkQ-XO-j@_$o#h7UnnD-kt2!m zxIkl6uo)|_H#jb6vhBw;hk9i@y92!iM6PVha2lS^zSy`pt1@G{o7WFme{>o9n^_!p zsILD0zONs?2KP8>T)K2rQCLXG#yd97Ra5_ulkV}i+ic_BnTd9-lv=O=-^TUn>g&C9 zGz)Xb4K8|A=XJJLStx?ku zb8*|Bz4ol*=g3w#dValo=cd}{+6zbfpiA4vZ(|P*ps#o-`?PZ>ES# zdD_h>GYt1%xM3Lu=r>0~;wY}xQ&n zh}pIwPt-J_He!NnHnk)8q2X zXxC7W)T#Z0>srO{GM*0p$1RtB!|+K1j`OF~p(BA3L}fV)S0E9I2S#Q>HXStkgSNQL zOi`_xd(^8%+pyY3$Rq9SUsRwW3GuhJghMkbPCMclz)kRr-`fc*DZy&-z(3!fiEqIM z82nW^{(p||3>5BAz958s4UhsH=4>|Hpv?LkAC+!ROGJNh|E>NbHrC+W{@dE7B4E&0 zKN(LE3-<_mQfzekpYO|{-)HeU3K0L5O z5B{yR`e(l3-#*YkRuKP8Kl?@XxV)PemYJ(1*1?K+ zpYS9GNB|W%0p=7_dDad~HL$X>N~~L0yMTd~T0c+m9RlL>&u9tDO&RTh0OKsUIUouF zS9bci$}K3w1dMcti#|xQM%g}!VS`>s5ER#&KVyJvCH}=8l}_j?i9H4>^*BJNZ#1GA znmsH1YsY384nS(`7eQgf$ePf4jgz4zaD#|D3zDoa+|lP4cIG&{%_I9zCCueloR?#> z1jRjOAaD|j*?WpecLT(`KY*3Y0L{AT+Yb?=IQ27F0Q(NQQGoZ-lcVTb&tOD 
z(tEcgQoX>V!pUHFlw?ycbL)*Anqf1H<~#%(IQT0|@NZn)e|+Yve=p)-DCtk+=WjX} zIvJtyT<9=8bF>GpwuWvq48X3{Yfu~+lZ!FYhI66RUcA#I5GESY{}>p;@TA@2-@6=U zJ`}O>PZ*2a%R#+MKUQ(rD6Zazs!aH0AB^f7%YLzM z^snWT=!X@$k8qFXtkJ#r^4FK8dQo1ojw>H0K~h5OF`j-j0nvX*>1T~4o*b`ML2qIB zUp~BwhuBC18e6PLpg|Zqj>Z3{Bo(@zm*EvY(3D{jhYJ)a z`q%z^iW7u$SNeaZG~Ftkocfsck^ct=vjabPpJ$I9ixQrs1o+bB2+tzZXKN)3Q6vL zZtQ{{gc-0`+MX(p-woCf4eg(__QA)cS0VWXpa-x>7?}ai8I5AjJRvYj9@7Iw#Qf=K z+17^a?1}v6(=_o^`-O%yFft#D86UVtXf$WEUzCGDan6ZDS&){GC8r`x#Oh&`qK&NV zi0)HcjZ=abLywpw-E3%RNFpcH^P(2`p@)aElnIPSz8HuR@SCy`kYJ$sm1J2n2)C2P z7@}qCy~!KuFFJqNC+9MIE9`ny-MhFqz18)8l- zZsXM;!MP(i4bWhLa{4@QOrhq_CU9+vyO&~3eLC>GOnM`U zK!cl5gnu|Dw%H-zEbKOY~A}W_5$(P!GL@Xu4l$KF4E_ z%YVAagbuV?D8h88qQA~in*A$2GMQW8^g&ftA#8v-W2s$Fm5A;GUd?qhok=3lUjV-f zA@-|yBQnDv$)a1C9xU$Y*bte0;mC>?AHzw&ibijk6FEOnyA}2xUNHkr=A18^H3+qSjD#x3<&X`)MhF#4PA{FlRy(Sp5*2Ek%bPu z@m0@H80&01{h%5BrVeg25J-@Md!$686#w2 zACt8ld_xU(y#us(1I>Nh*_M_THO~n)#}Q9Sr2ww!bX6MXR( z53H>|^?>*N$gJ}aNj;A@1Wf?&mewVH5GS+QEfs&OXRzk4ph)B&et(R_uh#Uv2HJgUwE zd6k6KX80oX5^hBZPzGxqJmLqq%`-?{1Wywsdp!Z3PoG1$Q3XWIbLNK7U{RT`GAdOS z*ynuS3JoWWr8)0xRt&j*U6KG`0B!fb46Do=y0Cy8r~8;VR1IH`m;#}QXaQfuJOdL~ zeQ;#O>=haLc@Tb}{m+EJ>6M=(OCudoV)1CQwABq1irW|DQvw}aR)`7)HOuA`L4ve1MM(|NG1u#AwukP!|j z&abN?7@XN8wt%DA@v}CIIAZkcm{Y58(a_XKS5&-wj^IH`x%cQIG~fnLABa{`kYGf{{2>&4m8jbgfL_Sza}8 zI?2;}-yhEvv^TSJEp*q4KwL{t@ZTu7p+}AwXXJWY0?WTU_eOEaVPWxHi{q4BPSw0gv5Bc-*Z`GBb{M?mZ&eU6vuj>+&AuKzQK z21G$B(?)1P(zFOYAu3;SGgZuyR#Dav_1SFmVO8iVHM)%kObmj@=L9{icdo8tEh^DQF#!nL>@Jp z3##P7H4Wn$dmdxLC@74~Co|)X*b0l}-N5x@o5zaC$-O*xiR0RGtSY7sJSNF4Z=*vG z@yJLA2Sp_0Wiolj+jy~04>SbqpsK6q(1Y_AI`m>>*Ac%&d6?1h{$h<0mWugZrfB-C zO?nF#nF3!z><{R#Z{9YwU>tKLExE~r3r!e>K34=}C>x=(BNy?JWIO{U%vzC2chbR& zx-=RvU;hcT7d%0_xo}o+5tvaaPGcF^Hf>lM!37wzyHP{qw9mX@swEqoleCtCROQ(hnlG^zU-2 zlSXVa*JChxkKZO8p~)W)iZU4YQBq)BNvV^~;K>^8sVOA(c)!df2HM8`2{%K_$jvQN zNJJ*kkzh|P49#+QaDe77%8WxG%|6Rmr>^cxX+s&2$R9(JTj=O-zk%|=7zp{wnA#PJ zMM&pS<=l!hiiK39XdvD66GTC~)q|}a4e`__Lh&dLVrChV*i>~DAtOk^PzIBz!UEo0 
zHqrwxn}a7S&csZ`wd6w#@tatx6bn(4VPV+;qbd38Eo^-`m4-v7C@HU9e%2F_*NWQs zM3~W`Di6yKq1qU<{7ml2Hb+HtFZ*>?Am!Vi+rB~1&xZzSC>|RIfG7Sge(50(13T%x zyK(tW?0;~I2$7_5jEqBF{MJ2#ZJgHzZrq`TU?Q5hw)wmDd{o7cksZsyh_7`VE_g|7KzVo>m&KFTQbnwN@Mc6Rm+IBf5D z%E&!AkP{{oP?xV{=z4kQOLq&y007V`5mRhNxP!`0&<=Agg)HAbZ)p_eOLdtPJH+^so2~5KQP>6E$z3+4m>5t~tXf0kQ4`(ky#fxi2 z!Z`8y9Z{sn5Ihv(mP?Q^+t)v>I^Fz|Q67hrfBq+wHmmUJ z=(LKL8lPNPXNuz!Li~2_u{w6C!#WV>}jNZ}(BtY%IE{ixHR>oq z_E4hDVc;Pm5Y^Ucc)W*GI5d1EZxdQp*F+*X0@+10cpzpt_gFN^ACDO#9@Sx$RxZdR z*dk)P^8zLC9X(^bGeK)T6zi8NL1~hlz_3pUzZw>%PF z-IT%M7m$;&e?bMX3B^4dNe^cVYbc_G6^T)#{4|Z5+TX!-A05bj*f5~RPM85=AdPnC zvx`#H^O+MyjNMkYqRz`B`&;BNp;~zGmxtp2KS=c75LPxvNpC(0EBSq`t*zad>vl+K zDps3F_n#25^7{&)@h(Bv~`OWb%1Bs8vXH<9|+;OKTJBv?c@RnZRtqV>{ zKgqQJ@TslbtCls^U+Tg*56^E2s~7=woCHM@kZOsfgFT`N!R^0BG^x%&?+&DrEcf;2 zm))na0$jy!C}Tg1UHGzk@FQi2Px#;H?)-B%;cw6I|HV>bH<{@2lxogHhEOOLxXkU^ zAWKnsH%MrpN9QX<@KAK(z`FLh^KCI4YD9_*A8k_U@k1qu;3_wBlE-AL@>FeT_7tEk zNSYePuyX;&WWSu)?AbjH8{7-e%uyzfla({ME1eKT$jD+Kb|Jlj6jKB#0wV!KR_Z@t z4nCPy3Z_ys(7U{Ut;z@ynAC9flXt9-&K?$*et!AR#+0%>xC-F^nwSKGRvm`O-vBf`6AtNUKz3Fq9$ZG9e~>D`^VT#PkBnG-w-A z#V+!ZMpbH`geKsS)OWg~G@Eb+fN;eLCW>T-0~x-kqB$Jb`r(W@OfoIOA(f_cb2B=~ zD5DcFLf>o&dy6hWrk5aC(lvi$SezNAhg&Y87B|Ev+6b1HP=npq7{DuqAX{N1W(T}d z3}`R}iY`??T8BJtID9lE7{5YxcvZv_Uk<B19T-Sl&SwhN@P*VcYQ1Bta z-^6acNoMK0^>*&0`C+Un$P7lUP{%`EPva+=nwp6(Q^9%za%$Ywey8s$LEg))!qndI zb2D1bQt4U$^pc!gPbw@!kzK-RmPb(G3W!3>atP7D`<4=b1Zb}9!>ckM?O})N;GGa4 z1dMcjsqsPMlkHF*PM>yt@naIVCBq#w185Wss=aisQ&0bfMM-W#;;=(+X)*;t3EhB2 zvA@4C#r6X>ejZA(VN^jOnVtK@L$ngGCF|a#g&yPCq&CREL+982p^vs`2iXBlxn$XF?jdxjJcML}&NwzH3;r zdo_!n|U~7>^m|arKd8@ zSg+Bl8=+4d+zfn!SO*x;E$H5Y;zSDDfmw+9axK>TRE4hkd1M(wW)W~~$&hxhgLV@P za!HJnPkPT4$k*v&i5BAwY_yHZA))>F+!7=?G^I!S^3+;D&!-R( z`38n_NKlZlG>F_b*3}=1I~u=~Q8^56rVYtMd@{o6@KI9A&G@mpfLdumBDxD;&msx6 zteJ#u8E!nEToUZC^MOh4r*o#$s>u=qX%kK3Z9?cZKhDi1kDM4xEgJYC30>e!aBqyq z-g3No5+eIhEV}6Oi8*fiQY9>19n{|;WB6j;pdf$jvKRcoXqUbVz{x+GURXT@m5okT8t`euJjUCO|}i`0N?( 
z32M~BTI)0GDt@xjqQ_%18$w1%QFEm=RFTuy-1qg4X*2bpFsAzyQ4`qQvD`6}Y);bM zAmb{sHgpAVG-&xwZ_I^mMnAvbWzhPi~T>m&KlMe<>QlP~|dIHzWZ;e!^J%lu?p0GmN=}crR}51p-GoAqG`fy*f~4&6d!{3ml&} zk~15Mu=9EI#q@*1U)(uz_^=&pe5N?#L*(m1T?0gtYU4x0lwPpMy*U2>dXIAj#3;|@ zn}(&v#8zWfvw;x^88f4n+&Tl7YoK{>s{-7}B#$@QiXBipJ(8%ENc_ZQi!i_|`ObipwwY!6DOKQUN^Id3VNo^<-FcBdG@}v4$k{KDrf*jZs-|z{mwp8{tD`+C#u#)P#A} z7bSI&5fccU8md>y;MvZv*qpq>3z0t^=u9vrXUJlpS-Tc-g&rYxy+7D5Who>Bjag0- zUM4$HDG2D{y-vKi?4f%C-PwhyC^^y_N$K25qJPfc~O8{bj{T$j?e0ca6sLBg`Mc=fdS(3ICHB)e4 zJ|izd(C%=&9pF(Nr?(16ICRP{BR4R5L&8Ov+FTc4?s)F?O2dNRc%r1{4iok!e%2v? zf~1g#F=jO4Vqzn%m}fKmUJsvU6vpn(JiDsv=H{08A|kV^aS=|~4{GvYvI)haZujRG z_Amv+&&$Ubhl`ogeK)Q?94*DPR%jnK0+}tKCMNc}s7zqdjLPl0mo-G#cT*@9IvKP{ zdyt&>5kQ9JUu(RA&to~@EMWw3bS<$YqgR`uDvtofV~KO93m3gRfHPkQ9eaOB?jg%- zK<07(+I<&vAu4jrjk7<3wcjh9J;fsz)*;r^?Ln$Z>$?q|`YCBM{ zs*f;VVb2hb1;S6_75Erx={Ip&;X*&f?Ye&&=e)kAhZ18OWvGYHszAd6>~aGK_%@O} z2M#W5f0NM#93Kk7C}HY=!C2wcw79v6PA}NLf?~QF0VlEeBj_db1vwNSJ$_t-6ZjH6 zD|{;mL_%a&O(FRii1vIkHeuwv)@-TjEl_}o-bfs}Mb6VCJajE_m*CwLhx4Aj!jLin zYeO+)D>o&p1o--VCQ%xm>~evYhEQzrc^iga9y&pbnP7}RgB%O=grSX2XfieIVeF37 z>^6i1)Mxg=`Y9LqA-cP6p3`k^LNPSs%S%g0*b=OJ1OAr)8!~o)L3YaY33?F`=m;B= z>U+_pVfqgs9}U^csj*my4kk;`9mdTSdX$##ynE8*$@8giwPn64x1MXc=QxJK(h|s%7S=;E8#D(_U1plG#%_theIeikoC^{36mk5WlT+zN z=`ett;F-xySXIdbj%Cm)=M)7DVK?ifs4QQHxB-N7*TXY&2(!vaR$Xk8X+(YyP-vh6 z1ty#5uvh}h@t!$SOCNt{J4ZJ^N*3pA3B`NvM$h48+Q9_U&;jaqzu^qS0N&hwkvix0 zEb$@zfwJ))%co7A908Dx7UHH^+L|0NK@I=6Blibb^~i~Ej1t*NZd7s_s_HB>SAiRfhKQ&+!y3k zH2yT7i2&SU@KDMBKKtZC+3HWsWVCV`>7S5$2u=!ZI^}HhVD;1rdmvJo#u@_KrI@;> zoe~zaZC0TtgB<7L7S}d?)Jl#f#D5@iTchzbq;ZJ2=ky%z79b10bAJq164i@XM6@;! 
zGLMFjc}@8YL`o@n#sL)&e=>qReWBgz)=@PwK!GDyL`LRUB*Et5zl_OJBpMjTXEe}G zFNQvoq+Puhs>;ozD}vf064kF0|KSbM*a7|mizlR#jXaKUbx@z2;0_EQf!6(X6$5;M z=9ym^`4uQ@qlxQ-AQVSi5EXwM38#W{F$?&rsdT{t9dH{N(QuqX}PV~TS2b6`WEk$W}lZZJBF4ba_$wp&a2P> z1U!&RS(Z)?vZ*KC9axZY#nTTdZGE8va}%45NY*J2X|{Vul2rNz#!&#DNPQL+9_=z1 z@Q}g|xks_y8b9tP1{mQy@?5hk-`3SxVmXjx14P2Z@lgD3y+@oUijq2!>#w4QC@|T^ z$os&~ow z3MAUpjaZnTQro92UZSTB3;}t6=GbvI{%B+BgS`S0|6!f3AXvMEg;3a1wb+>egceRzFWkEGFSv;DUaxt<5Ju@dL?3)^k!1HqDskK^9 z&nJxPg_d#y!b{a^el1NZk&)jMc!A~ixWR@|Dwnw8V!5RL#A3zDcO>ykK8J}$>5xPs zF)KWa7<8V0fC3FZ?mZF5#tU9U z810l$1FDvtVlB11a19mD(3ROBo3>e{=T2XHpB;oBDw2s#VJdB=r) zf|GFT=^ZT2N@r^Ad@+#W`#7fZx+82MWuoI-Dc>?j`E zR!+FI5FQY@=Usuf3MC5=!qrT(MrWq=;XI!hq&{vZ#@ZZ*)_e^7j%m@7${qkxqb3M{ z>!N#1iw#{bxtY+zpg|l4W0Wwga>>qvJKAP1)_iOk`IZxCgQ3#)8(?dEtrqOlLh1H( z(jqj8lYpa95{P-5+g*+EZGh8^kq~MgAI|WYc;MahOJom%PB!WHU{p^ZIU}-vEfj($ zL85cps6K&1*cincMR#B&w5-^h51m(p;171w8NE#;MviKQA`&36i1KQ}Q6Q@sgYy>U zpw|4gqqs(}N5VO3u)v>T0pKzHAI{!9EXVeH`@Tv@p+U*4E@dc`IWxJ^U@T=!kuhTt zk|~5HQc8wQDJnx{%v@2K-NKDbB_U%-LU=wa_wRY$_uZbqUVnVIFL%Rr9_MkawXc2O z*E$%=JZ8pG!JBe1O!12suP>edgc;(sTz7rp&#$>(OYY?EnpjOS+-u+9tx5;EbY1RN zthz9pM_GWi>MeM96t52WJ@)W281a1_2UJ&N=pjGGHPrIX5C{*HWB8Y~lq8(X7*o0g zXW%vMJXEdj4}e?e<&}Ow14_G8n@Nkf{2I(HNDwhtnqJLY*&Fw(V))gsQu=>f=JxM+ zBe_V9QisdBP33q9!$JHweD-Xw-j$^nDcDaD>E4HLR8u6nz>t3a_pFk)_TP-|HRW)t zirvW;Kk^c(AlUN_Jv6J;6`$-e!iO{kkvl|_7aCa{st$R?%0c5bnv_=!-g~zA)egf( zJy|rdd__talNoSTx`aj)zSKdH!d3nM;-oS*F zcmKncJ+XCbc_(m8EPD4I&orN*>(tYqf)`C4*p@S>3D@Lwo%!!z{vy=~d&;6kFXP?v z2BIfGMGE6qg_I@ucB{JTPh)ghre+ zTv54j?=3Z7MokeP#Gx$L62oNNOzb|1(-dvcw4g3bfeZvs7>yfQ$dX?$9Af_Yw47&L zz=iBG2}{OIR>S)vEgjO=B$N7lTL-)qnjQ<`ePGZbL=frgm3+&3y!D+Pa3Gc$OBg(a z{XrM;K8FX7&Bu((*I_wW5VE-%MdteB8d2AwW-k~Z?mTK{zmXnqgnHdU*mrc^y68Do z`mM?!;c0YGHJ80kS@u~thX9<=a=W6G={sja?N`E}&S3H}xa3yX~0j^q1q zN{p<4R-HQS?w_YpvgqVuyj%2mu`?|i*jRD1JDwqFpnF$k+hmMkxKdr+NS>{Q5H+A& z(;eSqGTY8pk3EN=T?g!w*;-$(?c>wPxc;a6EELmPa+xoSFj64)acD5< z5oXVi^0wNqRN#HZ*nQ~a(`2rI)PJ>Jc#6Mv{g=PCpP0TqJJd+v$qxvj>02nHMXjc` 
zlQYNcMzMg)nd^cx&+|Uss*wbAF645XHMR6kT3h-93)t@Ep2n&IL@efte zeQ_ROStEi{ecn#zy?Csxu3>t9k3!LWG#t!dvjVsr`(8rd%0ivf_^iI$G$@skW{B{2 zmekA%Mg+sQU_zxie)-4AK&;KCa;t)Kk%sycxe$fHjBFO@o!7Wf<$l_3xl97yOXtCo z7}t&QGY_8{C@Ujm2@2oGh0^ZF$?CkMe3nqAk@151v9bR^2WT4HRMUWLT=x4=4|1yf z^U!q~>0`aR?dWLHp}<4k#T(krM;r2)chiOI>h*X?EieG8b zeXMb+7>c-#Dvw|q4S>XpIWPPP!oq(BZ6Qm-^fhZ&8PgBaWDrT`^?0e?r9Qc+r}kk1 z7Do$&0ZMb}vKa!f8!$RK_6O%?FbD;+T@CS0&GhY+b3_Zus%r(vaZ| zJ<2AwNI4rhuuaCqZcltS5rd2!L_L-PY6q78`oW&v3^ai&fCT&IynoLPCJwXPTJjID zzJ2ZLYHSuxIQgF^!xZMvc&S~cTs2ts`AVz(Zu7dDnyOAd^*=x*)&E278cX9}x7-fG zdcAqv9NgBlbiC$ji#$AMAD+Nn%tu>}s<3$g9f|wTFkgqHrBEN@w8_?K$7F}W8Gq)S zDz483%!8Pvb{jpes?9|@H%LX-nrzy+^RO%@c#PTzx%{x)7~OYZ9hV_hJK`?MfJ5kl z(oz-enRPJ~gd=0)=sFZnt>KbqN80wLPEOeHieO#aI;v2$yj4=LCjnX1ADcw zhk@eHS6&m1iZT%IvX`a*v}x03-i+-obuHLFzVV#&%wbOIE9DAuav@m^GL!%PK|Oq& z?x5y&HPVN9E!b!osHT{6j{&Ei>n|x{@fF9ds4QyE5X5v-uPc#q`}hGyAMb{7d`pfQ zMQV)(6Tqo@@tAUpDJ)dUlU#zs_zwx4)Vhi3b!h>nsk^ZKr>56w;K@po4!6M5xV-yH zDD2I|>(kFeEF7D5qsQW7(SgIKlbt7NsE_B=9tWS!ew=>0WVph9>}lU5xvB#)=wnPK zz)A*vvKjUfBddD2xm{cMUgDsG+zrwNhn5%+*wrzc)+t;TR}IP(h=02x&d2trn)lSv zJ#-WS&%vR^m`sNLgGMhOM(9v82QI7^VhL(y{F`#y`->qm<%Y%Wb538Go2Kr%b;|r} zwFyP=8KdMzR!UqZf$ztYwts-STMCH%+0rlT`Oyim`iXz*_Z3^Oy@iqRBK&a3htqr_ zN9;$p3C0y9K28Kx~IHVqV2_X&Q5^C(%{c+PNd?<-3Z8TKfB zHbT`rEnWl9Uq*MyylG1B`T}PVt;Lrm@5H>l+q`L2|t;(9&9X&T8x{li-xc4oFvJhxW-9>HlVA27M!1c8!EjhU^AnSV9tsPl}w zt+DPsHVsxVs_s2DOPoAg&s(NN$^@Sg7H^V?!a9hf$fF61*@)dWJQj#`TsLu3q34zLB;(d zV|M@tFMZSdMcYVSewn(w%ilXH->j_J>>tV+znp6nQM#?g`zyT=kyY%^$7dcgmiA`% zS54Mo@12AhL2j-w#H=qaWg|E5{-<)~4pZp@plB66R{ZzTMy+)VM2%h7%pV~l3B$Ii zFuc~GJ=@c&PnW1%jW=bC_~IA98viiyq;xJK+uuefH2pQV%^qb!mq8AB)pUAQX-F_x zoQ-H)$Gt9GqIs9DKzWjW$Tg-{h-vV9vf((5GWo~vg6@g72i$&QO+I{mxSFCgl;<*9 z;_hqbZ7ZOoM(vEPB`|_uo4YXiADbKbSV}`PZ1)4eIh6R~@8f!4{F&ke zJPo7GRqi7_f3hJ<*rWpNqIT|V?;v`Tj6#n(9oSN#aJ~IM%^16|)ZTmZ^xPSte+7kK zpDGLD#hpULQX$)LBvm%-TA+n;`=x#G)|i;wMMjE%fU0-ZPspETYHno*j>maueOY5w zumtr+-okN)#DBc946ic}H)>W-#4Rc{2gcxxM4;NCX{6b}_ITQ{mPsM8tD+W6Y|`&L 
z6O7-$&xvq}gqW=rzM`UV9YDG*_UG0nCVTX08k>aER%}H4toMf^&>lzN0un9aztF@a z?0;dLFk4#>R=$7uM1aKD4y9<$xy-_pg@#zhZ%#ZJG@6mv8$vbZOGNhqGy8xQFaOk)8Noz^H6!uDdbhjqz7q?~&7|CrbIs z6xvRWJUX}U-1%Bve&u>{?-)ayn#D{a=+=UW|ym~+J zHsuQ!N>z0l;r))I0DMDYI{3nVWyW#teyf_WNX;qJ)Ct_Kg=v)K>SKE*K$8PhK9iOfTqtQpiciP$C zupjhq-4X$YvmX%*^XRFNe8S-p7`}hjW+8SMcN|UBX8t=}oKXWHq|Sv8o0HM^)48Ml zC-wFA2TpPHkI)=&V@J#x(v?{F z*OZ_o!@uZy0ElmLsaP|Dno+9X?uK1;z3D}hc%D|*kPwNGfJ?=ef?dlGz z5k31+KF4uNU`<89GqhAE!Oee9WV02OKI74kwn6htKa+GDO_|b$Dj8KEkC%-g)|cch z*PXM^=ILzlINFBR_k#404TtpA3?^G} zSO`s0ylA-qq-MCfW^Ias7*lphX;@Axx#d;3kg7$fs_kCBpbRnZ)rTSRkz2BBBnFJ= zG60dQqGqS6clsG1=^&F|<`2A99ddhdq-l6=(H#!Dw+K)?jGIW zWQ4B|q|`$61?MU2G!+3JONrM{dzPcsPoY=uchn;`^H|y2QnAWAmxC-g13de))db@J z;SA)0zY9(lN)`$>nO0gHLWe%CuI6THVv^bOs3+*gyw`codSpB?wxUO8W8?P^kG=|f zTCHH5n53gg4gDo=O}!Z_V*x}f{lP`&l!*nt)B9zZkL_9rrs+-V?1&4^doH|AOjQ&l z@=?sWn*+}B%g|TLPydl61;jX$u_3=o%5c^e)H^ULWwR+4z9dpg7dN{P11-EOYHIVP zr7IWF(&q2(+?E#(PB|~3?Qwg|T+%Qo-J0`ncF&c5wsw$KGSgt*z@ko_I+^`S@zTn8NJCH_ zo;Yc5p26`H5{r8DT9<@lQ^pJ~s#{?XLm=IFQhLjJOD`{IjxMzOXNC}C7hExmNyO(a zE+?6tGvQLdd!43Sjq<#e8`f~d%O%r_F8xwZ+yeK=ts?rJ9CuVypZk|&cK3WI&W_-E zt)w*G)@|-inRNZIP+hGZRu&-#ASnbl3;#!_-ghq%wuT8yLtq28R~d+I*w5US6)2VZ)9HJ1vhW)Ibnp<=mS?l1#qto7!;l!=9D^5dw9EBWL z_5dhg;nZuFN|x9(4g8UNCyqF=`Jd&$wj>isl^6~B6Q0JK!x zOlEJ*etP!Q-7{foE|2BL&sc7_54eOrJd&0ecJ3Tm}-n3u%hT9(<$fH69m+SR4kW|3Cck&>7EnEQ@&fW95jda@ZKeg9qjF&=H6{EWOW{@=Xhofu$%hEz|4 zB3I9-eg!hR0=SCe4##v@<`7UfY@(Io1nIT7i&`yC@(X`{pIL~4BmQ@w(ef}xo3J+iKyBX_m-Tx8D!U{71mecO zvw;ysA*49VYex*4H5`nD|Je%QD*aKQmpU?(-(66;~)?dvlJy$UUa4T0ZeSP3|O_ z_pyj;(`gFsZtEYu0^S&2EE%kHxAM*RM8tn0w^X z!Go7~L~9Qjai?=$lc0d~?C5I4HTqv%Ic^^`OU)O?k1zDC^f_;w7}n`U?ENpFNl49S%hCvu>?dsa%p$ zM~4L2*CO8L1`2N|?)roDT(NT+$}K;B{{GLY?Y$Q) z$fm^BFfuZ_Ts$w^XV}hO{pZc=$Y6*xGJhr7TgbBcuWq`wJ=9NCjDfA8;#N$jt&Ay2 z%GWe0W%rm^SZGr(TmqTg2A?YTRtCSm2XDI$*T0(ISLgAOrW46gsmG38qRs7|VR%rG zeG%QDVZ6lWh{+qTOt$LMrMluQpwQdfyPvObXDahjcJ>5RsgQ;jZ|&$=Q%6U~KJwDT zpkS9vH*ePEBwwThxb*ey7TjZR+@eLbmMvR?c50BylS+bAV`t(RV6aJzIZw|9b7NuI 
z+xt4evTSydVea5qYdt_n`n+PT7xPYedT&_0x|;NOC%sm=*k|_Kn04);zSEV!ON$=_ z1({#JeOr&MGyVSIrdU}2p_dOIWETGEYzsJp8eZ%>iN#DNBBTX~^`A3&HxqAKj z^=t3c)6+|m41D{{qPdzTsdJw6$X0Y!So@c?mK2%pare$0zOc^DKFg9v*BBgWx@F6j z0s>k>v$l3@pN8@=X;!hFKD~87j7~4AZPyQ%0T(e<}czTdOIM4?jU7eEp!0#J`OA6VA^%&O%4P?udG% zsbQN)eVf)N3@eYaDPVWUw6={m4R7AJhJqck|8jY<>z}#v9%b#Vr`^fQsvoG**g>BQ z+4dX>Q|0I6=Gt{wmCB8E)_$eNWXK@7$*n=pg>9+s|4amySbG;J(1Gg|EcS^D4-aoS zsa^@buc;HDM>RDyTaWrlq?W&&bvH}i%?6wV^}!&s*%1xbs%2cc;?QA9l&QnzYu5%i zI%-kC8$M#*ZYu0XeMddLh<^d>H3)XW==ZzfO|p0Hs=quZpQ}Fn7B8Miz1cQOnU@OT z0Pi>f@lquHrSTs6m?vxQaSEUNzpmwwt*u*@3^I3QudS!K~Zl3f)@$) za7cT;?rJwbmZD?ki*N3~EU6=lbg2pEjsvkb{;Sn<&9(B5P>!#?(8|Di4PRA#k=1pPJ zRG_ZZ4+a*Tm(dJP+nIb2@-yV3d&UOSHAM>+Ell=l<`%|Jpk`nmg~-G}z%KB8p|Z%^~68H5Hd9cf7a8 zG_NB9^J-mnPKF&85ckCg_wH>05?)ErG6u*FsYQq|Ux%wRjzznC>z3Wvv5m^g$|g*i z@-?rRogH+{iTKq)bpKrAys_2S3|SL%qV3s~vrU>dE&TAoJz~15$I5JHUhF?)7?|0b zKr@lNI$+&#KKbP2+~yRP)l4)v(+9JnRHr8bLlrx3O+5-u^z3wE>y|AUD1`NH2l+Zu z^hIph62!qXeE5_JOWCi8lAdhP?EIFkTD9oat4YR}I18ouj#b5$9$oj|4aLR~EDwUH zza}ZjevFo-#Sqn-J^T09Y}~jppEvogSp7aU(l+4y$+J?CzX_~-2Vs$4o? 
z(^;jfjs#ve=7ee9ZUAQy2RMECuO4{c4Z6K19Snqv-xw7Y$eG(cY*!z$jq9;%*RKbz zTUV3neYE{xmw|*8U7INXF=IxI7-8E}>Hh^LU#;T8v-Wy=PE&M^jK1aDJd1QUh`IYR zyGUzl(FBtYPT2>?ovhnpU|2ucfX+?V*zY&Ee$ZpfjvYpW zmu?hp5*IQq-CHt1?L~_gy~{tY&3@muK^1?;Z+Q!6ottfKRq>_(flNlnd4GJ`+deXo z`lkpRFqWqlEePvK2-+;R>#;WJ1z!K`R$Kn(*P`_=TIO|y%NsawV2x>4jPoRd?jdyB z4;{KTCB>4xZ1@v7E09lS?*1d^(j_??X%`AAt?BUbrSaqaLpdjyQ{E>0RJLg0aq2=U z(daS3r`52XDq>}zZm(|ouK%D$`2aQ82sP>HYet~Gs8lHpdjO1QhIgO$LX)?nIOSHi zXV0GL>FK*6sZx(0ckMM|$l$>Q4Gr~b)#&=yW^y?IC5~ciMn!#9z9s2y+RfNZtR1CT zJoZotvXyKp4EF5LJ=HDfZub5AMj2neHBrTnwej>)>P6)BT3VsotXVT_pXcN8J3Gk)Akey^cm0um1H(RVG_AFE z?OMr0@7}++h>frxb*|#4fz59Anj|V5%i+nc-Oq2a-+|n^Qhu(D=JnX_^$4=dia%Oe zTIyh(%f>5i;-HTYLbd@DwDbCW`0zoqR;~WUqTCAyl+qhoNx$2i#{s(P)LZ`V zKiO!u#W&E{_^q>|)^{4N7kv|iGU##3 zz(;HI*s^{5Uf``+GoxnBHsBI6k&}A} zVsT5~6@C26-z9B6sETeHnm{VEw;(3hy-@iX=YAGKj};w95Vu;$BkOS1?kfU!wAqnt zS3d7lpSX}VZ=JUi_Gn+uqIN4NDq79HY;(fstZvsXU9x#0q`wW6^fOwUMJ%LMO=r&I z^p&_&3VAV>^MBsz^RxEKl`EO$*hX}YRe>ts-<69sG&E$2jbGX82+t*%i}Nxv6nM2) zZr|RR)@JQx%a#qXHOte2VrbCYzjL52ZB^zBTR-EO>vm>g9bDJ_hYqdl-u7$a*yH9> zL*ZLUvgqiPkS*vznP(2=W$}51L*&&i>K>X-lV5O&{q5~HFJ2r_yyD`uYlDhcthOJ= zR5yKv6`tn7TdewD^a@HeWpw+46SEuRIS(b8wLW3=cm!^^+5qh}2M$=YZP%`g;U@c; zlwY+wZmrER9DUZ$XX~RC*!ShzuHU%f^Yx9Zp{Z${It<}OD6@i(AH!iU3P=SZ%c{S^ z9yLg1XE)Pa88LG7vFUnLHUI)vWdkwSpyx|Lf5bL2gsB7e%|>icgQE!;NvaX`R?<>- zx~QJ3dE3oOrGGV*t!*LaP!Yfz_Ohq*X zj2T)pXU>$D%)zzgw03T~Nl=Zi+rouid0=gld@QB?K4}44I)k3)HwKgvAI)r zShL=Zc!ieZ_Yctm_ zO#^}wJGR-9wvVkm`oi+w($thtb_Pn$Z^@DY!-j=}wH!0XddGDeM82S%{gtQXHpl0$ zUkC9GQYp=_My-go7a8uBadg;=sCo#ZXq?C*_Eu`Y&HTu_)I33-;zgiAQ zuLEn|cKm4ragq7|QMoKfpWC*9i~5MEwGm4i<3|{@U}w)Id)Ht5&Y;A2B9O`|xb0Abx_~pU7mNif4a*uOlqQ&2a6ru$6fo!m%79sG_R> z0_~EKpa0|ddrYNPjM<}&V1T9?U(_YuOsPm;@V36Xy1K5eZUE1w7~Zz}K$T(4wryI9 zL{>WZCplV8;qUM7?c>wtND6oYLB1NxcB|Mc*m32e*(%i(m(mIEAQ>tk=sQfz(qL9X zW7lMt+uj|O%D8UV2&{$0#jcaLQ-Y%nD#nc)_aAzp=4Oj#toZW_7=PDqEmjh%n`6Wqe!bZO*qvGV$7dzBerE1?$-$J|;{QP(!S7;wxj}P+mvmQKX0G7u`nn98&efO}F 
z;?|1oyzWeTirjnrtf3d}F>+*mhV5ytSg}H*X3Z7=oqo5~lPK9PW@lGFeE6{C_>N&g zR{@_Igjq(hWtY=9OKsECMl<{K63dRl6TwnNzckIOA;HkRCa^^)AairAj4zHKXcL%( zNq~5>f9ge2**P^ja>DqunkEw07z0s${HRs_qen!Erd3Y@&XJG7B~Llvt$R`1Si9ql zX7~jDc+Oc>xSuoV8PmzcS%=brFgJ-cO!xZ~N9y{R!h4n^#ZdB4jMYXn|6m9yKuhaC ze7G*=VNHc*?b?&m44m4)lIp^swqgSZL-gB3n!b4XQrWU)4QNu^4ege?A3Jd(Cof@8 z;k$PSKU`LgWeR2URzqX^_3NjA&)c|TN1ffT=Q1$1Hh?XVgv5}89m+v>^_{YrlvVHK z+-Er;`;hlNeVzwC|8gxPVP~(!>II8$8|s~FBx(O@=D+{eUs3s~COp?_g4qDgmALa| zq|}x^%X%~q(mR()AgAi4&fC{uN`KW=qb5yiE3CS9l@B%OoZF$DJDUMS!>GWG(aCS? zP(^m#(tUnQfJ=>n??<#?ItIA8HGyxdWuhT4<@)u(V7&qm*5w;FU_c<~8d!T9dkoZT z<1~770|d|wEWOc^h6h2JQ0y}GciqvW^OFuw0YBcRyC$~RVtsI&`Hi9Rv#GDVpI@pa ze8huO^IH+Z-N#zFA%sBe)SzN%@AKV6X?{lAsC~w>XU(aeZK>e{;;K8tLE2LN6})*9 z!rr(@n}z@u76YwfKjJ_a6>Cy>9IA()pt>C{a`pFpMdRr3p zC#4zaIdw02`*sq1%In9cI>`(K+QUrPJItPdK+_P5#?#b#=hQH5r_FxU6>6-20B5^z zR~JYuqyX5_Zs}*v?Crn0Mx()w@#8-{yP#vD0m7}JtsUC&_;L6>VvG-rIYpI+tXjE&n;2m5#i&Q#X1w%rIMe4Z&Ri1|8@F z!OH9m5}p&oKdr5;cax{vHf!m81e6zTeZWhosh2O;!uNd{Uo&Ex;9O*;8VZ1w8aBN; zFbt`zMYi#F7|*>FEZO-}r%fy3wCLK%&zdoGsLOXDt`k?exw-uV<1v_7LZ1SDB09uyXKiP%&C6lJ1Ogyrs_C?SjH zooWKYHXrp9b+Ppi6QE&@^XJcp+~RGm+p*(`MK}dRF4JDLh#+B&x@h6Ju_V?}Q`c~{ zGs~L+M4umFltp@b{p3tHmZK(V5&k~<%*T*pW^sS+K>2mXCwiv?v`yyXG~Ox|sh~x{ zD|PlO^Ln$v?Fbh>g;~{!Q4=_Pw&mI;R-j$|N6p~)_=uzg%tUp(g3Y@KWF zT`J?&bIh>-TH&>n5mFaRf_#VZHfZkJAS`;$NBOE{WIq9+@?VH8$=y3dlr#OJ zk0R>lH{7lyYb?=u9f^isdEdXhQtP!sP5RrNPcFf@m&RBI_ydaYLVE3zL+s-_nL5~T znsHCmj_tjqK7ny9$8t^R4(V>L`0>LNz8UX~N)_)va^%Pg{1$28D57~0oVVv5gNPcm zK-7YlaXx#db;W*8H%+=~AV{+l7E9~=AVMZuvB4xk{4kQ|Pz|}xdv}qqR ziOp)g;&(6V@0^Dng6{T^EFn#& zd3vf-ae6Ry=xDWOt#m?}@me-kycew*ZtJ#S~NHl{7*KR-lbo}X!svZ+y zQ3R^0$_7r{V{m)B-LPRgERpxZh3)Ts;u~V0L?wp#mA6njuS6q}aZUJ>8&UYwt%8>{ z+O5LuGVyMrQ=_5w@#iWrU+wZ3+1fRd4X9Aa;0N2jWP0(Co)q7qG>8k5dh;BJ$p9B8Jocw zO7kwv(vKNs92auR|EsxTH~A}}eU!NsML{JpYWkv&JL8%w&6`7bC^C>|Qwc;&h^`GM zXvbDJ9OT)kabx*;n9S3?3)R8QIu4N&rcV#TYfwnMOQ-YK;!j=ejlnzz4U6p$dtBJS 
zel&deo1bsdhkUaeqmgWp8*-Be^k8j&cwY}}am*-D_@Mq&a4S0{I9<5b+0cs#_;SgC-e(Y0w^BRTq5iqi2-OpxP9s}?I( zW>pWh{BS_CZbvi!E+(I5Yv?&8{eRTDT`@81u*)f_(!|Z|+AGPe7{y^`=Fbbp{7L^`$5exn+5NUF)+c%8KQy6MeP% z?$@(1?%MUlrDcYnJTItRnoN_8z1Q3|IpfZqdH}a9z~RP-2sMYujf`w5nXVc;i7;ag z15F1H(>UwYthvtlfCfmn*o;Y!$f?hTeHtycNtNe)@CWv_7qNPcSdslMexzH!0#7TdEOhqu}k^P%3|Q1%NOdMnuUr`SIh&j9bGvuFX=m9U8c` z(?eaSlCv}VG`_iDKzy^q9?%3zJ7M0 zZ`gXc*=YOyPttv=HSRh~Xk|gIy?gh@sG#5{iEJWv7)1Bzorrq9diRDLJ$0%(-5xs3 z;3zu&9*b!QhU@XCpXN$p_5^ZVvW{^5ssOQOj%72dYP!{{N~ZVa z`?ZV)sr93L=4YnCsw!glZMGi+8l@L<1M8Iq$YZRJ5Qoz2{O$o9MaB2;6VVz`PA_5R zP6ga-*|u$lad+bS417?cPL+Q7auK*1!BDFQqlQ~gfd-bUQ689~GtT=~FptZBMEdE| z%C>E5+h|te^z)uK&wRlnj<_cu9o$MFp^P3i(V3UWA7{hAT+Y&^y%5W;*A|!h*b6_u zeY7J_Dnw~^d>dnLWa!?4q8@u0xq*cZ>3DJ%wIiprI-eTueh}f|*mdZa(8>#Vt1O?4 z(p&S>OxDMLNAkA1AOuzXtd!*HmR`+g<_5cbwz~Yiz3&j|+zj1+y8-n59cOIh&VkeYk2NP~VxbeMnl9 zvOf;gT*+?G|K@rr^q0Oo^0~Ty+3y1Y*cm<4^$h2M$t!7iL7s27Xrhw;diMY&O~so7 zDZtx8_fnRu~37Swfk_eYXFW~ZPuc5pmI-5hdtqL(n zC@I{22{SALGYBNEu^S;3x{Cn8mQR2}7r^vq7srJjs5(&9zZ7l__l5;0mVv<1=$he) z3(=7wzSilkvP=tdOe#H|sH+a4YWCqkkkL=GxFbF!!3h(L54)cWf#+gxHcObZ)_tCS~RUE3=4lzk8qk+WDl z31+iq&yJ1?vF}+ccvvst-h%9}NbKh|S!lIQc1c=Ash{=qX)|{B0D5VgpOgy0HQQ9F z5XKwZf-v5E4gfsL$JGl|n%p7M01YTUX^Sd)6;*v)#&FG*R5qSJ&Ew*i2PeK08xqsm z>wptW%gYCncsw4pgvt?+2O}vZolIdeX~X&T7lEu#pFW0-T3>wY5*IQeXW1mRdiClF zvCS#Sz&3`Y6Bo5S_*nI-m(3HD2#zMbz-ijq+4205d$$HXwSw7_D8R7?M%LXuePt9{ zI1~dBel50!sHDp%cBIcXePUeNCe&V9%htt59+KHHft2C99iz>q)Fbd<;`exA=FHn? 
ztrWChR{Z=abP}m>)&BjhShl##xorLTUc8`e=p;kiKj5}Cp`qFc_qD*ZVECBRmN$P4 zbLnvP^xU??hxpeF0ruW$I_6@iM7h?$gUmlrtjhDTWg|9jRPcN*{a5bg(*m}kGv}kPBu9}DIH;woKNx*;4Ls6( zcfFFxEK47*9|}j*%^qZGqJii!an`Ib>J->|i<_Hp8zI$j(EQ&&E10*H+_Y7SEK1I` zy6WxQw;z4lw@vLjbpW0At>oIAPu-CSiFQ7{XUv|xp6+R>%1Q5dHEakn0k7`ws!^kc zR68K_wLs${J}}vHYh2%I)v7_1R)=mI^)W{2KR^r#lCVHlX+UW%564(UW}wSs9i*-I zr9Mv8g^j16CNbndsdR3-&VE0AdMyYX!pzPq4uP4?Rl|tGtIKAy496fk8?eKXHTdb0sYH00MymAB{_Fr2UyLv-1rg^Q0lm*Fd==hMtp( zSuIQSM5l;Zvt5yqtMG^Fqq#YS)jx6kcs9DjzMBtJW2Z4k&wAmz1~+&14yDs$ z^tf>|d+0~pqz%m+4qXi{sqEvk<|6Up)7S)J5mt=k=+#3jVkmyiJH8sqAAc}eL_43g z43s`WeSN)>eTaX?JQ;VeS3McNvRVo`5d?=Z`%bX6Ra9TN7&d?g%tI0aULDbSKUNIIz$eaGjDoi*_SvhCTa?T`PJqd?L&p> zVa1w1-KuxQLS35z3==lg zQ9cD3Y9zWMGydIbL%Po2$28?zg|7+lSc9%B2=(ns3%e? zM-c-M&n@z6nNWNuKDcF85oDgjexx$)$A^(toGNPwXIjBJ;n$0~R&4vD&Go!IeFX}~ z(Ruq?;vmsjydh!9Z=|@W5`bf{IM7WC!zkeKf{?dkhOP8aH z_F&H!6M4VF4oJeFy z#}T&l-KY@38{kD5!Z45Gz^RF@A4*kMm*l?i4}| z*q#`5XsjWl461|c>-O$FG~FRyivn2EiLlm+1oMslPE|E3K+Y@Bt$i*I)WTP<)?sxw zYS_-rd&9BR)C>kf6+-euOHDU7r}l5m@CQ9My9!m}eC6jspobB6B7&a!aXo2D@0K96 zr%#{aHG%5uhbvHQaO|+CAB*d1HwZTWKYDxy(s7&iE$6ekV4rvZ@x?U z^U34K?EUWCVAzDh*=eC~oa4GGBOuKD8qA?QHA2%Ko-|=j<9hX0Q%KurR{4Eobc5C= z{Svv&bny1Bb%1sujh8q?mQs9XVk(jzZ*L5>$7pg?>J(j_|JI~R#*2Ic@h?UwP-p}! 
zA~Fjx_`-=Dy6QMNT?W`Fp8%)py?b<0=lAd5)=VIY_%b=Oi`<&V@@S13HA*;ZG!XM0 zxCh$wx#0KPoQm4d-8AnYM(Ntk`!oR(J~%o}N5(*w*sJDt?%DH9$2t=>nFd#T8q`@N z#0qe;;h>xT_+1fvQ)Zt8u?|M-1~KW{Oki3n1kI>-T>0LgpiG^$hT#tr@J%OIHc~}b zMd7Fkgj=jD1F6;Y^!1%zmWwrk`bqRini8sP)pP+`iCzXWs8(^obN#CA%W$^|GRHA= z$3>+)BvTut2QB2!r~@PGNVP*$nCJzW?wTBRT6zTzD3uX6Pr+M}MO_bjJ=Sg(^(C`g z#0*n2Gbt%pB9#7ZAOjV9bz7w483~I0gS4U07Y2SaUFQu0z?>#N@wTYCuki_Qua*jTeRO zar&MYlU1iJU=MNQXF?Xj=x>=lXU+zARH0Zz(nBCaG+a+pNnK>b4D{Ctvu7VFe4)xO zBzUTqR`!3$fss2jr9QScjQBurdJfjxjW|1!$2`@#b;(|pJPIVrjj4u0oS`okaON^H z`T<(ZHw2>%VKG_$qoh^#=M#7kQhKcWR5>udDQ#jKUca7>bTIOc6UR-4o|5~_tHT*c zE!}`&y&s;Q(^hEe=-9y`CglTlno~`gJGZW`V0Vo$7y@P*p0qXC+@i|rs6h_yI&c2` z4%B5*vJsIWWj)zjR-HRvLJJYA@=44!MdG1n2ArWzCQ4oq(%&8uj3R)1Ewuv#afneC zkvA|V#zkJ9hQahq}?u)2C05_+!*(#kutV zP8)6?Pq<*RMHY%|m!|6ggkB`tKuTy(>+ZX=;muYOM1A^MJGJqdTIG!fZm<#&n>LL& zTwXlePL)mbS$#(A_n$MTJ-UssH*|YvT)epV@^o?iNE%_IWkS$?Dk~Xb+N#wg;2e`z z)#cKH1`QfaFWs)|I;ZO7?nX8h^STvf3`J~zO+hsWN2jSPB*tZTSmRL+ZQl$BT`u=w ztS0(S;^u}52xM&QH5b^m%3I9h+u@IpS{bcHY4EuB<|*PlbZRmo4Nrn^90IF@u1UL!UDK2>UZ|aMd8+ zAz=O$Jh}*(MsZ8K654Zovbbc|f_%gcOM8!;0HXU!6a@3V`g%Jpb-41d4TYXKZfRcO zuDcxJIk@noS_S=`7@D}qGXdP7QkmaQ_nCx`)%^D^h{&yjYhZEiv)3fq_}u;GnEz7l zkaiGx7^$Re(`jr8-k+Y^Vk4ptP<;IDsJfte`2CBg>~UF1Wj6`$oY-l^;tlkA zpWmg6sac*oB>{M;xV>phWif}7l8iXoZiLTylzZWkdDq5dL?lp=c3`lxn1m!RF0664 zy2L>!vp`b^Y#$sOalgQ1?k!2=5gN@$2xpX~CA!Tv&qquZ>5RG(WWTI!+qT0A5a7f% zYE{(hDv|N2O)=44M05HC#*R7to~;)V6H^nf39~E2{hkR$#@z#}4jh=dH6KO&B5W-K zP;#V5p>aT-x(+--4wy?1e&)1ll9hly|!(^|AtDi1ktp3-3#l_yhe;O0p& z^QVxGK4CF9VA@3gRRMjYQ`*v+b_VU%J zHzBw1`&qV|ad1>hGerS?1m=$R@#BEdsW7#;Ckt5IkoRk};}IV<$>^k-`WAxA^XgM# zVWt03VHC4>_U*Xp_aeb%+1HnG0~E%J%_+Za<70>)am z_M_fO3zE1uP?6-UK-=Qp_-k6QcaV`nQXn%3z$j2RC^YmUeiDy|`wAvzpnC_^s@F2H zMN7(#u?EYnr=suC06Kvt0g3ty5w3%|%(-`b(V*6GAxmh{EX7L(G@hr62K91Ut8MU5s|!z%XhCf9kw?@xmD924{uX zzi1r`7XcrtW?Rr;my*(2wTZ;-MyDzmYLtoD5ybF4f4dHWZ7I511#{=`MhV~9+h)Lk z(6jx_{kq43L}*coO4d0asC3>ym@XtJP%QXIyeDI<+q$(O?24H2aA0-@;9Tw?DQ(`o 
zj)A(Mp5fDGDG!-C=tCvQ2>0|;rzWDqwN;ts1&~9ta&y;Nt`F|~Zc3-^`VpUfa$3b* z6=fBVE>X(w{nr_pzlGKy5we1pg#DO2IetT;Dtg;SKRbI>yf3%4h<}FuvX$6_dM*3m zl6LFzWld>Yg5ef67@cN@6&f6Wsw?OeYbPDQ315E_;3g3fs7MKb{we5YlKl8DAt{m* zb>x1xWtVbuFVf^dOM~N3yZD|`KT`-{^{3t$&aEN;SuKefU@Xp%@-{l^!>kmSdU50ZhS;=;zn)^kjj;OrKzpG+SK9f zP_yI!#4%|D2r+U%VOS%6^+^(_^-S2WFhbbOC|*j-$}XX~yLbP2-qhj2F|T^kddL=_ zC6X}BrJ*C2Wd@s@w7+Plim%Z&QkmEG-?jCUnrp`fJ#9jjj7zvbKP7F?^0!E1b^5x5jrR97=G!KtQ=oCB>Zoun1dKxKGaX6E4&eiyjzJ@5ltNY&l z^ey-bf*@}xmmR+`Hnuja3}et-N*;jCy)9ApCyCP9tQL{d&e>T%;Q5+AK;65l&dNid zDn3mDQQ$sn(6FIbkGgt>`i&ch0uN07FH!FQ&(5)8D2PNV{E@f&IV#^z zoHAt%`TnN!fU+79|Hs;8UGuzO`|A{slP8+Bt%uvYQmvfs^C#g8FRN&_}rNr<8SxkPW1dju<3xw<&# z(U}rkm`4m18wV{U-k)Dw@w{-wC^Vs2FTVg$)X}}!A#`G#@3B|kD}nwI^?s+ce`@C& zmJRsN{p&2V|DGS!jk|2HdCmWbR$iMZyG$_EWB;`uG-$xz<3|~XY+cj$N$>|KAKb8ON+>%kC_`e=i z$2RTT*R|2Kk92vLvwMPvZPZ;X91&f`%*{0&(h+&CYzs>wz2wri1;=`u`2$^An&!;` zTi8%4h|7~Z)iZA1917Lr>xjyLzAd|u&!^tn-B_d*wqVwc8+9<@6`nJR&^pDA#1@f> z1;7dinlv%V_io;`t2yVDehexSG^{`w!2iIfWy_XAgK-E1Ctl3Wtw~d4Q$$gb%80oW zk&zo!)KKMLgu|iBCiM9qXwZQ;La@E$#QHDv`}H}Mx&w$pdrcT$rbNK%+k&9_wxu^WLLb`w{ z&#{i_)~~--$O9}4_q^vR-;YXgG!*l#_~B4Ar2`>*@LW~AJCy^r42vl9D~y*arKKb0 zfsS7Ecp?NZO;aHiIv&_hltrExXQ8&ASUjTVq%rMriVRo!Rir6Y5kPpBi*!gcZC%e0 z`h^lBvsP1BckiCmwnfB=g6lYW5`00;^WL`r>AFh)Bh<7(r%wNopReyG)gvz%Tx&z3 zltx+cPgj-2S$y4uNt0HSMQ8k&sq5-57<=8!NK+FOez=$l9_;r1Va-7ndzchS0=@6; ztbAXJBU#crVW#GA-+zWsT>pbDS-y(yL#Z5HIDIdRi@)YiqPAHB|26@JT)O@cy8>aH zGPAPc?jAj}*w4?UGas!GoVXZBxmH#da%*)92Yo>WY~|tr5uC>@?hLqLc2IJM?h)mu zQ0pL!h@QH3f!nxrbcb^=WIISN%u9}FBS&VbI`uIF9 zTe>Xtf$7nx@84vE0qk-%`F@Y!)OuOQidrLyd1ZH1 zLPk})80z0?eqRfm#E9k~OIl`9W{-f`79H$AqI=_}O{+O5V&s@%KREszA}5{Q!^P_i zb(Dj8DIGFg&}7>o7fd7dLjfVBKh>n@XOi zcf&Bp&U`T-{_5RwHbE98xA3*N{G}KQChb>sON$)OiVNp1i3JiOq~8sJJ+I#Gef3@IGUd9Tjg304M)$+z zf7?j;c%$ZZR6Zer)>GX2XmqVgKTIB3Jad|IN<(BE2Tv_?Aa%hrBL*~Wx}MXku8=t? 
ztoi}-mJ$D2_^s{4zG4c4Pqt6T8> zqC)B18$5xte*&@9dj8AWBzTz=lig7Z64Y$yx1p+R8Dd_+e$#8xBus?Jse{_4*QfSW z0nwzJm9pMw&YU^2v}Iqvc8Qh#qjTs-8d&Q!s#l+QHL4AMx0)y^-|`pI+9qT-lpSG9 zo1=gE=FPe~&HG$pI>^<>UFlq>@+5ZQgf+LfQ{i}cO=GFtfmqU7HMYvd>bQC1#sE^D z64=k{_51vy7KdAkTtX&fwus@7h~P3HEXK>7w+ophe&cx zp{kz92b8c$(I#Vj2qn?!VZ&$5YRht_Qt3{fI@J?`a5Q%W{TNWsYpc*gSk=W+OFpe> zXP3*M2x=8Nj|{&>f;j=A>3ECb6xOe&c8KfND5ek-btL%FlZzn5@K3#d?eWfQ%LOJi z?BUxQv@CNPkZ~aOH=mwOuND3j>06Qm%EOosc2rq1kag{a3w`Or3ZfNGQE=Qm;s~2L zgVK_AV*zFn%PeoM<+P34Sq=Mf9?jJ*NgG0gf&ySeVs)5cK<9`yJ%&=cLMgJmik-VG z>eQ*jO+iZ(w{GnbCF+Keah@7ur&au3<^qS4icnnor7<~nb74>X0&-UgVZ%{}-3mK3iF2+>`i>IVh0qlS<6cJ(Hg*1CP$g|2fhgU=zTu;HKFP*x z5A8#|gdlY_iVQLksfPMZn)861ijejmaGXjH7`xnJpw_M)3$&oK-h>Q^A3>{$WHHvu?ZWc$Y3E8O ztzy??P!eJ7NpwGVx}~SSms}JfNuR1zdfhj|>nd#BE4567frVQ4WTX~i^?es6B$%~n zcZ zHW{xb{#;zzgrXwJQR2aAwz*-01_C&xPZg|)Uu+d%leIEt2cR>qV~2;;=ElAh=Hz-z zuNbH66+?jdA2oHw@3YSf7Z%!81EH_O!X<+jXnmA^bqB7J4xnZr^snJb4Ihs3?$ZVE zOIGWm7oOXqM43is3S|}s_-E^&3Y|*rgN|#-B?Z#_X%!ezz z$bbOx#38D9A*vhdlW;g@(g|z3+2r|vxp8^j7rZf`pG4b4L)wpcw-?GDFgCC3{UamN zdg~*#w-7DqD&M$m+t|sqtbmBP@SNK{Tg`iULgZV=QWjAkUcP$O{KPY+SwUl5L}gzq zLOzDobG1Q-jwxktZOH3`DBi0nM3{bPOcsIp7*xx_gBf(ZOAH_%95gil6h=} zqQOupiLgkE5Xl%J)1qW3Wge2ELdlQ@C9JY!nG!`&rWGY)A>;mB<$3q>9PfVjKK43} z`##p~M*sis_x)bOd7jsKDGIMunk+gdiZFUj3$Snm*P|Cs4&)0CMep;7n?ZLY1eY*p zG;5r?+Cd%QI^d?=n-QG3bf|;i$?-BZ00W2|2+3_uYf36|O)!5<|0fhMa^$)Zoq+C* zk-IeIa|;xJyV%laFUMefL62wi$Kv@57RZ#S=>0HB8hN>6RDswf0K^L51|89%TQ^7A zT$6P&agp^tCURMId3CPm@_+zkpt(r`6o(LVGCB7F@JEA_I~2JcoMsalY5facEn)W? 
zc?E$`?M971am`LQ5!fmmagiosxJv*L81rn{@O+a&ZA2DRvXR3(m_}>DfgoEc2{3t$)Yst+XBvN!F32(09n|%>f9$3P{K=< z`mg$Nbop)S^bOaoe);WV=HF-0iH?2U>PYG4fLpl0>gqn5A-{%Hy$ ztOUYFN1|WxdL;>x!ULy~-Mb%e8*S+{a~R)8KTj8+RKd&Vd+rnNYrWE%W6m+g-OQHc zbli5uWarim%CC$mq5zP{s91$X7bLR^{gSY<{Iye9YY|*B0JMO4U>Tm>+kaua%lY4^ zlf|v)6O^Nv}L3_g?Go=NPi0}&)?CfGSM>?aS3$L&|UX6IuHkr zMlIlx$7}K!X@!T>A}F%*^I7>3G&wta&1wmfrzj|S*P_hp4yVhv-YmSH%oTsxYlvg= z+f<;B#LBb3=a{IetqE2);2cqy3q;$n19zPsP#^;uRu$3n1_d?3v12RFT~uFU-$sKA zmx}UKm>wvhUf+6cFxf}?MDif+C(8Fk$E4D;pQ2V*GJz(_LJ2yOLebBFK)B6Xr0+vk zZP;Ng*WyLb+OKR!-G6x0K8XeRBJm|E zDMkgyWd`))X1a*80boZ;Kuve3MA}B`svq|kF9T|M|3g$402>vzxz9F4NusAD2JirA z<)UU9SzIn+U-Eof=WOB_WU%A%j=lFLu!gfy<^vGfr9 zL5f?xJG-(R&a`W+nemWTU6gjh5_e{S&GK3(+hil}K_A^On$yCQUK*%zJYMePuQavX zvva3Rek4|6Gx!}pIZRL;XHL?ec$!i!yG&dWkwYZ%>S(~^CukS@{gh`+`HYyEvVYy2;^>OuFjzmciAvx*`F z(zRjHo4|6k4s;%|Gcw$g{}%DOl0`r4-o+-m9-&9XgxrTXy}c2qt5k<~Zh;xwV~PPj zw2C5Lq#~i9HVpnIr87TUyX-72?e@N3T&76fPsm)`TwBk;%G!$it%zj?Lp`3m?(@zs zGNezYV#xQL$zx6L7!I_I6$dZNgdLs=2E%2%Z(7v4ZQDZ-J|}h{VqNx0}C6uG8Z2j&>X!R}S?;efZn2 zBB*mRruWxW9FcqphHkt(+52{VpsFn7(pV72gbdn~FBw*3 zb-dB`f^Ao(!Gm8W3N_c(Rxhn-=-I5<7B{nXlD<35EKaeYNJKc*0KZ3MfKx84R+kY$ zC~7^+fLNpdBsj-3#*6V(!G~0)sI$xR{uyFpo%jJB_$Oo2rp@-a% za)9Mx4gG1Lzy%l#|Fe9YyZaQxZz2pynp{DAC|0b$DwoGtXP6&7frycT}2WifpQ3a~D7mLa@LC%I^r) zZSH%dEodb3{amLcmIKJ_D2e^9bpXwZ-vp-g8BB@q+e|zQX_dkZ1~)vo(-rQq^^Cme zE2dwNU$qWczFhl9nZ}f;V4y8|-Vj2rV)=-$B)s)T+gXPAN6-$z>d}2&0F5-wFX6vs zoF1r5!1I)qKP&-Yjz^!CfFDz5KAOdpf)f5cYUhUh#NVe`>}aEJvX3HOxiNja^WoLs zk;Wg(UlFtbc4NIV@VB$FWiWPpb@h|OEF%zCF-R;;LXbR4(M(|mK#y-i9VNr>eEHyI z=Ts^gMn?66VA=jLt8LR|o1V`LH2b74@l?DLGFs8pmhz5BvdY$No!*38E z7-PB&th$9L|Ady8Ai`%`g6;M1fBZ2veg&p!0_3YL$&Bu*=~|2-g0KM$;WA$wB_f_V zUpSrsEsXQ=4C7_aNOVtH2T&<-VGzZNWt+_pQJmPHmp7ABTca5*StY9yfpJ_^Xm1gG zt1F{gk94u8{NO(Zqv7|3k`KjN~U~k zDFc_%hj;6?m9EFWiWem^f4JhIQ>W_V+DdJ=o+_|qzfoZZ*;#uji-s_b{mZ-{lR-Qm z5<*mQ_W?&`M~i z;2YF@E4&ZBp_*OClXwJ@7_VU$?J0;N%x@{|X0Q+Hj})yc)Kub3`t<43NH{9Qy<4Cn znF9>LA4?2$1ziCP`U`5^0>$ux%KOzZJ~HV*e<=PavA9X_`^G&@+rDAaE1GbQ48hF4 
z@+=@7_>>I`EuCf7&)Kl938x-=%e6p6GV4@18L%4I`;TrA><{Vj7a^g zJ>+*3G}@bnNB@G`_Zl%X?a)Dm?$HWL&!o7FkgKVqgMf_td3-GbvTF#)M6-#6KeUpw z6MO^h`*QCjmkTY<tku@EXk|_X#W3=qYAKtPd_g|nYv8GcL*f$~icqy9&(C59I`;kO>W9Aag*LnM_kp9fHoEKP{qX?HK%`QLy4rLJuS zR2bX6)lRo zB-5MCpNubqY+zdjpn}! zS}vjm+C*2kY5K>tAV5lMTI?izb)EkOdR38O`NVt=vdivT1b}W-t2X#tsMtQ@SN&=h zX>5yHV!2u!e&ux9A}aT`SV&%=+Tb4Z;R2E1XCbc~HA>KA%qu>hBL zZn%g3@d$)F9qQpEkbCDLqmT;5H8EIQ^G~!V*uBHvr1R%+CJ4W)!#hZ;Q`Vf$WvES2 z2z8{(JHBo7AAp){1bz@`SeQ_Jt42eqPG=r1p}l7BK6 z9Do~=s+I36V?DvEHxo4?5u=LEz0I~3-ifv8rPc-G>a_0454WB&u8aYJ$mSU5>!mB0 z-Jv=*aeNG=2D6&2As7-#=>3mCYzs0(9kBU4@U@Tvk>kd{wozZ_tl*5X4khI8Y{g^T z#&Nu0W?{Gs$h6Qmai8^>#xz0F8mJ$G*ADSi{F8=t*n z*RD+jOGi6l|6mcWgyKKkTopLad5S$B?%BKQ>aOp8Fp!e5V6u(YE8ISkIcfNSM0LmE z{4#Ce8VWOy%z)OY=aX*mP>I*#B4#nCxG&kOR<_vD;QAR6I@0|Yp7ZYkofR4u<~(~P zLK?9o6B{_7qEq!-a0m#0Ph>$wXs9OeQp$)%0=q)=hzF7F|AHQ2c~Ns{0PF)9h2HhA zdafzCgeWO(FHQe&N+?A^O2Ak*Qgqr$oBQA#Q)<<~vM_+typEE&Q>H2=-@8DYjJJE~ z{C7JKrf~N$>V?$#Em^!HIFi*BhZ!yR4zI4N>bBzR6860}lfyJ?Y*cP=ZP^tL@_;<< zG0G)O`#vnbpdLb+6~2FUQ+-2!LKG){tq5@c-r4~#`{S5)3N(&YxxhN2km0CXGar`Z z$>6roAHOs8DShyr_`Tt~ZQXIKfAr!726q73Z`d&;%6zsgHWTS;hk7!)`}FBFs0&Z& z#JCCIL~EOiYd%11)QiNy+@_h_WCSxU)pZ`aX&P#`WoLuySiZh^+E`ovWm?iB)3)I% zCR~57n+I_9KsnJ|Xq!^t%GoX{#k*DnLY(HCc)SV@ z{x;JBRTn${B;!ckki{dTg2{Vx_SdEFYg3Dz*in%MjLjyAtX2f)R9|A${Wt}@Nwj`~ zB2fovHM@w1co6X9OcTq{yGBd;Zs(PA;En$HXU4;G{iJ{6nC@8WvaWmk`@CCk_>g+C zT?SMs+BP&C=!Fn;Xvxh*KfW)AkyQr3CZcbfAFh3VF~4M79E2u}FBU~XEWI)F!uwY+?kvR~ zV>Odr3JZ4urWY|F)F1$zbk-Dmbbg0leX<_!&}rKg4qo3CU;FYkwAKY@y)W?w+X}pbZ$|V7Q;}B6FJas<%jaJfP@hcAs7;elI zATn5JmzugSE2s|28X8GZC2mtpz7M1SZ&@nCv!Q|UBwon8sE~ExdCl4f{Gu6}EOtDC zKQ-*&hy%!M9HfA1m;0o1vyHZ&p_^cI+>%!lA^-fP7M@N><8>m3U$CId{G!#1Py>rVWiVSa z>i7@(=T*&BiQeXJ#D}6+CecnQ3?0`;>o3l6Z4HR=()HDi zZ(LpQtsmPewL78qb8g1O7;4@ppwM6`-EOj%+&(C7!|7TZf^hjukORv=!GCk`l6xqY z3-aZ(Mn!jI0bsOj~@pt*z6h(bh{nOp7U849Mkf(eunQB*Q1U4H+VwqJ{Nm~e5P*f(Y9?rfLi7iU6bX?mGAbO4z~J{gDSfWMSY4O8+8ZVy^4%C4uJr%VE91zCh?7`%luMo@*6}3mdI5GpHL+KwUfGQU>)~vuc3vFAnnSa0R4LZ=XiHHLz|^}j 
z_P=36&vFW&mR&FX&~n)9pB9+>vbniAwxP~Vg4nIb&;aza;K*WA4>};O7W<&g%texe z^_tkH$>^!bN1ktL&fyi4zV5q5B#mt7^fKjQj2=?yaVMuWf`mA{J>=(dh4AN`noq#V z^C&N}lM5!EllVi+IRG8N9TsTmFyG#g!6bMnzqmg_O<0g^WoH;ZU4y>wPK-zPg3{^%bwsOgdw59vR@Q^lzC__z#;;i3v(e~WGsSCb|_FV2ddlj>+Q_u*+r|!^cEe6{~Y40$pz|=6MQHUx)4#! z2VF;rfr4V)`t=R)R*TG8YaP+Hmfr6I41*Dz{Oyza?HNfzdrR-`$wH&O@AlbV5ps{o zUwpr3QMY7)tY|sxQb7XCR*^WzeM^u(WNK8OFPURKj%pQz#u|SHN1!f_0UNT+2d-G87GR^(_ zZ!4?|p*QLV6uY2Piv;|ZDSQefTB(CCaJ8zn6?1tmir9@hb>qF+*&0V8IyrCR?|dwt z)(qm_KP_YWQ|8QRTBqvNS+;8rqBcMt!2`t)3F*NJw(aG|yE?2XSsnWhUfOitGP@qB z5yLKMn1V=jVc-rtJ>HQQ|L$bTq3#q=&#UQ%>#<3vZeA&DWLue`R}@5-u^==j5G%{9GGQKFnMLLEsE(LMkV z+?er1H@!qGe0kEs#EC%y-GgijU@ehD+^NUSU+i8z^)O1aN5rwZl9H=A1E)_aZ8hw< zL?mcz+(;xL95=iHSFKVJ=o708CV9lIOtg9XZzylrGA0I7B$$GyZn!U?IPRspYJ-As zjmc#vi;dpy+NYm5Pr>=k`RxjKjnDq)ZC&QcSK{Up2M>Zn@K3boe?b(bDtj2LpgSL1 zzKGv)nJ6wR6%k)efq@q=EFMkUAR>dTzT>_R0qGZ=1r?%fOvKG6d-F>AMzKcXDC^V8 zpnCp9MXPPT_R__R-KcAY2m@r5saXU82MStRQ?F$h%YHFj0??9VcvQ|UP9cg@fG0>6 zUw8HRFB$!$EfGHiam#039-(~{ZX5FXJO2Fq&uW2oEoLhQmiK|7{!8;4L*7H^wtSiR zVGfugGFFxDRRpIVDZVNs<4e4D&gR>^C&+Wfuu{ye>8N--qKA{wAOt&+V4DKg5TwOh zC?w=R()Dm)$38hr2sSwn;2=A(WSAC{2M{a!2fabY3G4xD#hl61?i=X zvaUOGrk9v1)Cpdq)qZ{nL-f2#u(lW1$H;Q}v}rPuV3>TCE)hs+9>eRk6TVE5$dn(W zHk3E-WpO2n0Ga%PZc`(FU6v=rw^4x7F2m5QX6cKHLaS8kI}_hiBrN}*T4UMs{-+Ry zxwxXe$(4tBk-tloJ1KL$SP-@9KCL0gMeNv}&HcTRCxxW&``a-0>3ZKh3EFd?@S~sS+`nRYcF_`(}YF@UeWK&T##$NA%Od) zsb9qSK~$nLmse{)?GZCkUob5YI6}Fs@8XbwME|s4^NH$6<9tpg`ttZ33VI zy_Dg1zWI>1v#SrXpMr+Lh+#^}`>^J$$gjOXKG_Y=oUTXm?% z5E&r+)xo_%iCzHRy(4HEGG6m}R3O)8~RgG0Rm2r&h8^ z-0hcFg-$(jO`;-0LeyUamwot;*xrMGSk^rKW#kGpv$aE};idGQNY&Oq&N+{k0yI`c zs=~lfMK$5j>H@fDqe4YhwGnKK{G_2cGB8oa2p4H^C_j=)>Mdwe6iWj@Cx)^40w=iR z&!?kyuiZd!p>E=@FR^l0k$qV73}~w}Q@?05j$`cKBs|o14`w6A!&+p1>K@P7-wCTV zu~oSP0uM(oY8Nup#gZ17>SxK_v-lKRdr#liG0*E+YG*M!aOKg|E*hZ@)aR{It!4vz zH$+RnBe{`=#w7;ZgbgC02m`c)o=8QJ=>tr5AVdJ?zy?<{FDNmo{Ik9F+%6s7#%X9e z)y}#8KiV3y{#%4~vXBP;MqtS7OvolrzAL%U+{fIm1?5s;WMt&j9-}e+V%f4DYD*Lh 
z2vl<51sooHU^?h8s3dgifDVjGPn`CH>2cmvqXrEc2xM^ON{~rsl%UzuJJfsp__2Ml zo2K~(2kY#0fBuOW*l1QxUwt1mLr09>Y5z~a6i-wx;3h_-9tr-3QW9xKj`PUBw^D73 zu{dv9@HsR#24^-M8!jt-X*`{#e!<|PE$u!^$M|1S>a`oRR7(*ZfE+%t>D|Bop+k{4v5>Jp zu`5x#h_M;>r>%empnQE(M?~rD9keXjPOB-K+cRK+-&`ou=@VX4q8&TD>Q1aY?cEQ>$^Yq3Dn@JyxtavGX53g@|nPUG}A;w1|!VIOn|?Z^?fqZNZJXV8b>J#$EzaT40vd`?w(lZ zT4tgJLLi0{*7dm6+_eRQJTcn`aXc)uGd?_dSzQ3v#OdSel5EGsCrf!s9~11eB{%bLXQ}({ta9X3bX^>eS5qLN|haEmtD@P~2%QP9Vb17GroOpd!kyNc{OILv~2c0^n zJt=C-hDonE$hEffh;MPY;++UN%M>d=-MY%27g3lpucK%fD7LZ}!Qh}E0KC3%@1Fab zMtq5Ia~c*Ucj8T#n+cHzE;jBI5bGR6zsWAl+H;}GQ3c$sFU%2;uPl12Yx;zul%|iP zI~Y_Q(nM5-GK$b?+XdJ=p1}-u>dJU6<+lh{ZYKqqoRLacZa5DV#G;O55u#Kq4QVXf z*xFjF3&cy4gkzQPKR>-#$ZXVEwTnmDGkx1alC*4tpkz=K!JIhdD_BoTn z{bd6t^R&A+*hk+MV@?@9g>pz7l+4w@ioqIlvpZWuI88OO!vFxvs-cQRI%@8S%U}@& zg_+9-1U@cET6eZl2N52LLzT%@q3K3}DrH0F;WZ&7A7h9I^L=y}#zx-)W{iaLcWPdn z56w_AgQdwDA4>Jam(OX;>KutEcIAL&F+3`D7{H|X$59xG{mgu3US!vbjKWjeT9Lqo zIV?|~1< z@z{0A>#r9$)qJIcABd}v@D$cLAOo(tGP3=mal&X7(aUh}cF(r6?+Uh=B_IeP1VFTp^i^AZl0|@nu-K_N9L$fPZ zO=54VT?CZ5v7P8~2_pn~!a#hdflh{(YU8{3d1k%WXaW1fQ^6fdw5`&&p}y1V^C|HO zs8wz(PDU0Mno>IecZuli+~;RwF3gpkjbhP+NW)!d;)wB}3v6#ZYV>D3p;OKxMc+lV zW>^eJWskTDBu!=DZ|(K^`PBn8xb3@C|AE+gg4Ni*9dM_hU^qRQtOgWy6{r>ZTXdB; z#PyZM-zmx3LrO)yLip+Rwd;n{4isauY%(RKFMNl|iuo}Xb!c&(o|FZ=7(yKAD_!XO zJWq`qCNXreTp`b_Koy(K{tOWThKRX1t{~WNg1NIxg3uIq4biZSI4@bx!Cx(AMgLoP z2d#ZfDy8xbcGsa!*i5CR^I|?V?9Lw3o33q6p?Z<3y4Tw~8y*_fMHxI4r)E2<>lk`z z>*yRgHqN@gNb05KloIf)+ir)w7jGvcRsKcLIT1Sj`4DbB4yK7gr@XG*c$o^Lk_0j2-pLZcg8(I{vXLI}yR!Xk;K2K$ z1TMdO{AfZUuh-I2Pq0Y7D3b8p;zRPw2$%Ed5g>GHk;P7!ef-dF|Rv z`ACL(=$z~X#AQy6>EGZB$N0n%6B)S|jXGzyrT_Z3-Pe_@6K7oTOWL~?p`Pr44JKAA zSyX3}`;E~KO(?D1iSFz?Wte+~)73E^HqjNJ-)pfKM`UE0Y!|jxlnT5BC``@&sT3ET zF`ALk$A3|raz{i}2NRguDYG#{O?Ap#ZeMVhEj9hQ%1%>E^ef4)Dy21HrzkqQBsn%- zVyQomHb$Nsm;X}Rp0pJ z>)h_iF8Q5@+>Lg&i?OUeXt~_kBqpPaVYt3YQ0TtK_omKXlN4eU-g-^zfByJT$@a0i z*9)fYX`8kVik=)&DK`F6VX;2-M`|qH z?=k6#>KLV(-R2tVjRpZuP5QDmH~#y#*DH(6`nzwBA&fsqkL1FcPaJ<9Tb<<$%#A-e 
zEhxkyxrTAOIK*c<7kVfjlgatA#HXwKNo(Yx#x5N#7Qa-h*_PhKj+$J%d2`?FB-$~w z8WEIW@#uZDorXkCn*eTNaOLvl;f!+4Sv9l7zfAWVdR%)uyIW-3PycD`u?98$pqi)K z@Ps=xXwju6ePYQs@s0;q+ebV$ouBp4&n@`f$B*Y9rxgFY((WBD)aWb{`esrajL%ql z1|^JtWd{`{<@&%kS!!+i`bGkr91$vc;6M*@xGUzoG=fiGzfPn%h~x03efv=K4D9yh z-&RQ%JO8GfP|J+1zt@wY>HdCCH-uQ2j5!>_mXBCwW}c?YkJ;VbEBFnqYXaB6avMOu})qk7*3u5Wy4YUKYZ4~uw%QWM;tTk;WLu>jxNV_w(K~@sAnpcPu z#iV)qyP{_xys^NtpXs+|=+{rUC39G~TekLeVX$>MI*zm_n+NRG)$sLBHPx8xV*@bVSx~}~m=l3i(?0QMGni41i;@i55i!Xd}%~F}MT~$%C?=r7}lU}`CsprBhhT%)q^_p{5uV7}80UC&5)0Z7{iE|cJZHs$|@UorltOvWa(}ee2saaSmCIKjj5Ydran4}ZW8<8{X86`ixCK~hxNqfL)|-3WIXq{N%M-Smwhfn>0bNzwRoqR;;RjRw8J36c+4<(`IMQs`CWX{SY1(!wlkwP zrD%?kk&!L>pYw-nWS4sP!sk!sx`^Vp^ z@w(44^;VQp6QGGBG-&0dvvu*m&Hp#V;L42~H})q5wkX5Gt9wo?@4u(LicK`f6xmGy zOrQCHPG)AK5DlC>ZW(dl^)$b^*WXrGpBe3_D0^R0F@#0aHRlf5hT>F06ojg@u=OzASe!5V=tT1-yCupv;)Eh(7{ z$VJt2Jf)&hN_~#swgulez9OgJ_qxu1=dw)FTCQnmJ6fZV-2xoFP( zMeX=m31E!(GnSoY#ZYxl+qusfxVXN!lC&8S9&X?ng1YK)%2umJl4?m6CdS5B?l}dv zc!FWXT{d7F*il~Gr5<(74Dc_i;nB>*7s7vRMA~>Y7YfqvU*C`BC$fHdK$iiEViN}~ z7rb%fZDQJ9U3Fjoo(?L3`)q7%ym12oiVbCLmDzynw{MTZ_UG2h@9*2I*Q=*BWk^;p z$z);CRO|5!kuTbHo8Q7DpYynJJAGCt_+W19Xcyq!q%UI|8mDn|Il6h{mMv#>%gU}V z_Y8q|NHA>U7&V9xvKwbPNs&$tPXPdRF)C zM0>quFO!a5Po-`t`b2{?;JY1;_w~{iRmPLPc*d9RIVn4apVCqaZ*jD%VHe9m9Fh$W znRiCC#-_J5EQ>q{TvoAv*(>9y@W5HNBSwT<{uhtnU>vVXBW&4+n65J`o0?U_OQnCv21=8aT zN{S>Tf(=YWY%q+OlK@D}RuF_Ebj|vm30? z<;N6oogzX*53%_?FyEghZCF9I&f zyc$SrmxZs2@9i3`Vl>uo$HD6N?@v{nOMRbYsOIZK!i)w}Iu&E+_*cT+6V0+x_pki! 
z&z{;DB!libMuP@TM3;AJ1>eqL^Z7R=Rb1-o;`ukUU0t%g(7BAncqE@jH|_`NlpKZ2 z@;-^#{xtG;86mCW_Zs)^{UPe(@$Fu5w9dQ9wytGz*?h8Zx1EzRnBch3_76n~9?t`= z!UU$Agm4$?&&1q3F`$>Gno(cRw3 z<#6r#^}G1zMicmjKM9K_oR2pi(T%jh98@y&Rb0lJx?qOOCWQi>IR zBhNmZfhyYVt_xp3oCp?|@pN(AkDj!p$MKTMtLggv`}+ew`u6TgP%!wo{N+int5r}l zxMfcO(8V?)BOfounlE3zUAkzu)B#V@3wURt`U z!D)O~TYKV6*_UUL|8|v%4wSHwXUL`9F(DRQ&eKI)m8xrfO62)EaqlGz&=lOb7pf>J zbH;6tA?wconYw>|<#|Bfqg1Okq;j?>p&3mAjLfG{OTJLAvC+r(Q&(s$I_mB@BD0O2 zilPL5{ap5C+{2@)n=kbapcKRQD%2n~J$)8{e9g@8ut(^vh~WR$Pchd0xy$iNY_90kF%J!FZ zy_!dV-3x~!)oYg~k_#21Sop}X(@23lA zvBZ%ad9Zwb1<%Cu#yG1W*0M3FU@-l%;@c>7q*Kq~@;Asoffeet1ua#!6Vdx%|4cqH z)GQ?yqJ?%_oBB@d`MXk(NYoiHfTGC8%)}(#_WLOIFo%9+++Y52vzEXrOY*9*P%!G> ze>}6=XY`g=-3ln+-A+IR8*VX-XEgtJ`>Zo8F1>J1lRp+X5NK-K^a}Uk-Exfd^)DV= zDP;iz*fRvgItqBE<>0CE zX9F`ZA<1p^B3gSst*F@ZI*sQ*&cImJf7!AD-8$6%$WS>W z1N<3@rabCyS9bo~9MqvwJy8XlBC3ULu^&0o)x{-Tyl>jIJH|!3a z=gJM94oPR6P14D511VM?tyJ@MrEWc@4kn4$T)HQP!<_U+f`nKUXI0YnP8Wf#Xa~md4)s7~Eg)E6^^#WG*+y7;yPq=e(1TO*d%~(fvyXzF>}GzQ!*nDhBqU(s;&X2rWJN^yEm9O;L!ReS z1V&Z)V@AIKnkgV@YwPLlKZ4D{rJ+)9I67_>dXg%ppM`~@tXupjn>^1EHS>Qp{_Ffn z{Fu`5LfKP{(p#t}^va2aa2ZdVBqIn7sOc3eyFS?RWw7)lxdiFUv=AqHuMmsI4I4W0 zFJN7>M>^%S@?W|%p3>xca1*ToQYER0=76A_{B8fvow0R+34Y^7&=o~Fae%_EXlC$_ z;?=!B7N1GUKgZ|DIwt;ZZ71V>{aG0uZcfq~<23rV^cQVcuVH>=VsZ`f$ZarnTc4 z$lpZSx?_Hf;XaMDo;@9G_?B6>%+7tu?B-Wt=3&#$>QSNRfUBH#()z`w)bhTWG>oHw zRJ-RqM-MHT#=NNcOWF&{@0sLh>B>SZK9{a()^pzDzw}2n(Heb>Aje5@^m{$kvqDiOws!7*oXRBAztnPh{w>+8QO{831fHW zZ9bx-@aCJ=>^?>9;PIdOFG~sAF02cpT{=iXfDZpzdT)9=u*a)0YYJKGye=>FjBnX~-PKAeEdmO&i3VRLW;#VV9ycB{by-%CyW^~@)xov!@kSI}aU zMOtNY(KPq%kT@?>wgzsDtgQpYk|HR`i4LQF({|$`@3XnYs6&Ur#Prk4vl{S$6LdIS zVFri+J9kzI-hv5|eHaBv6;E%{!gCfmcY7|3YflxKPa)TCcUz-IP!`kEe_1Jg2+23I z9Q-Su_hJ_YI;(NIYvVwNWeC~4+_wWs#G&{wrSgmKrPJG-aH!$I1i~ zSN?*cw-IeZxZMWEB}Ex(hOKMU8G@{>3%*~o7s-x`Z1paB_>_G^53h5FmB3l=Ij;_3 z6jXcL)-&qVw7umq={s^-p{*L(=fQ$8ndOZ9t+;o9mft>10CeW`FNMBU+2Mw^au7h$ z$u16bdu=SHuHC=dZIFT5b*xmsHOD-D@3T8 
zj~o@ih=N&8Smxv0FF!n-OwKyR6EJPGTPw^-w(L&@%&=f$d8ppi|>7=y0igsO|`}pc!q2}fM90So9nbV3r=$G$>22K@E+2kjLtwOW;%(2~0 zzB8Au{OE)^so7@lRSoa7oqO&eDbSl#6BZ32I)hOigg4G;G@d7k++B}lEnWUmzj(GJ~H4C#}&UFjy5);!xmahj+RGh2*tHb+&^TzAF~wRM4^mP@IUSV!=+S30UeCT6xoS?@ zi%z?;=~W+SiQXS3`k}3`#Lt~xpmL+s7mp2=P2QX z;hb5P_NHVujC0lB9k&!3b}TLTJ6k%7hJlOcbJm~FlZbz$?!YBC=ge0%+=a+VnAZk+ zORP}76;QT?@87@T*0Ql6V=SYX#FaQ=q2}8h21X1PrR&ALH6G20;519A1rTSN2|qp;obQQYDhcXdN4?T_97RJr z8nNVxbA5_^*BD?BeLaEeIFS@Z`t9sBZJNNfsP4wr!L=@_eqI1)es}f%4Wd@YZ10q0&pP(U4Y@)@ktr|N1w_MR8bQzmum-po19lOt4(T=ec6h# zC;$E{o_%qm!riZHG^e(7ZrHHlO+;5`Sg(u{qiB1^KU>I&MS2%`Je2*B)A}v@a141v z93l3e-qmkezH!=pKtZbW9L$YLmhhMP)PNhin3e_$?9DrUc-Lql6I-)T;t6g&dnDq4 zhf6EEvT4C;yO%eb`SROGZfKv?R@+)YmHz(g9QW;vt914ZA;TQenY83l{QOp)PnknT z4sO;|ZJjLbv2$M6SUaJAGOornw%(clc7@K~U!zj!_98Fl4ikK|NcN}c(CFQVlO@i> z^Y=ncSk$jgjG`El4Nc9K$v)|s_fE!SetN6{T|3!7!JkzF&VpX}AFuZ=s5btPpxKXO zj80Sf6&Q=XukWK4k>r`v%B;THB0t^%;nIPF_4h}UHpK5X-KT9}yF?<6NtMN^W2h(n ze4Vov@=-1?5@VALN_My7h^z_-`YzL03EK@PPcFH-zrpXn z@z-(fUHtssxn54j)TiWH>(rtdEf;-R3`(+w11S}ewDJrIcDj1f$n*y9cK`|%wSxay#8%EV`Q6Kqbr*U-d$=b%!=Mqbqun~B9bYjMb@vCND z4r%RKw7d8D#5l!lE)RNlw<)V%o&T|7`fwHdwZ-PK6tJ@j{BP{-*U@;TDaJdFd(U7~ zBU&Bm2Xu)sXcYb=SSsIT^%s-e>wo};9nx}aSR7nmS%e>>TJ`f=L#e~Y&|6W|#4?64 z3;#ogVajcujNaEjhCafZ84&vDtro-Ro)%}Govir!3x-8@pbeRmUfElG*T{Hb3tm)y zG+S_cWS=;}!Z&Ze3tGQX->mfHbv~kJ2;;!!qetIRy@QA*o{UM0d0@8ApDJ1pxm2`1Ar`~PnFUq6bH(vwEzs}|D-MYffJ&~z zhW!ay8iigpmhT1Dwf!e9$pMsn5;nVNjf5cyqdlIenaS`O_{!++q=X5FTvqhLXPC&7 zM`CNI+04j-f@%SIY)ImY&&&e?h(SpcA)fKq?A zIz%Pgznh_BcU{YXaR@1#mn=!~IeKvK-n~cfg5$0H{#D<$q1Id;zYFLz#czV(bDZF% zP-q&1O29ym@+fnUK5c?76zUT;o$x{`1f#in zvI;O(T5PG8PaUZ5*abhhV!?onddt9#FrhiEGT_bC<1^mow3WJt!WNecN#_hx|}+sYIfyX<=2b5YEqvsGe+MSR`^*X zWh_I6z^$hi;4d?X8Rgym<{zd^S$-!CZ008=NeM0{050|5$3z<_g&~FNVby|v=NwQi zE$0ozNO2{3zRT4-@a1@Xlm>k$^i zV!UQCLeY!qE}?JfBUtP1DE_>Rxa*H-%CUC)NP%Y`#uAUl0l#gg zKrdNpg^^wwoqbiI=av_#W&3Y?ZW(kKkH9U%jI&nxxg`dLgmiKFM#E^ZZ?bVyt4CB2 z-h9A#oV0t-E{r4qdlLcOj@<))VD$btX*mu+VmFf~#DI-P7Nz&4??GTPf#Z@}Fm`Ba 
z;pcird!=?GM}$R_-Q``8g0~2}jdIoW?jL+e_B_k4F~WS5-vUISr4`K^)3FK2in+FL zfE&bbSTvt&CZEP_HgN4qOVQ`t!NA?ldFj$LshQVL2Y;}-auSf)&Y9XuBVg!RE;*gD z!PS#mEnEH@xFB%W6fa_YsWUWFyyQ!;LTRswQ12(EV;sg&9+A`P!P1K3O-D3J$s~>0 zhY2iCb{K}QLIKei53gRpea<{eX;8O+P4sKU_DP{>CHlBs--hKGiBU?oZhmSO82K5S_e z#}%%|NH>-%%4p4b13N+@cO`;iQGftGVJ=Wp@&h-a^gcalM(b9Ty!N`!*mGR?D*j~Q zSSNMIB}BI!1_0*IS#s6Yv2wucbBCmB0s}xPvyUY3ERY6JF^WSgIxWPw>SH&k-?4Cb zMA$#drUh0ASgfRkr@u;bK+EJde*A9PvgtO--*ECm`Gyo0KByQTK5)`p)uO}VS=%&d zS)pm3BiYs2bR(nH><}j+4>(0=;`yPcw0sxRA|I;b=HcRD7L-- z+MsDaV#I=JBeD!B+(*&d9Y+in$+F^mA3nuQy869l9EYseyvNqdp*q1>7lnV>H;Vp4 zgXms8AaP^X86w?Ypc2XLFM>MnH*%~bcL)sjU{R3|sPsblzQNmR?G21AEi(m!T<4Rl zYTsh)cGW;tG=Q5zEJRoV8nF1@JPjSQe7Z22S;BN;|JSg76yMy`Dl7ntdUSVu9B$%j zFU+N0z?;DmcPPLmFD+!T%QN?-i#XR(=5Ue-V80pjp28U2kD-w-bO$22u-zc5S{4ri z%!2Syy5Cs5Z_l1RGTtMMl=M-)zTIG&hDc5mj9pYFq%8>hF#zm<1Ng#p;o3ydmx&aH zZy>HJ9H;&RdY6X%pT!B@b!b(n#V~?>7spC2c@@MliajSJU5*SPw+nBf1V)WP9Fl$c zvT2tt$K+|zx}ThElw~8FXj5^%pyBzg1R&|Jb=jY*IPXQ%lYiXjH^+ZE_CwYy$Vqz(EIqL zsZcLjy#%e_?_;i_l_7K_WRb8xy-fdgqgw>b0kj4VZ@06B5zfbjzuKALJ_SkT9C@ER zG?5W)SDVITbb%R zoAVkLQbHGPrV{l@S9vv)YU2>S=wv@VWruJdimyBO@isI+SJEVdERj}`kS=5;C(wav z#D#!gec;E};S#}oeNW(YQT7sTaN)#Mzh~sVCQpW-{y44Mp5f1v)u}~IKt=ogs_W<{ z`!VFtbD6{U?W=s**D9zwORd@81pF`|Dg`_LFn)1AhJQX|ZF(Fe);^3{;W*hZ>yf*P z+hSTbJ8BK9gXIXo4ebOCozMx5#|vJp8?H%)cGTTzm<*nwJuFr!;3)Juxq24K3TD3(K*UW2eZok+qWO) z@375HZ=oHydOi!KM~&J{3)>MUYx-ps`(spQ5ZUuTBZlU33(bq+TrAO+h!lZP{aIz@ zaptO!6c_`S@#|9mgHu=x1O5v*xMzU!^+2()Vf;>pWsah|ifm%JuhD(}KGrjUiZ5XGlh_=K4?e3wZURGOQ;Z zv^*diZEas~Fr@;jM-aO)v7f=H>>i)drkdag9pRVDT4R-^O#I6JnDeuacQ+s+gh2LQ=iGJ z2PuW2S!E0)x|9@JAn337VM!U#x1NXC&&G3fz-?f<7?OSV7J|x$n#Cl|POGu~xtZ*^e59 zxZsr8fZ_O_1ygD6ewA9L$E?ywNdQz~ma>jy!<=Rp;6<;B2p5(hKX(=ENc?>^_adHnvWPtDM1WXl(O2z4N~e% zUhXO)bsFCo##{==RwRPDOPZxliKEcJzFDJ|4}@+z2?myajTLtj(yyt9TMM4U)!KW+ zE?)Es=*{r@nSHG-ne8S~l@=ILQVhIOMpd3eE}%BH275pKM=5X=kY!Jw(YQ6}_&7OB z&4bok)?4y5%;RA&aUT;}G}r|EKr$n$Q?XsigEKsWKjT&86VJcSnh33tkDOE>k-c=s 
zvuMV`q6L#f-c#FPk3S0#CvZ`6iFbJeCFLZwXgr7O*rz6i!jLFgF;x`>H_R4)X(J@w zl=aqDSE_`>+tlabiC*?xx8aGIpW|PpJZXxb$;6(PojVVgx{ZF|sGMK$H>v_q=rwcB zz0J`d)huN{E8#pocyQ(fWoKCf$haMs6 z0>~~9sSYsR^x;r|#xh{fZD5+ji}L0UYNuX!%19x`!OfWQ6^=>tNYW(ZnG8(ANf}Gj z-M3YbGn!rZ1)q8N95q)q_u)5kO0Tp!gnQL<%C69}XFqyfPESpJK`-tC&8)Zdc?bA< zS0aWKZgega7F<}>;utN_%m-;_PI)S3rhWU4tBoH*7)VT7@%8O+fu032C5gHMqXb;b z*3G7Sd;9+iu=vg1hS>q~yS;NS;63l`C1KyX<{|z3SaEtdn`!Zd7I&I|M zo%M}U>0fqDS9iI361?v)kMUW};-$TETJ4SrR274Uh~2wQ?8<3*54KsuyhSy|gX!`Md!RnAcIY!;PpS)S4Xn8=Yh#ro0=F22@N|gqYKfgoklwmw{SDF#o*(9)j zRyBN0q!d1U&2Pl>rnU{k+oisq0^964Yg#?WDiT{|_htv$uKGFwvGsoce_NydWtA&6 ziatb{7W#ynAgCLNeB#u9=z1mF}lTKq5lE3r10$iJJKT3kPk_GApM#FH5p0m zjN$~QY5c-I<8V%^3ENy&EOmZgV%%_F$vc0p@;Xs5C9e1^DkjZo97h@O`y!2_Uu zA_L#u{_1km=F7OJ&3r9^P2}W&&D}u^c?TiO!k2}&?j;3IJqmKl`9nHCL{vl!I?fRl zk!Y$Jm&ipED$-ORDBSCtm2e}0VmPW)dqngFSG%6|xE;XHcjJEvon!*sNXvahv$gM5 z9>MM?MKRk{u{!X*)Z6L|ZoPcjP-!R8K|u9IuZrWq$pI@p*3j3EIsfB(Izz@XKdkEG zLvI?#s|AP}@G9lfD&tjQ2bp}AQ^d8bd})NPco(Pp-sbkpURtF8QzP&Q2`bz`dc)cr z7MWG3*DS?Sz)*E@UqM1u`g@;}fKx+;4rN$Pk5>|WX1(50Tc{9c^wF}FDdSzPX-&d6 z8YJzuZB4lfgQ{*01y>0F52#HZckMKzR1{Y8c33Kr)_;E+;U>I`&;5L}`MvQ}rSZE< zRUyzq`ag}h>0w^TXL)p1wV&u|X!=(ls+eutl62N33)q(oAAQXcE1P_2M50Gl&7eLc8VHMDODwZh8c)ps$V0= za}q6y;Y(k{>=5dzoTz#WW%9y;iHqZS_~>icgS3MpMh+>*OOPwX?R!~JT3zzvyBmKN z-Aje`kEU10s=1ZHCrMdFbr&lyniN`&VnbT(X(96#4UDudLL?|s4e$q{Pf)A2VL8ev zPZhHc{Lj3ah?tkgi!*VeO24iVE~bu>Ls5tO-AN)Wwi&eaG=Y6=y(@&K(%_JY^Z>q@ zRU^p^qASU11;qE~Gv8IS&@k>i(x~G{&&v^Qdc5oL?~t5U)K(rtp|9|Z3pX%E$SQs0 zqD9FDao`Y0&qGbd9*gbnmFuxk&+|Vc+Xe3H0*V2ocN0k4d)Yst5YuYide`)qkYj=Z z(oerY5q?|Bk9-;cP@Or>#6#gBq=TOG9#U!JXPzp=RUFiIA(~QyJ(P}g??*t?o_!39 zSigS#q-B-81b1vYN;DX5f8~R-K8c{#xPlx4K_8lB9V0zlk?ZkaNka^{eEHD_jQsp9>@&9a?V%SyN|o&0HYA_#?%rCR)k0~Xe0Xd z@8tN>HP&%3<1u{Le-{-^0E|J8 zC%Hr9S$6WBA1-;)gn89Cik|BMoHE7ePvH_3vlx($bqUwLIVZrxv>CYMM25bKVT+Q;cDT;*#?E_92Jyy*g zPnt#p?KA7xi5+!f59Tj!$)}blM$%{E;7cB5W`(GvkSv%_ppnp%10v{;m-|x6?*;V6 zd07p)!Kyz2wBkyK#_8R!F*tHi?0jL}7mcbP-etCDBah#}c}j_F212Ef9Jei4(e@vZ 
zV`qV_42TvJliW?1)Oh&tp;S4r;e7jd2l>kVS-l4oS;a6*JoDKF1IxI$+O1n7cLeU7 zxjx+LN^fRG*akoxq;#A`w`ejktx9_q=ln49rsWt5-hl;;K)@z4A~+|rsC-57*fT5j z(SY+vbV&-IN>qY-kQC@YugpCJd=*1uc9ho@iwImW+&LN5V;dY%QzB=ey3Ycyx7|%W z%~luMOOzoqW`*ZAqxlpTNHPEkmJ-`nmwqFkd}3(FoY`HbRF__x&++MZ=h z?ZX)6sBEp_>y9bIQYfNoE-q4_;ZoD1#Bh%NBU^CU8-qUXML587!&uzsM<*4nNgO(Z zIIfl`<79%u%|KDwZEl;ns$xyue2jeG1T%`4u%}O+%pX~eb?l<6^-|J0?_V;6yS2k& z7$XQv=~Sz*u`%h{v*+Y$c?%+==d_se2T}82IEtuZv9Dj`w7-{+>@xclk7mezfBx#g zuTup;M_JiITRWocV_;z5)&1!$b*^hDR`XmB4pA579-S%UKZ{=OG>D_q`4FO>(vv#u z8k$}pm7AnAE}5YJBsddR8Pd(%?0U91iU%o^YNRVLR*8gh3@jDB`h4R-rx<9U-Hc01 zk5_YA*&(||MXW~enVXyI9Oqnfei~|ctDSE|p0*sDLTO1g%+L8| zcb(pY0Ua@n<1Y%6AqM(KZP#fWya{Ah_F~=zYh_RcEXP+{Zb9JGBgE#WKiVGqL(OBN zl~tCP+m^IEB%}J@Tyroe$P^*#-vSK`YggNAK*G5BXJ+A z-;w3e*9eqL{iZ#5d{#9);`85r)NI+mwA}bOwmjIo+y(NL1sUYMq7+V!F^5bBU1UCh zikk_zoS-Wz1q&%TpL6r>viP{GXNYKwWRD-2Pa3%Y4f?`f6RA(g@q2IiGh;#D_kD8# z+6Wffn1Q;R{}*R(0+(anzki>`Ff$D9F^n|^SEQO6OO%qv+~KljsYI4RN?B55BGrsB zO9+=1QY5kzMb>OJV=pb1NjF@`%FzOOW4ugJ!kiwr!F%?ZWr`fL4 zNDvgch`_Dz1cgTmMgViLhz_IDFY5OLFmZb;pB>a#hA|6XC=Ndgz%JG^ta5Mw9_Vl> zC8_hJvn^Vd3;rgfL5MU|VriP++7A6kBYRr>;+o<*8kF@tkVX+N=wBFGLm?ZuTAzADq{2J5G5-h?NJ%3n7T#y$a zbbwXSzZ&>)%Jp`hU?n>2@sxZ!P8PY=RVNN0|-T%HMneRwLMJ zcD4s}tUM)TJEaX}sHoiG3|2mVp2%00);pe|aAq5a%2E>6dcuJQkxNKW%VDjB2BgYg zdZiXfvl663O$?%@??PImR@}{KsVAdlawnPIG@Nql>+oe~M$t!rV^(#|`SjS&NmwxP%r!;WfcFUONFoZ(t(kQH8R zyP^Dqfzk+OMY#`P@)*$!amd`z&|U~%Tt`lOO|)%)te-H;n$DfK+N|ZUjH1wc_wG3Y zqK?RiUztpOaZOYzZCV=r&9Fi&QPR?y!5(hJr?PHwjuF{4<*;Gtyme#-_OQq?pPYI* z^~knHkA4?qhWyX?<94benaR7!p)p)tlL-q}j|JCXakYWr3Me>XucD%&%AYsw&ZxYF%zk4h?@<`BGP7P{-^MpHtmk$qK+c>wqrDWY z+r=1Glcr6tpy4xS^Q+bbcmfhvmY% zzX)ARO>Nw#&-knH)9!)Xh_EBZ#(x~WV6MzUkmTN#531ibrwMZbR5t(I^h%zC!4Ywn z(cjp!5{Z;nUgA_RHL%u=B>ynZ7baq@oDd>o+yKH4i;gCv-bdgtYxRLO2Tey1=&%Nt zRfeL}`^vc)N~3Dp;==Z(qCx@FaAlNp9Pbrv(9CF|RI#8YLeGPpZ6y90YDL_cR##&# zAhXON$(SP*kATF|a1cLRb?i8IbNT5vPki&ov%G^>$&ni5=tXSwqL+P~y&7j5L~hxV 
zBvie4)RSNLkzKzU>q)VCrtNl;lr%`v6SG?v({*QDxNt$1p3K@>+96^h`}&;sKq6$n?qA-`RO9=<&tTHE{E~To;OmMW!q70@d0ZM?5f0~T7Vmi;;Z}it zh^TSMX49-}29;z8fLQb#sm)Gee!(Aef_3`@p<6ILV@%*01cqdJVG(yPh zE9mV`-|q-&dj$w>m2vOj5x5DhlQHC*m$WpX(bxlc;$3#6pl`+7CB29rX)`{ z*)oorR)8dmx)b6IL-gK%vz5A$gAHJ#E75Y~0i%HUPTg%Ky!3(mrW za#T{%hOLGUa>)BMLLJ2|vV+l1=t-px6d^QgqdJ;r6Gd>?MZ?iGr_t`FR5DHkBda3! zk4F1F$nByXht?;i!^cYSV9vqFORImkfUI8Rv_X2uEdf=R0+!dO{7)kcZMR-Z?2S!K z0nj_iw4LaAiO&abm&5=~Q=NDs#ZAD|Oms;^JQ)r_?0xJjYLN8NR9|^V7ffUORV9oV zI^y{Kme<UrkLA|7Skx5tWRr)*bf6{mJuX%GvqoY3#UKHqcGjk4iP6|9i6WrSy}#C=dkqhQG5{o3Nb111zrn5ObBRW9~VVI8l}h<6sHbKYh)P z@?=o+IPxu3XRvEmCG!5)OfmPxmiCkGpD5{ao-(ZU*HDkcU|}UMNR@ix3s;>`itYN$ z_7HU!<(6ol=^Cv2Tb6WP9lq7%HBk(e840xjNvKB4JLF2GM6rl6WYm$zH9UsJR1Jh| znRC}=r?vY~scLB)yz?0u2$ygpCvP&$P8&_WUw&od{{7z;wOMe!twaDir~7W@=;tSJ z(^96Js!`4BMQiPb2&Aj(xGc9^J%<1YB5{cQ4RRByyqZMO>riQ+hQ zX8^(SFtP9fzr+m|fgH=W=|Dl&5wjaHdW+n44#F_Mkxe+3YF_c_DU_=PkdUIvD!P}t z)o@KOurPEW(vl)-h-uy0DE~%DO9q=+w1Tg!?j`;uQju1$i=2p~b7u3*F8u7z$cF1ZS9C9rz zhBJfe&e*|rDrfLt=yo$}!$wMattvf+GP;SAgh;jsMwy%n|Dy2QqhlFO7f5&Qz!w|& z`tv7ObhfKgMZaPqHbVEsw;yjs=LM zixy6wUgB7uHKh?=!_RMi^pWAM?^_JiQ2L=s^P+#w4sFON*})4lp3p*|Y)qitjDz)X z5IY5QZjaY7w-e|4LmwIV`ytYb47{*tApjr-8A{30agd=k9BC$ODlM-ChlyZ*;(WD! 
zpG`E!KnYna4@QQE{%UG2YZdpQRxO;ZAgAzroI}B%u z{MheSbcAX&5Di#%a^uB@JeI=-%l!SPEWUHA7x}+nU2{9$Q)~$4;MKjfY?Hf8dN?Mz z7OI<7&Q(o&aDeUyeJsiW&PU4rY!7W>D3yy=)S1$apSWDe|1-a1WMZTDWTKC} z**};2` z5PZj~gXQR&WpC28f;nE?b<4Vk+1n+4J)Nyvq%WKr@Z27PR!lv_=thhZD)2&+!k4nZ z+X|VRe_)GGKRxy+)eG1CE_933;YITpP&#nIlbqkGf6&|KziIYR7f9}$QU(?=ftZKN za`xuVFcq=jkwS5cpc9>#5uGE>RpWtgANO%*gmT09j|v7!WNz;BvcZez&o5QKTG?hj zRq+2+@vSJ zlJASVSP#)Qc`(mo9$-?m`t>hKs4ANE zxe}ac1qGt|=pE4CwqS`BYG%S6weu|Bb4Nc!vCVH}~ ze*__zdjVL)iJ)Shkag+OfyMsQJU82N_3kU@ou1yIYx-%V2xdjDKgRnul2peefz6Vq z=3FrWZX5ylaaDf`y^={jo8upy3O@)yT5FjdaLPiKr z=}7P#Z$C}!#6sp$1Vtv)KzicGqh;i?7L|sW75w>G1HPU>D(V-6-0eYKOnCe-6frkw>| zkOuy7uRD5I0j1S9>K4n0_$HBQ4N(aaa6Py9Qm9>HsNtWpUygxYuhP|Ud-G_Z%s6s6 z?)Rq0eo-}(a4LpV>7`V$TU)yWa?Bo@Y%j8E-(d}u;X6RfJP>L4Z8o>Hy))mJON*V= z>#O2y;yd+muFO#|u4zAXDAg68Tak3)$vEh_Xai&mtR^|x<|>X1vsSNna}i`7H0*1p zG$*z;p5U-&+^uV$D&C{e0`xv_IcvT&IH`|DCGd;s@sgeRu#1wmP~luB5kS&>!#p5U zYEy)n-oDm$>6PJY*nUd{U*vi~(a|Z?K*m~Iqv)~k%46JCeNEme_-8#%oNiGITpNbN zx>o2T_C%6ppv)41)0$>x9BelBNz8e87j?FX7XQM^#>3YE=-hDY1isg`>!aRn99qf+ z(3l-(I8~-d)UxH`<*kE?{|=goXCfddU)|e4`f!T7L!db#CfC>DO!G%>-dyB%fjOn| zGVH&j-YfLA`Od;DG3ss4JzoLHlW>6*&kM?_s~k zlP=gci2cQAdy!_zjC}Jz=tPZ?0({h096{=N%FPM)S&ng=y7`V>Bs3&72Tap(> zxXI)X5X5!rFbilal*TZ3B6Vs$8f88$hDq11gPC^m^Ap7&PqC%QBER11bJTo&cClOM zFTNoB7u?-SF!w%H2`U3i+!X;Kz|G5$ncV6HMSmso<-qM~4KE1&EtK!-UQ4k*mciq~ zf&GIMg+rF<5w_PU3hkv2w*4j|vwWGVqA<=tSwUW-uDpH!(J=jOF>F~G*wC(lOT^;B zeZzcmrew0(&uM4P+JT1i*0JZUFTmlI&iNEA&;@gAKbC~dWGy=F(lFQ;XPk+MU3ID* zfobC3N`ZK?A>D8S*uaqf6jhA+yGMnkG5G1rx3 zX^RPjPU>sx9UpstBV!lR0~2zw!D{8jialzuZ0S-tG6yB=MyUS9@4M#OkLV=wi@m^6 zv>*59{M!5c#S~F>RUi$kjXINJ>SUpUdCIXN^98hI%e+E(9!ZOg{>03>JH$Pwp&Xua zYu=T(bB^Vs#Q%Wti5oqPf(Kg%yxOn1l(i=LU%_5(E>2svW)2KY?W}%N#nl=OJ^$+9 z!8f~#i}oU{e*}zWR;JCG!u>z2n0LY!4)L(pl01J7$1>95mbWnRyP~*E9rdeZP?L-8;?0YB0RQzOKEQ7K_$aKmidlSs4I_r;x z-MlxyHTHG{?V5DVdz+jNSaf9ClG}?T5ks7t6d1KqpOs2})R&c@iy(|njMWG}1ozy# z_%KiG8Za27;&w_7j@QimN!vi#pG?yz2!tb;jo8&=CG5f2nRunJ;)PN{)2A(W4kw^K zb1c6F<_Ho7=7HDL;5 
z1P22{XX!otC(lplyucP*SP6o0)5Mm?hJn{_1^O2x=oF#|&`7@f&HX=ozopmtwkRy6 z&xhzf^)Q-pZofe>(-3|+pNI7&B(3kY)6OEotuPdVB968@5lkCm{{79j&&eumprp6H zKed(F|0$;r*D$1@`b2o21Sp$>k86slNzNEEW@M9Gt$;w^C72j(A)(7->otTd!oBvZ z$_d)){HE9W-SG9VMeGWWcrZgUguEjEM4vJJ6Dn{$-U3WIz5ei02Is&w#L|#)ycI1! zQc5O@;V7@Zj2tt#XEbo&Z2&NIHFKEv001g;EU!iQ2k&8~6}u|7nSp_Uw?9j(w$o$= z+rf{W$h1VF#nbh>jcC$*=9C{Z`(x-Llek14QG$~C2LDt!l(L7NiM+^}2))Q|Fb^oy z%>_U=yw?!2w^8nkX-KZ5mtJ&w&Y9b{Z;$QOK=J;aajF4t7jS&kRa}nLOH6k>kRmQn z1RPE4A03g$EQX(ku~vc)ww*cQz>IT)^SIy}R&W~jUf_R?JX@K4F12S;cjj>zAk1TM zR)7e>#6MJ88qHa?(kdCQR_R7q$OzuS*(rnbh7KJ%LS{kKF;l)n5AXYfpM=go`;iiB z1Q1mzBbs7;w7XzTX3CHAppor0`oit}$5hn7m5!C7ijsLqmF*YOl=5KlB3C~5*nR}O!=?S*A*C@g z##0^))6VQm9UPF=2I(|jT7M?B<3oa_7GxVEq!rZ}Qrw4X3N-3+D;kS0SGGa3_ogL? zn#B#DFL0W2WAUZuB2LD?DVY>{vwgg{ySC?%Qtn9;Br#3MDZ2eGuM6SH zh%i%rQEOYn?^ye)CsQuPN{m|Z7=Bvx47W--h8L>6Kvb}VKnhtNDpHTB`UFUF0IdDa@_@p;DZ{S9mm?l-N98Y)wC@vbl^bHbCbG$gcm_=1 zCTpfFUG}9mfjlfyL=hn}RS?x@rdU5U!XPWd?=2%AWhczA07w}m6LdSOpU%NKd*(~n zRK+Njd<^C!@@{MECj80pwvyDW86hbE1-JO|*>nn-Il#sPFnZO~%86j4@1=a+evsqJ zv30aTTJ#z5GH3_v!2%Z{7}K>!Y9;9`Yb%z)dtJ7(eT}hzV<2~-pN1cV{Y!FDL+nL! 
zTS!YnqMa zhb2Frtdxz##nTcZ2ElhLkB)kNI|B}1mX=xxhMSrI5|qoLvw)mw{||rYpb`15yxOdj z#y~tJ9^PQQu*@r3Q)KK&slT#?)Z1^*sNNyMKhe?&e}9`MQqXtNr9)X~wDryNSG6T4 zx8GwEr>~SYrnxtw4N)D7lNXP*H6D!!E{Njy+eOlmGUBfy=8JJ=w(Mrc2f5lki7+IU zI~x??Y>IZtVp=s54&|FDFW?mHFI6B#$yq_FH1`iBP2 zcgZ(=`ZLHQ{tM#PB$F)QHLcuOYay=8k_l_Ulx{!)=VV!a`$fqB;X#pT8LJ*5W!cW4 z9_T0e=!p^G$rq9|gA&Kc!)PNd)ua3D6Cp&A7FNIe=?;QE(IKP8Sp*YcVCgOMRExY3 zA%<|8Q>d`p%zk)*e>Iwa_uKQWUWLwx-66I{;(tOPSGbsR<0am7th2(!V&)b$7o6AI zPo8X;gRGdCw=f?nH7$-O`LI$lN{r^n6Iv{CUnTAnR3+WDY()b~3MG;QWB!Znobc?# ziUzGl0!AN(frtTKLxAken!|d4QdQiL*r_nNe#1WF6QldHN>ndJyU%2*=f`eIR0 zvM+QyAO$?>(@^tqB-Pvttj}X*wS2g|(hJr6c5#Xo_k_h$)IPo{5lrqigp~}e4?T+v z@1kfq^eiQgnpX)CqSujmmF&>Q$M(V}iGQZ(G6lT0#gLpn-T0pXA=#MnbX&b1paYP0 zM%sS;rz#~HwOme`4xq&SAfm0jIpis(6YDfFR=(%&Tp^N&;8b|d?=52VIphzRqUL2x zdy2SC2U@e;eT)APKEU|8t9%Sn39lmCY6Q$-KuZAN`VR^|xL=BcKOw~owd`q=3Co|X z+(j}`N6a?K?3jw?et8=*k6Jmbtzg))io^*SG8nSsn9xBYej~-5@_YxB^N;wt!NjxZ z&$k6abKsXIezAi+Y^PoF&x5gSJr5u+ikbf~FK5zhHw#ew6QT`3-Ia44)q)gJu}-sO z$;-icJq!&QeOSKyaRf`lo#B*7jacYh(gy3|>91ciw=GPkF$fkMEuSyS0V4h$zORVb z@$-m&@1sF2AVUlA&*PB@XbPReWDju0^Bxw7W%V5PSNb`y$_m=}@xz8)biB}*)xr#y zB#+PJrf2it>}qO0T;_Y_L36Za7d%qpLeS_0w-kt3AQO!9U+$pn`k*4d;q*O6 z_=$q}@y=^_Hi-QI6!&RwV1!Ag2xvVI4g94f|Q5ZqeOOx3**AvYKHtX_->=(Q25N^>BNzR=J>1t+S$fV1FD#m?jLORVz7zOnUMaA**Gg(0 zku(aeNE9*q`4=$6HBCDNWAH^e`NOh#|FC$tjOv<6b5@UKg!PP%8m^+Z10IX#p=jKQ zzh51z!wU5(0}G2t-5ww<3?cCqogFg@=_12F?`$h0p;Eqz4`xv(+!5a9)Lq9xW-M`H z3wn%+VbV1n1535Za%gk2MxNt$5z1PL{b&*eR((PhdHX5VF z2f2UB0IJMmoU4-pT6r`V1{<#K$SZcHsCPI9L0$zLWZ+OR4iwZHEer7w)?SV9&YOCq zJ6Nd|0CgZ5Aw-xSw9{f44tWOEBxN&~sr}gKIUga2z$p0BaRG zDj6U|NCoa3kk2}CP!qipIGEG_Fm6Zi#zFz~rgaj@z1V%;cjvN#@BS(u9gO5dCMsz0 zuaNu92TbGLL~0Fkh2yZv!dW0a6e3S}RbI!$NR7epA0}}+2eGjG?#$F@ zIwz*rB=23y%GzfX$HPru`K&p#wg`U&)5L8u-DZi^yXqe7Kqx~_ZeP)N*2RBO;agc_ zFzOe6BIIuP>H8`a5;m841sO}8>R;L#EN;YA!?ZD3@7AND>wN0nkHXt%;sBa2q!}ouECq zo)#1&Q-=t^j$D{IbLL?tuw+1yfdk#5y7_x#n2&Hm6=qcR79^>NUZCdLnHG@cqp%FW zf=>PJ2cVaanS16F<;64dXeFaaz&bLbYP}>$4#cE&{$|YfFb!XjzVV5e>B|8$yCE!f 
zVteleB7v8n5`0_64?rA_vQYm4r7?y~5x?{IFS(9o6A=kLeue755%i(jQTlLJ^gp_4 z($_5shfD7G?`q!BGM4eS?=jqnfoQ}E|HFHPO_wj~mrJ%nHD3`idHxk5!laVd^4CRL zin|@BblHBQL73{nfgbqw?VHU&XLxuxvkn^Z_@VJ}#&j@gz*@b^*Td<H?yl~g;7X){$9FD8 z*$q`iuXMBL7rguMCTame-t@$Sg?Vvc*PxB51+q)_4aw6QtqP@~7xK>r2a3-(w+?4dZwyTPcHz~x8f7bm$mDza1@qqGkqPwbXtDnkV>fe=)m~)>dvF4Z`f}(x7b$>{ENeRr z6mu=2y#O-orfZ-$SZF6v?TNqJdz~@5V5-_BZnI}6AOXAyrK!5za$$0y5x_od8WTb? zLz2?WhR-njBV1%w6g%Qo_JCX-Cd7m2lOGp3)8B@XbLP3Cn3l|Vjb~iphJ{nxMR7kzCt+5 zJF#TJC$u?eE4t4*8_67r8VjIagb88MDXX^?T z-+Z)+ycl@`7wR;WXRg<5@CIlzu;&mWMYnNd8=@HroH zT-|i!=*n7pGf`I2ds0mbe2T(i9MVAZ)OUAQimNWzlB15f-6xRg^8Q9MWKtMS7Mo41 zT|UaILr45xPe#Zx8-TC%XWIIsf63SJ?n1ocb>lXXP@|Qmow+wN|6NbR#ezn!Y7PBe zKqtjJMbr#B^uLliJSpyQ)I>@lF`(!v<5m}aZ8@?-4j$??1gh{wQ<^0uQk?O4oyeTx zC^tHRC-_sPiDQCBD>%TZ?P^#}Y6v{dp?(RZ01~das#c0;oJ3o*k$QSSF? z?cW%&YUP+4G0GwE)@+w%`e9oOxh>4!^*zM^GoPOn`C1TCKLWP%Xigk^$Ec!->LH0w$fv>^F52 zbfm*DozCadaY2r;S8~R)$9%v`>IZ96bFR*{A)tbT!kDaB0&+9Sc2un^O+vR)ZlNgu zS__BScIKP(1uhY*dx^r3NTZ^zMEB-FB-2(a>8J$zKy|b-p;%m}#Fm61Jh^?xvY{9z zOG=Y*MLF1qUg^~Rh(|JWLWe}0lk>I)bm=MIM$Y(fP)Hjo1})n6y?Dg)d_1D%O5!ey z>1u7+ZgQJcb#i2z6d#D)hU*AsBFfu0IHv%%hJd-ZYzF5zlq}SYKR}|1YnQ3|FFwy7WJVb?=i3PDVc|L>X8l)Z3G9LF zQ2FmRi)#%*liSqt$PBfjQz7&vS^81YA2~1VZUsel)-@!aJlSjTCUeu0f!eeGppOeB zF~|(6ZikGmV<~aWGYgK<0+k>Ec`{hkyqf~t`z)HkC#f%op9QpS^e=UWmn9K!ZXs$5 zu)O{6aQRz1jWbUhG!$PXtrW$}e})R2CE)@!>JBpJr4&=)QtGjTmZLS~h96ldc$@fg z%0W}eeh$wYTlwEMZ0|oec-4#!wu(_OX-a#w8J^MS7SfilV?)RRD7P10Fo-m$WYC-o zmWtfptW!hkQ0Y(@Tiqjm%}D%3oFj2ZQ{=g-O}|!{3~N!sU!m!`PHhs`LZ+0|m#Z9( z=%4@&(Z~5MG=yqezo>4oH?uvZ<>gDxJ%^dp?I6Dub}^)}TMLHwP)b;Urk%tgn{wKm z#(P}lj*CGcS*5kxNO|Yr4dNUpc0J(Fq#`*kNV3S`?6$sF%17}xRDv-0qi48mqtK@W zOSUvS>M~r1sAo~0|3DiXjaEdTUtv2|$#Ly9Se^z5Y%QiXAmeg67O{a9DlP~|KUDak)WWwqzxKb?SB!5sZ zAq^JK3M4YAM1A)T2kG+#B$G%;H?oh&!Xgt#Nd}0o5K|80rryI=lAc<9*)>3^Q0Qj^ z;kYUri(SC%n}NwV2E#KfIcLQjDfA5ko!~q3j_yU)b`3$UxEhLn#GwXD-$PmIjr1NU 
z^)+Azh3EJ&1LFmrp~i@0bZXv68f=dJNEX`{+J@fIxHD)G>`)S|qp_Yg2BoRxI4bHz&D07@zplikM8oA}$+@M_cxmOy>G8b^4^VR^R12yIdY&!a#E~Ptk${|yBx#CiZG>BC?Ik!O8Keba zK+RDZ;!EuL-_!}nE=x}zDeKoo1r}UKsI%v6dF%<7EIDehD?WZoNvqst2{5Ij7Iy5hOI=%*gAw;jQzo+4wa!Sn{oYd3 zJmj>Ole6AY#Tshafz2}oy(>5y?vGpu`jDf<9S+x@^kS6YF4*&{lyX|6fOFMt7)Za$ zIu|_iftuokFnST9HWbG~Z!v_x1juO*Hxc+0hC;Y*CmU79-6W7m9?*$i1VEpQ&s5za zzLo#PR=zje^P27A^i!ua3@I}naK{{|*^&*xxrmPgWH6B0Vib2k z4$h+yGv;O{g#G$?yEx%X36E%TV&|v5^TT#Dy=ziKAVkpcRK9eLxbol) zk`gmw#E5HRTn%;R>SH*w;%%0dXdj6k3}^9it{=0xcs+vL{_i7*)blji3XA1vq2XHX zdP+q!o>kFkEpkngPsTAsH1&|@0sdHq$z`7aDcHAb$2+N*Api6$pyWr|KRsPR+Lckb zP(DUEhwu0<5!Q*;6{H9TRV>2TUFDBF__@(e=}`XT<P>lNH`TbO#jsbW-! zmw;+|2Vx|=kK9@sw+xvt;MJVP?`GZn(@%}Omt$A~@!n*L2oQ9u#1xXQVG4gL;wRMu zxds|ziq9=j$s`b;*KrrnU<8iZ`0aN(P8_F%gnU%^eg}KO%G!m>2XEp511zUHE~Rp&b&Eh#J}WWuKC}=T_wb zyg?4`bg1DE^<*@U;)!T7wVc!D3`%D{U@Od^+L}@NGT!`ai_S?5BVTMQV`y?JAjFY< zl&X$$Smfgzp%aHlIp&g*E3dZ|-l7 zHMf4We`L&n!y%UtC*&E8XwWw$#dLgxTUpN5tz`CclWDcX17956Dy~|aY?m)>4w!R` z0gSxR&y;3zgpgK;c*ykmBeXy{2(C700O}WSC}Ss=TzkCtV0K1wU`=Ai!pA}&)48Ob zL~iKFlT1$-?9wFmi`zb#-=vmBdHGz`_Hbj#jCP4|*+2cez&Az3WgUuN4QXz5f1M3= ze9z3@_G4!;KAIQ$xzcRLie~+WxkUJ53v^;=gZhrGci(j`B*{q-n>>6D0V9xB0lo?w|rwx_)CsnobQrK(d3I% z*)4J35l+SO|9fSd-V|mUEi}IGxz^72i52^NW+)zQxt8U+S*Q+D89X(4InBQ`T=JIo zj``qusihGkVwg0@*<}>6myoiiGnpcIOw7I`H;w5lmi5D*N%OSxwZKOQE|euCCMEa~ zk!>r(;+3de=+8(fotG69PNh@FwyM1}CQt|6nam@K{Qs@~t-am64CFooFd@kdz-Rn7YhROK1wi z>(tymdf^a8V;o8sDN2A-Pu9RTpzpK`5%^vG4x%#H740)TBW0vc zo(=BF6UD7uQ$cYX_-sXIjpUvr;n6PRwGM%f#j-5$$r9-#U)7TDpLBn;h^{J+qr- zajVQc!!9ch-|ULzI2fe88?m<+`}XgxA+@t=E#mH=In%1my|n_)Q`Q&vDr7iKp4%qwJu%cg6JsL9Si4`J*<^ zpogqti5O}L{5r(rT{h1@K8K0((pyMK_w3u3g}g5+XZaJ~HLsd0v2)%PrM;UR3UhE0 zZ{NxEqwcL5?FZ!|LQpYF-yQuK6GrWLXYihBCKCvF6~ac6p7q*r)lQg{eL!21>}n` z8g8hits??W1x_6@>_Y>rLFH@W2FtD0#TnW9+~S1wjZB%jb5OTdXTH2*g#7kZ5VrWx zr=mN&j~sta`XT){P|`m+yw{JFrpt4h{0*atZ!Q|d%HTyu|NHC1?_Oh>M#wXra(t0X z8OpaZjW&99lk|j#cG7 zU70rR_}*#b8Y#i?W98;`n0IKb^f&;ArRbKI`Q7;8x1~8uV{G|yv}G3*8zXg;FFiOR 
zeCsu8qqVLt0U*Zc)|yUeknVnk$i6VJTG5)R^wt{z=bEKd|a_IE>*tcZQ94XpbSdpC^gHz$PkGinCf zb*r+CGgG|({>v?5n{9x(f10J+QG1?r+229+FN3OCe{tj1Rn>g0C~HQ!G?{MLaq;!B z66Jd{4|@6 zJ$l3}8dtjccJb@{qrIo62fMP;*>t0H$fSGIXJ-ILR-x3~dE`jH1z4*|qQ{2mTN=SB z7R6Fku6YzRM@*`zUg?Uiz2u2ES^SM0NUW48g=3E75Wd|PUy_4dskUM@5w5be$Ival} zpX+0Xx*=_`;v^GLu#M=Jwue0BLW0lt<|xs(ciW^5s|wUUl#2qq-wMC&cwTM>c4)2) z!PL6K>eoJGqBq6#AkRr201eWtQVsWF;#fbHr*>LhRb(Hy5Z8d`=Olz<8?{AK0ooR7 zdH45@pez^(_%ydH36`srFXEf$=v<1K`{g#LpREjY+r+w`#l)!mHQmdg4r1<5!Gf|V zZm;y+h&kZ5T4}?gF;X+>yNiLd)CX~^#!;-Ky)8AjqUiPoEO*~`IN$T~ZobuoKan!Q$^ePuHI zw?Pd9x`MKf;*~#f=9;B10of`7Dkm2wJW6=}LuZ?m1zVTDxWry6{P!OXVxP|=)A%z) zbMQ=aYq^H-H-pHtKA;Rc+1fxVd+LXPn3`}Xqgz>a%x1ie`k>4689xL@A@;M5KbHLD z&q*a$p*U=~GvUcC2EAH*kH#Q!z)R9Z)|kMm4b)#<$)oQ5isl{!`(;5z&wQoWZbM9H zIyP*$4i({j0%ayJUFzj-Z#+Fcs}|J1Sde-*#3ahnLg|}C!tHA~_<|jE+-4=X@louM zNG*nbzuu&#FfO37dy0d{#$MeR&7Yv#!E(`8hzTyfSr!{K1mwL-(nRrIiT4R7QsDzt zqsg?rOB9W;7lE^fH&?0Eo#fofnkO6{+Mk4>b%~Jl&umuZK=aJr@|!IB2z?;kDY84e zFJH4=H>Q|Yvoi8d6ytfBQG-Sz<$7xU+wwuxYlGf)vmZMlbRkVk>v#|-rSCDamlcox z0sV7=Pa5pD)x+J}bs8SmRGbvn&0IUMUz;h5#@1C2s_#Qp;dN@kubs(dqonZ*-gzLa zUy;VYK5{Nq{It}xOWik5@G}YIK)R~LY440#ObIre!5VNHhdp=ddaVwR%>#2*G*q5# za%POu2^3Y??@5#T171KIP|B48r04jT$2j{l(|59?cX|VCF-C^OO?=2Rn0J zqvU|q;O@e)sik{JoSK=`V!&PMZ#0u8)A}S)nO6WQ%C@5Q>9Tn7Vv2itu;`Fet_as(<6CsEs+)#mt7m}Rp@U)OfU45@}y(OBAtl!c!@`KSf&(BJj*|XN>)waix)SVQx;*>c-z(&veV@)KvPPD@xSLC{JR^`4z>4-F(&`D8RXKpnF&YV|ktd!=3>{SWd~MH=xN)c+ z<@IWZe5>FgFC{mpQofCEb*@Qxyl#hS=8(~>HoKW8*lHc#&D{H-U=wp2G=b{xL_dlv9M`xcqZpIOjrgg=SPR?pHKChCcVR_>N_iz&T6O-rPo&-<=ADMmC3(aP9gP^;Wgd`m9Ivy2JUC zOx#?EIIBAi96&Cgf8?fTVn-}dQ&GlNiFE|wRLS~1CSGZuPIaZw4W8LvSmd zGq!4XZXx(|fzUk&uD%N_xsXSYXwZ((7_ELdn5|qG@zJcNmbWk|K>R)9l)JQ%`oIoT zQTSsQ_2Lp@ckb+d;_;EmX1LTyva2mM586b3XR8Gwm!d(kvDJgv6)9A9_BBq@$MBj#CQfRpM<77e2aY1@DKCXa$Ui*X2jp zOkGLn$D~A;@)Mm-9l`PO4cf)?(j*7V-z7*tUw(t=KVDE6xzUjWf|#Pv-YIrdFD5os z{?PY|eVbBR#0(aFeHl^MQ~3B4(>{dLTm0OLqh+c|hK|<8RP_rb7)*D3w$ePH3*H8P zTYkLzhrm7n)cb5su$qhuNUFETm8VeVZkpVYXgAG!Yd7EI8rN@ 
zd)siiSwm%cqq=f~>dS-$+R)VO*w;kmBoPHWb)mg#HXRG*Y_!QyXot!$hPh0{kJR`> zxB2pMQY=e(F!#W@apg#)25?s?BRu}PuH<^dcpScUZ&sVmldXUF>zAFslR(nluhYiH zu$q%4K}wYxXVftbVf_lp|BAp13j(V*Gxk3y=+&9dKJ+8;AXoyqq)^-O7`ug9##McP zw}l96z}-1uXFZv1Z_b|%ij*?S8LOL~Q1jE{bf@rCGnh$vJb((h* zC7lkFWhgCpG*w6Km9>F1I?{`bjOx3mY9&dyzE=98DGzTUI@srPiqqQrX{Rb4Mv<;Q z+QZUmJ!|*keSg7*9tw`T>Hf4vd(dgQk&{Q$y9;KqW9QDDQDYk@6YX2YPiy#z$G0Gv znH=@;|JDPi~ zPK4e#sf}v=1y`fQUSi;+8-tEso?c$nDAb&unAi2nV?1S}pk?@Gi~wv-Z2cws#7;^d zUco~aA`00>i$xx(k7S{lUrK8tHhCEhKIyoQ7DPa^gxMoWZeIZDH2&7-{zc8Da@&`w z(%zd438%w;=;rL;=ll@6k? zK?4}H$A_%Jo%r=2fSj{TnO-NyJ?r_5SlnHpp>b-1r6|p~O-F8Wt7AI}xYd`Q&CPZ* zl*drp`dE4F6sxJcM>TLF8*A>7!r9HZ+4|S3b#Zg|ejnGsX)M|#5R;lG-_#}kbLOS< z1eb`OAzdS<1^ReF1m#m>mrC1g4Ot1=2qOFe@;9L_1SggK=bv*M)?$=*+&RQZsB}p(@ z9d3}HP4DTMAVF%|vL;IXRi5EqR+CkEw6ASGe{ayB%6&(tN&(eaMeTVy?xBA-^iWg` zVlSD|ar8$@Vsrah>gU96Qv>R$7dK6`DF~O`ff(jMpoD*^ZUp{L=d!l8tSHvDEZoiW zj%?x*`v!9>HGc#-tA8uFv6^i-%y81vG&a4I+lKx@1w+)22>vxrf&ew-7R{ysi#@|P z5TT0DjQRkk-)9P8_}ErbmLBe=|H#C35qZs({l>2lhHODHGeGizKqUN6iif_NB)o3Q zVgE=Mm6SUG9#`;E=}T#j$**kmeiPUYynN?-u0wmYZJO`9F?s;(;FS>^`wz~`${$*R{V62^2=*PimV+{b?jtZ$ZjUngF8 z^+1&|FsU|_(s`kccq`M)r9lykgZzV}sgx`ty{ia)jHvf`^)<4V1cq&EcWoy+eSrW- znPVmSKVlC`PL|^C(7YnQmH9UV-#qTn$C$R{voE_}Uj`8^NEX4EEniA~Jo!m``>{J9 zK&9rmMrb=9qK_DTef5bf7usj(h0nv?HbE4ZbL`u@;roDCk!2L*K4-siX!OiIFEdj{ zUI;aatkuf|V)w|BSQkIVYS5I1hK>zJJ|HivCoXkVONgzVt z8-J|MQeZa>&kK@3=Xr_ZDW+iyn{Ocib`Kt>c-Y92TXMQO=5n-EA8iUSO*sP|SsA2JAeOF8Fg3Sd-Mrg^nAm8srhu8|3pwa4?2A(S{^o5 zZI|g5A_aJr=1FQ|W0@o5<>Y7~TP&fiP%`j5h9Tob4}~#D3Lm)(k8wE^@>F?_P4&p; zoN;LwIQ?^Fs~d@Dfo=yqac#wzpyPvm$uBvj0=h`$EO(;#&7GIM!DI#SkXPS6Qr+AK z0Kbf|N_R5o@*TO8+@jKzZKhu2j;^C1>Vq8JPK1P?cj#w&p`-6esn94B*ssylUygyF zQ0E9}%)$q{*?|_r#3q^ASjt~n^%N65ksHi88>W7$ov+&dh_B4*_7=wM5Pm}tEME$o zo#5%=_g;KO2PsuPxa2xeNvFVI8)&~<)LnT@Jv5sS=B!OQ+v}i(Mg8lRvp~N`#1x_W znTt_7J+o9I+ubO&ctv%VH2Lgk+Pi%|h+XRkNS)RstB8$U&3_)Ry4m8F1`{1YhriZJ z%dzw&Vzr5>fs}j^RzaX)BXlR36(QBSy)p*$FU4}`rsc)SWe;R)(h7Mh%vsIK_S2f6!CQP(f 
z0YTPU1Rc!kH=-)J^XCoI-ET_~#@otqmv`QwusG)Pu4F|Ev&Byk!Bnw~G<(v%R0Y+9 zb_w*s87X^_GQXPeswy%qTH}xTK;`XX=tPr7y9HtN$I>=(BZJQGiUsMT>AhgqRaNOI zrfX!cJpif#eg;_`l>hczbv{kkud5p=jt8%ewwAncw#8Rstq>^%k(M+8Xtx!4-2Tq>3#=?i& z;;!d8N4(YQB$^8sIb@N%yE47CQ`oAY_6^c|q`)<~k#+^9nS3-`_6`R#3nAb#0|0ww zpC6xupa4aTT;}hkBMc&|LAYv3wUVUe)wdggNwM25cB#IX3?uK;D zbLqiT3D27c0CDl!-^9yBXf;P+{Xwjv5yTlWYMhCK-9f3L*wmQ}G(@jxVQZ=2#4eQ0g*4Z)63Qh(2r>W*&Zw>aVzYjc%JNr$fkEhqF0-enq#ph8 zx7FvUvZAC7pZ4ydbK)0YC<#y4zy5)Q9_Oh~X;9R66?`ARz7e-wjJ4=gw$RUYwI6#Q z_BLbv&`yP6`Zg!naiU7JZ8zS0inGo`C*@F@VxTTUYlot^ZQ}1XgS9TpgG$eRAI!!__Aen>t5*B8Jjeacm(>rHr;l`(|6!<{v#BE|ueXQ`ckPN+Qgr ziILV9$-TPi*-;aE$){n6fS?#ASPC(kCV7m)1xjGtrXM;`vNrNqoc{V`?R9Hug(%MR zO(0I06qkQ51EQP!kb_3Z+pI7WAmEr{=ovWJ#EO+UPp&g&E zpSc$HM>ZY1UFwzbXB!5OS%bu;yOt&X?2ode;K6_P)^9qX!o9yx@z96~cb{qS=Ff#} z-+2hSp6$8HFKI53S*s6}p@FE>>c~6&CW3*Eu5rQ(CVi66~_N5qYS*VB5_Mla!Z;c*%dHPTP0XB{Q0zP{(@1%5K+3T+C)xu z>-Aw9tk(aQ3L#JoaBt+f*}i$SVbI&ZThLEDY&o+W`cEESac%Yzk?T16%@Vlm8i3}` zP<>k6`&Y=-LI=fUuUhMO<=*&8VN)8K2Etbb4h4zh>T}7^=IEc z(CypD3xX0mj@qn|7>r#r1vCuHVG5puccSmVi`nD!Pw{B=8dW@df6K2Tp9~y2+=B^hK<$VA8tj7+f;2hQQZ6ezz{DR33geepsf@9wg@Yz~OLrODTBkiSDha`Xhf3^xj->71!Sx_F*eah>Z zxf+326rs8Sgd~BEe{7`wL}{)6iL@3QH0+%$WEWFQ`|5xAR7LXJ--6rQ&~2?vZ!lp% zlx#w^)8k?KztR(3c%lIxGqFahEzb*Qx63!>*#6NXsi~5vLp|+I`DD`f2)RmX`ffe+ zw?ArgoM~pt%`S>I=HL~0_FitPh-zTTzu9jYQ#tk3?uEy|eem#1HzatobuCCEhR zG62_swKoe+=R5S$M+cUl(*%I8$kb)mhYoBQ?07qw(Xosg==;n=b8D|mo88L(z~J~j z#qXXhd4C`5R*WOFd-A4>xp z(ZX@U)mE~5@;1GOSILT&hyUz=7$SvNg7MB@SuTC?rW}=~0}vtT5YC2`{o{iP7 z_g9a^P-BAz1^#6|@YdH}PxhZ_UIAovsz!fR?SIz4HE#0V)ykV<^YnN5rXgK7&rQ+l zOW+Q(Myby+7ziE8=wn$tEIm;)H9P)fK;_;IDr&NV=P9qg3-q0RKc0-VTtYbiZAIm` z;OxJZ-2Hkr*wy$5#PoL*j-R4Ems zpgFW{8mqs5`BECo#+mx}Av6HFqqWmq`ngflgy@fzs5|n2M)fO{Jx;qQ++7SrlNIA( zleX1Lmp_`b4g#ps`Kj-?R`$|qiV$T@v>p@km>=TvJb7NjX9r>yo^O{5ePi-dybVyPy`lt;eBg#X(NB%D@UpHE^XF=a4NS(Aa%0 z=v>mUu?eP`L=>d!{eRSNT~4!B3+BX-wGX#EjMuMZ#jZw$34pAe1YlQR`#S=9zW?|C z!JPl!CZzxTL2e43k52Z2@yO(@P_z`PSqySR+#S8;&yu8586~D6>PwHOi21jOj;VE= 
zjnTo5tP1?%;hC3m1KeElJAC*`eNAQ4^bUCStfTMcUg#$N?w#q-`E=yTY0>FrSUo@cW%a?W?@UFq0gHn1Oq|LKGafhJ;*O|l}_}&8ZGlQJ{*RblpD=DDV6SxPGLRJyNIE) zfYjRed0*5keZ3?Qm57cJr6;lP8s9oT)FpopkYmLdsf7i<#|^^s^2YZ!CU>v;z}4H4 zLZ47@qL{KYqXyh3V!%-bT_>i0`fG*zang$|}8GRI( zq=y#F^M>W0@84fqjbHaecxbd->XfH}_`Dbw;2n9jcX^tqi=;m3A7J_ZuYdgqZ@l7$ z_nBtnBT8FZe~)Tmc6cSw#*#h0?$oW|UdCE!5dP2a@&Gd&-F*JnI4G4tll zqx^Mw>hBU^1s>A}7Kvxfedy;w}Kzj@c6*ymXnMU zZ8+>;s;K+)Xq4&VK$i%SGMaH;RN0dHK3So1mxZ+B9NZ0JOToZ?%@(YB2a3w14;F<1 zuaa-ex)69(M9@*~cWwW$m1hdmI~XU#Y`&Y0`Tjl-{w^^6g0aQ30Y5-Ml=L?GGgIYb zV_m%O{1<|sYx+Tu!cVf)Qbzr1O0MZ4Rn!|Y!u-@Bt!*|RepA!=gQn(xl$Pz^-IEIW z%AgOGZQr3HgZ#|on)x>&ce`CMf9yX#>^&H-#HOlMoy<->*@)KZK5H+NJjOB) zFNp%{XNJzpX5l^H&km^q7uTpfFD&{1khMuO(>@f)9{Iwh+0}trmBA!u3!@;p5XNk@ z-|fW(ZlOVV4Eb=FTdC|G>>Yji)6%BXUJd?|W8TzPaG6SZT!!zLJSR|>)CB?!;tf|+ zn!R~YP+%z9T${Jg&eUFwJRODty&Kq%*UxA5@1W$qy*@!*Yak~AnF?_I7W`}KwzvsZ zbzcNTPVsXQqB?Tw(w?$Q@9+5Vivj8`C0$u)Q&3Irpm#H|Sb}^r1D!kSHdzbj^@a=l ziYPI@JINQLFru1yav>@$dqCxc{Pz({X|K{98-Ui$KA?DvY_#YnH|WPV9rkr=ndBBiE@4|>eTxt#i@~u z8widnjS2;07Q&(RGSflfJBHDk-qfihqb+-3gENcT=(Id2#3TqaVe3Uo5MKOYIqW?y zS9nqQM^g8F5HDhOc+2Z40nd_)nbaC2)gKJRl~(8p)K{#F6eH8g2DPwY>&fTeED^SL zbDwO>ZlW~)`9nO(Jt>b0!_sQRx{}tU@T{U-ob`d20=#4Ox=Z>O@cBA+k=A-tkk>NK`|n?zFx#0iBcm3z;+xK0QQG$W(wX{LQ>f0g~iJp~QR zx+3~+GMfy?Y+}wpbZ7AWr60cS{)cj1UFq>I!qGYA+#BvHV?z!6ZA!4YNcz2AUc{-$mWoTDP;#Em=R!GjhnEk>%` z9@tBMd6UE*r746yW#mht&8=`M5!FIq@5JT~eQT+-VtyIyd%WBx=EFy5xe=G!h;wm; zD~T=h%)%fKS?}wYAiQ7}VrulzU_H+CE;<|Ik^H&AM1i^!Y;X8=d{~vTvQ7qbj!c!V zAg3gp(zxNZbHGoaKPP$a`snC|3|Cn@N&?S&D&j)*KCz~mpN~9wFXr%Y_%2XE=*UQk z!}|t%3ScdY^&O(skp{VEbTq(gN113L`-*Fp*$e*$7~>L7l#_@_m@Xen4SD ze*ip=-eKGfyA~8IHLoH22(%&I=m9gQ>4hCMo}Y~n0uF>F1h9lZc-usGX^CCz1%y^G z%eS$=b*o}LPjf35ot>Mh>y?kbqsu$k3UB-F>IC!PYv*hlPvm) zWu6?1#_RVobvsYW#ShS97oj zpxci8N1tMISabb(p282G$=+o@injfFYF?^deG( zU%t+Ar>*d*t@@+3j3M>&`Ib1PAU#E~$%6IzBEs7AR?r7ati^mv@oxIW!9{f~@Du8w z%Y1!dNvx_qHi z;PY%#darB)p&?y2X&2JKmgh zQiN3MQaEc)iZHxp4R>5zFCp1ODgxlSm2YctE$OWd-;PI416o6gKv^5ZrR0}MmeOrJ 
z`|PeIbR3+h_=T|%YylD(`E>@^$PzI0#fS!+C<(-I$bVGgkvmKHh8EuIX`}P5+`6@*D4v4e>5~}pede`%@<+#V)T)HV z*Ws^xSvodqt@0~NnopSMtyo~gvCmBRFnt=)l8=u8@)go-c}5&P=v0O!YQ>n_6_}Wj z73S>%Etn{J3G3x~;;{m&j`sFkn(B^vz-jRC?VIGR52WrMExaw}cq{b6OP*kri%Vmj z`0V7sE2ipzmxYR4g~#@6yvrtySceG`2C*3bqDC{+7d;ew_@0;Nsco3Ah+Zy{X2dga z>%0q;R|Hl?GUyx#`$(}AGIanLi>(mLX|ZH93`@0&tqbZb96N%NM|KURH@v_I^T=YX zAa9wYv~yNFEy;k>r{Q~{?8)Cj!x)`Q&|j>Yk6D~9tOVl2Gf5MUgLkhJNgpP9z|1f( zuZ4)2eBzCd2HXjV9jDa4EJlmKC0r;_eWfhI(1yW;us3NG(knqPG4_rQ^Fu2 z8rR{D236-l$@Wmp2|;HfRB7@8zff^<9wF3AkdZIyk_0M7u? zUBUER@0XAYQAjQX(*d}@C&4CZ5Wn>_W_D6Jj2GR;~um8rh_PFkuAMEVy5ii(Hf&W z>Lgfp1d|?Y8!{0Flx$kcvlCA`?g?c$cJ&&2(kpd}g3p zDo*e7=J(~~|9rF)DuKg5L!6?~QVX+CEKswM zkJD-W7x&&dp|3uvm16uP0ws7}GF;sLPzOvHSCv7;ogv16)@AU^Kex*(e{?@zw3LY52 zLSMX}G46tjX>p*o)>(xs=BgpESf~eHz+?aus(0%b4uYf&}1xBa%trc^jF51*BlAT zRnC%kAmK0_f@ZyaTqA-Z%(Kptprsm$-*VCyAf@tmQ+9myU^J;^$_Fh2J%q7-af+-g zHQNVVgd)&c4c2}BuF-~PO)B&*K;F&sK9GpxU0rV$v<=qcFP31qLLF%Qad=5<|m zuxUs!r4nf*s-R-69oO$oJpv3IPkyZjKcjxkx6gl;u9d0cR;#h;{Io&k1fbe!ANLT; zG$G9Qp%2X9nVyRBaff10JaN#*msOa4OXuIm6+iEe6&o4#7e!PfzK3*6T??#6D*nbL z9WZi)9KIEVgI7b+v`ySIyjA#srOujKa$1DkY~WXUjp$8PA;4m;fsSYtjO)}UuVt6V|CJs}PX)k% zdfHHF)y>?{J-x{jBE5~KUj3$L0pyRVk@e*v zs(5&rK?6?P;n>TjVp_zYA9#Iv3`b%Rt8XW&18v;ahJ$07vg0AZ#fBWby`%(o#rJoH zEl<8M$Hllx73Vz;Ovc79cdPe%lOGWH)~+^d*w?isLwzM-XlN}E!6uQy-dR)Sws_`_Sw^XD7&xp zVD+fj^+uXQ%{CFsn$qt8w7CX8urF4??$Sn5M5812Fv-dP6XQUbHYCR#%x=h8V^Wy7 zVasDg8Aw-&l-zIVGEKs&4w5(j;VN)e#A65V%#~a`O;-^7=26&@>T}MS4Kf=jqv)f1 zq@y(907Ssk-+l6?yaTQ&%N23Ao|SG&vQdp{96Hii$3z>PDzhaEe+*0kc``JkuD7Wz z!z0dGTdva499Xmc+DhJbBf-xBOyrMTOO|UB-FQ=cAA;R*%^3osxm<81W z4DUj1Dp=CYIE_j-N<2?u9;R(k>A>0*J;=kRQ*iLHVHI7ZmyzX?$Y`hnl&hfs#~J}0 ziHvvyrY42|GM=Kvs^L?lJ)1g}ypCX!j!(H4gi$M9Xlid$oHM&z>ZvErit+n6Jq5!- zw(>2TR1S@-TIoUsT;3Q~j1BzWey{c2ir^vBF1He(%zhN3!FME6TxjmWK0GT-jH#K2 z80b}Xvk$=_i3}G;e3~pAhmS{B+!d*4aCGF54i@4GNn$LbP6U`H+eGZsG?4S*SQ3eh zH(fl`El#j1(Zd@?$Yjj*lyg(f4Ng(Qwaozo7%+3GY7*cK+)U}(a43eWVbpCu&cL`P 
z9ItX?9Nb*oK>iwki}AA?~^}0~c-fy-&EDS+Io7Dfy5pe~?s`d}@OFh_rX;7vmGSE?7-Cw%_U3M2# zMVd)y9%f`w7uV|0FfVZc_M4B1JE)t-H{4e7oQe8HO3WRgL0E)7Z-B)-Rt#KL;tlZ@-obp~=(j_0cl@G|r2ts^JA zK42l>&INqBS_V^Vv=SWI6|9mp(65YOGQ3u2vR0Tk!u*C^bjg@U^nwz$V1AH4vQhNn zqT8NYk zBShd|!LpvK*6_3XlJE%c;RGS$RNhk;!7>8|F$So59`m_$Q_(O)5FumhN26715l!)= zQlE1(P1>}p^mR^-;ew+h4vL~j`?zTGtHg{9NQ~+E)X*K(TU#a`oX@5XZ??Y8$tw=W zw!+TDCa1qFy1&f4j-4H`E;(&S8K6dbL;R=8zVqj?WiA>uZEtmY+T1)~$pCCYv@@P- z8JMP``P?|2G=?yP2Y&{dz>yi9 zXt*i7_s4G^#41~bL>T`y6XV@Bv%7?b;0vn&dHrs=B4Vcf)ZNRFMQK>pmHUVd99HLk zDaJ}L%QhL=Y|1TQ4L@K{K}KH_6(wu27x%8?ECg82q6JZx);nBhsO%wv`yY@_jSScQ z-*yGwWSojp-%jkSqOOE=j<3*=7rha%38o~RMtYPKkkgv$VVwO@+;y#`XZlyQi)bx@-TsPi% z6+d*@SPv);tdXo6<; literal 0 HcmV?d00001 diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/Operating_Profit_Transportation_Sector.png b/sdk/ai/azure-ai-assistants/samples/async_samples/Operating_Profit_Transportation_Sector.png new file mode 100644 index 0000000000000000000000000000000000000000..a5ca50cfd76f12f7fdc80fba683701c5ade1e914 GIT binary patch literal 152440 zcmeFZXH=Ehwk?WUYLy~tnI)M)i3*Bj6$6L@k~1O-f+WcqL%C2!c)4 z%hS=VuArm)CGpo)_?OUqm3jC>(CUn;m4cbRm94JDHM)zsR_4ZLR>p=`cG+CBurxF? 
z<>Nla&BwXxx|Nl=r4SE~$v@w~ZDwJ>Q$CqDi5L0JTvE-Fj*jI6`EQwNg|!&nGCDe` z(`?KUel7ml?b_{D33SA_&0zu_ILu=}@`FW;kdDRH;t#?x`|q#i!_G(Hzcs)9I%@N>=>0%Vl%8r^ zj(xJvp4)1gc}{As^OHWuOq$+ExH0s9?=C(sDVZp$9jkh{)R(U?ttCc<$D}o@HciA~ z;<{T-sg&4E-`>(d;rcX@>lM31ryBIMAF&&Bj;xr=|KpEeXzlqr8#ivuvgm$zMNjVq zn=H%9Uw({$kFJErg%CtD+wRyvaR~;Rfw{R$5*nj@H+|xa0a+nU%(G`7O zI;sih&!4ZlvvMuJ>w=R>bK1p&2M_j}4h{~+4AjQNSP#}2wPvMS^p-iV-Mg}+p!T&X zS)IB zC=Aa}omnsT;{>faJwGaqX4WJ*`PH2#S~gfr&D5;&^%;l7`Dv5lDS`Q`2M!;;B<;gp z%j!B85-4n||3gs5i!3AyOT6Q7=DEp%T7_JP$s^ABMdv+!&dze^#X`E$1xID3%Tqk%O2Hbp#kkzx_n(hcOV%G~cpG~DIs4PNE|a#L#_mPK%a>n$ z?S(hk+;!hw-YiK;g-;Le{~Cw|d+omh&^#%%Tp8=R8?zTQV)XyShTL zBH6U?B821qef#?Gk4^cm`3pa~);rHn)+73#IZO`+#x34Fefp2QnE?*$L0z}AOrTJh z)UyNq^*XLfg&xec2|D5{SFXJFvmySWySsv>W&kY*ks4=~_p>L>o=4`wg&Kq^bKOY8 zTM0JVAVnmYoDH>csjiDI&Of_-nsOb}i)WbxEl!7?^Q`;$VAEs?UwNeC^suQTH;J|z z_3=t~SFLNQ3SuL%ij}o?u)K7ZTRSK6=#9GTrG7_u9=`HNo9m*pbzjAMiAy~foVG5f z+vU8uVj0~#E?ZjDQD+<`^~B33vzmYW_`%N3E{T<^#Wparh}#Yai2dkT5vQ3c&+76c zq}kE4vrawFDYNsn%GlR}gRj~@#cO4Ui!c0;z+==_gh(hw$?rFF`=*Q}B0-@)yt(}D z-Mh6p_LleD+!(pFdrSSN!#%7=J5EZhPeh)SJ7K$NHpp6&|5!vrgX%>;ektUnn)Tvy z9p1a0s|TG2K3}dRbsNDw|Atl~k#$CfQy z_9aj_JzhK4ue1Gl@<#sQ)BnC3FP=PkrJ`+Lz`NQ!I?H-6`i%S5Xr25#JlY%0OpAkp zf@;0x!S$aWGp1gz++)&^_&%{5@6HNa@ZrNM zZ!QfhtT$Ef(xpo~c`x5OAQNDBu5@+wwc@RTBKB2jn%MaHnZblZni+}PLYL?4-M8=b zNY1!NadWx9fNZu6Es3k$x?T&x8&f*I{XKlDN(k^U6s)*W-oDrQpoK>vrjH9q;@7`2h>Z-uw zH$RLvJl%Va|MEM!i!XWKhqZN7N66Nz=s0&Lig9vYz%Qtxt`|3cSuJ|tz}dyQanG$h z+OY-h+ZZWN#4LLrX*-RbdCVjzj{q+p=x@rIsFGgu+iy2jgB{wM(@d&EB;4pdQ3hlZ za=t%A-nnWQ{N>BI4Gso5E+&FlB%a&}ui{x1oOfy=-_Gw&3v)jQsv|s6GXlg|SXhF> zEPaKnqjw8LE5(GLV|A|So~Kf&T5WY(sBY`)zkPnjT<3F8;Vhn0zU$LtrPrr_Lscm@ zN*$;QlWs~jSbE&W`4LwNqrk$?URIRRi+Y6~*3tp9Q?%-K*TpzD1Y~&g_dmXUyE-}2 z-X3D7onfw+#bm*DvLcmkAryL_Vd8WmFXH8*qTn38TC~A=y45y_ zVa&TR&pBH+(|LA^ds{gY^@E*Mx1-m-Y(|;nYc32Dcg+h)92y+Fp;{KZ-HwmsqMKcx`g5n- zEFGJpD(LdSV-tdLUv z1*M=i@=_>2AYgM=tjGw?{AKo6t5>bcqS+4)g+EIv_b)1vG-*nanr?T^ubyAKjXNMr 
z6{p#l{gs=WTlW0XA|(J-8}^{y&(F@`S?a1~#kufkRK2b4XI8G+EEjtAaZTijpftD zRx94o3a-2*;Ay7Qjg_hAW6+Un552s+l3LT|nR3cSrvo~-M-`l~JqQe@YdDa6&9Ye9;m5&vRh&TrTBAsd3mdD(?TKHN$TPcPw|BcoX_f3 zfjxUp-Ce!@7(LerFe`GEBH1qK>Q!3~DN5?N&ZkoQ){u<%?)D0^Lxo2a(oMDIv5afh ztvebUj1ve9(fhVb-)H_J+qEyxZ}yD^)Mmz+t!_>?tH+L+srHVHBmiA$lZTcVYw;Z& zHGj+^+SEC+Wyg-F#kpSb#)lmH_I(@;5V81nzo$GnI4(`(vr($im*)p5)(E5ugvax+ zOP>Lw!rO8kZvZ!4cnXF{Pw?0+jXitze9-ksUA}V7b?ovX#Yj2trt_`k3$MrUR<3SW z-tjI@g~T=>7Q}JN-y3G@GGcyA~_k zND8VNN6Haw*-#ZB8*H5JBWyd&>BiyIM8N8-Mhl}H6mGPVj=SxrCwH2 zl9w~0YDq1T$ z?VLxK46~4xxoSF!Z$z_cOU5Ne0aO0iY-CaZiwI-Z#klk~i|xMGzJL2n3F#Jn-xx{P zSZX4B)25n4lwyLmDD`m?3cET|kie;l`CtE9&TL>=%G0d`k%G_hX}q<6IDg~Pi4(V?6{Eg}U8kda7kFO~K=4*!pt!5H`!=3q zu`!oIDd0}l3bZuiMtP*(Y+ad26wbj~6`kBDmR~?|@C&o8Ci*UEf$`dp_b7FKxTlos zV4XgF;W*eD!R&aYL#VsAZ{I#7_v#i7`B$Ac+Je$sH=w%bBL+b(u*?{66>#J z=l~Uau!u%?6?$@jao@ao6BJtx85OiCx=D`DPm6Gx&0v-sXeyr>9#iJF{krz}`Jp|9_HcUhnY5(FxIm|~{IH6O}3>jSjJ zyrQh}-trs0Wr0F?ufK2Fw1tT&mQZbQEVZ3BlQuVJCJbp!snWn(=cS}b+5|fdL+Z@7 z?i)ZkAwQaBH};xaE9d>c*bo*){!(OV5Rx&B51oj;)WOc0(Xm{%Jo~kCVt1HOPUPLQ2PY3XfQGI)b zBKW@AQ8tP7_a@{+GmxK)%;F%gNXWrrKfi5i&I0lVnbC2WYBVs5rak)O-o1lnZ8;Ag zKTeHHUR<>IM_xl3y@Uh3zxCzFJOVdW^q4^zQ@W@@jtP=Dr8~!dJlb_}L7-Ouv)BH~ zq2zL-mW-Qe))6v+RS-dv`3vq?>8-hgJUQs2YV)e}Qv-vBhs^S~ z5->i}mW$%~91HLUgbR8=Q=UYYW6EnBCZqBoQS#Sj-om4;31*ls>X~y8 z)Ez;sc=hTVeBBx>4i!!3-r8--k9Qsw@s@6!k6%l7OOyoryRNOAs+YIFmh)n9ox7Qy z(i)|}Ch?Se|Nb*;w{jMZ-nmLJOmUQ`9ux#54{<}JLXbAghH@+(=#(Oe0FL^kaiP=X zfYQZ4AtjXK8XTpn*7(y8ejlg^xeID}(1(Y&#Fy_Js!AGHx?%NMR_BSQ`wa2^2yIWX zp{ctqo_l+xAV6F3xK-a1Z%(z>0BzUt+6=Q{-MzgLvm^OBq+A%)M!ztO#ZO<#qBbzF zh%}h9OjQM`sQ1sCRgVKd@8FKa!e}5m>%rIXe4H19MAVSxY_iNC(oB3<%M%Ggh|@pF z8l$JDcht1yIHag^m6eq;={VO99j1l|2A9gJe8x^w{8iL(N)b<)>@=kD-sRy+gWe2a zIf?Sm$OnyQpB}&Y^^Z>;%rOL(p1*LxszD%h`3&OvsP%v%A>Z{0I<{FC5wt8$qbG@w zf#)h8)jaY=>EQfsiRvKPXzg5w9oorfJy~c^(x=M1QhLe){dGAD9&V|Iuv6tM5;Iip zT=DAUjs+Uumr6X$^@+Ypb?6(_{u}1z=4zl_O`QAitzMLK7-9DSX0ouBGUK zrID}#SQK?OH+lLE&ck)oAc{3GZ}0~y)okVms7y_wE|p&477j&W=t?UohLsZm{PbDL 
z{xVe_b)$Wuo;%m{3EK=+e168Jgv_k5HI!|rHM>4!+6qz*bVrM76WgJNOPdZ{;B}hv z6}C0?Ufbt20YrYIE>B22FoM0**Xfw(Urn zRsNed`{J~7!O|v|Xa(=pMcN)z6z7=<0tBnLf~2dUei2rZJNAjOUz_PXBFK@T9E6Tw z>(@-Hu2U5^5^}?N@_u~iUeu7$U7RaAZ#JL5Fe5E1I{ADnw`O08T~H7Nr`*vGYaMz6 zZBs0J%dFgcs~$7z_mudCqS)?mxbdD3OgnP8HG660^K1_6-){&mOn5XTH%IrqR>3G* z&f}#54iI_x0;GjemlVP;R9cpcM%=%DpOe)9kq9>EzG^*7EY{@OkDVJgZ3+b+Nv$u=d`=p>bSxYycdARu!)|h?}_33$B`M2G>cRTd>8qg2oU0=`G zP4yqraR*#aLGjhFnL|1Ukd-7l0Fs?oXDKA&gH1`-+!0eTw^yvzA3SOa+0J3;Z6UwK zH_3S%SYMa*qW0h4gv$g*qi`7fRBe3m{P`Pzc2$xXp)Uq?+8;P@zzC(*VXR==B$Px{ z_Jwedz`&6#I=ZVA(l!~51Utre6eq?b%b79`2+IbEFyy|6>h&V*=YmYY@elw;LHd<1 zQBfkOgX(e0L<^buUd*G#{TlQcfp8w;n*4lLkTF>m61_-%q*-7 zUGA8sNE#Gr28J?o$@Y}oH$xET5AEBxuQjM1hog2dAwNiW86O*psuHsNI?+G&e{OGY zH{>}ZrJk(+n4ni5&$o`#rpV&501gbEYMfh9-O4)`k07ag(iaR$O;JB)H6sdzpohp|-T!jf0tm{@|5UPH zbI`Y;@J+MzT(iBE=oXmGP8-cG+T)-w2dM~9#mWIgy=necL?EiDkZZOeyTre`?dy=) zRH;CFUteV?i}(~ETy3&}EJTX(e%tp@0^ZC9E#@!I%M#jd&;Kj*1>kQP?DF~P@aCGX zpFiwIj<`Z$+XeFu(GllaaQT~k@5<>>+;$m=wVt`vH z?m*5Po6yM8FAIp8pPg>5!XQVpO^2?>0!*goBh^!*gPf<-Aj9i`k0hzb5s-;A^tHm> z2&J6pk6E@O!y8nZG+&sOX!+Uv@&E^G04s0NkYL{V!8a(G59NkvUVHmLtvd<+$|s_) zBPYp`%P5+24F^2U-WM|WBmg7n1IeRhz04N=dE8zcVw(k?sir*|}*_RnXC2fm><-Tb4}N z*=B`F`v}Dr5Abu5cQ7YT^aRudVdH545F7~Iv~I*o40>ax91e;x$`|oCM<&dfQ9(X= zvW|F9g)yFynPuc9a1|9O3f(dys8Khwv_UoybQ3HO%w;14P1irAfa>2pG(H0G!RK@_ ze}}X;=S@{xK7|dL&o~4EB?TyXtmsM5ei8))VpHbiCyE9a?Z(@EO6ZkJhfZAmq!xr% zIsrsmVxd708IcAOG*BNe;kdNUv=n&&c*cM*5t41viU(h6i=a3eHK)Zj=Ubi^Xe(Sz zzy0!$GQGE|ME**>Q7;3qlIUzE2^|52c@mlI`ujU8)0U3_0ZcR+1oz{x7fsj^oi)So zeSN(O1PWyYrXnCyvw=7=#r^xg(i)S`p&L~JMi)3WH1*LS1nNeM?m+CGGY?5y#;EGe z1CyDTAdt{@&lAY@=tGk2i1vK1_+sV#jeClpP_nb3-gaQV@>nwo{P zO30Ut<40_aJPAQ?t5AV9I|(4lCsv`aCVut?Gjd~cmoWq`CVx38&E%h@mOtq6hlsLPxk8hW3m8lY59)+opx1gX@`N8~h6{97R=huqQz z08C%!@3$rDUcyAr*7Xbw%AnO&s5LP}bhtyC=&$DEdgV%rk23`RHv3*?H-BFI4ZoKp zQqmeGiuvo;uZ=VPs<|{XDmw23a#)@V7P3BH^>}R>X=ODOLInhiMY8hOoky^sg~4G_^%+sI)b2&rO1{zBIG z-tv(&yQ&4+5YNu+t`WbPomGjt63##$47qV=a4)q>0ZxgIM8C~y5p6=EQ&XRAy#hAb 
z<~XwBVJcw-gmy=qDMrakg~9?2s~U~7qk5m{r3AQ>*0YGHCL5F=qvta{<~TKUb9+_Z z1>Ymlcr)HjQnO4i-k{wo%te=ipG4W2xPCYhZ<;hE;#PNfFbk(ROd5%fzFT2xn2%xu z9{v_$oUSS>Y7O3kI+j!k)k7)ONU0S-GYeTqG5o@ggsJ6oNJUI_g4yqPPl~d?$QDoV zX20}7SLXZBh4zkz3K25>$SQ|p*8=}&u(WW_`pbOy@F5`26LeY*IY*#{9dx6mIqeq9 zrcGVEAFAViXkQI(RvY`cN%lH40wKq#*f)%#kJ&Ezy+M!WXtPOHu{2Na8|W6*iNnZ3 zeuwoapfDk5hm%~MRv(JT7o3;z+Cp|E#Eu1=tz2xjq<=aLK~Feq^q)FaSbDol^VFP9 z($xoW5p>e`M(BVHQu!rdR7lJ%Yl2Y#`q6Wu!9c_tK*949bIzjL41?7`U~%);7(^Y! z+CY$H-nlcnkj3!=>Ri3+!Z<64Tgg-d8gUctiwi0Q{TCPJ`Thwnceg+_I~PU^^Zyc9 zut7@3@kD^IEw{8u4ayAggem2oJG%eZzZE^DboPRJPom;oYnU4Cq^{fcTeemIYv?Gg zhGQaeI0k<=4NY1}QehDA-w64hWbJOoHS(!w6;ywjO5hsA?EIszF%vRnvj4=-m<@dwA4Aw!Wr14f=mo=*~$Hef}#S?_U_-A8{N^@vH|^ ze_|eRA9wl~`9yvzgN$1G5(H~JC znjsf*is4>9#7_NwJKxON;cU`Tre@&@Gex=|afkt2MTDA43iRw0w;O%svN&(+{QWZ< zTKo|;DTWH@j_GV-lgWuXrES9`WK|UM1UqHwcFF=)8EQ+}@IuZZK^OhffnFsQV@R?D z7QQ?ZX*<#;*hATYgO20)!Zbl)wzoRzTCpa}f>1=}mUj1mb=}T<_TI4Nko25rIy2EP zQ{Jt71KFP{LNg?CDs<~=WRcnkS=OT_O^V=fwK(sgj~NB@e_D^h(t&P>w@G7pxhz_O zw*9m6fZL=EL=~E?Kj66c>?7Wfs&6DW5+K+;V)g$0`+kpKdSs?T1O9Mx8^m!c9o1jy z>gwu@{vZit`awk3^RTFfH$$%VE9rLekq-Z@bB*P>?ET>*bGBik289bX;+(l3gN>6dYv+#@l(P-hMb%V|l;?{2}4snobwF=nQI%Ah_5I}MNXSQG~Lk%-R z`3TnHwY5f5ipjw`A_AL8T8_nq0a%RARWD#S>3=NHW^UM?n8-WXUtO4JiiE`=G5@t^ z0&UKIC+gJl<;#f_gk_4NGhGcyGcANSCBz=eF29xvOYz|xYT!;|Ip{OAgh=EzCFVME_ruyZQFN2=nRO#|@P^-DPr3Fl7 zq>DEdyhE{KTqs|?p2bQ?(b?wSAAc-`M5cfxRBNQ;2%=G-g`z^JU1*uz7o3xtY+Ezn z*X?!bX6W@heZSBH8t=D${pP4je?o`uIR--G&-@ zFxNP`ay7i9Z$i>~db?d_Pk);eTX&oUk}HlJ}iq!9jy`dGm$U~5IoynU^j|J2@HS9<`7ORA;_;70II zfY2BiipwVD_rF1D=`V}nZ0%dt8mq=fT_%vRklMqR*93&p`p&>KV<8Q_fje{7f zb})=PL%ruxOZu2L)YbJY&R8{gAo+9;P}Rc_ z-kclt+z~6SX0O;n14y#s?Eh3yPW&dv3Y+0J*s^V#Pv_{ns8gp-wFm`}h9*!}I253Q zPoEBniM3VC3Me(XcY>8v0|)Uj$p}YCHgkO3V(8cZ^Nw0qkCdNkK`g5V zh!~!5%H4nXaAjvt)kL^RV*3G;WzMKOLZJHIrngFe3vd>H%ewnL^U!z+b2S~o#>mhq zlgDJh*I>{)3gf50X*2V~tz5*jqLPq1F`+=`1cTh)h`e0Rna~> z5$DtO-B8k0&<;srig11_l3RIvD8h;e$k0zYhFUVK69a0?JG~}wU>U`_8q$3?+Ah^k 
zuBC=iXuxFK&1t!?vL?&0jDRn-Az`#>DUB1h$E<9LOPE#6ST!9)#(15EUz%GLj)3*p z*E2q4mCV~VBhLA4hc$7WPeV+%ROc}Cb}LNb8~WPcR$DrNZ$WVoc3 zRRJ8@Id8v(&wc1w&??q@!>??F1f~An3zJE9x_9#Yg%MT)HZTx0!B3k(_T8lr+ zU6V9RQbwej!H<2k$3YzR*k3um-o739562vSTEjGJ@%f2oz(C#JSNw9(=o|!QOq;aw zxPQL_%54fnOC+U(t-22dOj{!1Njgc~6U}<(Ns|r&*d-|MQnuf=83ad(94=3rw`|#z z+oC{HD{+}o=yzP%zP!~Rdah~4rK0k3o@#%YeM`15q)eG05kn0DpovLZbN$|%4qr&! zOI2rHs(V#k@MrpHJ4l)3PI{ZP;{4XcX$ZVHsL&JMY`YD(h=>QrW=dahs91HpTSn~E z$i1n`+IH+oNP6f3)u zfxacxJRH=k^!wX@2M-?H?91#cDP<_01qCqtcz>fzISgP&l>2|SS)DRnV>)#}v-H@Gj&?infV z5ngC76^p1UJL(`jT65#RRhQ`gxE*}@SNm1_Z-zyT()6MJFbw|w`|q0}zCbVOhZdU9 zdDa(Yk$v~>lRD0mFLb%<9swiEpi=P0T86R3S%q#_g+oo^_D7(hZQ6DxiO)Sw)e3&F zc}TYoK-;YjOa?$9QC|C{2LS)Qt-2_lfc<^YA^ap&X5({39#C-J?>Pxj|RH(7Hg z-YVka$rW@(YNo-mzVMx-eH$uXfI`;lKyN_1)?J*Pti8(jEl-tT_K(ibum6alind+w zd`A3i&t%1JN*^AxId~^i{rKlYvu$S@KWF+kmNP@-P%#6p2Wn6#8&! zgu}U<*@7A{PJgchi)Ub{>O6a87JqA9&{jj6QxtL##TvTS=$FMDc<4w6j2|e>t-b}# z7OE31e!HkcO7-*`Bs$Vo^n-DWxIV#SKXsR%nhI76e9UddClf42tKe&39zQobI~#NL zvlr=gV?%q*_kKddr`2XfZ)t|Fzl?nP#o3*Vf9Zr=c^bR<`D=DBS~g9;E@|kkN6o2X z*4@KKW&u!2Cc)F|`!!pk*;!--N0`Q2O*t65a=I6MtsSB3zd4opk-0sl`bP5(+eh(q zbSE78Zo>pA5^da=gci9Eyu2oZEzVC1@+GQ>F zfRYfCB{AkA!2nqrW|Lbfe5B#mG5{z~+i>PqMSZ%UoiB5)IEL z4O?`*kJ9tZ!(tEvoENQ;9^dTQdk8#tUq6~AIZ?*|Wd^5uA4QN?;cXvBl1I+Pet0L3 zs127S!vGistu4(mQ9W)awBfUlws)rKwX8XL^5hJH5e*DBtqk+`X$xp}KfHh6JR^iM zeVLA~#BHe|$1W}|4u5hjZ@{-HS~J{}N7L!|+v5+3W-mqJTfSp@7MA{Gk&!58&npJh zXTlokkhI{9HRxAu8-+-<)0+c!B9y!=krwBs-Ey7BV=ZlZt*1UZ#isMk_P)$sh)<_l zt>6tt`=5Gd0T3<*+Pn6nzA1f>Kr|fs5b5c;tM;KG+gA~?q-e|z!@dC3(%SJt5c{eg z>?hGtJDr_j#3CYL^3m>l!YG@X1ZtxvWHMM)W&C=3wPmd?k)tSX zgV3-?B{ioC&j;3v#Z-T^^Ghx-Zv4qPJT$v5xL_Hb;4+B}mRU{KZ6>~VirF4u)QhCx%e%ojWuCcuCZ41vzhOG#2wL{>&7yy8X# zF>eN)K~bZ)H^mEquQ7&Z%{PjVaO>m+#I*=J&sb4t%LQ-GJAR>!x_DK?QdtzjHU+kl z0@EPhPuqb zquoP|$;48ktA*ukbs0BBmxC$K4FCTUSD2v`(5l&CtnD!SlwIVfU-yvkRhf*>McQDw z%;7poix+5X3}#2wnCPBts#rn9drjE#_>F4VsVzYmJ+YGZWSO-efXM3b=hqA++zBY%50HD=o;=w?DHFm>N( 
z?t`%b9B_%WZc?9q&NFxrrv!ROW|X#h*aItpsRK<%VmCB}NUSD}7mdP2GF4P4!?h|6 zh7S#sFt%O0P7pK=jrE4cH2A+@iKh!kjA zx7h?8X*bi#7LG1-yuWM8>>wWZ>ea3n=!g(cAfZlHvy#h)Ne?*v}QBOJ>L|T(UN00PIQX|bwr_eeJf zi#pbIy1O_~xtrD(_}RqQ1`GfoQ@Uff&Z)$yzv$L$TF>UdwzY%It5CnAtC^Ith(ePr zdirPJp2J<6~+Y=aZG8l}&aiUC#)@IKmsR`r+7k>I=^x==+SOmQ}su(S~8S191g&8JdZmH~QvV`9hj3s$K zKuXvuV;@N0-g8rBrIJ3}T1XiQ+ND+l_e;=-?px{!fOce{BsnP1!Xrq}rF~>5;#L;( zo7_r4B|tppx+T$MUKO?^n~^r!+YC&WnW#?H^lcQFRV!NHvs4y$ldf7^n9iqf{)O(H z#-Fz4*AQUQ(dCOpksSe4g)O-(NHhj)wk(kw+A_+gigz<2+1Z;r2R)SHitN3|kE`Ne zZL{L$?}eeZ^`l9q<}s$6v?$&<{ql<>divBzy7*t|>G?U^A4t4QH*Ty-%+W91NkpD7 zX9XCKdkj9Vn^HK^=+4p$UUz&p;7KJcbf?FA30`!gTR$sw-kme1t zbJ4$z25z8O2W5?YR6rQ?@z?d^Eon+!p*BO99M5yURu$VLb$_4@k zHcMCv&HDS#&&?^qlx1ZIP|=z=jDtS_lN`LW!C`5{!U@BckmjQyXdh{mh6}JCkl7r0 z**sW$g-aNlARAk`v0#QNL^mTB3B4cMjtacr-dz{E`&r!JFFMSLi8yMv`#}tnRRR?y zIbE0yVzK?I*-n~hyTuc9^5fwUFABRdf-PVUU-xbu1jZwpOdby&RKjnk*(7l2(0L@f z!?8C&WIg=!L(oW|bPJDVc5+Q62nW3kt`9sVA;C-$S%BdO!!5qX=~C;b7%@UhdijiX zWat7met}qFG)S=9msJgP=*pvkR0T`!(xR?(z|f#?$4Zv?fNW4e*fFvB35C*N@pj#p zeX-lcoHBqYA^Zq^OJD;;bEE`?drg317{Vp1W;zH3wHmN`-QX8*t{6y#trzZdTh^_e z$B3UA%gSE;hIT9YWc`b7(`m6uJlt)@GHpgKE-sispZ?Z0zUXre+Af)00foC36-T^e z(7N$HpXtsM>(`j!a`ahgyf2DSK13Z#6w!mPTIwj71xuj@?a>n zX(k#=GqE>dH{GHAQUtpVM%c@{c|7irQARZCDW%>MGd4z8007A{T~}QPtdn5gt2uZu z8N&}|ns?sOb{Z?-+@|TlB={O|?Q6Ijecm$+S~rHA8zqAns%bn`#Nq<)c4og`d-Rpd zuzNF|9r$c?xCc8<of?<(sf2gC-6-0(Nvcda-wD5TT$H)e`v^?Pkp?|<~32Y*K@n~ z-o1O7*AxkAAy!C}f|`Y2lsosm*bj~(>Cj(@-Gq#`d2?$Ssp?>GgiME;J{Ax~bLz;q zWgRujG2G#N^tzdatvfH#x(;_A$$=ErA0*@0UUE<{QK5LzU*%Cy2 zk9n7OE*Wq%e^vk(T^o9CJ+qioLs%DP=LK6U73DL#kZ~>w5f}#)1aoTeuvnD9i-mIj&cA*rF5CqI@LZR*aQIb-M0j7UuOts-Xznxs*!R#j)!6$QHX=Y8@y!sfnO)!K|1X1-56C#+CSx+M5 zN28g;Wgv@r68J8zfBpD?VJ`=a9)+Fy3})lg)AdV$&@Uf9Pr7>OyuZO&*r7d3#@+Gm z?Dy6*m!X|?z3(1k<`j7Z-M>@U-Du3poH_g*YLbvxq(na)Dj;+gBKr z0wb!NoLt34)@)!WKYl|m6T#H&*TqM2g+;y>?e#l;^EDREN`b~C?xIOH7~nJ<*?@@Z zSxDka0N!K=(zO1Re`6stUWbj0VSL@}j~{&);Q4Y>;66DFn6Tr0bR8zx*N{{Uq4SgH 
zXL0D;4I>{1fOXa5E47e+NaK6S`yzS({|GK=$*%9{=jrK*Mrk}iXhqsEfVw)S?x_<; zhV@F7Dvd-bZZ2*o~fh4bg#`vj|W6G1}=T>X@dUZn`8} z!23sr;>qCrl`BNNCF(SsT-F&Op%{aiftm*vc3Z=O0YbC_Kp=c6k2EYq9AT&<#))v* z;CNyh#Ejk2YCylkH3RRaw6QqYamP;|JJwzJpqnTP+Hzk~)eelisV?LJS(uvh{bm@v{zz~}iG_{71^p8kk#<}On zwGg013Es@6dNkJIFw1gr$N7eI9>1GK~ zD_g?!L?EN#dXNgzZS1v#05Xq4IfHbuzqRl4R*bcyTj%2-U@s3F1Yu3oig$$VAlZBa z0z^<9+`aq#?lS0f1IQCKjAl7c!@9y4g8kFXZ(^*8sHXi>05F+cWK^I`c!ZssyUhG4 zkvf|&hD$YnABBC4fGnYmnS1`&a*U<3N#vUR^2;yR!5WDveO-YsNI%(8uOqHv=_bv} zXx`3oJrIZ??~#n3Pc=P7-35~uWPbb_F0IWKT4+nIkAk3=d$!gkmMyymo;aVWrNQK;5O6ij<#TXza~Dp>mLDWU^`h^Q0^TD9sGV;% z?mbH)m?5-1Ba|&}V$o74!lYfH;Dmd@Ycv237LQc-OQA=E4mR1&=B%1I=?fQ15?8-Q z6u}%qWfE?u@s*2&lEebH7NuMmjYmIT-8%;^yxi6Q$sqXKw{P^~*@X3xh(~i}Xe=Yp z9-0oB0qTbZE{zDDayZUO&MTrhhRlRAq0DXBxpTlfO#>ZDyIk!wV_!`54|w~ln}(&g zIY?vt=$O;FC#Lja^O!^L!?gC3ea!|VkHT-0-VM2M07|)o$Z!goRKe908T`iSY)Cvs zojtFx8t{J?6ck)*kj?=&m_gk*B6=1KViNPXMjh{NQ!=yR(~Kj=1K5&t-!LYu#mIIQ zN?vk}aH}>(8mu8M5w4$Rjezrm%wod79!(KI5aH^P(z1-fNRBhKm6}PI9fx5qgxu>$WkbXVPd!j!^(XlhJNai536&3 zm5Y?y?Ti*YCZCHgZXweh;rWXTj&47kYIe#x+IGo96KQEx#ckJH3Vv$=u;Fy*lk{58U^xUC9y?ftGd0nVK>^BnOc{3!Zj;Q zFFss?lVZK}jSXaAsy#m+CvxEXsm}q4v9&l?`P_XN#@vF7;PI^(EHPKo&j>gS3AAiDMsoSnpE?t!F20kGaP}#_w*B zI)vFPLyT|wVRqVPsNrennN<)*2{<7I65l|p>l8yyiY1c3QmH8M?V4DdhhQevAG|Ue z_P<7`5yXd5G6?9E1=tFzGaSkC93FZy;6jW?C_IvQK1B#7 zxMhVMKT0wO895~&il|CyABI6#>1l6lcbCSJel=*48o6BvKoO5QY-RT0H~3`%N6B?W z;f@N@GF4=V*SJBB%oD<>XoV}C&YU}kwpVAk!!h4hNNZu>_UH)5Re|`ED?QGfIdc#R zldA0^coG5@>A{#ZCY`}7fIqaiZ@i`Vb0LA(#%mD+xFUoH)9fI>^iu>EVPKEiZUI*` z>QS(6HkLDUGTgg{qF(~jWQW0><`?sF0A|tv8 zkQzu+P{RAte0$T7>B;fIwuWHLUIMF>@_6A%(3OIRQP?T6x+-h@%2H6hAp_Ktndd=x z%M)-S?BCQ!VA2$=a56B~Qs)AJ*QA;^Of)B55Bo4IRuFd3-{uL?j zq|J=>WzLzWLen5*gBZJ?Zg?y{MXugJZ;SoN5qT8rI@C^bvxuAFo&cbkN}!H7tTet! 
znL*%E_8w^Ar0SXM_2I`}!Nqex!{mX{&e#Y7_yH1ISuL!vB64mnFR5tP}1( zzYl+lL%KJFkc*ay+&=?+rtq3YqBj`)7<(Fpx}1e;7*1Hl8D z4^MSE!Xrj-9XTDC!2WBJ8cB*wQX~2e248z*hb>$rE*5w~ib0~tkTa?W8FGfC>>zwv zp$`+3op%}m`b~@5lG6cS9>L-tg|o?ySvsf?UxLMnzc>tw^CUeZ^bK61;A?Plyg{A@ z8{E=dvx_L*7==*Eu^VF&u3B!YLrMjjV}uvs=^9O)Kp0d2u%N+^l?A0A@F>bBBh4VU zq?bShKfoZ2XtVS3$`L?HyvsRuH_hB0G;3N|B$1m@n1R#5m{f2X4^m$kYA3m-4F8pE zY~N_%hbzFycVIh-Ckt(M**N{&*~teeexP<+Fg*d&K8DPjZ(O#s%OL7W zT2~MZQ$WGh2d0YCmiht;hV-(M{N{_8Z-k zt|b&1{!DEdbI}9Lw;OH$Du(ijb&vR!{?9x9GAe1VgFN{A<>d$o0Zs&k+gHIqzt*8o zR5sLHcH!|u<+w-(8^9ksh$zZFwfxU-8Ra2@3odfP9bY>|q0o`6x^%rz8o5}66m=qt zB<4YxHL7@Zn~nO*pFeovpilbJHG?JkKnp~EAcM!)&q!FX$%O6l6)Q-4PAkXm5+*Cj zz{HDy0ON-G_I90MmzgLhr;Ke||9mNPfu(1-DnJGlmcmPmdp zR#KQ{C2qg@=i}ps;eY;4M<@B3+==t|$E|%!@9OVQ|5smJTH7O4(MlYFqK?YYGD%Y^ zhxuQxeoJ`!N~>?zu|;HFDV=&tY6#aCcq!&OSo54)ZpUEHz_+Vhchxt+l3OBfYEK4v zSA1vEKM_QwFS2~O5^v#$3QlOc1f7V7h}(`Qy?=k1v&j18vVZ;T9kg8MIM6U}-c84H z?$4k8>&sie36Yon`)515Xn(&^@@LU+5aHkDBaQ3&Q&(mvJpJc4EImNxe|b7N!j%60 zII(nM|NiXBX)@yS&&QHm=AY00^NU{O*34YyU!DYksBs`?>+0$f?IfN0o|zl=53g?{ zBO~3CbmhDMSsC*9Yb}`nUKzZ%fq`Sgk^^TwO?rL0+#FSwB*~{zAVb_KJ))S_i z>Q3IxVg4(MNsL#LhT=aTONry}&rbaQUry~lsjV@%9nWUN2?ugXA8rS_ckiAdEO2cM zy=kAA`pgA-{9hcx*g5Ee~v;Ee&f9>*rEWjR; zDsBb5wts&I&%be_w#P4{0mxR;UP#@!jK6lA{PUpxH{0=Q^}kwrnU3mmTOo}F#eVpI{f$|ilTtV$_e5TXu{26yEy!|uh)R&P=itq2X?qAE! z{*!(a5D`JaBwT{!-hX{!2usjcn>#ZUiU zPW)-gW~;eu{%gJj53U=m|IaTeE}KUSRvnILGKuz_y_07CZ$F#~`Ig92

4kuu%Sz@lJ2(@>gdwvFyc^0yBw&D>z%_XlkrPs z_U~}lKKM%7tJbKrPLHnR;JL3|OA!Z}+{UnM^!UG3<^Ng~?ZR#aRaOkpXaB3_(~Yi5 z*j#}t4t?Va>E1na@ZmftwR6+PPEyhu0h#uZD{RS1GyTu6YVzxJb1SY@A>t?+e;0{X z3R~h9q#Y>wAoS9BvS>_W5!X0`Mooa`V)kCexKNx0hS30^A_ z#D-klfsTE#MfSh;z&yr2?A8KAS4l548^Zu19rym>e+-v4&V#+nu;NNDlB&tAj^y%8 zGIr!%tR;<+Nf3p^byWTCR{P8ZY$UZix+98qJQtDJCExS ztZ_9}DbO<7`t|4zLaY5I?@!vF5J^J-p|{tO2~%=A5flb-!qA8x0$HKgBKBX8-mwll zpP-R0^QQ^P!Y&p>4B<~EQON`+&uJfP+^$zaXe2sx>}aGu^!z`py?H>5dHX+pLd+x? zL>k55G*Vg+TFEvXDea=g8dEB1FUn!4u}nB3w4yy_NqZ@hA}S?CJJP12l~R3Q*OBSW zd>)?9@Av)s@Avzmz^hJYYxCFl6xN&3s4xW(m zz1x21v7#0M@O>=(D2c05o}eKIrZ=Mz(4ddQ4v!va52C>nX(i5BY;_^Vr4`|Jw9DX? z=r z+*f19yG#=issAf0bv5)3ort3fGgkEtfrO_SqQ{Lu6|I{|U>07E{75tG)bTP0@ew^K z`lYQK_!d#>946e};BjYVDI!i*qr43tuz1Xj16w5nH7FW~^47}Rp*w)wKii>P44?-( zUnUEEB4zZB%eH5cCr<&Ug!0ezpxK_{?kqStndi+ul;-aQ|F6M;L#yGhtZH36XmoR| zU4gG?CHlb;+v0f`Om-7A$gIRC$g$2`?Ar^-qR84FZ-HQZ=0o)FBs58}4oerUfhgvZ zl6_cl3l1#;dCJ@U_TGtPvl-##hLO72i>uR_n*NIOD_@UPy4#t%R`b+J?r{t8mk!Q1 z?*IR05qJ-87s>}pCd4oQ8R`ZYx6>v(kfH3O@-|?FT8W#IY%jO;0AkMDelw)p45*Y~ z`b&{{MyD!;c-<*29(xOL`f^ib6>OqeH_j63KUbiG)q6DjOZ?n!A+xk== z|7#k9TAGXsiuA5S>E9yHI5q3rXWm=&HZ;ay#iUr|De8onb?S7;2EgdO((E!qJPXLo z`%jKEF1FbJ(s$mbqS_Cr{<(SV{!jhiMiOO;1<6X8ui9(8H@)u06SN_56}zHP+E8FH z(PrEHo+-VQh{R?mbgG^&ABTUp(Bp9%eDfaiT%D_AOG`vGN9psCAeJn0FSpG(xMRkt9=eyXRRv*A0fX?uQK zc`sZhYBlcgr?&|Z45Splu>U2THTf`O=0qnaDK*uWwN7>H4 zbVz(;#p4j$A!HpQ;}LGXP`YKxT;2PICr^2M=gS-RO=ZV2K=y# zLEG0q>RMZGA_Z#in0?{Lu>w&(D4@e(Eon``V)P`tNLee+q-QYBO0h^xfuq#Vq8xa# z)RsGG*65J=i5Iudqxe^38(SKa<5NTJK{-M>aIUa8106ww5g_k8a{vQg|9Ei!Eg0vo zlhrEB+5>_;!q1t6mP4DM8`c*Rh2&7wM+T5sVJDKyvKGCkw&VQWKhnilc>BIoQHc*< zv|yLGLT>rYU#97{sjpA>`zdMCH+S(_IbG@*XOW}RNEH*%G6f`k81L-_Q)di{BcWwP zv0&ai%kkEIEhAbyo$~#bT+ix`6)r4J*i-#mQ_8auZcf`BpNC0<1D8CV^;*(DOYo!Z z*l&YX+17gM%Wwst{ER{FQxk3$S$}Y(>jTmc&$?32grqZ;WCb7#xC1JvC|k6*s-oNF zWo=VI(6TmNKceO5m5+Dbn}L{ZSKqe+ijZivIPWe8@LN6Qs=MF|-ZH@Mu$ZZO2Igi^ znTXJ%X~%DL(@KNa+YrPKVT3l@j>P^5`T`;-QXaF4P}Yp&@4UT);lJ!)RcO^KREM|v 
zr^9ar;O^(?a;S1)03`&N1%O^&@Kfu0;LWtN89ODo0}Ad$eWZB5#`4<5Y=(>83pRsQ zbmriiO^nu6C|Nm4mw$9#{LQNVql*f=6Oz15Ls1~8<|U#_F12nzWlUrwE`|xLYag5% zw7>_Hd1N>=a%yh^?0%S2{~+TY4rCy>7ZVcUV=ZqZP`lW=Xi7BnvjL%tA=pQX-wQvR zg{TVAEjXtvJ6)_GXQch4#pXA@Rt1{$f%-6=prCIa2)SqYI|Edlo1hMVj<7)MX54*! zD5)vc08p%9ahRq7{%TyiDdlM%sZKms*lJCfJlVXjav)y}tTXYa32D$ct}mx?6)imw z)hJhnK}7PxCJq%Hovm4mEl$M)uR-YUuCliUvSOa&1Qp1>{N12~lgZS+^P0nLycY1`V~K+BIGfdFwC@pyQ~V#6eX#I_l1?d1wkaZgsFJ8JL!~a`=7_+TyjfAQxwS7kO1scyL0<3yKXvs9X(sDhaRJ(M=$8^O-|A$q%iU`~W4M zF)p!viYclr3M*piu3L8;BE%E;7HEoTJ`78jh-xrW;pIqsi^NSzsH4=A=UEk1U_U5+ zrrZu&YLVK~ub%;Wcw=A_B%mk^83skL{sBo+}mTyx`rf`bEBA^)3}z{21dq9{Pg7ALf# zlX@cm+_}m%ryGU(AXkk5zk{bW7>XD?E9O7%h=R=M#aoAE7&t)kDC{UdC%k~^0etnY zX|K}pO^9D&dEOk}(Ai7s+MO8PI34hguRvB8RvxFUhAsL!NVrHM5>RbVKmOj6h)t*m zQGohl6v=he-BQpXpe6@lS_Bm{g}G%d#Sc8EG(Q`^k)s6*p$8(~g=T|<`kECX-HCQJ zlnk0=oqQZRfI^hsvgm^s6u=ID6xGelMo13|VcBy)w#{5LK~|X4qYaj&7)l;I{1Pm{nw?E{!e5{3^Ih9|B8a;@oqYLh~a6g$3& z;{^nY@X=!_t`kRvsLg)pjHsJK6adS48Y)hjLqmpT6XiR=!SM4SwB^03>Mc+$XhEu} zgPqsJ(it5SS1jrl6iz`z{otOo>fN1Jqs5{?FywxK$Cfa*t{I(I10a^qdFq}^XQcmr zDqBY>#g0jVPy=t>+TL@wLDT74P?D{Jc{RYwN?`z{I^F{Coifs3s9(fEtC6B34`WIM zPVg&udK=Nw8bB>;YN-O#00JBhfp^dvKtEuB07l>#dU%^9kZyq1TTPp4-iOTqL8r>A zQ$bF#l&CJ6(1Bw3kuG>({7M;Go^DWK$8|a*E9WR}KBQs5Y?VV4HHo(ojq;S>8Qk%3 zY;pTL5Ay76t(@0x!Ar22U@su%G#;zPs%;Adhe#58G;+c#!z*J_`U*#?*~kC!hXl)b zAYF8{=+!a3EptJZ6#dn>h^;dDo1pW7G;wx1VF()-8pA!ERs)$5mCp>v_-FF)g+S^JU@Nj{)KLzSz>>qnSwqR27`Xs>N`kv?xZU{i<4d8UtT~s&bEOln zi*L?gAA?=4?fveM zEQ<_K(1yih2Y^^@HE9z-r3cuQ1}`6}*70~Dfhi)h_-(^cv)2>&Id>7bYe&ez{7wOZb6`!xajK=X%1eFrSkPnX2#f= zjPJygx|Tz7K|H@3u|yJbz==p+;JKWtvvMXE!m%<3mm7QN&&TT-H5N|tf^m&Sq(|M``^h~nftlN|m~zO>sZ41bjo zN4*0+8PzW5ODqkrxza|y*NX?gmp_ELZsx z#~53$GDrw$V~xe~{yzR|g!fufzB~k0E%)*^7|;!IB4ed7F@|CTq(|*NJq5SLan-Rk zy3Nz7_!24lqXZz}G^H(a@A1lSax7qkC{7KZd8;m3OAwYd`>?XU|JiB5 z!{P#E4Q{8#Wj%e|Wpc{CKxB!UQ@G8B-X9N>101R9M;efV3qj(~QQCQcwz zp5oITy5DW`%Av?iq&7xjt8Cxlv3>a4*YhHBFS+yL@E6S_bVC5QndM}pjX 
zl3@X%n+#Mq#+I+5_Pj|FmzfwletggO7iTnoC+da`@U5KPIq#B+bQoo%!j2P27U*+E zd=;P2bnpLP5p(Z1^Yw#JrU0-Qb+ah_x#Y}7Nk>CuNSF;o7(ks47>m<5Nogh>$=;yh zs1leq{8??i+Bfq-%Gj7t-Hg#JkeQR6$J3f*a(lw{ABNwW{P^pW3r~rw{E9~$$bTe4W<_m`Q9zO&?MuvS#_9^FL*N7Cng%r z+YG1flOv-ucedB3eBi8|Deim=@F43zS$4o*-`qa&h;K=EGr&4POWirBJ5um2va&O<8QWX?=AT%L5Fv|AHF4wGM5oY?33su~4m|udmW7aJ0R)!YVmr2tjm;99Z z$2afT^6|XFpZ_nq6x}bsG#hXkF^2yCy}16iI%AipYFYWr8lfFrz~<|vVI)oFn)1K? zoiqVk^;8s%Q0H+wbI8?!MtvWjKa4ppHCr49WCHglA9z7W_axv5EE@l5$7I)YMM9su zbA&Z6HVv9loj?NOD9qK@id<{a#j+ICADvjnp>ys9v&i)t+K}3e#IlAU!G)syzCvX#gfE}hD8$; z@%qq?_BkNloWjE}Xptn1sO_J@fKQ-Ao;5=HOeyRO{uX6oVLAHn<>O0Rh!`SARo!Cp zULntqOr(@h1Vtla1av6PmF8Y-h7X~xwu5GUAmfl`eGssKLYuM`&dC@Xo>J?+KtIK<()gg|w^);U z@EC63EnZKhbLST1k5a^aSy|co@(MC2!q6`r*as8`Cq7MxAR^x+*+$UD7R5SEq$8kj zuHo?W{ry8J5l$VX*2|pV@fIz5F zd}9jDU^jzXYdHoc-LkgFvZAtyuqO24&k*uYDUOClW9+W%;ul!UId7KMK;jBk`bFuk zuz59FtA^^_AgBS;!v3N1z<%RqxlaX>RO7zz*7|E}%}}ebitxI(L@URUf)j5g8h=H0 zzlRWwCgJ-W86H>!(%QN?!KN|_G>@Wn2Oc=0{H>F*kSVP<#wGA2>1(@+G~`}9r)eM9 zS=KLkj%R8!J{2bftqB!TAfK5O+w7@`?JN?T&78``;V^{Y!h?{k&W7Dq5R@|HxjsZ) zmmio)7FozOimWH){)jd!%D=K>WK!{}|0$gMj~|vDxLJ^-TM6A8DhoF0D8={KH)*%N zpl(I&)0?xhU8naVjtK~vg)y}6838tl$l@@5hYVWG5LHjHz5|0N4eVvkg(0tUG0bD( zXno7i!;Iv3qKs41-gEuJAp_=Jb|_U8i9=DC|-O5S^SVp?L>Y4 z_a83!3jCqt5v+Mk3+$?N(jf6Lpmr$)1v)qxtvXI%8)yXVwZzIEAa8kECE>saYedU3 z4ra9U09;W~^#Dvqdi(RxU|h2BvnZIkU_nu1TB}9tt^w-=2qG0L zdr17t+fWhZ*$#B*!#M>mi%fSC6#nO#gG54*wV;F?%X)8@zeu#0u?Kot>>@)okgP|0?C8QL)YL8NShEwjdZiN%LifV-)sVK&DB{h^H=tzzg#|v_D#S z6>R`N9fcE&RfJ%2c*VAENw(H^1vC0z>isP}l z7`}?RN}E2}*XhtI(+}pbt6^7F>0*z0ovkjEYs3Qf06Gt%4m3 z)q)h~x=+i0QvH1;-v2n%2cBK> zrffut!w$tBG~PNi6cEvBH1vz0K|qV-x4~4y*5vx>v;jpQT(z4FPXO6U6DnX1qB+}* z2>_Ji`djPYXgIm))*ttMbK0akFE+tX$B#4r`R(#u%ofd&`7cM=9k=7-10DLaeUne( zNPzUCSZMFGWFk<^W@h!eqO391u)n)llMI zOi;p1S4ueHguoQk2<|4|AxM7+(L$AW1>OzwB$<^-yB{xbxskXs_k-^KLm}M_>JP5{ zF&awdohzHf|BQ5=G{Sinmn5wo_;?^tX=%{zaX|%pxBUC0P66UAiDVbk6o;FW3ouj} z1A%?uc;xsXakRW`Q{&|Tr}Ep@YjuMwPup>?csItog}Ix=!q<9PeOQp0c@<4=X22VY 
zu0a(A-R`fA52g-)&D49k-ZLN%9;cQbT)iay3*M(rB|bSQt0d$*;isRrUfBN``-ZIl zp?xa5SK5aBwJ5%)?@7Xs!yBg(y5%Ln5|0OZ^jegg93h<`4r{`<%v1K_3xatQWCb@k zKeF-O)wlRvX>w7Rduskm_|D#G#y38aZy$cCnsG`VKlFgZ+=Etz3|1i6t{~gTw;PiU zsMhehnmQ8?C1E}AXw79IfA=}c1$_+1^zEjp9M~m^QD!0AVnf)`w|FRCc#I!A)?Rtl zXVNYr{C6z}^n8GY7}`wFs#jbFd9fb7r)zjZ|9&fM|6g*Z6xL(H8KgeCfTnVtrX^ls z2V!P9VQsi<92>!a0ZOgFBlJX9g=W`c1@2Ro3hft&Nyz`i6E+l} znzqjHmDE{5dc&8yqEJpFWm76^W)qT1(6>E>O407HgovyQA#31D|J#;7+sk$k#%I_U zw*(-}#4vi1@v_7fq`kzjLufD8a!`Z$WY)mf*w@$z^kG9{;wTzd7(~Y9XD3kwoddWr!g|092ujy)=sreKTk$rcg609u=zp_03 zekvHJ_3BPqI{Zvg&2(p15}ik?;GPk7kR~)C%m2pYFbopL2#O!*1S_|Gi#gTQ?P9w_rJIJSt*#qiYsWv=onlPrFHPoKQ{F5>{Gxc`Lxuc$AG9$JiIX7oSdzQsVC9}XapJ4GKgFP!dnm8qirx-^CcmFh zTNvcteX+QU#)mwQHI2r-;O5QZGA-J6Zxre^4Gsyj@opa{^i!nEoIsaZSv#O=Fr(pG zl;qDU0$bdP#+Gt7sP3jvfAp3w60XA;N=|&i@=k>E<4u$RO8~k+thO5>9y=kTl>}Wx zwHdAxxD_UgUI5o8Tx_A+@NHx$o_#HK37I9QG3y&a=yp3ln50bDv6eEh)4(Q#;i^PufFV^!A+5vunGCe>7U;ECDc|_&IF`HGIG4-w+V~?ChZuR({LcM z)A5E$WsBz=0U__56nSpa+1!0UWrcW#Be9lBbUAkRUEOqQ5UOxE^N+0@i2Nyzw6@iG!< z%sdy(f4HUWVCIQ0HCrg0i@-A)&5V=AJQCCyyfEtL9fZc7W&U;S&-a9JieY5f|L{=$ z=Qo_h{+U{gUN3<{VKKScTD<-c0#pjnFuL%EPBzgG{ zaqS7&D;gq}WE&P+1G{%T=;=kni=vCAV67R ztQvk4*cG+{&Wsz6yynk0^kO8L;AD*v;mmQb#J89EFV_#Z`1 z{sVrW@|Suw)D4$=z@5rE8L#$tH(?~54!d)Q6Y__s=#(`hlC)s3W|G$V`~$wEiCjRK!9FT}y8gyk4S@!%x3k-(a+q22$SpWCE79P7at=RxY~!ZHSZ3Jq zBfBZ!lqqXh`(o(r)bxruEf3J$XMLN&c++`Ug-|wQXWM;#ut3;<(N?sYD*ewY7tf8lj#ug z-gfBj*t!d)25JH&YU$+yka)pG4S!yxa&BNRixJszZ`p<(Ps5FDD(ug6x-~J z705#<10|H?Z>G%)+VbbNsgN>-*ra`UaQp~=rl(LyxG`OC zv=Rx*qcZEh^Z4M`RU~(~23cc-ljpVwoRcZ1d zXirc(eE8XH$=oJ+hA#~$q%jeWQ`s74a2s(WGP{)C9bPK_^)_OGrrtO(uG1c|QFy!< zewJ_R=NyZiRr>-~Ac^^v6IG{u=Rp&r4D+vl)XlH{dz0wlAe}*S(W3ZgdqUvJPg0!q z^_#)mgxfZca6`HSwQ@B1_9e7*c6N5_Dxb>>B5LOT(Wk?w6GQb_`IN%l7}AFEugb^Y zhKf51G3w$MNo3Q)8Y?99Uv5#7IbT0;6~Zi9UEt@C%aZrOqnIU6hW%&H^r6Llq+_&1 za3zdhgb^ET8yWFxR`T*O(|$)vXvs{5EG+EY4XqTrPOrn)jg8jxy}TwyK4e4Dcx&sF z^%Ms|91u`NklW1yq`q_kfc~9=4p7)SqKL^;(5!p=))F42p+Pynm&_f$E{v#Ouz>OH 
z#!5;`3j|kEW#gD5?wp$!-5^ite||P8EGp|L*k080G6g;%4kYqT1fR>{fxCB`3WOEj z+d@Z%p!ZwKf|;S;esGga`N+b^2|?=9imUl@AvC869ys$$VI(p`x0WomiZMOPJ2A1z zWxp|Wy1ExHUVQZU@$LWmy8Te#;mxB?JhyG?>ejPtEdYwmr+VT;nTks>#>cha z|1o+c!z32u$bZ|UryZ(7xy;frs9_WnK$dPs5A8?jpK! zPi*pU$mJze{28V>Q=BtbmOKvJj8d-rb$> zkIhV+))G)IJoAwGhr96XixCQVO2;D=5}h>IGAl<(hSjd+2hwO)5t%Y&7nKO#-kfpc z+)$05LzLsmlY-GJ`L-^cV{cF8Rx*03&2&P2`^u$V6RI&mWsKXj z$Eegw`_*hc^If~77A`EN#|ZAWZx$yRtMI5tkbsHGez4omqBqJ#uaq)0GYf=X6OMDC zoP!8%*3r`PW|A)yWsFX;cRzMpOAsA2H~^uDbr1tWd1(Y^X*$S>5TM$=pDqwdjv&EK zf^MMme)Q;(EF(GLV=<4@5Hhr*kdEovet?7a4|-ATRTgRiFlLpxty%ODJxWAcd!NZ47SB_T;fht~UDJEfu#-=n$ znVXwas4BWTZQLb<4+d9Mm;hkuJ)A=$K%wc^&*_7;fO=9Kf|M)t`q*Vs$wFpICznD0 zh~hW>2&;DcSnGAJuX<4<%^JXdQ4(Xkk)8{jbXfnC)xNDLY18bumLJMLAr! z;f_`X*v0?^kWXPP2RXDMsF$PhH-U`|4 z-k{{FU!aL7!z_6LYw5g$u?}dwEpd&>3qlwd!BO24OuY(rQ3O+f~^wR#5A%+9#ygc zv7qjGb^A54U$q+>8#y7kAXJZJx2nLBu37v-n{ue-(ShQhoB}r7$+(cp-P96%r-4((H_H_)ja+bL;KXZ705rm zCUkcqgbmJVt<@O^FQ4hD7r@|8*g!h5Z>;0MYaA9Gn-X<@2L*3SO!+)6OB=lq<~cO{ z-j|rs-;&l3ePPX1cQdHpyILz#?{`p^;MJNt&_suZhvyX(T))4VZLsvV#o);O7Kr!z zFJ+v}n&5KHUAw{%)%Mn9OVOpb{17`dxtLwIUZboaWgh>w1+5GUy*akdmy+|17L=^*!eqETwI^~5=KEZJ;(6S%WlP4GEujEidBFgUT zMXzP9cXm4DG#8y*DrcDycWNff!ootLb<3sKf%{L|FSCC>Ut{+C`8v?SNbU;)>=xLw z32R0oU~Yv-m%cu(9W;f%vfvyAfq2igk4#R0R?j%RZH&4yeZ&u7HgHHuuux^A8dlvK zL`m4FOE@8@W^jklqmw^#!NP@LKMdY1ty2qMQ*h~Mjk;UPz*(T6O%sGec6oTzi|^jE z?)WS{T_&slk3S4@%KOsX4FeS~0|coTAG8|#>b2v7B^w>1Hf|go9N6m+JATJ@X28WO zHzOweI>+Abc1=e8gy}y8ck}Vpx4b@SZ&}xN{X*P;Q=qLDr+GlD{_V|x0X2g>T ze$_^QlTXf4xw};c_D40<`veBYo;-Q7FP#4{J^!JAjj?+5jctEmLk`7+GM1fi!3jMGf?`ZlX?nY^x;ncxtwRgI z>D2`*Vs6Aux_W!QNQJO$)6}Ai({gr8x9lsZx}G$xh(GN_(oF5Z9m91VTSflySc5Q` z8<-M_?qThV7cUlQ>Y)<{T)K|M(g6u9n1y7+PS+N)bHUevqLETMpcIVgETh5T&~a#R zQZO1D@(|M-Pe^5iDcnJv5bOp`ILnf@`r*_kYv>0Lpv5U~FYOU)!+3yLsL(1u3}BIm z{ye`5%1DWYTRdJg{iH3#zu}iDs+vEzfUiUs zztPS9Luc}&y^+6 zsFQYO`*rZ5>@h#mEbOscbI|bRgqQWL_0IXic1Yy?=d#Jl%hNz_CiP)i@D;7MZyI)L zc@t&?p!tce++99>gYWGW0zJ4KO21vKMjhpbrb?r7F_ehDrcM`$Q~*(&byP=rBbsqE 
za){B5S`02vh3UemBPM+`6bAg>|qMKmK1^h+w!&>I?7~pTgddRdy{Jt4syi16oOv4lH67V+T>910x9{A zCQ#D|;@!Jve&~gES?u(b=99K;|M8DkwyUXl7CqSfZ2oX*kjd=@AS6RCfR7?o>QK!; z+aJ=&zkf?)y=F1ClvLTWUBK+UOQEKavWUIXM}q$lfz8 ze9n`w(%-Wg1jamDKmTjSwW6iJ zsJ8$g8IN$K;89^Kk9vI<-E_cl!;&gdc)NP8(caXkn0WW*m_%hWIinlF_uU~FjmPFW zt9**{W6GI@XJ2A^&%3tty7R`zE5cFL34ClDcM>%?idpl}rEh_MNn=Xh#2y}KHX03> z8vJx&7D%&jH@Deyi4O8#fe${E@J;$fx%3!n5no6SPwT1>^*yZJDM#gN`WId0$ph7O(SbKSpx4-RVY9xyryJC1xx1_&7q z=vKLE86oKIM`QSE!GC6 zZ8gOY9qq;$&kYJf*{!k^JXQTV)m98Z5QAi|cTf6<2h$!sd^q=Z0ai~q9^MxpJRsjg z%_`M>fdY^bXUc4ZR)^SG@xfG)vh@9l(_L!#a)jFe9F&Y(Qlb5if-tnnaExs%qIOd~_)d#(v!e!2+aB1syLb)O>a9 zhXFJ~mf)z-a|I0TIE`BddwsvQ9$?9iK0oi$6d5l2F*uU|%nUHfZury^ENC|F;08$Z zv>s%&DjP)OR199#?A3e>Af+sX%7cUbkDfd!A=VYjO;Z$xl!%56RSwwe)0;x>c&gzH zzdi934@`u5a|EUnR+c2&? zn(i=p(j>ii-6$nms?z(?3JS#e%ra4-L0NDUW0FMOF&uys0u=+nd-W|#Xf`&wh0x<2 zV7CmhO;QjJN@FP02GX9kaGs&8r-><1X8rdp_GOJ*(oj7(kodq`JUS$VX})tOG2!(s z&LsvLX(W+c`ysx}O0l&Y5($6iVm#l43gjOj-geK12|)&YIr~n&Mf2)O+$b!jM9E@1 zLP$(7?pzq!D=ODG`PU9rg|H@}v3@o`J4qUJX5{@5Zu`I;N1#vhO)9{JzKvNK0CY5A zy#nfiAs~M_VCUXb7`)sPz3s&eXDm0$c!$pLYEqISeOfF-_-XY}t>fWISumpmof>7s zQb~^oRZ>D?HI=A{_`=-zXqqrmRaIra)aDSxT@XUh%tTlY=Jo9RMf7^wcE=yy%fH>E zK`SfR3fxlMK6fsb2<-^F)BK;#6-2w)x*SU*nPeRxm0PqCm5`Fve^p6fQJ$0Ve-CL_0%M5sk~*RCxIh zRJ+?8lNeD+CddYO<^Ns^MZ^5SUqgfCFIw~o@j|C2a9F1p%>3;inv#i!u9|t zO1(IRmjS&zUReOQ1_j&WsVaeqaox$E*mGW7;^5}7^Z3V%;W8=T`49>0rO0(lM{2;M z+}zU^mn2&KE?$fz!vkffA<8!jq>6#A4?~s3T+*<0BvQq1f1i{g#Gi&!VEYu9oR8u@ z8c?k?h9JU=x^BY;G#dVdK4Q-U9FL{P9kz?!1zGf#wW2&1G3sZWq=V=>2pJd8Mt|?u z482Gp1p4#jD?O*713>cm$d+u8EtKV#tDp&KXR_pq;fu$fV~4Hx?hoySg@tzU zU0)rc4!;x|8e#A-+kK0=1c>d0$vUKl0l4J`_^S&5mv2f7 zq2btx5fTO3XB+orl`ovx#wBrMP0LYzFXtBJBUDd*YR@zDV%r5T^Ai6LRA+^qKo4Ps zc(ddq>BRg5qQUMjMjPse6SE7-_GmOhglz4Q;dO`Mn*0{L7pD|75)}GRDQ@mC0QG_) zD}jm*OOl>q>@8R++88T{Dkp3x4gea%r|_}FK!T2Q?&kfrMkp#I0w%DdUOg?;W)*?2 zCvrU>M*x$BGax~6Vy0sQfZt9aq`m)kDJ2#K%Los2;{;4jr6)U#@eLYxi(@%m|hQ9=_Ysk>@d)T!AML`YhbH;Xh@k zeM@+Lj8P+0VKYjZVPyUD*Fh+Tc=eD(+&!0aN4?yG# 
z@D5DD_Q}Y|C@Ly)z?gby{w-j%+I(~SQT)r!sh;>>LfLasJf-jNN%+VEOfn4q3gz#F z+L%Ce@27QWIcf3+CWg*Jk-wb=H5&}}92^Ws8HO-UV=(5rZf+^ym1qtr4U$d)Ix5}F zYayMUSv0p|%l?){*cVwG+0=In`Y}$2CMB<%7Z~&eK6omamMqk*FV)>n^VM@G@iG|ZJ}ukksAm(Y+)*GKn<$Bq!MNAnSG6NwHMHGQ6J>f^aN zJhGCqKk)r%0ChM%etyx2s>q$2^BIm%{7ssbQtLU*-)=pld*6aNYk;#|qjMC6a1?g~ z0RsE&4J69AD?72m?u40pmqA@!odv{v^lIo;a_na8MGtfdDMj3@U0QenlcgAbMlfyA z`GOerO+@B>_bG9MMW5D1vBJ54fEW;94D9F2@I4)=Zu4@8Ada^eQy}5IuHll z8##h|F(3{?na;(9zxQ?poLS^iA-gJtG{T#NCKG^vDLgDBkBrAuFG5N|U}G;KcoEw2 zmdezPWVod4Di*$kEiunZnp>RyI8^KeGxCm>6 z+sfIZ0^EaeoZi0b!C0iD*WpEh<0~c!YR%tzr^6Tgso@ZS3P`Pm7*pI!vgyCVN zhlE zi-<^Qald%*y;Wzu!Hk8g&Iu)(xjp{Lz;R#82rjFE7UA%O2FCj>PjI+AE-2t(rhnn# zKb~{>A8-x)FiF!rFs-&*Jd<(ZvWIEo>X7M4{(b+3771Z zIDaS&1Tdc^N+-YtF*I_sA!Cm#ZN8y**T#yz zJ*KgcbsNEXh`Sg`-O-Zj197W$hPOKPMs4*q$@lTZA?yp}f-e)B6A_oVtTgSC?3Jk2 zn?MS>HC*;m*xzjhlWFCvh(jD}P1a4GhKH1Ybggg^z;6Em-gcu9H;Y{{CWW80)qf;VPgeV+2H#g4cRHzB_tIDDU3B!2TP2!BGK!~_mFynBTSja1Z{YvR0<`_|^gsKx$!X$zmB=x!JJWMwCqP>8e zTUkCH4)U-|DM%)Yn{uSUB^8vVy{GeQO+iZ(KBaWSQ*L zl=ttq)yzY}sY4pPdmfQ5Q4ZkSm6VcAs{NpyN=iza*rSxH%|of0L)dA~Ie-4Gizxx| z@k;wM;>vwjBb^6ZiuS9ez);oN;!ufbDC;F56Vvw0O&mBZ4mbz625qaBqKc#0S4B*jgu6L* z4Ym#bR^KcC>u;QA&q_C{H9uF|+xO64Oy*;o>k)`d-PJQ)`xIaMkBBlTexMgF)}~hL z&n)c^$bYiK`D2em_ooleQoa5Hck<5WKRkCZ+`glt>5fTn_2Bu>0mqkxR|4gD@~Az+ z-mb2?@tuj2*PYMr?BBHMe^z?x>)Ya=pFiKZAun{{;Qn@JsZag_?G;~M*vyD0Riq+&W2s~?e{bE_SNfWR`&hy@TJG~h=H`IAUz%beOkDH3H?29-r3(8 zflY<={NUc-G&s%^#5d>w8LD+LeYR|bzIV>cMCUXo!!e653kS46({*^X5H@GshXL)M z*ERVB^<>&dUCTJDqNq~vNoDdut%a)Z{iT}Q-Utb_`)BB7dno9~+uzdLBHJ&AuWuUs zqhwOw56&Nq`no>{D9-WqKda(;&3*rXmj79e{D%Pt4%7$GzlR5%O|?+4C^|ZpbwWfV zMSY)Dl`89;wpibXoUtM~9v=_c-+J$#f5)p{7DvQ+{YAY$Y&igUC_Fq8oa=Kh+$4Jo9~XykW*DP0kmw1$g=bEHtD{)v;J?; zD&IV<{dmdD%q(HyXueF-`6I;5UqQa;x!n+if0%tcK$zqM5aG}nxKo4!xsz|zOZ;wyM<#TxF5G6?qG)SKP!m6y{ zpvY-8YP+Hc6#=QDUb{o?VDj^f;ilJTBXo!ACHRon!OB?RVsS!XB$rfGO)Km71%3*# zDr@)iWza3!hu;6u#{f7M4l686Oe?}(vUA@mcE^yKY)GlipUf+E50J+-bOLUzVusPn z+T6wKd*;k#oPp&Zkco9U$}C{_;^8-xGzafLZn~OV1UxgG{PjGE>2zsFjurP@d&z(6l 
zlNALk=q~7cQC6L}I$XH2U*VqD6`@*`F}k6S@!g@!hYFgQ|GFH2SP7(PV%Z(o_nY8d zdJ0xyk9pGYTen?~Lq3@jYzd|$b2TZQAd97bZ8C-wXvik7!bw5XJ29wXr(*=~&*J9J zghGcjC-7}SIyd}9sUvg$J-cW4i%gcM#ig*4<&13FGVxTP5eS9e%}88G)yTu+4w54! zNaK3iSENAXZpV-~?LXct1ImR*rkz2Pv#z?AF;d8GBJm2H+~2=VU1AUP$;d#G?ox@~ z*p$d;PLX@puFZq^Mict4cx4{E?cDw2qEVu}l5I9~isav~#bEqj4AA0z2l^D~UY^EwCKrHxVLM`1|TkoylF zOmUkrapHFRb_I;~{_ye(LUr$VMkyDv%is^pmDeo3t^5PSmW1^S1hYOY#n>PE2FYBw z2F$09b`?6c>Yxx6%G@F=?z*w$ynA|I;iihchvsaD4MtKTaLEWp0pKJCBTArHA=&BB zn%Nn!O8`@|6APqAC0O|5vV$%usPq5?XwPjZsBI*`iVC>qDK04(R0`=m{X6}e>Nq{j zx@BZeK^B`pI*Pb>Nq8mL96bNQg?*?vjfHgd6yA*hqI2zPz?@@!+W3yI!1ej1fG=|) z!pO4gs<%z`gi@-ly^W8d^4eppw%gEBU@wFt^^v@wuZPzcxt~VHNJ*CYn`9=ukg1O$F$ zy*2{c3G9}#lb-t_5{|#;7=)Z~TKfb9$gqa!7zB4yY8{X(^qCZu3KEFJgjf@(_d2|? z>|Zk^|0>ezLMyy@%|Tff%CMz!1wbz5vb+bo=}B%l1%5I5E{zd|HJ zF!W-(Gje&|TH;Ib`(iL%y3(=KG`qXL+M_Q9=D}I?J0PI|H+k(y97FcxfLNe+pNx9r z{G%$2az${sG!343J<3%9+y*92`ryIAj&98j8%n7mqO2seG?e>6BWDlkw%=EO5gHmwwqYVLpbhMT z{Zx9^tSh-D^fNT_mQXHHQPGr^gmPj`!9Q&JNlDv{mJllc`ACymv+X8ypU@LZCfrB@ zQcNQ+Xe=;c>2P-%i7&NX4r-}ArJY5Az3gFA3iKwCX2oaKTrfVhup0sm?W|8kv|XJ zqWM9feaS6_WGxEO554sKnSv(4n48mFZpz;IxIM<`<_cm}@%`;I)B85ONQjH0vFY<} zmg&|2!zHsSDNXZa5*t8v#~xYBeQUJkoPeH}GNq$HjWF4*hQ&=(x;bIX8@uPpH;mgQ30i%H&vra@_>)~4ULSS5y=4vnUn}EI`ZIpd7 znS+oBF&O%u5F9}Lpb6vmB^<&!QHTWM=jU(4>3-VR*B2q~R{-?C%sw-sZi#cNiMX1Z zK4>(W`5aj~YLVbd47fgxb_?CaB6!(F)(H4iNid&_KuA*vWcDwnGH z3}@O_sm(+dj>9~2dCW%9oovjEB>M?Y=pp+Fq!URC)(5X3e-N6mNHBorQh^#p5Ya}V zJ)!0ua&lXpBkI|-Cnr^p5dMu%oGmjCBa%53Yi8>7!ra^v!w>XGSrBGHXDDJIk3kED zvP&P#=7RVb#IB%z_!aIFhelnI)j#N0!1*r0f`&i`q+#B(1e~HYBAaP}@e27u<7|Ka zvKm#uRm%CG8v{j4nFBI<b!mBdg!nd1o8H}jM#cKZJVPHPr6vWwqa8XEwO6IQIhm}reE3Rwy& z9<+;>``sDZ=J4!MinI&jSy11`T?fc2V9o_qWzteyRK#Ra7rJ>pJ8tqD3!F#EFK1w@ zDBQcW2M4iWP!gKmQV0(q(%Mt%wA_JhKWfYv(6gYbw16I(Tl&ET zf%^eQ3Uo{=U0)2rfe8d0m|<);e~WBN!>rw&{ECx@>HwGh`!zVwk{Ht$1-wTlUByO; zy8wkP%?viTOg%X0L0gIEHc2QS5ehna4eyQ7zG0(*_5c$i%)$~#ea%%8N_O+F&I z9*M4u6GBKNqR~k_pI12?*hh&ido$IqUT=&?(<6eDtqppp=8+nd_5*!TVz#Vl$RP}{SL 
zkeo}*HR*>DlOdQ@2;EaKVgT>Gies$nh7H34JGD2^ zDzZJ**49$8;p~FfuU_588B98jqQU2WFh}WMK4F$wc_A!Pr8Q|KCaQ43I(Tzwu^V8K z7UXq+T_8S)I=QRM_CjHf!9{&52DKQ@E4g>Kwvi?TPqRr{j)N3fHq`mg$e029*hZk@ zpIrN(1K$9TSrOrMz@G4EVrT0I8NJ?1AQ9W{0*VrY9L?Uu9$UL9gvlyx9(jjGju9LA z^ZH^0{+D#p&@dDtQ-b)lRcQ(K7_;wT%IA&0P;?L{Kq%d|sosM-vB^(eLV zaU7aVomnIH<#Z0T!LdsXJ)Zrs{iCs;8eX3E6s5Z3$ugN-Mxzpg??mxUa9b*Agc? z$}gX^^mOT&D>9Wxu7qb&54Dv@MIy=sH;l4D27~XqKLu^QQK+QD89@$?$kHbW>dr9p z_+*|Vd{*56H9OW|Z6ifyl5VSBM$fkUuSWapyV3iH9qe!3Les%Q@Bg5f1$}T=t=9}o z90CA&=Q(Adqoa`xL@AlPOZpvY`@Im9!FYMFYN?r- z8OfIwT;6YmR(=C|+#A2>W-0-q2!{gqb=tt^sOVT-+|`R!A_h~aMDCNhp~4*%`+h36 zVee@U&?Xpqp-Hg~ynS1ND~%hIY#eWyeQO&-`zW>qEgZ(Fm;d1g4uRN^!8o@KelSkR z6q(Htz*Eg_GVJTMi1FbFPnb~qxR4P}4F{8KUnR%~GJV@kE15D`uyX<=S5i^UoJ+?W z%Mxv`Y0cN0RCMj;nNcDP?XIW3c*jv_ z9PtT;K7tUN(d4M>{9j_M(kP2 zWI;|^A}K)@ELgx~p-?1^h|tgp`sU4>wHz!oxbkfSu)DH4>xsO%F|su=vls(W*}51Q z;KQWufH*kHX#G<1z+kl9QIVbZt|A#69?%u2o%Re}k8D&yNC*DmhZ~E3%3MvWkG4FJ zl6WS7SY(oBjK>>ASy6q`%)oK%ax`2r^CuO#W4S z=9kMU=?7Ub0s~ASZHY#$h?Ni{^y6vf>Q$?RQ!yRHoFv(i-mo-B6A^m(F+;`Qg0GmItH%_h*sD=OwzQ)9R*b1rBr6K`OvQk?dbSd~fME9Yu3RTvEj}K~MpR;FQ zC)v2x9u-jlu0Lmupie+tU&rAD^mUjtEOB*8?UbQ{OZFt56II=P1*6aPT%4IDPgyC) z>usg;^E<#xHo~1y^Q%IImX4C?nceDbaGik^iurQOTv86ddIYh&zNKXpXh#kAYjK07 z3P9GVNYL9p%Kz@g{$xB;HtAi4$h+}=rBpE*PtXyRl^<{zy;sEIT-p|gF98L>{7Q9b z{$KE)0$xCqa3E)hMz28apV6tYHs)w<>fIM(J>cVlvuF;3<-KOUU>4=XP)-hEWP#M;Z>q6!_*$gy(BJeYvYnwb$-lK`%pE5V*yR zQ9u#Jy71meDJ$$rN2~n8BI&4uqRg#1h9uN-E+=sz2_wXg87m+nxunGkyPOC_}o+2un40bGApEmg|`=}R>AXcO8tl`NYOlcM;e&koi`g@%1taG7H;tFioJXTUfhjP`JkVm$=2sQd)cc?PKP}zoH;_qKjJT}V zXubN#|I|39RD53g&c7myh8X4BSX*l}JaAlH4*6gBaIT<9wY^zREf=X&;aPz;QX_h8 z;~G~Do&zmUN>h45;*5?3BP7Z|-=#Ut9aybg!twu6+X?S~RKaiZQKF5dwyt5Md!$yT=5=;eo2kbXhSbqD>%%*o|O1GnlsWcmqB60kY@~ zjZZXX!==}Dig4~d`m`!PHDFO#%5O1Kz@^WQniWbwT^3nv8_rB0@+7B{r6t3AJ&B=y zvO^&{dVP&;vtHhckZr&^jT+5bY~qs_5E4Q}6zsu$9WeyLq)di4VN$YXm%dj$0H4)Xl-i*&s7_`xfikH4DT6Q$|M~_oC99%NQ)~%|7s_^aDlBZc28;Pg)>S*J!S)}|uVkv^e(LJgGGF(~jgjXFKx7OxLyFnx$n)e_u@_*D 
zTLu$eqsO>!s4`;a&V!_nHa2Im1`(~L(bv4Qiw{FE9O7$c%%qySE7Eet^AjAVs4J^M zydMvvW}1Zq1So%-@x+qG(fo?p2t3G9;gAiU3~R@eEv0&zA)BLR+8m5Q8_DQv1gs&p zaCeQ)NLdKJF>yXoJ$4@N|2_^|!feXKN)t1$1ET>J4|7z zY}f9v-Rg_*A^`A}Kfb0`9wqRPp7GG4hI(cpGnZocN14CLc0&`B3p`TmP*~H0wwB(O*b^E_ym%w+db?igDU#lMB`!^*JXs-vP7ZEZiuc1DO|^HDRgen+Ij*%TQtS`L-85yMeE?sH(-akW2D2XUj?kYjJW{{xu;-RUd-mZCTp(susE?=zaY4hs8%^0Ti?!(O%C z^#Aj|9{L?q53$jcAp;%~4ZzRX5X|BTfwPZo*sKyx%O*|lzs54}M9gl+9;`!@{zBP{ znkLB&bSt8C*e~%$(#d|FB;<(A92UoIdE+57v$V{C%w&Um^nwk#DQ_XYk#z^qkBQ9e zh&ygrlfHK2WFlXNS*YKo5EM=B$y29VVF7Q03TTsTSZ_-Qo-wxv221k?Xlo}S7cKIN zjAsl-kU*!NJx9ylIX;N|aUX%0DSU0WMzc%pfnsKokow1a52KbsR3eg@v{%JI#@mY-8Ce*6$?kmrN(7Su@ z$}eL@G%YI{)2+XyC(pR}={fh624-H_2NtSkIilu}ec(dIoL>2JiL4$G8WZ-d zL+P#5UYz|$YPKDLRX-G*l7XRmMZ=QD9T{pXv8`%i9^f7)mKc$QF9 zsdQ-r&EK+rf=_%Tv}&vQ&j__H%ZhdB50-E#b0rf)PU)SiB=F(H(P=hgZDI-Y30M5X zi2SqaHg(?#4`(xT)l9tu1xsfkuwTrQ%DrcP2enJcrbWYmVbR$ZDs2D<4m6V_7Ksc| zC4 z3DbaGvFxKL2(R!#&nx|pqkTgDim?~ z6@wa?P=QY}gnP_byo}#4gS{3k8;N5VER%|LQ|g{^I{qnc)25MI@@NEszCC#-?8GTK zU#5QQrH)SFn)nbZsd-Ta>jo~%RqfEI@#yV|Wj>jsaT<}ykS<-`pLb&kqCMm!O$@KD zOgDe#K12Idt*2*{O&W!NNnhF|(hU>`8mT2Y0e2$@tZfulcPLHv-CrNUEP?xZM5E(p zKdkfLHSQD7yTj8%RIBCtBghx{!x#z%P!s;MJd^$GxcK@x9GgDB(K>PT;WkMyVJB)1 z@NJ@g2fKc}l+koBz*D#LmVBvpIQ@6s4ki!ji=1n15gAJCm0d8|{VTtZloi3!L8brj zGC4e%Yb0d&G4xk5WFVb^s)?aF1*;{}Y~M}?c-zM$20cnSPsk^_EdH6IS^cQ)XA50B zT`;<9Etyfeal?Mbj2S798|oMb?`qQ2y{g}_>StLmq3I!~{0B#LnshP${n_L;m5ls% z6jsRHEz+Wu_-6<=0MV;+cJ@T~21R@&@eKG(=zfic`>t%fyg!- zNl8VE7k(etdH(?vuX9z^l#K;$`jhklK9JU7DMz_WRbVc=RrrKL-x9^E5@Iafp0+m4 zSAN=3^pT%N!n_#jd9b%jev=>^w+wGT*A|2UaGb@Z5G#4HNQ0=$=1Mt23 z^@W)e^uiX|v@V*}k28uIz!Q?tDQD^+lkRjV$XcVjx`cDR%CPw=me!$Wj9WKX+l%oNk z;*3=LiuB7?{Se z^7^`7s_SwU$NlM+y(U21Ls7$S&xr@)jKbTGjmB%TekU!(($21F3fZ24pG>Yy`ExDe z;n9Vo_&bbL&M!4ObPd#PUqw>bL8-J4Xcn7HS@6%EzRu8-iBn5Me7C&hs|XjpnI&Li zSTH!^7{E&wZH+!M20cn5GZV7)0-&4p*31Wm|Gh4@#rcf~DMm`KO2p6@e983}6DKOa z-3hLH@}jqoQEo(gsB2ocAVfq5G(X!i6D^~zSX9V#FhB;G8}t_sFZFs-D$Be9WyFd7 
z?$sQ70HA-!9&0v~%pv25&)%ohXVI|oXQ5FeAZ40L?G6oFg-;9|t4=pzC)OSIJ zQ;Xx$UX~{p)s-6``ib$x*I1j`6VSWUyr)6(tjpmM_TAP^Vb#$B-syrho_rRJ8 z#Faf+WLc$?%1L^WLa&NxU+3Y<_Rbf)7XuIV&b_E!P*b!lNZ-Y%u1Wt@=b5LD7xgH) zSMV1lw0^yw6Rn9p!TZ%S5Aj;XJb>22C!BERa7qtky`j0>*4+F_m&di-XUaI0NH=N5 zRo>xr$;1owx8!nf&P*-yv~exu&$e=hQoX*&u_ijAA57Ourwp<_{r$2_3vy^YKcVT` zr0cW*n08yU?GVXTS|x-9U`eINli86cWy6RxGI6jQTe7HArz*2E=;l#d%4qoD$;RUk zzoy}+ccp#F6P2_pD7xfRgv_p>gdX)5eJ{Z@|SQ#;_P5cK3LzeyxIt!(HKo+Y=mYd0+NmYBR!&`+YF9{MS@sqUQ! zdlwpg_XvnLorW!gT9I9Qjn@s(HrOsZ_!yg#QTf?8e$k;zwwg6-R+QzGOWjka6FjEw znVt6J7r#S(Jk7vPHfKqepUElPdncAs{C@&Hy`eL`W%f=Xa5w_dV!?Lua&_e(X*Vug zi*bhC4EUY{D4v5Ho#h+#hT)tSHF zb*wKPo<8O6zR=En_$HiD>pIQ*O(2|cob^t%O`A$b$+aCY@ovjaX3@Z@p$>t)N;S`B z!1zlrg|@Px9jh>L8E~oiZO+SH`R;3{zK9NY{_bodopM&T{`hDoeLBRU@<< zE+Mb)9@-?MQRG2|V>UG0XMno0Yu69HKa+^4vPA|5E{B?1RCMn`SQWo|^#+8MOCem< zK>+!T6NF7NMsYam@D&w>qpFbpUK)SBfo*0U%#vMgTi|HUI>DaHm|qce%Fa@REBDby zI=5x9gKZL5Pl6d@D4EXuv}yIPfpCGROxr6g)Mb-5xArdGc~7=O&_91|-&2ud7d+lIkVEs8)$=6yzL`iJWGYCGdUU;FNh6vz!Gi^*q(}N_EBVsc`F| zF%&eV!rGFekBG#?dBQ^fgp`*}#Ez5W(*FL5!JC?uWunKQeh<~3=8Fmw(Hc>~%bqZ( z;G3UH?+ytSgK}mV0;ZUn-4W^0Uvo5B#5aA;a;OUizkG)&8uz=K!Dp1|zi7^?Mb2u`ct>lFlZ{EyZZ0&QGF3|W;DxzLF;a8^8p3Q&n zjs9Z#oWAY_7V;rOg+tUVsnDQBp{UTRQtSwY#Fy*6;3wJ%SiNx|DhQksmm{NtnCxhgu9J9Y^jPO-re`$jIqH&Z zxsnbvMTNs){Ec6j_WHO_S3%pPcQ(M7B&SW4+7RRug+5Y_{A*5QqN#COsq`{I4a(lV z9}3eCxf>kQ?WWnfb?dgrxuF>Vz#Nncf}5Y9Q7_h01bRY)B~qLroDPpGm_p$PH(Xfa z^Pvwq2*@{}Y*7A1=(CJBZT78jpt{>^EZee42>iOONX>|~CXrXl2-&aEn?3+4947c| zJAB*=Q?Pk4M`*tK{= zUdz~Icgz|1Y1v#D(YLMy-kgOy6qq9hakAnlE<_gvOQzi#k8`C5cZrTCLt|oE19%N< z(^L6Vm+74DD_o0_>Jd)7kJ~U96lo+m{+Uv1RMh1?RW=2fGVJO^p;JV$!OMHY&2y!{(dvR>Jo|`M~(ut<+)8g=0K8)7s zH^iQZUPBJdEod9tNaeUc@4k=A(l-x=)6!cNwade&%`7@B@=M99d(XI$7$FgKvui#0 z05QL4xQ|bRBjXZnq3o34g2rzOR4sH9|LtpV6G&D&d%EpJNAbYuD}+meC@Q4T5!!xm zh<+R{lo?&bR?9Y|naLihmFQbE>9-$%`*dm(GGhcsUqBSTv)Rd>IWEJ38j^z;CrAeO8d8uM=*e}{##gu|eDt1Aiy zFu^vkM!u7$=jo}f)q`Fg--;#)!{Ip$hW&3moyc7iew$x?q;3O~#CI2eX1y0H^NvYg 
ztJp&5UAkxKQz5%r*)aIfK@KWJgj73IuWiUddmz5sVvHn+SHASN*={AU2IqQ@i(H|( zJ+qqGSazajjgNPg+ra+Bh>)pO8i&Fc&@ah2?g_Pmz(*Q0oS^A(&3@cuq5y$?i>iLT ztP8OX&~rBjh|GHqKgx5WygJ}jcatn_n1#nmXjMD*w%NxG67PkJW@R*g45ZS|LwSG~ zDyNEn0Nruy1tS)ypu=Ojn%Au3I34z2CrhC(vb?E&_`?ePePnGOfj!i|Lu`G`X6|mx zm{Ex(bWMb@Jtk)WifmFyk)k}7(cxFR6C~$QS#B@wtusN9X0D^7QR~N&?Mk0EDdI{F zzG`P^IACsW!hOEy5hQkT_)4cI^&v*1SJ=-RL~Jdf4+G!BS7TLjt6sh|LIFpN2#l4O zFEtwd(L(^kwtylUPL5G!fCkWb`;%ql8yShIYHj54tW5Ta+K513wMarIt$2IKiSdW_ zdfOfvx$k1ZKPki>A%27n;ROmB!1&AJyK(E*1OoWXgb8H8xrb0^4F#SqLT9(=M9=OD z$CU(2|7HQIW?Q7XL&+yY?x8Wl{xB=}n#V(qIw%}0LB^JOaY(MytdtPzMgHd^{UYvu z+s_`=ea=-W(?uynH1TaZufj_xADMa~uju{Ahh`Ud7LbJFv~wZ^bU|pQ#arx|o9GgWx4ql&^T#ICK_6rk4~z;MP{W09B2P$ZErI6t-E zOgyRqMBAPmL&tGPd=58mz3GDL@ihdj*}uNM$FB#gf@yKN5!K?OxuZ~bl{qvsM7JvS z2xe|=m(2O{KP6g~#2`hb+H`mN5(KuP814R`WE}fdubC*}*?b@iSB>KUkuvQo7PE2Q zpFi$flf|9EV^2KNdLR78_-2%N9qj$HK2Jai4Pe?{d6sXY!XI#-y^*b$NH#uvMOCrw4t!e-xDCPN58t1lrq}x_e;r~R#-0))OoqC~U%!4m zTh{IXaAq-rj0kf#!T?2PDd07i{~VL<{-0mpp~g;u$l^4c{pv4>vgJ-Mf4<~?A*hCWC`zB1zjHY2HgfE0nENS#?3uZ+i$`=zI-;ge# zx``wYAZ!*ho*YjV|Eg>?C`5D{5^zVz1m4*fp+kk@(n&e3qKN!(o<^+X=Az&7Y-Bo{ zE5Pqqc|!e@)aTg362u|ovZ+v&-R4DJ78lbo@hy7w3SYiXQ81vY=Bw!`M>>?ZZdOoN zDg#iGXgX+zwO{me=H6w4VLBxA$-ZM>4V709=_SZkJ>uz*R2Lu~AF^ti@#Du)_-Bdg zffNO)6r(+6-yxvPMoh#vprip&YVRC*#*MaJpMfHmzG<`**Ds0+d5cS03zorpNG6lw;d5=&D<{YBWli;QqfqZT-Vy z*12@IWh~*#j{}f@!oW3K^=&d`-)N5*i}ZPf&u%lb?vbKvnZFp<l=aQL zV;u_B6}yk|66KZ2Jr)PoN{BGqCtui|eiwBe1Sa%Q;bjw*S2a14qehw^5Y!-S;eXsq z{|;D)C9wDov+?yZGL5b>RR&WMOQ4%jzgtP#$E%_i-r#Xt(}9Mrjq>ZQz82GEt+iPC zQmYWoWpxLlh;3Y_Ynz%+nxb-n8c{3?gt_I>p<9~;JS-X19JB+yKFOs$Y9Z}EB+G%YBn`V1Y`0wPv287V;Syly#@vIq#BY9BJfT!`+yMQo|`Neqqdt%!%}GM zTKm;H9&ufkQMlc-!~CYML;kLW+&z^n5E7g((=j21%VtZb2|@?V14W^*vqed97sm4W zn0i0NC6&3bc+#|ERR@a;$BY(`qKu&cPNXBhiAf0Y3>GKPg&79Y7P)^&RgnZL>L)W5 zL{y^eUG$ETK&dxLdGi;&J;nSl6MVzuXO2GRa#U2fGoZ?&xqg{(uhDcj zZSKhE?|o5lg~osYO_@i0#Ib8tI_%;m{XQpemF?;=x3$fNO#ksFaTX)q>wjyG5F6b- zgCd<|qieg|(-%ZfVcfP~zo&6XdNb!!as`EiJP<{a4H@|kQDefCUB0^vB*Xz)Cm0wh 
z3RLAKX3R>EL2BN2+l7@V7EAWsTM*w)fk}pq)c{A}iZ~$i&PPh{rqS}0A~~~WBa`*i zq9%rhmlpoxpeI5t@FeFS?SJvF0!5oX?gc(mA8eK(sFamew%&z z_G+H&SwgVbK!5-7W86wjh3j&OM8@I|!~zAA#pe+Dl<=Z4p%kc4)Z3@8bcc-hEYtc) z5NSUu4^7`BuwrNoec%VBCseXRckgZ=_p4hghU15tp0cW;h@9h!w`X1-cr%$cm+8R2 zb5``np`V6H=w!Mx8oNxBeN5on6i@2gk?Zd8JvX76ay--dA_t{$>m=_3+JQdNxcIgi zwXeATFiSRvxt0aEQYQM83|R&G71+);YW^#(&eKcx&A zoizVRJe7*FW5;q|_3ccYT=g{*L>L(I6;%O6&sL_k2}$iY)HZprK=eQcI3Gl<*i@8Uf`2(Qa70qWH5bj75A|6>%>;ATMH^}!T=X%dim3f zwK^CfCaw56H(J;{wI54h_*vc)p@i-AX#F06RW zlF0THM*25({QUfS_vo3%HhYza*F-;!AWvuK-p3SU{4JsTrEyNAgBg9piwr3ZV+KQ} z+=vF&GA88oyjnw{!exA&Uq?+gN}|`3o(LTIc1jPw+6D93&N25lDK*?N+1;erJ?>-N!IZegMDU*y26LGFq>U1vgb^-geh( z*wA^N$_3YHLNxNMc0ZS~$9=2+#6fnp#mlbKg-@aMAP-p93r5QppZ(*udS!DhZS5$+ zS>(xq)iYzrh7yM%$WAl!%}0&D=l1IMzGJkZAk{d-|A0nx@1jwqF!^^BWn4sFD&Zs~ zV$hEF_4Zb!B@zfK{TY!u(|_PI?by+X z(xq+d)`O*GEk2M`h&O4c2xj%@UfVU1^Et8d&)3w?bf3izDC9Q1m&2kKp}($GuXj`? z1A)U*l2rB@_3k}J>Jk8v=E_yL2eG@6e&<8WTY!>Qd%m03O74d7gUo26E%64`Zwn7^ zBkK;An3XtoZaHub&#&~|>dL;At9_Ku$+8b5HrYr?NW2>pU;GicewfO8V$~XF=@)n0 z*x-1RY7GmpgF>l8kv)!2z%Z>82Hh!taB|Q)w;Hc>nVXe83aH%C1*VFmRVoQ^PwZ6) z9a)h0lq<4_ey`;DaY^&yP$Wz`{)A_Bnrz0iBF+T`yU%5(!e~M<_!HD~7&5M5%0BKW zU27b}1ssgz?dP(bB95yk6+iuf1W2PoSt}sl+IY|EE6*9p|BvPjK2T?8rh)rXo$Sd= zF{c!Eop|k7BCXGB@4cm_!(VQh8_R6UcYFw+ZD+5`2Cz~%qX(3|pLk;9A;_1Qsd*$x z-yJc#H`kHS24W3@gYQnOR#UxCX6(B3?_V zvdL^Z2+0Nao1E}km4sMAWHh2$Fcv+L=&^(>lOrWtDdgs%NiCtTk7V`90ZN~YRnSGF zxmyw+1=)$0#Jrz4uRq_RtnWUA%M<1~x0w?-NN+8XBO;3JoJLqu{N@$Q}CrM$`p-n zx7%hi58r3T5WFU~O4n^n^PP>O?ZPmfs%g7yQfFxJ6|~g#}Fp-%ZDv%CEd1qwxOAEwm?Z)g-H!v)Ge zDi2Q$?I)GiZBKEA$R+3Uoe zM=cduti`A!1u{q&xY{vT+vF%4Zu{=NlS=8E$lVYu>&;3+$wxAyAX+AI91#$8C!)px zDuxBqANq{q;|~_}L+${z_MsH-MBikCOIFDs!kr_~;VL2`Fu4k%bf{iF)JqGys zELwq?ar*qG&{HD91TI=Io#yeYyf!Tg8Hs|zcqa=>ru#!5sZ*RCo77*%-)Z6gRXA8i z9nciI0fP7{RpS)kTJrVVENY@WBcC1V+WeGjX#`RR=gy{S<|0(7C4?NkU+VWWRRk?u z5bh(%;)5>kN*Sg(xGaY>mvMQ$AvLy*BvYWz8CSAKE3P8o6a68*@fMo^mBwVrl#bSs 
z+y3ga#TYe~XV|Jdj~guzMaKOd9UWT@!mTV(L@grRCys2t2*ja>v)8C7%J)REQaaRay#5c|wQbaG|*S2$T_j-zgq)NC`}0Bm%}O+9V# zHz+GhhZ#0IKJHSeS~`e1mpCZd{|(2q;|9&xCRr}g)hpE0=SEg5kHwu7!>=oyPp|sz z9lpArqF_UiPG+rd-@e82!9_LpuO08<$Cam+KAY_Hpt3S;--2xy)E4@->!F|AQ*|MYX<#N@MQ@$Fk%Ejsq{^;@_6w4$%(&1%%NX}dgar^Q5A#he||IBZmzecB0yLP32XcdoMqK)alluPm3VsNxEE95J_Un7?QMKE6#7q;)~9Y9zL|t zSVIqIGUt4p^&rO+CA0b}w70bWRF>BeBQQT>o6R?-eZzgL3!Mu!H8qpw7u(`orbZmh zfMu#6y$)=>b%4eiPQ%$;Tff0pVFS==D$Y;vo!DiUzSsAH;U_fL z8r$sXF<0+xS=oD9SfWcQd7Rje2X@G8H7KmfKf|r8;y=E9)NQxrb zM3k)VU2RW5{08pbRGKPY6$L8BHkV0Q?vUgPpEYY#*iX25%a%2uewA&9X5_eq#e`%Ev3shZ`Pt;=hSQ$o7!N}_P3AEKfkQoZI_&&xj$}2 zbPIm6U}r#pW5$yw7qX`Up9VEs&FEWCNEi2)w;CI_YBfOP(wja`vxbD|Um~K}v(Iot z^Zw1KMv5ZqdAFR1^!MQ+^bsM1-2olLuUxxUJtQQgO4oUAOu0 zX*XX5Td8RT4zvx3)m--LTbBtFCoY7OtAA!02ljb+dDice*vq^6ukHs>3LR>_ldy=Xi8 z@Uf%37iZ0l%A5?7|BWVd3%@j)RM;MFFsi(`$istG7}eWPJgXCGJQ*>*`@)4eu2Z7# z5M2h?*=fl|de=7L9qjd0Fx>OEZ#MvG=Cm{7ax|({V&&Kho^uMFV+~Es%Yem?24_y5 zv}6xFwqjNY8V1&?8EE^CT-d1VG!0q+Czy%p-Mja!zV){}xL)kxw|;#~Lqio)Q&U98 z+Hc>!O+I7fWD<1Q7AlD;ll4<>W3qLH=BlAz8oHPu^GOP&RQrf3HeE2Z-{B3^p zR2<}wpE={a?3q>kNi5y~(1xtx`o6c&(*9_GRq>LqZ)xkp{1F@S~0$+k}YneWiMQHUzaegQ1mpPLTd*(XfSX3*eD<@7dF3&OMXF z{7Nk+V?^FwpGvfk966E=+vt{I=+H+mJ1{nLsI|3)hX?YhuCCLj`SF`Q;+l9*Mbn{L zUS8h0S1$_r8sR-!8O5h*?hHFTXLsBP-AicCYpocmv4(Dl*hr;+G4%7{#kOzX-gWNW zV2TEJJDm&K+&G-NPf(E(LHG{UuAlp13F7KJ`LbH==Kb{P!#xhNb!^QfK+8YBK8_^d z*T7)&z?^nA^;rzN0s0U1OJkv~cRJLMdq7LIfXX>P#A3igw}zIHv@jc)dv~YXTD@Dh zZf=8GDZAK&YBg%4q3GPB#~|u-O|wqM#%i?t-S&3)NJAzF%gShkxJ_=@#?)*d*+5&{ zsjECqK&z%rXlJxa8ya;{g@GGD=Yr>Ztjx>G>QiOh^3j^%-iLyYoF3mu%c(k^i)+rD z>1Aeau8n$O#Uy+8E+=nK&|w{0u?(ogT6HK$IxMq)Tl1IXb_C8Fhmckeb) zxisY&I1IEj`h0f9_pT&L>5&p#t-fC@e4KFT(0ZnGuRMCxn2E;gXX-SzAEd3L<45SS z)L26W;)=rldsbjj&}Htz=CCmT+@Znnof%wv(W?v})Y_pP?=>Jm8HnWL#rLtHFDyo8N^o63CE2WYU-3#JA|ZLbmTIVR z6h$N<-7ewyD9HES{Z}`;cJ12EkdZMS^6J7^gxY)lvx{{UtsXDhX5BPsoMv+O%KSyw zj4sT^f^j_(JEo>s1<<%_L~Z`!@zbZRz^(^30!1Pax8vfUgzh44U*|4eZk*~`EoaxT)?1&$ zAw4{^pgEdtLz_^K&}#e6*bS1!?Qm_nXIo_e-?Fha)-ZIPJb4XOP9(p|did~k+D#$U 
z=mRy@z_F{tblkh#riH`N*N^L3SjE@|HBN?(N+Sc}jL{(M)0?ugI`!(+V?E??pAouE zGScmxFoX!=ueN%2<)yD@*<)Q1ws3KdteM7Fw`w(Y)~o=|%@k_3N-x)XQ#_=yt(936E!PP(`OFu8qB!E5fFmcD*%L%+vo zk__({vxIX)(_VF@x;Sy7i^#~!tgX^vxz}l_qP|g9c(LT-gJ@854h(Im=;SMi8DtiKyx`WoL($C+(@!t<8KW+rZ1@Xr{iZfUIG|hYTNdDgOq@;CQwrCT8 zwVF5I1i|r(Sg-Iw7W)gBrDf^LDE8xk5mr`%z+}m*ew)H_Zr;1M>aipiY1)5kz5IQl zI`1I}gLfH-LLW-SfFY#J%5HEhR@A78{WmrP5uoLwR6o<{N8 zbb7^-#Ub&MnH0gPD}X;Zr%z+v=@2U`HR;yqUwX!E#iUb%i(qp4AL_{WXpdT$m}u}f zVIUfrd37caci*u)AV9MS_uTU{JC(g{Z>UzQ4{lkkZ`-+q*>K^9JoB0kapm}I(>i9&T8CrPfkYnTP)4ED{AOKt= zxrDuI_a})6xKW}B(>k*9?&23jl$Ll)XlZHrv>kO&p+LMn)}qbPrhq5O=C>$@nA2Qm zY;!#;>l*KFA-sgytBc+p&YV7dm9y}?pdhmJy4Cd1UAuw_gI8|fu1EPxg`>9l>$}HS zuc~1TdsOqzC>FYnpBvt5|+)*IZOx(ymMD2}y@?lqhjqe#on)?(dI zr}tmF*&(6UKZ8p z;AmN<@izJ3&#TS$o+>8_L1b?hYh0WX91>EOc1JKBuI6>?)@^n6%tCi}3koGE4*<%s zJ^I?)Jj%-}qCrRNyZ><}Qn`7$rWG!#gY(~@yel?o3#5o^|(xBA^v zb491G1OM{?hdMhql+4VA!?oBVRA)-+`i&a``6xevfR}qF-HUbL+gu|QY5`5i(Pl$U z;4h*mpm-uRfCbrN!{54&;*XO3*-IG+AOK1a7+y0lAl5ixLW(L? z4S$TO<+o2SG^NYpzG%@E981X_<6aShDp3fp;flZE3Ee(R;X zcN@S9HTOB6zMcNIE2=d?EL_r|PrfpB;ZH-;J;E{G4uo2PCbSR()pSk=8AOf;r=5Cw zKZMiqQ>SD{O>d2$rzZG*nYj1ZO%N0`2DkL0+hFe^CS~-=n$;8nP3e5};71+@`V=d2 zr55IAe1FUwb0Mrf0~Gy}2NA9z(#62L#63WuDze&Nil-ItL6NJWEk%l>A~ zK*yo2w@N`yhz|d7^U0e@ABd!^Pg%!I(~(K3%{q4M2zAx`o7PUH8A+Iu1jKx3SebO= z->we`IGsU<>$^8@8szY{A6ASSHA*hFySuy8BNb5JX#{vWvZI$eMtA)wvsOA2DC)8y zxQ6ZStxdYf&dF)J=vndXp)oZ%Gefwsi3V$}r|aZ?0E|gViTMuR7J=JGX~$(j`e(D^ zWkQOFI_;kwI3VmTvedY>*Zf>s5)jvSLPv0%42@fiGx}Wgq^Gu%{LlY&8Ha&LlV7lSw zn(dz#^Z?r_;Hs*HO~)eq&N-+)B@|cN&ME#t&$F$*785m2_6|ly$_@Vh>ivFI43roM$Nx=~i)M#xWk_!p z-W(qyq6+&P7#PUN0W`+kHDxXrnwQ=`9DcB{ew^-1-gTHH2XbRHp#v8WLu5#X;WyuN ztqX~KV@^y_pJ{xj>G0610*B2SnZ*@C00A!d|KAV+1Xl|)Gkt~o$Cty0MRZk1mOAd2 zj@2NEwU{?(Gd(mx^JEN=Trj9#nsldkga!lz?SB}0B`d4r?VrawmbMGO83jx_CWbMS zDbvQEnSUuaw;^wR&h4R&dc-8TGcslTp>Ywc(O{?vr^juywRJh-weKH3waqeYUs#wf z<((g3wSXm$USHleW~glwEn3~P-Iv?iY9dn0IXb=<;({0FQrmcsdLKX1$|;M6F%@!+ zVf*?S0PZ#$*Q7gknO||7jg;i4Iem@nBjh`F@2+F1QS$or6sqJ#{a3BH_z{}Mb^3IF 
z>i9w)-nv7F+ED1!NSP4AE^hW~H>H4i0BTk~^{d(7IH{b7`JCB?D|7>f=6?Z54FSi% zc&`;~&F5hD=N$6Aw0f$Ij!nlQLv?#f8!H?D6J|xwskBq;Oxl zH23#if*HnVmH?$$b-M|(Zb?eqZ?m_X*a71c_s^J@q<745m zRSJrqX0__Dx1B(IWHWu6nR!LNT_0;iKUY;TY>%lf1otJ9p?qUcgk>pCYxrQicI}1@ z*Yi>eIN!YBA3uKVfG3kmj9aDqeR=QAK>^km1VA# zJRt-JtOLLGJ%z&*w5Nlz<`~CY194mUSrC&l?_Gp55jv26#lWoWGxy z|EMFsqB`z0$!?%qeE*$~Wq{dGhr_RbK7aGOzP5dOx~i$Rwh!s0kZ?&Tug(r-HHG|= zE_{{xy5j?ss_ojfyPA;!FPJ*4Ot%b{$FXs+b+M+mmveqK^TquuS~?t{8o!dCuTSf@ z88{)EYw9+{$RUL^e)-w6<|s1KVF9)+j~WkFr`31~Ut|e5E0_cR#xR;tJ3u19a)Gqz zzI20ba9gPsU4Qwv3PTY?U_#Yjp_$r=Z9l%Lsr|u&Ic%~2MX#3<=Ygw3w66$!^Rxr* zm1AWB#;xJn(DR6y=Ie0lyE=z|v^{Uy`^D#}#g}YqIyTk_9N8c+wm#8fjg~5%o6FII zf^DDlYtyC%v+A`(>aD0em%*7c>X-F1(ZE{LV z6a>Kz>}TZI+SCW|pElSSJBE;Q306Y8S+fwt%?}?v3QKn5&I6yKxuDTgChv;(MGIEI ztsV78pW8@$nnINXrdfOHR5yJCgOn*67jgha?-{o!pwYlB%UX0qQdH8s#xgp8-s^pI z+85zZ!6{@s&EXxxg{$}aRuT2#0x}v`CjPJ*S+qCx8A2ht*20RzXsalUjg4LTlNuTt zJ_!j4*FHATc4{@~pNme#whddzw1eEJmB!Oamg`n?x5(mtOgH$Ireg3 z%RzBQ>_+K|#7M9s+r*=bOnROA`eb#Szq@C zR5e^W(EZ*$9Vs2?Ax)fjjz%ty=yfQ|D5pSWJG##f{ZS3h(Eh-eAMvY>jc~9l4qCnY zmqnP0#Mn5K?T$ad?KdZ4bqu@0Yn%+eRwUo6@3eyQ4!OoPkdN!Uc^l*&f+*1+XD*!& zU%@n=B*K_Oho+(oACovocf-bwiJPUUmD-5Ec{wAa2AO6cDv1v}qIb;aq)F`uXD!tE z*U`pC@hv#ORV~E1Y{`_;*D*x zb90CAxq^QiSZZ+Goz0v306MlsLMO4S{fzC z5FdDbcu`X(k#^&&aj&dK?M6PJQxq(!l<3ErR53i-_5-NvjEv%qb;dV3KjQt!--k0a zy^rj2Y}C5*xBElsOKh)6C$VT4;ixHEObW^ctXlP z-o=Mo*E@tLzkW5ka~iUI3M9%7&TaX#)Osw^5e6RmHgoPz^qr@7hX%yXVeGXd7~~7D$dlE@R^LF zGNOhrc_?fUpzJ-!UCMB*c~$789D#nbQi~)HNnWE9sgl z^%-33y|7U7xzG7isTgpqLFnWPz)((_){xhHB{lJ{Yvy|rt$hAV5Io&CZ{w(UMMCW5?Y(_&r1IVPSff$tzf*kS@`x^L zNg9(m?VVZWWRq&Ov+C9Iq`R;pB!redy;BnJ6f_|EkA^u7c6aYg7;pz=;IXQDeSG)a z+}!mSE||lq<)p5zbf{xu=uksG?HgaslK8Y5IgU9Oyxyz!ul&`G`N|E1KdH{Lva(8` z^!Bd5fuu6oqga!lSpWf#;6|VuQBH;w^!lr4YcsDVdDWqAZA#02BnRAX=8hRN#=_b8 z%+-|(KU>7dIxb$F5NUEP_poz49=R1k1?dUkm2k$>kZHM2LEkvb=p) z(32$ln$2%I`hAzww$zX~1CS#)VQ?m_{!=Qa(JDE*lShE!-N6YemKL zfyQmxRP$QaDVoj;rA+~qp%)WvT=JGq$rynFZV6}t7=m=OH4$|TxDsOx~(P%rFV 
zk28%6@18iU?9|DV70eJ%=CJI(F-Ujx-{|umWy)kOtTa*ekdHu^uHCnU}sG_}PhDmh~!HZDNw5``9wbYLD zAJ+N`8w%vGNW%q-f;HrKF{F8y&mb?BZBzOEQk(Jn&5uk-sXut|V0g8wr2G>ojEd79 z!L=H7?!0;J$dZkhdi)kJ9f)3dgF#x+nx%Di_0)}p!J$S=sE z6g~<>SvJE^DLp$%b3b@GnAp?0n*suCqmq}kJJ$S_Swo}fpjRY2co0}KYN-P~QG3ce zVn^I1;A~KS*W0wYB{hO-J2Nr#AA*v`dhI%OBzmhTv~63HKGNe5%b1>g z9Z@Ak?ccwaDt7y~o%7t>HuB%5kiRqg;c8M_!Aq?U#~MT!P*G5tG}p^6-FWG5e1DIT z>SR1|v@E5nXG_Xu@NgHZFV)*4ZEY_jBXa-qYgzHIhJ-%p0qoo`+)>H6Sen#b?+XWE zUzT>~jziv-0LKI&{=fjz>J{RzNs~FWt23no55y4iMZPL9I%kQRQQ{lyj%}-dt7)iZ zU~trzPDCz{S4w71M%5RiR#R7?>`@_ob#%((%?lr)QM2YHo=%v$XF;*%T)yWu)Vtt_ zs5`f;USDlHgQy&K6?&;Q{i*2#>|wEJS?uVy(v+(9*@jk~`@-zZU|PMvhF8tl;Z!ah#pnl)?IsLK>J;8-2H5N2K#7JTD? zSbdnGudA(e!#MO5dsHbO%qJPfx93_B$;A-(;1^i&QC~iuMM0=g&@Rbrw%Pj1zsCg$ z%0ecRQMx#Pf6DisykNm5ZdVHmVxjX64vX-`IDviu9EK~5F>%{}kEyKtm4j3?}uj-a-`Kha${~IVf4l&3`X~?TY)Pkzx;PpzX z;ChgA)B75#(TWMnNLkU>X&TI$h&`yJPmu1VKOf97Q&EVd?w-qu9x>7ToT=lhDO}Eb z*zSFpG{R)Ss&EN}c>2dzw}(CnvCiWG_|X*{zO3jlXO%j- zCCbu1XH$8pV@}PeBQuZx#9AHr^6UE|VJ#?I{Iyhh!RwA5?LcfEdfG2WOI6IPErb@k zj^=qIxvMxL{Xp`nH?LnuGdb?QWXUzgN1&Dt(HgdjnSvUlw|jwt{o%pP86jS}eE+^v z-id%9hP8Zvq=hgz?w+1kebsA0?Hni@W2L;yJUwA}KbHjw3ubX&U*E6w_1&}e&`t1L zAN8@x>3ROb1$(486dmypv#Ikm45c~s3#nwQ`F;5bzZh-TvMd1f&0-bm z58wK6W2Q|r#@#cJuUNb+m0N?{vQuaFrk9ks4`|~3@!QXzQ%M@%;0+X!!+v&TPeiP# z9^EKt%nWE&lm}~hYQ+&{(a8zWFVArXTuam=k{&!7E8m-0LMcCh5}gTiv_G8lw!{W4 zTDr8;m))qiL@?7aeBj8D^;q;U0MjSe88b!~zKhZCY^i&nTd|NUuVHwcNCpXd@~YeK z-@o7EO$SauznOOTZnxVDXU?2C;jCv(If4{|P6m6dJ8-C@m6x78G2kWnXsL2?uX39v z7A6UyMa@f-f`JBqv2NRF6X=K&kv3eo75c&tH+LJ`z~$C7Gc(J^C8+MX%~+?d!jZ{Z#}Vfq+Do^rQxsuAsUm*4B=zXf0hqS1)aTEW}@?B(*D_ zEa#h6ZSOvP!j>%Z(+5s$?Ml;Naj7UZY3r33*EG zUAviCBjLASWwdZ;EbNf2ROoP`IrPGyU&79=q$bmD+BBGOHkG*^Q7U+ow6Boz-gA2t zmF>DJFBUXz)W{V*?2g_Gqmzv4hdt)~SHQe0S0@nFnQ`zzw0;>Xh49dB;Uu^Qry*8G zVatHuRMB}|JM25;6`4`P^HXbYs1Xz%g29Lu#Xpbri%I%hj^oe~NJDsb%huXHmAVr& z&z5gbYBWmA<#yajJjpr(}Jv~ zqrrTMtaS56V9-|rg$lwyxg=LG>89#EQtW6I-;jO_VlK|n1*^mD&!5fI$k`>{9Kva_ 
zJzx#b2pTvIHj2xXxd;N2T5JuYFk@5p)Yy`wv$(8yaYXDaE&wSzbSu{k-0XP9q;mq5Cq^%py1`^kYA&f?*I@x5cqKT$bdBwYBsCueoBze@wY zyeEaAlAMBjl+?rMuKSjhmd<{^RZDloi9@pfR5~a(Au@nqa4nz{G~ucW0-DXTti%4> zP@`hYp&~gLEw4W?yogvl7}B;of$k`C0|7`^KmiJ1Fo;?$vgC5l;HOsUTzoaf(>w?# zdS!Z&)@Z`{ixz^9g+Whs!TtIZCr)JYq&{ymgJ7FeMfJ6Is8olYG9r8_>wxy2 zY#tr-8g0(#^3+*Vr>=z!6Ty!E-b(o0NC46|cuv628Y*DsrPcW~y;uE8uD-QF*aYM+ zS`8c45cX+^p~Eh^Bnq!S3c%d=R&z!{36cX`saYV&(va`F|N6F&wrE^8<(9`bp?<>i zK$#GY)(&f3O;K!Kk8&}E0)GQz;z(0)v#`or7!XjR3%XgmYuBz1ugI@|dzT-Vw|1&& zn!eA{>eEZ1r@=+^?cX!W>X5jSkiaF=G*lC+rAI4chdMT)V5|Um(uQNroM$~a(ikTI z@ubzE`we+dL#m0jts~&!^crKE{CC@xi7<}PNDQAi)OmWBu+4~9b#rrbC}$dHLusts z16$4&MuPsUA6Z_$3Y`{y8cHtC6{5!s!c?L`7!^V(m*^WQTK4r8FDI(spCZkQkcf!J z5G@&izs9?%_MI#_%28$8jKjX% z+7>-}=n@tcf0nLX@bI`{zdzrmCcbDX->=r+l+m6ZH|RX8rC2h6DK~g z%$qiI=2Q}o;3tL9k))RUj?NIaF&xC+q!jYEdz@?606df0=u_uv-M)W`f=ungs`HupZRrC zhn>CV=Qjd{Qj%u&I_?J<)~XrfZ_f`J*(v+uinXKnEa^$!}arbwL<ysnNGj5bGpJJ90-g()J-fsZRSiQmX0$^9Pgs!G6oCL@Yc*+LPwR@5xn<|h zT4*&bc`ZKS%iD{Fo=1{hUc*pnxh&|FCU^QF!%~`ewBl})N?Q2&K=_G5k7b9G@%0l% zBWb^OoFCc(W^EbQ-16!-p;X@%vBTsar;Tu(H__>tf$?UwbvZ9;-a`d z-th!#6jYj00aAO+Y1ev7CI^8q)a4T{`}(h7rM6K(Gzt#oZMJAv;cRr03!EXqt-NIpWk0!hwhpb#MCNa zn`)96&}?})b=U!O(QF?6|+`vp=@^j1FSyFCb0 z?!+jqHoc}n4f!c_x3qZsGQ41a!L<2UyJC?hh>osn#F2Y$t!x~Y4_F^-M$#QqxQIEw zx>PV(6Jj(EpI$p4K-rR(EFpU;RV`Db>YFPoH$sB@`THBvJN|zddlRsp*Z1G|lMtDT zGL}V(d_(3jBt>Odl?FpHgv|3)Dut+wr2&g1MFSEQkz`0q7MU_-j21E{L&o#EzpZ`t z*=O(TT*r0owbyU`$@lyDJkN9A_j`I?-A>Ji0)7wuNdNn1i?EQJs~!!ZI~$WeU6+Av z6Dc;*6WnRRs_^vX6MQB2w0p!B5VnO+&IVaD2-DAt2ETMCr^VXu{h;nHi#eNwyj%b! 
zZ}D!~>pQv}0*FII-X{7JW@wA&9mZwW1$Lp@T)8rGYg&jsggdIeSY`m3TJUY=2mayw zQj|(DF)jag!ZnV>4MN|Sgicz?)?vSm+{*Yl0y0l@T*x}+B^EAXHf!x!k(ff za~X4*Pk)XM7|Nd7?{dpLOqEoov1_tP!?;iZ^2o^0UZd07yWCig?$R_0VoRRzYMp8z z5luNq@Z1Le$H`Ib3I-VoWy_LG$otdEx%Y|{D|+|qckbFXT@Wv`QGc*Q-Soo;-Sg-4(s^#<3@#rbZ^V=)T(a~5FOoQ)#}x)?HU5kp2f?i z%?{t^8=H;r{sm#7{h04`9S306Xga{R_Zl7Mk-)$HYQcTTW1ngJSE&<4y@7C#If|2( z0=VLGjdo6rIz1U_UH$O6bURXHpiC4hOCL-qm%(5~zD;e93ruGmdq z<1X2Wx2bB|dd?Y}HfN2FtP4uKlnn)0@-&TIsv6W5UKU8+Hc(7GpMTK7ArlekhCLs) zZCfp1umec2V~~%LIcmUP^sjfcjj0jh9?&{Bo3g zj&C!T2%xbIB2LKu48!$_|G?Rz2Vnji_Q3Vp75;KgiJ7)TW3?+Ds+(`wCGw4E zma53D#QGDFxHDyY5-ZGE6F)2e`I1js!ERRZ0 z1ac1_J_H;}Z#+Q5@gh^eV+LfGU-u7;>Bc70q^HZHU9U5T4y-r&idOyliASX5Q6HDl z9Jxf`=Thoq8=`xRIz9)^SdO(d0S6BrY(A>tj7^(_v`lhx92R63?tJJsAZdwAcAdF^ ziGeBB`u2MW)dm7zg?m@1K^=jn1y^DWljdve*d3Xkx{-Fwc_QzkgV^(*a*c-*_IyC` zojXm@9|WSm;%}Dkl?~7_cH~Is0U~(j`)Z&XfdXyMDc0YQQ1trPZR#RFvX8x|!bO!opOF90X!3Y&VZ ztJ8it?VELkVOfb-Za3R#PP5+gTgkfdC5?d|nAgPNhMwO!XVHXHyTzRbVJ&yvjJRO( zpF8ZK1E5e~bhKMozJ6%m)Yy+9DplU!D`~&s0pgwKV7m3M>1-0EI*c#O@4yh@)xF z&^D?j5C3#dci)KmZL#rVi6@hZ0LI?)RSO(Rg8&f#Ptg@1-uzA2y=s z0-wxhY+V4F2Z5yD_v@csz7`{HMpJWrzLU|XhE6lqox&g#=8DxhaZck`$;;$=Gi zY!oTCi-qmnxj{x}8t{a<1OKCrMfk)yEO1{pDyl&K96~~w;DdX#4rpR8uT#p7qGIS(c9;rGh|Xfw#*Os^`V9Ho zS``)*wHBKVt5%$D{6cPE1w1s#%WEC|m6_Y`w_Kch2zQO_m3%ly!FHxl|p<7MLP z3h4T7JNMU(P#(8jUvc3mu=K87hNMeuhCDh)h2uPN=~QR?u3h6FcU@t<<1?73jP!$O zHzfbYy|(p&c@3-7>2f^D;3y|0Pt{6J%8O`ijhWN6wY0Q2A|}#~$etlWhU-5$i3N)e zH$H=?D{oQqRS+3zNZ7~+Z_eq46{>OLnrbrd(Ave;WuDWftz}HwyYlxhp(&-2bc}4o zMyAVe#hEAGR_JbF#^N%qY5|WX^=a)WQnxX$_o*&{)UzQFhdOS*H_-n_Q>C}n4iy^- zW^p5DVjBP(EM@v?EzM}_%8J)_8v!4l=Z?Wuz$Itd@Ibc?jjxw3?1MfH!;6C7R2;0# z%VS4kt29YN34rju1yZ;`R2yhK>U()TuNM7rBepm;itum@Ky{mStq$sW-FJGN(~F{% zCU9G{E}GJ8(O`F6P|O5|GIx~sqmj;I`tdOv$zT~f9+hHI*qvAd?2P3D7ygeV(S9r`SEe2Hj zP`twd(YKc&tir~in|tZiFM&xuY6KU&WCb?uJW=6OUV~r)YHEKhrv1#?UuZ3!kPt5h z0mL$_cMeLvhF)05#EJ zLQeMiImq^gqMyUB;s5Z570 zuB_(uV34i3LbCG90n@YYr+oBTSZinmg{1+!oYAdsSb_WT`5MFlSTToGR8&la@dw;$ z!e+a%w7XFP_h6X6E>Xr%^BqjO 
zW358AtwpgC9FPMT9Pp<-y+uE`yE7mHoQOAv43ArQv7q2AXS&EQcxWV)YANM`obxYnlh)}+wy6}()L*cg~vMKO;!f!h%@Frx>nr!kha%2tJPAMb6TL(V#bzfg@g0IX$2vYbb|C2GR|)A zDBEA)(~c{(d=!^!t&}$y%A2uxJ_85XaVb~kyaw%v|(3P zvmEI2_PpthIBO)^xIc)=KKC*4iD5rdAHMZR#0}aK))qQ*JB&8`@e?>ge0OGE-E+Pj zb0?OlZi}9EMM1{ZfSGX(XAFm8ldXZUuyjoQZ4F)|8uezvBB()WU0pT?`r^hupc0*g z5v2`yKBny6KmXLFRTp9L(SI^vC&kAZRyc?wa?#7{Cb$`DizJX9E$1m9rhvhc`o~?% zBM4vUzPZNY9T! zX?!zL)4%OCbl!&a^mL09GjV)VfJ8=zIXB&Kvuz4D#Mh_X;#GMJUJRuu!@Jr}>@nBu z20}ce!cP?y)!#1+aHdl_Om}7Redlc?T&M>uS*di$T8c1{&|imQ-m{XXG2V@;U*5(j zt^i=rD7hg%?ogI7NbLAx$KYVwAPxbz7I20AZ%nOK+~w6x&JM5}g!RyL&aKf-3D$RS z)1Wn!AVondMYh6(7v8ihwd&V*tzU-ldtnTa5pQ$9L96nzy36RkcrF9ge2LJBOJCm! zq(N%cyLZ!f*&jDSq4G=r1?y*L9-LQAJ6l`S56H^emuJ%tF*QQG)jFJ5`j8R%8Vx4p zKrDHx%fsU5{twKgO&jR~t8hutQ)OAn9fRqHTS3th9n?LB2qVzfXmJkYd+qjHOIr|2 z$nw2SHndz34c0$rT!=z=pqMWGYa>%d=_|oDaQ)fDJc44BzZC&o=|n@6_)QW%Gh+n z=-}Xj5P2tErxnI*L;*Sp{fbzk<>oBObO1Vl7ARV3LGPJMClR)Ndu>I;VxVvCWdk^D z=fAz*M+ra*DN6jvX~y{Il1IRzD}=n@GNO~mDK)_S;>7lOU!1oOcfGR4x{OPXE~bs+8=^^w&_NiA7KZlYjqh$wW-!BUUG-j6vbXRLL{o`Wy>IpYvN4$dJg{ zB5Ym2?|O9|f9yIv@`FD(Bcbpi;ANAU2ISbF0QA_gE76o5FG$A<98P5+E4#pU*sycL z17iR#9bsiRWK%=ku%&=Z)p9TC{(v-Dw`b3!?ccg|?|z;)fn8cM0g!Ws*I4Yc7x`^# z9x{Net({%q@V0|Fh|}Jz(JA*P+5HMiO0+*ax(M#wqaN}8>`#3(q zZYb6axFDjLEb@l%mgU(|zB~8qDfTUi9Uu%`0?oZ+jP3X-7wD%C)Bbf`x!c64iOawy zUXC%U7V2#-0#zsv-Db@$#`QskDP}-82wEPHg0muOI`m8E4m4tCSbKbNw&~*mH=LjJ z2d&1j97t2E7RzZ-7p==~BMg_8jZ;Qa4lqwgz1vCOWj3Kjvu5J3$rF3vZ&FqGPD$ON zUL>MK230JD6M%?G9_b zsM)j~vKnMnZM_I7At@Rx)Y>QqQ}zw4>A2_S2j88A_mhr@g)MmvAPa^PT|gO*4MBgT zto~1J(T%0naSH*Gw3Kve8_~*hlZx>|H>Or83Nu#Zv^J`Ps7^{>aoVTE7xGWDFHxaR>a6dY;g-%+JZfr(xUL}>yZKC-t*f&i z)&ZVJrJUxUB_K9TjH0I8f)o_xWU^^Tn!BxP__dOW4Dd_7?jPd(k_Dr9cx+x-S=z)o)#JzU=k&QU>4_o>faL?YcL`uq*(Q5y z>)*TifPC7{Na;#{rkDI08*Yr#UxMkWMXqUnTmVe`7NH3G+$@%~H|Mc+=}oXHC6}Lc z>88iEL%4R6LurFh`3SH5eqo)%t3LNNuh*tco6@K$@B_W6rCHrLxTjSOYD};Bypyl3 zwkOuYJEl0iH4nB<-hv$CacS56$?-4#yf)C$MvnxK*443zj7l#3Bue+UZ#OaAJfY-C zk`Oluq+W<8drE}`SOol!{Jt16O|PqpQ=rUMAdrReHYv+5D9{#5WI%NBavhf`bg-6m 
z5I9b^(A%(pqmI8}(Gekqr&Yp-FGN_IeB|6yO(6JetchP=AWmv(!LNK29pRXuY)@6K zwhyhwqzyFHE#->FjSRPn65VL8_8Gicc>U?`U1yw@?7;xE9O`xF^y9(loa!exUHRQu zXT^UNCxbhuY75oJX?y0;qutakPrK8HXmuS1gMz%I{ll8E8ER25)z5i~y~#f-xr-oM+!qYgpH z25~aCyoQoZ_3|i)H<2Z+X;dP9Ho{MvuO1r(RuEx|P$RN8IrtC-CAxb&X!KzTe?$B7 zuPGo(fjo2ZEC4^#VnXRVxB%~`j5*pCKLVepUgYN${IgbT`ySoa+TP$pC<^Sn_rqQ| ze{%e}!eNhQ43{Xc;vq;-Oj#Yfcq;+>*qcegTCLwB zWlTwb#FA>$w{LUan(7TL^heSOPR;B?SC_(`39J2oSAMP1J;zSamQx3@rV1 z5{l1T6cY+VjmYV#l>!sUsREOK7K)n;IT{Dldj>#4+-3y)1rZJ?#Ku3^2k3zsZ$ z`7*S%6|IPp3(=nNQ}vDPlkN83O^l2_Rh3|n0I5x=fi56`s}7qj)02q18K`+I+8u^8 zK7a4t*vm6Q?DrjB_m7G9S9cxeIe~6CaytEq!$^8+i5gNwdeisfGe0dDBqmnf&z^zr+9t zuF)atXhulXq$XDKu~1?i&d9K#O^%#U7My+%G!}k%=**ca=EijZB;+RV`RAwMKz{?G zvlNd3b(?jb7?2#-Fv%U5U3|+`;(J7G6vlO+5WK|qcwtJ5|B`SZ{Ln;*!H->?GCR&G zYw4TjGH9lfX0 zuGi9_A&vZ;^5Rb-lqiixPIzz-9XY;ENc>vva%=ku>wEh~w|(al>Q{)Yb0aZ#Gx|0R zvxfx~uv!{-kW1vsbwvb)Yu+R2d*6l6Y6(sRvqaOS zFo-2t!cUxOZwq?Th1*2u3>BWgDz?!cOZ~sWQElp^1n5WH5PE!A* zm*@hiiii22^|*Rq!V-tj+xP3?BetG6`T6YDnlD2N4)GSR-nbzOUtxX;LNnbg_uNSw z6z!c9YN*2|cZTxwCAg7*K~)BlHpTo{+ZQ0cNyXw1RQP?d*WCdO`l=-qj#K5OxYW+; zsA=ci44JS?9|ssWEzpD;3ytE+Wu~EScP1^v0zjT+2zr8ibjieA^mnD0W(Ud8J zHb9hdq9*IvvvKZk*A{6d$D#w(zIy7Ofl%vj9}+;oIX%H`2e*GP*^DDbxNQhfzr*=cg0OV)YO?v%UVd2 z%A0asIo(K~R8iYK`2bLIjvEZ{ZA#FgTVtaZB+tT`sB{r8ec@rQha^}@o4N@1M z-zicp2Jx02#Lmv~p~8obLrMZ5<2(~69u7Nu;UllOg1;Hl!7>?kTyvrc^Uq%U*S2lb zyHG(QolpD!6yQT#bYQpm1JKEOLVT^!5i>cv zSeJMXsH&#`-a*3gPNWZFp>4|?Sd*XzFRU}ZKfgWRhw*6Wpw;_8Mvh`>5%V62|3lCr*^0lDRqD7Nf=$u~-z+`UJH!+w3Oo%H_YRRG8?WjqNDI zd8lecSs^HN4@iFnf3)=N{iK6mt@E5LX3OKCP)8iqWlo z>sy;Tj5*R!iQIkqkhpU0du123Bj<1vH^V^ewSgW&y#YQi@zv<)gr%~oWzc)R!UDPNKHsQERmTT`CFu#Ei0n2=GeSJJeohl& z3`GvjNXsgMq>xm_nhhI8dJI}o*gmRv$BvzL4MlOrR}#D%1t9_F?Vm$qp2fX{VBwPQ z^Px62XUIc=tAkZSg#ie14h(nFw!)UHYg00u&Jm;4fwx!-~5OK>``(W&-_Uxlt* zQqI)4LtMr#wFPYffJ3;-6o}npbV;T_(8Lh)pJ@6d2!_g4`{e%NQkpJ`dWrtVK{hRsQx4GVxW8S^5vLh zKkEtkRUh~mIM8DT1o?Sj&GtpE;l{G);8LbP%A$SNgEYO;j`(w zC_R*18_oq*Y{H2@XzVsL-^#{uM4Pe&6`n+_5EZ3)ocYXoTd3`dDk~{)0(x2QCBOn^ 
zc8_KxscV%=nwZ5r)G(1j71y0C5)W~mfXY)hiJQ5*Q-6c1Ob-86e!opGNw*JK+~D}} z<8qU@tm%bJgE)&N^h!buu!*?f_h2p$0uY7|jXZHmXO%Qs9gi?!5Y;2VDY!p^<-_Oa z?A^N;0<~`I>QG1I+Sx8zoNgEpY%7TaV64EgyiUjDOge~Kb?N{ZgTJ_rvXbW<4aii} zELQsrk^>A6goG)IIBLe4|e%@pkn zC)4x0AGzN1-t?km$@oKG^~bT%?8mpEKOE< zP2mA#=FZVt3<5Q1xzR|H5GAKRH_i34t_n`S5iahPxAOA3l62*|_vswDaL6 z!|c0{%Og*fvvsvP!ILgTq|54B2)Bizb3VC5!hP^Yq^XO1g(Bze!&7w-+@kb~MG}v~ z+S!X+TyB+sfDKddX+Vg@U4>p1DP2IW;=JPtDX45x8Hi$We z`L7GYphcVNnw-#*wq4ScnC02kzu2mpgBJG7dgNATl~@p$XR?2O}Z2D!hU8gBg~5{ZRO%8J$~) zeN!hCj07GL#{e++Rlwwj)I+EuovBFhy zWyu|}s@1evvq>;hR1hr%nsn%2f2>Y)K+0~?Nhiu0$+O(^Ai~VV38!-i?^11!`*o$x zw{NcZC+4o2e^_tEI6%esz=Glc!H$7!7PwD<3N|li-ZsLvB6qX96!G|1H87N+!RVc? z#@W=7@ZPUEYsYnPv=Pg@1e;473DosI(VN6u(F};S62(ZB1y2KfT+;Hyif})xn_8c+ z^}n=&H=soYEtIO6rnH_CZMe4=7H%)~r84R}B_ucv<~oS(A6r7X{xBwy1<(H071`Tz z+^m9*(9`Xug;2+!9wqMf{m)gdqfBe(4W1<$RzOiutoClabJN%F+#{)Nb7m3SjV#K* z&=9rW4yR+7;>6YjT)3IpDY3)U{kJTtUt83n9j5EU*{ZcY0C2j;Ql{m}^?QZ}E zTX|(AA6Wn-j0)0pfF*uKryW@%8sbpKqT|4Myv7fR=hat+)VFL64m5+xQ&Lhgp9kDE zgD6c-q3PV>q;9f~*~BF?R}7FdHwc(k!g$F|S063b&U4i`lS@P3id8yB6F&SXYGy{a zQsYX+Ox!Hel+oCoDJ+z;;xJQYXTNWY#fSlbFF4)$`|~dS2N57SBs`Yq5-NBesvsPt z&AF`AP$EGn%CfMLqElX0BmJ21eiVhdKdbBiGmKGVSMn;D3`yny{mti+Hcb$2u%e+P zq3}zsrr{hBPn%M%-)Q584H2nap6Vc`_+oHk0egZ&L!H#7Rr2E@b(q}gcE1^|f1cX4 zM-S1DTqDk298MG)iFj1Rk0MPxOShIHVl8P@XZ8dJ1k`TTs+A(2g`ohS!s^MzTmd~9 zppycL25~v-w)>VS0_(`sFe_gU-1nXfD$io1ktPIDE8Y{iIu{`Q!1-bw3TcW_C*YL8 zlP6bX-K*1j;>?-a5P;nFWdJh?nHLj!U3W%HK7n)T|7NJAM2#1vu7n0&t{qZv0e=Z+ z2n(y=V-C`<@=x@I+8txf?wOdFpyzb#!;Qk#6j8p%SzSs&L>xecBf?6D>Q%t~*xZd- z4ZRCb>@2O0%Z}}YCuaViM7ds=)l45{u|D5__;3bQfOr!n*^+~8M}racF5J$Sao%P= zT@L<_1InM{2}&k3hwgPfHNJe8;*9LA9(tLw z%y8qRingn4i_kaE|M_hR80d5F)98Z=ZnM2O8W#n=NhP;PF)*~(-!Zwcm@Y0l$q&T? 
z_t*rBrmo*DwD8t8HWHazIvixo@?x|bO*R<;t2wXlKX@={#*EqsS|N(0*3-Nfd~cy< zK{jKNoJ`R$cx-PSk9tMl+r8Ev-?>irvuGARWc$BqY^CSj-@Hr$RDcbIdH!cd4^gzJ zF5Ab;26K!Qp&J2r<--hI#!BqQlqpkk&Q3%V=Ky8{3O*SOU(C9UGCQ9eaM!}rNk`Zv z>clGH2OahWI;3WrCE8-@bNW2dI^iZ)I+V_8T^)u}+rqVF>X%{hGY~(Teo$_@)#-#x zyWrKSlKy#ptZIpKDtmdDrh7;*0+@R{ZjWQX%%xFO0)V_GvF59$Y;?=9^zzo2jq&pQO%YVYBIsVphPF2_dOMN0(P=UGc|7mJ; ztX%T$p&=^{reetC$wnN7no1%}piu3At45&HP&FcR`k%(!i7clA|Fw0SsnhB^FsO+x zhw$u#xn_3ZN5<0}mm9k2-DV6chzv_K7M61FrwJ=t9qjGevO@E`Qfg#8wH%!s ztcE0iloixT{ljc?_@Nh;@%5)>eQ9eH*Xxo5!jW4lJ%DhCJRP|!L#ErH%-Z$qU%(zL z3=9a1C}@`xy{stUn34v9rkyd0j%Y2@n*NClVAe67a?hP>Q}}@w*9JL1T|;l5ip0>ZjY=i_#@F=tGo9$So#O1g=$j>y8f{i> z=4bg`gaUYXIIEO?*ibje}%d06c!Y zpI9xIF>)ijVj`$IS}wE0Ke=n5hWM8YldKk@v*9Z%icpLEKPf;QB6|1Q^3XV4&tfl# z^PPt>#QREE)x|Eyy0qGuRpn$mWjFfHzT~*XtoA#PSy8O#RbvFWpX1j8n=vXomk_tv zkmNE#i5Z*5e-db=Y{ZECmET^rli*8^QMnHe>&^xBq_KwMfQK!vKYUgg3T{p#yfmmK zLbE?4;|VrX?vS$`K70)Z6{5j{#UMigq!GL^actc678qM()(2SoQj6Ntdhz1)g?6tfIg^=LyubBFodS%Cu3OOkikFG1_A{fhlBaQ@Q`onv zL{R9CxL$bxC@_EYft+q*k~7KTWPsDcU|(D#U(HY`Zuo(R8^O{$)F-3csil zDC`_F%q1ov!9<2|T1z#*!K$lG*DCb#8XXB|VAl@2nMq@g4{%Y=mvhLvf6T6$s z@a@6iGqvv!=OXA;i8?|ym=-Gb(*HAIkD+J^b&{mm17kC^C2W|d5jtltJ*YGv9KAITrN1vLe`|ej@$)N&(x3z1 z&==E3sA zBz;!WU8!9C`t=jP4e1oumJSE`o<+f8FKH_BfH2H;sJKs%gfACKLoabJrS~SHa`1c0 zH3-6ILE6XPx=%sN2IeG~xFyBJk^>|uXT>xxucwlV;P3H;izOA)PuJgD-!qQC{ca*O z_#Wy2LS+d*5lMjJ$Rvm0DRn;lM+ z!Spud_K4L{HaP7{X#ZnOb8>SD-BtH38-j!N{qgj-GBbhNEq{IA z;r0kN0XxJQ!0w_bVnsuH!8Vr;cpob6hfjJRk zS-1p1ZAsqSD5-|}w33o6grm%^2>0s{$$_;nx1hkj_*$^DaBL!TkyLl)cc24_IxTp5 z;=~C~IJxIlbb8YGFDbiujp}A3S3^g%`82`rxVuA_pH!@`{D=2I2eVAf!urZEChuIM z)r18Jt(a;5AU7eD+Amimm2Kh@r{B9v{A(x|HW`k4mc@G)_tzR7WC2^7p>&j6&dFeC zLsGH87UV@!gZzJfa_dcE1(#qQ+#N^@*USIMatX;%4lj5*1Rv8sfAwm>ydRWl_vfGa zKntNjJMAAWTCm{aqLI*1+zdpC;Z#0|cztGWGkQ;x<0nsQF0b5tf@=(!{&4OHYa;C; z0)hhbrFmSI;Ns;R=d>PW8jC@K>=36YHUk4(e;j=E^5rC4cY=@9c?t0)y5N z*#Heqxt~MSyH$dk^XT7jldWZkfzGyf(`(jep)rY$f9)@ndWVw;`h?j=uHU$Tk+X-o z^X1x^i*Bvlew_0d3+0m>YU4bY?;UwH=2ZZWBid6k4mo*%i0#!mG1QOFdp< 
z2FpW)?)W%MF6m*xv;8Eoo3M9Ae}UIP)8%3>?-;|z{}y>|kqB2h_m6LOq+ z+{u0Pvkgn*Le~rv*Pf$;V_LdraDmhOc23bWWs`vX#Cr4vc$98mo<7$S>iw_UY0*`2 z;1*hL0Ul;wcp|-#@wn#zZO1Hj!}$vrN-&yuXLd9lNj<%}Ei`etXoEfGrpcvr)z&}H zz2x|tk~D+|2B}*I&(rj-ZNtXO0mQJ(J47JN|GiV@2>elpIlaXeM}tcXd6uHx8Py4r zan)a}gOcCB-u>Q!bG_`{Zfkuz6j#jZ3{7FrZES3$*yLXFG6j(K-}9WVpb3Y_AOL5P zl3vdinviow{Pq&-h?KWg>(+bje2>P({TR~oJOjA4#QhyP6@=>-1OhGk47DbC{c=YzL-R+E z-Kqm%5c2oKY`f*SQQ*XR1q+$zv1@bbi_3eqInJe5dFCCeu}Tq82~NL>w?7Md0dpk4 z&fy=eDa$5+>z#>t*g3$jxs)o9ZZ-#HhY6}K5P_%&_|*AaVzZu#J)>XOu!0XG0g9^+ zaNb)mjURV>#g|TNbUJtLEND0-!L{VG0Nh|sov93RSu}Kp#D7Y22F}={Z9COziIAfS z7xQAk_0+aj&W@%nt$WS59&i1ee^!U6aL#PwvlY!a1S2h?-idEklL#KXmf~g(U$J2| zImpEbXo%uLxca~1-*H_X4^%kcFYtxa9~Fb0JH>ZcWzHek9mS*m7DSApgTckX${(Mq z<1$8#+nfsg_3Qh*(pLmOMIapQfy55UhGekmN3#*Xg60iZxkX`tI*Z>ZQf-~CVpH%qVAN zAq$;UrtmE+opTIoRr+zhvJ!4Akv^n*IjbGlYKbt&O6H)yiYiq2!2lc2Z|KVTu-)?e0MwRsO(CgiaMg=Iyl>rm? z+UKD=YBzGH>E~=_GS!6WDMO-lssYQFh75`CNt}T2J2WdO*l4~3(K4JZC`i8AlhrHJ zC;M7UNaEEXb!$;5JS3V*gve<8WYEc~*R-?3)lzw~LXqDHU&+`X1Y!%eAlC|3N$y}8 zSz=9!z2v&qANR2WEhMc7Qv?kk}wg}a+Z6tXi09=Sx zjw5s3HS)X(6B2Euq_$P*Ht0zQ|7XhLf`I=KHwK>uu_>!M%l*)g?=he)S|^->K1;Vi zOK8O_k|zXn!}Uh0Ziqm|QiaS5I!icEh^74&zsgSK%MFJd@r{e0L;ggjo+Qcn+f_={ zWOiu2AcL^vB30owK}ig9Q3GP!m_9?8TBQF4CEkEHY>9Oi4PIa(5qyOeqbwhDU?+jL zP#0>$xqo#d#i3II80fz1{`x-`gxDqjO+O=!jrl1W$=yR+TpjAX9pJqVsy5MGtzADx|0K9y3f7+ej$8|GY^jK$|}DoC=C~EHxdt>TxW;4ti!d5rf~)K zw!V`GTf10Jo7R?h8AJo3SkmUG??C-~OY20B{=Y}ZF+)j62uQ#nm|i88^9w(O0fq+r zZZDQFt}wB2aZW>UKV6Dc~D8yp6|pDh2l*Deg0nke@J_+bo%%j)guFCWo%!3Kj`_i2Rxm=ks z_?F_o!lY@<87hE8$@7xzd3y>1)KYrh^J8M`n7|34W+*#z|9?9Kmz{wA2qR zd6;cnaN^Wjoj7aF;c9-N?9+-EqH0_AWP z2`XTFX#n{yTplWyj5j4Vi;&91ofoIhoS7^)p*y4Smp~;3m>Wy=;<~n<>mRCPRjIRM z(HQ>9#5gCtE8dT+T&e;hr??6o<^ZYCat$ruVrUD@A((4)Bl)+Z#g@>t?|3t zW5>Mur_YLa7DpRj*#5E4ih0`qDpkK;Z>-igz-bTfCv@Y+jyJb1j+#1yMbY&_BLAtr zt(Kla(BKGHQ1^vPt1FfRg4V5Ta`D}Vj~~s#A8E=@Ff}xd)il*7BRP>ab`(WJX7`Ho zjsARg@*P!fF^c16o;MjXwDwgm_d@-4WfOmFi<||0eE+cV+(3~w{MJwXX%g0_Z97{t zRu7$JbW7j&gPM6h_8_N&hGXLCPY 
ze)MR2&;VVg4Q7y)Nc%PQ#OPhJGLz6dtQG2r6V1(PMj#9T@S3S!5{n4 zsK#O?;liTA{`-;P+MQUu_aHDrbG!rWC^K{~-piMp;zUD|t&LxN;>1{V?&g<{OxfTF zR^Nm0o_43}@N*u2bI|K{)kJRTtpD54auWEK2RTbA$k-Pb_W$w%T;MX%UFX)ml=QW% zg{6811_!kDRlPg6R}{;)@Q||`d@0o$*I|!wY(V|ko098C`N^Q>`9Avl(zix;;l9av zEC4AC)0I!In4BjAWPh)R%LiX*3h$I^Id0XQhuLG`S|hp)5p_;WzY0gKwlx&x>uv-l zFOiU3WwPAEcx~3PW5-8+cPH0230i-Y5OK#3@83J{-Q-UcJB54yMD*|?Ex+a7<@M!n zGTlN+w+mOfBs<5JWEn4^?Tl2pgd5MffBf3pyZnZQY0FB`7u=ce7@`TiQ8&sAeqpvUHjr^yk+l$S&M(qWj64E zOPrm0Dm$AugdlrEmvWE>{)HMjCupJP2CA1!g1x?1UQL5lvhb5A<`^i_-z)*kL|OT) zwA2KMv>hqfsI%_6YF9pwYB7epT}D2wrYLvD5qxHxBvq+wcaixlgIMEB|NIlNzRvoD zY5_;l$Gw78h+3R9{})!kvOuktR}pe00gc*+qYPEqg>wH34Fb_79e(07vj6kX+q5qg zXX}{vHXAT;nXgJiu_JIZnYgRGMT%+LLz&Oq21VERY~O_?RyVrvAJv8f{x$9{(=(m< z`Of54ZQ2ZGcFZ=st|;2C5Mw%%rMIDr4=URtFi-bCW|x2d?8h9%n)(4| z?&!T>vhMzx2J4!=keBOvBxZW27d>L*8Cs*e43R`^^u5~mR{|68V)%jD_3ACLw94%w zsz%N;-;9Ga+T_1Hd2F4Vku*9D=qqk9ZH3jR!ZJ4~%6|K*y?(_5xCINsl%B$W|L32Z z;B~1uG4J(is>E6z$;^`LNs+^sKS%{hKQaHwDsZR0G#g2D;WKF(_D-&5vle<)Q9|_r z-mS)Kz8`oXGoa-Bkq#RT`?=DvgvZnWB=bky(!XOk*Kos+KEWcj@SU+yS+$nZqN(6gHv_qN$v? z-b9&ygpsPC>0Wxa=ihqu;eei6;6BFH7nt)CRE?=hWe*!sL?`$3#qof1X>0lgILTCx z24JHsXfCQeVd=N7UWcjBpy z?YMq01V%}`* zl4<2Lav-iKUj7Zk9W9Q#QlF1rzRn7ZgVT#j_2*0|V(>Gz6MA9n5Pj2`9OT>h5bTNW zKn6BLhd#giyu2i9>X`EP?;p?WtMndo>r4Jhk{<|Hdh>N=1ABb3c1}6Rrc?{Q1yw&5u2cDAU#Wihc^h%Rg|G=)4J z-|NBRQ90juf~woQ0hsK=NuH0TX59$;5ID;TP^EwQ-))X`7{-9$LsRyNU1{38_h?kc zcja1b75{FY{eG2*QdlX?Jb7T24`_o&`e<1`lVP;t#Z_5u(;brfh^_w+j9}Ws*xTU_S&+EYYvCe2d!nzFM zL@!*hJ1S+tqD7-PEA{pC;t3bgGO4cU-2(2*051z#v*rb)8D~l~Dj$YR2|%e)z54vY z^%L+6tOW!~82Ib!;0xQTy&J6E`z#>E?0k9r<1hHW&3Zn&n>TG(3XRa6sqVSDlG;GW z%Vmjg!i}Z0(piVCTepr2H`<$SIg35K^e-P}P8tknl0;!y-@(jbGy}noE}m=UE^GdH8PLqt)4x8M19BGK zYCB7REk~ez9CbDCGG2~k#I)mKMjY+!L*I0>Wl&slV??i)NaV*7H!kr+r{15fgO@bY zI4CISWl$3j2lWwOb|-Cbw?{Q6_EPWRF)IQaFaGQub-=#^G5c4dxz_=lVFpNf_@V44 zJGUM%Uwr!auajKRYTTsB(ymq?;Fg!>wOn*PVdZT=AQn*Jm(kI`k2$1z^zRR{x%6T! 
z_w@%#i^ru;-vj%6Z_a&Wh3&BvBYDj%-WISIRmM!IwGjS2u({2-d&CBOz|D8bHFZPS)G-VBMpV$nmpUFxgn=)|l8GI1VdL}C-^RJu85S(b~Dgc7| zeV?QB%<5k~eD}J+ot~GJ!~u<#u_10#{mkisR-D&7WCt;AoWvx$)bPvN7IU{4k3T;D z%%P{%Hr<3BTJrL`4afz((KXN?uB~dt=5skX)`b=<X?Qeq|HW?)%lK@}GD)KFZKQqTQk zHX*k1`Bdbl&vpT@o5fjW6QysO2pCmH1w5i(_=nGn)>5#!ekSX4$fixVsF-`Ha!bI5bYG;C3P>H;~5F9!auY_Rf?W1NGfmV*jC@pPK}(DmzYFu;GhF6d(~L(dF3vh3i*utrpacnCapV zpf#TujsCrxfBUu@fVdO3j8AA1?_PK^uWzU(J&r-)xG0l(^WM3qYTm9x%}q#1Xb+TH zn90+;vdpo|tQD)z&b`J}>pQ{TIhe=D4nz;r;nET3L+*Vp@9Z;*w&BVqEyXk*F~M}e z5?Kcb{YrV=%yZ+HIBCv5HTmOECBceKIj7sa{EaR4(}X-rdBh4jqVGi|f4apt;1m&) z?hHMYL+(PpDtc|A|17?odBgX8vm8ahrcJf=?$OJdNW<#$bYm~=o67*FG7t7H|NdJe zw;5S&v1lG%@B$Vt)qA{S>Vx6^R{qK!pWlDk+nw=0F3;9d^iBbUm!b5F;#vTpj0zlf zqb#%n8j8zHUgX%!*>)ccOd4;V$%IL4zya6+NT{habk_kM<(R2w*w>kZ;dGpzwe=b# z7fFn7?MYL-(DVWN)k+VihV$1_N@h^%x?Xre7j}aad3y2hp8FGu&6jc5N;4n2VZ%)* zll4ZO98AI2w{{uABZ)v<82IhP$&)D{nBP!p&kkx5+gnD%qivL>qY%PyB+uX}9_e6K zSZG{{vpR_rzo4mSaUH|s9{L*4EXkn zQW$<(8bgX{GN01!+oPOo@R3FAyt043TE@_XwS9TrX&Fs0^-C3 zaf(o_zI>2Fb9&CbpV)rXz84o-%)5ynP3B#_Nmi|vjcH(6y6msRm!|gb?XoS0&iBgM zhHn-7dW=_0vdIuf;hp@vk9HUaReYO4E`v<&gA-1Ba7-T^zqM1xemx=OijSPOToBpg z`;Vw`bKBpn8^{o9d*pY+lz{&71j0cVw#(SDj0JeltfBH)1+6!w-!H6Yue<&Wkrwmx ztL1`ER2TI?0*o`~QIFKmi=f5mBZrF#_UK;CoSRaG1_zlQJW5o5GG)O|tkBv&hnuHG+p@eed=T z^T!NxdbIe)2*$+>gQb9354vi6{*1JECK!vhw)S-nLV*K(zsR;gm{t|1g0@~$*ySj=}UlOZkadk1f2ytHrhes{uWjj*8BfAXY2zO+E88-TFsOU?x z-`ryPVT1q&2&R*hkX?23O@B8g?*|Jnbz~-AwA4A)(%rpN#|~R}$JvjB9WYKy?-6>8 z?J|>h+7}I;htErs%iFpPrkY(oI)T22gsLQnIq3s{gH@~2b2IvQm&gD5{^++ns2Xo) zrJmXsILl?<@Mh9BeS5d^w;5mQ+pAPFLQxSpO$p53F~IKx&7#+epS1VyW6eS{pjc-C zvuBo6_WyE!FQW24`3nj&{+u!;UMA8B{*^XBV$_SW2Pcoop}XdyhGIG6BhO=4IGn$> z#Y?f6lajfC>v}$c^f+1$4at?I-h+bLAA_n-`NejhO4gf~5fXY6S-I^8gWCxQwjYPJ z3MxBI-ilqc>r5roj0R!E?g8W3eYZH^rNYw-l_vXv9VEjO%pzYPdHl&t1X7)FfEn4> z)w{y;TFeTS?_W9gZLd0);^TTtRnXVbX=Nw)0r9v=ncM+Vz&&E~aw@0oIrdR8ID_j88CnMZ_9WPtVd2CDNsmYqLKo@`0UAxeHK)%j zIQgG&{jxvjm~Z_w|L+mK9>fFM-^=_~-@#o@vT)F(1SW;kmVS?z4}apFupd7BCc6#p 
z!&J+-x}qF6?%v(`!Zxdw(S%4Gl1`O}a<5Ob#|E_aifQY&B(KTqLY@4L6;yF=R%&UA zY=(!vA4e3wTJbh3-%&SkR#ZmavQoxs^nZ-*+s*EI>5tpcMd}4Z+s98UOk>Y&Z`@&Y zERqwzSUM;2D(%9_%;`DqFRT-HHsR4pCW7MBx3ZM61YwIr>L=ET6Qxn(Lb;eN1$<#9 z{ff_%peD!!`WA=Z;uF#EMFZnTdY)c+DDf&1E*S@t9rjo`p09bx?$)DndJ3LxilrZb z97j#NpX^0JlN_u-7KRC;k#yDU^HjfEC~J<5Yn!`$xqA!CkcODzI5+SJ+*@*=)@LS7 zNoA*O<;qodL{ZYHq2U$ajis+1z2fxjf!g-X!&BofG^H7|0(D=o*!l5}XBmr^_@|a; z==FTu0az$(@r-vbF8wk8#18tkSv;UJ(rzX7jSAs`MQziD+IpjdwDl`E+NQJBPgMT< z*PQ2zzJ32*xQG{i8)ZZ{+1vZ%@(*!P%6kRnVIb+` zATY1AAt&Bs7Dq%xJd0hU1pFX?Fv=K-gI@AV*MXV;47NK{cd=q(&$K;d!!6JwdSU(qE8m8~S`!cO&mK)j{n*tE$(fP{QiwsoDG%eXs@ZZs0qDP64K zuWlC(YOkSqCvl$|l|cK{I&at0RQfySdXKGT#FgiS2kv9}cuTEAO=!+;^5PCbpsM=x z86zS0C>>2MX1@MZnip|k`*=R!4h!!ibd2s5djhM)25h=6LFVC$=@O>(TUqH}$Q%jY zbUbbzuTs8*kobJa8Z{Q{%>ps{JJV3Z;W|lv!zf&%cmQvnZ27^~b>?3;bfNf1X^yIP zdm3$Ucsx3d6pCt%vK9G;%My_U2*^t(&^}f(piu^?R5IUlXwfE(j@4K7qFPfnFE%~r zI@z?#=(BP_?xTLlBZ5TP_VoJA-@Xr^OFE7};k@j@lIU)pn#!-!U>9~L;w)n_;$s%i zsI%$*6#cQCCiqo1oy#ho{$0a9nYuGRJc1}3p&cy!Xrg8n4^EG@v5PgOd;GiUUKtFD zBD`FL6-pqbsbiwB#v4TFC|mxZ>ZQKHVty@C>*U9Uytj=l`}fUX5b&xQ^oU{~E8_;d z+}Kk-8B%S_7-L+SdY!owxd&H6ie3NL&vz?laYAUD{z8>*uUOOJ4)#Yp%sn;QS-{zFeo8PJL=@glg2aXtfUEnl9*uoW@5u3Ypvr_5qx44&Zq_g;puD z3F?FFDXKL4+aILG>?Y=>*yqH zftWgv|5|qzm}| ziIh%M6}t0nw~=}^Xw)c6?+`M8fxQfsj%h-hQl6HQ6pD{W5NL{PA<))Q$tZa*ZN6!w zlX=JwCeE+?8r51AbrzJQQTCqe9KqA$W~~zeIUMQ6)jv&oeh<(mDdLs*aWH&4*S7*l z;}3Cfm*7rw@oqDpc`;5T1xJ2wQNIBU;=2V13m`l9;1E%L)>XNM!*K zT{z_$Cf8e8R^6t?9Otncb?j&%dp^MQTHrTD9V^@!ZX?S_&2$PNU?1TS4Lka{-um7geyf>hBc*bq4y(erx0wao zl#P8djuPn0?j%Mf98!F5(f~POSMO1pzDO!LzNJ!j4gG}B^8mLs@9pkJOmlyK%<=4L z8R=CvPql+`Ua=a1j3K*6A6qT0mN>F=`e&il_}#22gJHr#9oK$Za&LLbK5s9tl!L1Y zE)pWhA!n}rTz!NLkDQ$?-}T4w?IHVJX^@I z*!dm4`|f~rvykbKRZ{fHkwK@~qu<_8*_+Awv}ml!v%Z~P1g25n_PNN ziWU39GjPf=XI@ydT7+kI$C!;(%z8X5GX2GW=`<2)5G%3#xt_&=uNpSA)G)#&Qji7U zj2!fKi@`IaP^d@Rr^NI8Lt;+(j!2oSo6Z~RAL3tRn3Wc zlU|ACxsR)h1*a1Fk1c@C24@$2a)ucQnUnM%cxCGIqds$fNH?Wc-Q`}~E-OB0@807S zlrA3@b?lj;*W{V1e=2Plw3ezhNT%$MZrn?KQ3ZKBRzr(jUm&eB^S)|_&GZ;ju@z5u 
zxu6tqGY&-4Z!dB1_S?#hsqEM&oNOIWIDbo2NNBV9*h0nfKLCiUdd}WgRTA3~Y7Sn+ zc=6D%f$DMNZc>y)vbZe}XiH5cF{?>G)oq!ig%nYs1aw%F*L@fU0d^;D_OJ3CSsZ2^ zJ9$>b56z|!&3=3RYpWa-&xOwIhVDO<%-pn15MiC10zgWRvJo$zKYu9w2@N>Mh+G2E z8!RAemo)qAlX_yXc7UxetB1;deoTdYG{Rd~5<%FF?&p3}tWL8c634Fjc}>@9(~`CQ3K`1M@w-t*`4EnM!1>mziOLN|uN`H{eEBn(Y7hdDp9s~a~#zUAc zEz-quJYAy%zJOyj9z57y&Q3Y1I&|o{Xwf1GR|4%;^f6Py=1cba=v@kToWkbWOU1t) zIg{bUHBBBja9H*0MRO|+z+X9m&q|nhINz%01(P8Ue0@zij)Akb0iK42prDAF2{u^K zrfFOhRL|3aISrvwt_XcW)YiQ|$?e{fJO{k- zw~8|~^BwBBn{5paKFrxi)(BXCQ;PGOd=BSa8JEH(DVH{~rV9i2uc}m^38*gG{_{d% zo5HD^cgUVfo@Wx>tf+(B2umu<~zXs&)C*WW?4SGgWvEt_SbFNU0Swpf9-j9MfEg(=&-4fB1G{x7`qusHstZF zV>2qj!X8fb6u%sIT7gg+*$Wn%QHucE zr(!z^G!yKsI2ZTHYC>EvY3olXZsS-X=(?mwL*&Pxd=RaW@77<8`2wm2PeS^Pe!jvz*1Fwtk~1 zVY32u^t;D@1Wm{2T<~C`DOWfxr}4$$w6wKJ_2%3jQxMg%xOYth&o7|}rB7Uo8GWABj`Yvz1RKyhM%UnSf{ocgX6e zkj(!~P<@Nw zNshOpKK8$Ss^LMj7irW!IBJ|0{8>BJTIM|b}CbGt8@o^O{#?sY_Xa8!GsVzY+1_4Lb`#R6b! z54=lDL|?j0)FDY--#^{6=oSOV)Zl~fZJR!W2NSNAJf5gFLPvjM9$p4CwOWzl%Hpv> zdgR*vHChc0N(di`BqJ@;P03vjVSdxbNsfkJpc|a}(=nZ}#ixTW52(JFgjP^{^}iIITIL80P2}hMP8TkV)9{cWowA~;P027_ z#whJPh`^w-$gWV-oQPf)+5zZ%;oamD;(>_hmm*>7Fe3GBKv@Gdv#iz+#kQL3<~X_3 ze<&!U!nXXTk!H&jiiyR(=8Y9W24 z(izlP6Ap*)45^yRvzB6Q0f2|O&(najhjbc{`wXx>YBij!dHSErPT(ARKpKly{E7e6 z1j>JnfrM!+R!>f~Nq<3#0+KAT4dh1ms1{k_fe?l!5)6HS{L+h6q4ldrO1$HxO9lfB zBKoS)?xBFxdJ_J)I>Km;Xr@&=@%d$sX1&x>Tn&vF$fE#pP zx!2xb_xKrSh$ft(O3<}5Y#UdOs;3nFa(RHo=M<42%1h-yT5F&N8p^x%;NHD^9=rTp zUl9_bDDHb_sKAkM7(5=j{An9Xw34V=PMfN~`()3D&k_BY>1gWzTYRAY-ge3T?bc>C zouT}(Wy{t6)MXZ4>415BBV!gf#j@)9r`SpfVrLq?(%19LLQAHTwrIx7OIC zcssAmsMqA``ST+wldW-)SU>)Deox< zT>ma6u6DWH<@vA?z*wG73+q+dU_gbCV7MT5ILBQHx?EfF6x66eg9hAWYA!bX&G)aj@>xQemr-y(UAC;~@3x7WEoT z1@RG89lUsh7j401a9ZQ-fgAFNwX+(F_cl=cF}aJZz@Kv>etFMRj;Z<2tSL7Jm-m?n z>{Nw2@awn^F-=+4Q^;7r`})#p`{_aLr zt|fmB0R4(erS(zYyQ)B->UdSyx`+jXCV54bKgnH9U0f=jA&j73y6- zZw}dN4RIz2rWYxYHKeUb6k<HN* z*g{)#epC=S#4+;XYzWpGSr zsE6xRqEdBrbtz1^u+Xxny^F7ynVMO73eoTM$B@JLR!q#$lgtPxm-FrHEoPhiAIOz7 
z6$|R~tD#8B$md@^KUI%965!gE)Jt#)_lSLW+v|N5GX}38wn?7&C$!sa@Rp3APN1#W z?{l_0jY@1W{x50)b*iq9Y`y^akPLOZ`+~QiPpkt=>Yaq{zg9q|0Qp{lLq~FGXl1_| zTCE1Am-oYFn2}2=O4V^>@Yj(=y6;y5e?#1$L7yjxeNtP75N){$)()}hR}aQvD!O2@ zzT%3Cisu9U9B79IdzxLtq%5}wQ)8!CqoCD)BnJ#G+f{qtH1qp-!ekt}?L1bUCi=QH@gA6q z?3{feBv(Pr*pT+kcJ-P)Vw+1trReW?6M1@)^$Sxx&nXrZNiM4U%h%+ zNy+No(|7rT8+wOGK1&g5=8!v3K85nV!NFQmNyhJ_; z*qF5ONX!(A5W4vdhJ5_zyy)X;Cl(AYxZda!qC#UBP2ti0&Jd`t zqn)odRxa&F6CoMU|58ctypele{vVYDr{?)?-zZiA`exogQ4oVTY=cNJaHuZ$lfb9pRrk#mmejkjL??oB@QwCO%Bc~9@8Y!hZ*%#|-?EJh(RC$bP8ZWHq4>G_g0`wp&uRQN>s`YMOeXa5T`ssS8w&Zu z1R$FOQf#KBrQKii!Tl`G^O7?01$A4u?n+)K!mj~$J%3H;g7}=GlE6;LKSU-d$13=P zCcZUT=e;FpE~Gbc1;v7Mz~#4}bXA);>{X_ugAbpRRdrRvJF;|%0iiF4n)KX&toLWL zo;_pTO7J0c7n|fr$FnN}KXLFxacQMA5ct}UO&7gIIC=mxAaU-%9Es}k7)S?%vM>HK zb~!m=ReJsAaTN_M^~p6T{$9{;wh)tL`6;mSC=w^#qiLq2fgey%*teyG7A6Vwr?5J! zsXrY+mlFNj^NmZ__9w89>Q*aHs=DIJ;(>z8ze)dl#9Z5P{<~{wfzN&|y z{!RgcQNd%hOs)m6K1@7*Wuc3R8Nz1`V}j%-e{9=2T%!J5yWjxRBwvKP@g~BOLi&qFGgg(M zT)Yex?P}aC*l6K8IXCVtK)iTkr+&^*WH{hSj)nQxGv9t8#|7a=R@ zu)nIG@{pRAA-t|A;e^nud0P?HsQoevayU>{-KJVD6QJ=<>gqF;ks=HuzljoLDVh_?Pe^ft(wqnXNN&`iL1|*1<4uv? 
z@Z*t`n}1u5IW}5E-^c?F3Ghw$uptFMkil|s88>$9^;(ieqaj1?P0?kbK@x2a4Mnv| z`)iQ1oe1?#qUz|*&}OZOd;(`u4kvhwU}|)730r(}!OfEx^l=sfSlQBRX+?+a)<1WK&HW5moD7AJy<*!&#D@D7XA zKuyNA*yBHnY81uBo7lCV#_^F!b|vz~IZL+3zT7G|lV}w!=ct}j+eKuCga6Y&8C4_+ z9xW-7B2fIhlJg{8?k@}v33YG}he%;O5Y31z2PyB1qFWX>K;6S`(T_fvSRt~HRTUvS zZ?>2l(sT8<1G7kM7sEugaS~`@HF<^TT`OvU->S%&kZ(^G)Qdt&`k~mZoeSIhfUbys zJD0jmlprp3)lWcV?9G*3qBnLnuoNCw{9AzO?q0KhflQkD?1be0I&6_na960K{L3L`uR;=*kyn1hnki^Qd%I zJl@px4P4`VHu>}v?+AI?1S=7!!POmD7C5I<`S7jMM>}nBNH{2mG#w=6k=~ps+X<~C z%MHE?AG+tkE#pp6R-)I^AG#{PU=j#OkgdrnldQKs)rsdmj-~mg;>|zjn-3};qGPcj z0m5{~tHB*RKHTkJk`uAA7nLF?k&rXwsm@2j;lo8+^M=*Ako;V9QG(WFAy9qWW^mLp zkGf7OIo0vd?)hfQF$*tkL=oMpI{vD)(c~Xr>SNS9fA*exD>)j5e0ZJ!W97TprsL00 z&fx3PROX(S?B!H=0?;z_$-Jx0sEyAg`T5l!f?Ni2ZW6UC->F3V{NghoF9lE}g*q+N zga8+urwLSo6xizE?ae~!F0}VSO*;?LYoh2 zg_M@o$R!ueXo98J36&Klme(2|QxpOI zKi<6J;krf=@?6H$uj~$!Rp>fwSekvTg9z$Hf=%zQhf}?`bN;X^Rs0UVBGs_ei10%? zU7W{%QTcImPi$$fs8ZoWj}ej=qFtGuFmH>58su+4$^}CU$BZ6rZe}>EgPiJ3>^{UT zTfW>Gb$7Mn|d!!b6fhk;_Dp$~fFszf-#$w0gR=KPhfUQJ??k+>(& z)=FiS_d`q7N!EJ7=jb0q55D_Jzm>0Omn1pwN7Ee5Z1-ZE>aYJ!Pfw*zS@XG}qT7?C z0$}*lfm~29ZP&tuL zrELwlNEJa^IxccZh3B0*vF_ZZm*=f%A(XaAP33*d<6*D}DE+uFzk|4)MWK{iAc+;A zH5c^~?SX@~^@77`3?+#N{-6zefr!y5H*mCHFtBPQ2gbfd^{TeL(<~T&p$)wFZ6ZzxN+<6 zfi6hli&JK-JVx-5$)wghf`Y=?YlUxJg2It0T(?`KlYEZjT{<$6&nC4pGSNU>cNIxpyK$l90OpbkA62&@{YHcE=BR5WfV^vEy zfWvH(ZjcLfjb29syy(nZ^r#D<`Dv1w4A_Qj^j^D6My#Yjp4!`NTQ~TGO+O_pj1Lay zLsdO`TqF7mOxg478x5P=Wl&~GTloyX1f)t<-GX|~bVLAqn~KebBFHUI^uPR%C@ZJ1 z2m?O>>&3>V)>y&>kNU24awIW$(4czCLvxYwp|e4}6$XnYz2qczVe^|QdD|qPf?D-$ z+w;dCGd!GN-SRh|cz?Kwq6tTQ6>Xnl%{gt2;2Uip_gpl$Acz+WUde^E5WhM()dNfs zmoy*zkDnA6s9=sk)pVP@q2!ZJ*uYos-Yv~<|4jM{x0i=gEBhVf#NpQsGvKNFVXf%S zjfMu7eljkF=7j-6US0Tp>hTfv6wg4(lu7t{XUr$gtl*5;tKV4hY$?c*{(h-pRTBlf zSHh;%Kwh|i=*vXVX@IR$-G|9sJV4W6XXhEs2D%;&pdmz%NUDx>uLC-R$Si^#GE6g6 zOz!I8r1O@fs}G!B8QJ@7j+%Y?jg+k$(Nq9NVlJ5eD2-3)IIQl&@;70Z!JPqLBUqnY z{lOPiNzt7(up%p{2Ms^P&ib6)`7}*(#2Cah#|c?xZ@*RYI=XJU<@zZyV|))?7j)wZ 
zLcs&yPT1M&uL3W5+DhE)$)@~EcaUUV`ptjn`z-R}Yd9qF2q!N8g1dN1_^K0%uIkyEKBUsa;+% zy(sawUdqEe6$J^IzN>)U91<&l2-!e$joS|;8(dM5nCBL2xPWHqAIujY1vL}>1I7FV z^5pF!1zFfV~#V#1l>f-nR%ocSf039c=SUhtlOCAQLCxSEX0CZYH-!WLVe9?|= zgFPEr&6(SMX*Y+Il9FPCqANGmv$*jLQ5nosXA(=o+RI3H=-2r(4xj!4CIwc=*b`Kk z(=+sBovgru>Un5e&%?y+`BZ5F6Ww21yIPVFwsFxyPE;C*!pJz}e7Z-=34bgX;AgNY zF^BCR>|6}~bvx0uT1Oe^NY)%+iny%6wh&arWo29`0^=)*6{NVR{<@c1ANWqSKr%iO zAu4YA_kwqwRo$-Z6PU#|dlnJmC@Q&xE_GjX6AH?pjx5QPYrWGLNoeuP2bHHf=Ud>b zTwlK2*2SedMu;Ru+mw>Dgl)jhmNsVU@|-oZ*Ubs1Qk5!8+}T!R2Xy?T-<%3$c9Rji zbL=hwASshl(P8O5LlIk!VWq;#&e39iY#!rV@~PYDhSN%W7oI^8W_kML#nChy7e9I8 zllzwKqzNb3-RWd#HCWW*Qm`Uz-HRN-78uJt^9w25kA5FI3;r!}Usox|O-FNwedgN8 zXIdE5aab1v*=z_0Tefb!G}qUz!D6)tTtc7FW<#6AP(b2r8YFsd!OR* zx;4Otf^!rhDK*H>SW}a%IXjHxef{p8XvAJu{PkHC6sH2o%N46UhRGW#1M8um0l9dNv7-%z@h#v4=@A1j znt4NH%SjRUx0sfJg@--~1z^|M(9ZUuVMG5-04AbcAO$9)zZO`(s9UdI*QEi7*QM_` zHD%L^_HRmzy^;C{ypj-AZmS>&i1$au5>iGgyz^0kyLRu6q+^}(^OT3fhm8&Qij9g; zsN@KXX51DHnkZnVRxf|nSo`kUZEKT^n%*PrlP~!8QR%xQPF3DpN{(z={l7P3ILBmFG)kTx3e8`w>nZW%fTddo+`o>@ptDne2q;Nk=DIpbFR?TkqruoTI1_uA)Qb1w z39)isx}(Gar}67*>{B}|CeVsohwbrcH?CdQe!gNkv^A)*-@&c6i;XZvpum_Omb&z= z*TyRpH)?4V-?v>y@$Q3@;Ffk~*4mXY$1Yg3_(ZVc6my1w{U(pgDF9bDqp(ByA4<|b z{JDn14wKT)ulLu^XNw$XcC@^##{Mk_@!X2#g*a*KFq#}UY&B=C2!#=R=(cF_So#_E z{`s*oON0TWDdJ5)Th@$6A9<#?@nyE5oEw~3O}7ihJB>sLCh3mH3{7UF5u5; z!4XiDEH94XdfmPDp;%&wi8`c7AUs2$e`6(CWK;T|2T{J4{_{W^rBp+9io*gP&)dW6 zWe?5P`4mOdIYC=ef zXCO?r-^<^E}v@ufC5A(|q$vG6m&@Qc0eHMGQ zSGJzwh6!Fss` zT;(~E3v{DcpXqLF8)*RJ(5P#@N|!q6l;RmCUGo(5No|U-d=UJD!8(1Qv^Pa_4e3iR zPaFKh*I~M11$}zy%N}ZUAA8qe0e=rg$MUy@C!98lh7M2_B|O`QkbCZi7tM1t9jH7(DMS#ClJT_m$g%HJw<*x= zIMw-X2)M`TcuQOaaH(lmhz+!~D&7+4^5ScnIYSFkzyAQm6d(61pX53S~s^4Cy?h6VunX9e4fAtj0^Ri((?ExN#6G$AgqgtBHtC z3D1#~%pyC%B0$mrJP*|I6%TBQfUCjMt_3ugW(xER=X^hs&_<5m6}5I$pNt~cwKw=; z(*dI2^(*06OWw^Xg@MbNivB6dy{2L?74(f9b3gE;gnSZO>fys#g;z^pTg#b$R&d90 zjnio=j@|GfY<#Qm<=p1Wt!Sj;=K$SM3@6fyF!K}D$?oz*q~W$CRkuirEkD)AQ5-!*=BLaWFu2v%VK&m3Bkf}XY>=C64|?r=RD*ZJD&vg 
zE9nubc)zJ@=%uB@;Ct=YIxc(EKpisl0X%HRi(Tx49(fqDalBDF4II=&j=f&>**Q;e z`XWvX1})urz+@um`_ihwn!C~c)ZNZBRJx22dM}@X#X#+RC1?`*{^z~tp)w>A7{c4+ zXHe^T1`(xYD({^XpCbU15NZ8f#_)xb=gEy+$PuA?MMv?Q%q#{gjjU>hU{n61XQd_t zn5JhY-+4S2clR!jabI?oQmG9dIJk}=vl1DHupUCR zL3aWw6v#XqQI%?Jh*9@E&wqREcUG2V*zw{C+nEmRR!B_(V9|Z%lOGKX=CpD7i?ZloDY{3WIF?h?&n*9NDh=-j6@tZvXE|CgY?b7*(rC zgAusuI@@DOghZ_j7yz|X_R+g?^-hZGB>kmjw@Pm&f z$G2}-x~0YGFV$rLfnhNHPPdw7J+T3GN6b1b8MpZZ*6&5$~4+f<cA+`yH8E&XkVX>M1ww>UB|5W$qGB zYSCy6?MqN`mNqOXbD{9wwkci+;1&*SRZiMGtC&;P>bVvJq^)KedO8WgpE(xiaxpY}=}Tpfw*Lkm*kI0G)Je_-wWXD9Z$i!8+Wzy< zW76_Vod?g!t07jdoImXkx=53@r+)efo_(lR*%_ zoY|nt>?$A-wUmZ}R{Y;owQixGq)#8CP$@rXahBp<1hrpnlEX|}bji3*EoGf2?M71W z@f7F7V)5euG`2X`Nxzloxk1^>T4n+YxfqDk*ZsD0 zGNMc_llDDMpv@gSDGwL+Si4osvd9dR(n`qKy3I^omHIigt+rH%+e=`wSm24DlsE|D z*_$i<1`j{~F|_6=fasC79?8S0pS&$b$X0Y}&N+Jp*(sikP67w9^WyepcT4*Cde=ee zM-(Bx^brNoOOOH;oHeCm+Q*km0qFs%D%+ipH%WRTvGDmr(a737%MFXKkt7MP6eYc% zmOw7Oo01^&3@Aqy35uWSQ!){ar<~T3%R&g?Sl}v6B0{Yc@xIS5DSlb@?mC98rb;~b z52Q6v`nDZJ5}UY%XKrP$-NVFJmX+`oLi#m`<5o+_qR<*FT^LezOHq#QzPEGb+B0Ts zG%^*y2P%PPGz}_NBIzQ-x`#RCQjt_CO6^Q^ce0gWS&*eV*1lU?8}c$0i<=zE6%I`j zLJ1Diig3|9gx^S>$m|4%oQNGiN#hx_sm#m-(6Cw|_6?HS?bx-;NWB72yf@UWS@-S@ z$r~zpJ!~s^y_hQ@nv}aky}V}9MbO>kZsM19mVU%C4!!?z;9GM3+W`aN&N>R}&es4~ z73wrW`v@ZLOIJaix>9Y;ej#%Alm>Br_Dj)#N>dbzXai(%XkF^MhAtZ+HZ|KHaHyr% zYHzEkCTNtgron_h%as?apzOr|=Zo>j!{4MrXS9d~t&oCDzzjZvr)1$B3!xK>qp%bR zXS->}6us=L#itG-!;g`=2PSRQcU0pYXTG0XFU*F#Ea|iGWs_TAM|gkT>1+}Q5Zn*5ld2vrP^{# z2|>jvI-Zl@i2C%fz(b*xmQL0Gu|t$zDUMcOUwf{vot9+?KYX|huKNb+59dKrbqUk} z03Xe7xtB1gMz#P7&oUarwkg=L#WFGTW~l&kYk+kjVuPpO5AH z%ggdVw=;FDy2*?pX*9rmm`)yPE5Ko7G;ZM8QwE0^0PsmOk9Y^I`CRRSpv_SuZk5t> z(dLy7i(E|SfILO3C{B26FX05VFN?-BG*}#qLxx%?{ACQaQc2WI&T;7uXqO4Vn+f*8 zhFw}pRFWRhDtu|X_3wYX;6&Y9C_#$JBTo7GHx8~BFHtCsXCP@kJ!@(Bnkt{V1rhr9&H-t!Lq*ZX$yHDJ55j7w=qzhd1 zFwMqO?eg+moxG~y3eGOhKevmEodW37DJm^JcHzQL;5E)?rl9W?~JAsKmoBr8+bOq{X$i3bwWol=aCxAE(ut`SK)b&GhFMLWP5@QfnFJPXPG6>cx^ zRo{7k>v4{n)2pq0>GqWAPNJ{-(&ZrP8Imiy>15k+0NfPcWX3M!$FOuF+2VM(;d?DD 
zG9vg%U&{Mzr8O6$JD>r#Osx9&(h1#7=LJl{=!IPB0JxM`%!GPSv62MY5?U%h>SyQ? zUCo}-SSv5xJ|T;C4d8qgEa$=MUw`>UoKf01NmXXtv17WgJ1ascqX1KJ0!*b^8FPUf zerb{@f&C8Rv^np~GB?bMUkoL};X8LmtH1TSI0bNP7sD$>1un9ilEkJTpi!9g*4Y}llU(*e^cS)2F|-)x!3Qtgh;`39fi-$cu& z*bg8F`cW;FHG;69C*yZSEkKusb_%2tpFZB!;+Qv#*f;b;A_gV>;ytmSPWKcfVh zYuh(&9b-KLTS@;#DJXy#tJ4X(C)ynxM_7yy#}#U+h3ir*?nf1-LGxTc(0ZL;a|-0~ z(tV&UzYFc3k?-G0Tbm|D9$B<8nXo9M>=F)(et>M|TvAa_B|U{RtE<|q$E%$k_-ic> zf5utvpk)-oU%?w-0e%&IE@UI8)F=@^gpXUf2LY2XNxYKQ1UqY8>BA*=0lm-a{{m&F zTg3}l!_F%P@Ma;h(jTu9FNe*Ww;Lb|7?>TAD*SjIuUXhSnFe@*ZYd#93b=o%^G5|b z%G8p_-V1ah7$85Q>W@vEw8CL+-mF+fir<6CCZwB`FJJH?daN~|FIp>~=Syiol?6w= z^rB^+;NcsK$=xnBYS`ZEzi#jpm z6Q7auOk3!T;Sef6$7~wRQ zJO*mQu%KVur{dC5QB~6Da{h8-HiNd{+Y}EnQTU3?XlZT|$b?UW?f|aYOvg$Y5vrCQvoCCh=@x~vCLAgwzpF|Kv-;QZi_#VjQ5e@3OuCZ~loTfV9??ho z(VwwvZ4ei`&+uNhY}vCq%dff*RW7z0mt0>piA1lX`t+j>ZDa0u07q$hRIGeKx=zN| zCohRzDA^KmesA=^`e;(f2Fh4QaxP#odz{$fDl2a840TauqeHY_3k$sfc{CzguNxaB znDFv+5cS1RYWzPIFWB%XSEj7zM2r~s|3<WCcIrBB<3*uP9jQ>Y_%Cxzi)cuZL0g*&M^m zSFcXc!2GUo*U`bZ1p%^X5DCh+Si4`U-EMUB0x3&V1#Ljlc!Q4vS@D*lK?%DcAQa$G z1T|K{HRmC>b|+fiDlUniJLKbvBzn2@*i?~{KfS}Ofl)k;gwFjVD@s&c>H}PR+1bIfNQ%VbZrj{q}s6 z9F0Wa#8m_fc#61v(A;L34bB{b18AL_wB^6qRi_Q-uF-!O1$|XY#erKK|bnf92ig12Eau5@bi= z&wR6*D5_)#^o3w8+&_l{HH#Py(q9&ZpX+NDV=3?h2_tR1xnijUT6MXwu<&*ULt8+> zau9Vy0O&YS{fmdP1ivfBv4ZMXQuseU!~0yKjozi%}EVO@{VVWDkT!R((a%#Zu)%M~V|Do|@M3lZCv z*r5Q;HDvChwTwV@gsmhk`QCGd4ysqT%qlB$ShBNcgsx_Zt|;0z=2oKXR1xCUxHWOE&>g^Uv!39S;m?DQ!0EuBXeLV&18fOy9X_e z>EcAV(EVD@i)%8_*Slz}UGR>IuA9*i@ZE_|KQ?dvM^s=-OnEpGuIP1vx#h6;+?Q2T zJN))}8~L0jFST9KEOD1XFU!gn4q`m~wG**$=*TTQf(}xgnj=b8EOaC$bv_HDzZ+e` zi{vNGz{AF!w&{ye<|GiZH;lb`7;A^@oy0Nfv>4wotqWH=?LKBJl%4@oTcvY7? 
zeHvU*RlG6`ZS2!Ku@H-za5A&ZucUZo1;TXKjUsQLW5@j_5bI`jUnxS+PBxZHzZN&;zVE$wle08> zqsM)uGnK$6U^vg{gtOh0b=OKF_%dRbKud0+>n1*2v9Jd35*Gw&nT7O$P${hsA6NKL z02*-76w=&&KrC_uL(*=8D?(X3ecwQ@bN#P%`D5_oaH*6$4V_xPebI&gdT2rU_vd4K zrU3<*iVf!Le&a4`9oTtHYBFf#Jojv9cN}v;KHYL3T1&xXQ0r=4ZgB(1kD&vqoZ9Rb zb*YcNo#p8_v)nj3`guLRAWhNA)~;U?QxbTpV>vB3LbO=160|}PaB>`Sa`5kvis?X^ z()0|h|Hj`ooIid;`^rN0BqDsHE0?&4sagvaC2tFbB^yip?jb4a-mhZ^DhYvy#IugR zRdV<#03~qb51-GM-{rXCUteB+2DdY0oqc&(SZn(^6{X#5?yN|Rq@zace?)#GzIiU0 zao?L-k>vkkJA|;9YCJ$ zwJZ^Xe58@X(XBM2FP5CVp%y(HtJOCVvZAm&tbQ?!8ku5~I)rk`yc^5ZV%@**PeO8= z-A+c7RzqKeg}>H)aI zvp;=;79F)z{a~S?#xXARaZ2TcrH3eZp~b`^3{6D5i752M%4yqE?Yl-EZ}UuZ64Xqz z7!=&zb4axOT>ABJi)h@KL{l`tDQfTjC|a%-%HP$K@`bWG=71^`l;$T?$OAsvG3 zQ(D0W)7xg6=Sw3Kgz)DG&`sUh=1Tf79`iU&^nX7840EvEKp;G%L05dEWeP!E?+fr| zyWQXOo+@`(uw%fw&wxgDEM8E2L}Y7prdh+HZNJHq)0!nk&;;_KFDHh?Vlw+3j%%u& zdD`<_ku@F{Bp7zm+ZtDdV9`2hob9ICkHZp~RuuST@Jr;MvGaWZp~vRfcGxXT(`uj` zW?GvXjR8$o_f1pAcKlLQil%Cc0r+72;pF z9iduX^l69gM6w5#c5d=1s4P+rrqcrVGv#r(w6g2sw0rAS}xg2$o0=i2Nn9!cE zck}VC66}E<)4+CS2Whk)r;*MaDhr7-6DNM7M9d(UGgNcm1hTF8$df%u%b7aF{|U&} zViQx!{e5aVXD8_nVc#u5BL84KOZTPW7MT!vrPN$>xFJGw8{ggD<&4-S+K!Xbrf(>_lVA zf1yC-`xzJ^ZIoRlB0rI6TrTC>VtzuI-ouB|8erdwuaxA&)FRIKTNL+ucYZ?w&t|YkPK;r;vk)O_KaC}I|mod^Fd+Cr&w!2S%f z;@yUEhi^ciCPa7ET%_rh%uLfG+Qb`{we=D=zEIe$wsbPGnWbeFx2m?Fh+)Y_Po5*? 
z_?gf+1^pwpcj&5Q7>3umFLA$=+l%jFCsE(z3SkJ2r9Whajm-O$_86KuBUv=fRsrID z)IY=XnqThF4)9mO`Pk;g1tk!$;><^3&mt6LK<*<(4Iu_XYY6$tTsL40 zY27rhap$vm>8>OyQifqNghy0^vbM;*V4Mn?{6l1kU&^NS8=*~CQ{cE{6cTHFJRs1O zgioaELcHw56qG$i5-t=kBhxb6D#XDD9})~Ia+AL5Rrg8!)3ViDsa!D+vZO(lfSH{n zrWfEuM>zbDa|s)X3tjTTWj)l~q3!hIl3}*n`on+U;-Zcg0imWhxUA=s>njmK<|K)W zuRwRQqfiMH=cmHMYQk|JDKzc=-{1D%u1z1`O7a1SYQN<0dsH=VE_L}M-~SOffILRP zjyhW&T&QUJMAXGcyPNF(Pz4cxUas=a&4* zEuk7FiFyf_1<9#Idn`Rl6*WLN;tC|dw?=v%MO;Zre$F;?h!}4nKE-j-~<8cq~sS>4ug8?v_!%qA!2h}@;3pjoOdKppt2bC3cz&giT{@x z)7*HIT*=#ah}udaE0S7Xt-i~#8{;B}f@A1qNcjSmGHNu_KOQPKS4d=@| z_QP(@a&9b0& z(jzQeD6r2*|pg`Bi2pmuS!_e+kpG%DFCKOy%>q2D?hnWh+8%j7%XHQ;Q5$7)@2 zufBS@0#JvF>JXfbwlSSV%r;0cbiWoypcDk35mg+-SN_nAGe1`kHZd{DOr;g9Dq7%5=A&J({?E?wqO@^aOj+b{|#{amjMgo5*?bgcdGbMlR5_jx-6>)o0c3wiB@%i$UpXnI7e)Fz+L`lJY`cu4|8@0x{M! za}BFv`{D($qj@_e??>0dKi7KvAs%prUW8VJ^=rk@Sj5BBn&S}@`np+hqv}beqj_<< zyoX3Rt_A8SmqwBp%iylxe*4X>go9z>pwEPC@9tsyp^I{AJ>JNARt>Ie?wx7+;bY}5 z)#wSmO~S@$;Mfh&nx6CO#1A$3T@2KbX(X2(-ZR(kcFR7OdOqms#kVtiy9G}%P+{#j zHcxwD$-{zV5TZdTLA2l+#_ffH@BTXr@yhD9Kk_5e48k>+kLIcj)bP=leN$POoYp(& zGy$a$9kXCLI@$7kid+fR%|}&I=L)&KrK%K(@(@m&|{E`R^<0)f9=Q2 zqzRNz7G4a3|+tAEn(|^fGxi1Y6rQ)UtU{nG9Yxg z`3Jq}-J_zi)NruADC+y=@BQ1>K{&q5w7^&Jzw-RG)p3BS^Y#=-ENre=_n%)oX3a2>F%0~H6sYcH5hG5-P5~p= zHj+QnDyIC_PD3svidHhjV-R5yX30Z~pHHj%rHREybN9A$*egZ**IcvZdeI>5atYmL zAWauPV0z!u9ch}3Mw7t<1=o6FmLoHvQfoi2Bt2-QcGSL;-{TCuL*S)HSrS^Mf1P<& z6S09vTJ!)XM^wJGiAzhoY^gjm zoJG#x_4;+sFa5CiH^H%8?mL0Mm62Phz(q+eeJ5gaO2(HINKv`9Xa2fo={@#}g`*^$ z_$PHQtR}*$e*AIthS@TU@-~pPs~N}95a~kG$nXae+)-2d9<64nS-9qgzMpS|k3jf3u zPUYdXNn!QlnrdY`3?V35HWa~+M#ez!vCd{C4T2V`C-8zb7te4eX%o>1{dk15^z&=06Y;ca7HHR z#-)}G%#|}%oQ&!LJ&V^!+q;S}&KmEtV?^8;zUPc&5b^;I+F823wSZ2PFlvxo&5FC1u~6T7y{I^DEd@tX$vJ*bU)uqIy$5#%d7^U+c8^@`#-wV;fBhE z%*>MI9|8gj7uzE%>D5`jZrnL?A+3IeC5WYb$ZS3P)9eP^sh%m%N9-TE`i-Fckf><3 z#5%qkZaZS0h5DlRHzVNzPX+bbB?kZ?41d6}oH%@i68H1R%znIm*fO|3X)TxKfO1)I zq@3E%9&1Hr2g4+}h%)L2?USMDRDe%GD84ywMNcyIuSspaci72^fH2GJ1>|P;OiqoS zL4PWVMyo_2VcoJW@a_&yI 
z3Y}SFZpY4y#TH0Z-_DVQ%bB_f-tw6{`>K|K)-LJ4xATx@kIy+KHFN zHkpYdfYj-?lGYY$l>WBF?ooPFmF`DBpxr^LI(E`9hN-MRCONo7^6be*i z8Uq@aWzthz=P!H5NW66k`Qj_Wsh;(^P3@)>Hk+8-Ge^XT#()_Y353aAdE2^|pS`#h zVNs@a{vz!dTQhUP3)DK`Vv)2BO7QCL#QPUDZ9?3b&Hd1Xvq+=ZK}!dV+6~8SO*ll~ zQb=T*|!>$u=R9@&5c9y5SfB(KPG}{Co3ps-5H*wAH$R>i> z_(fYkx&| z$a4^qImK_P!&X-ReI22W#q$4h3s%1nT2)>s{o*V7rqR9s&H1e)T|AeNk{>v$pe8Vh z(=v(Dt49^!*P`y&;Jw3C4;d%;d_;!xy+%-+Xjcl%|NG90f5*Cm@Lb7y298kGh>{Ii z)%!EMhau1>@0&nwvUX_KFYjm7!jmq>FAIY(pFgm%l79QWMO_;%;G{M*vvq?EaK4>_ z;YLV8Lc*5s>+ZT-&e2EO%c&b&Al;2ue3UpR;sYamB)gOs&EgV#Q&&}+)HWqLpzqqg z__e=5-QOyn#wQ#tbasj1>zT`iqLEyXM(B6# z$u{L>vFRK|PyTvB_X0{&RGK*e2ne0#Nn^`xxY6akCiKbLe>XI{{11*}@e>q@CKb)W zFPcncYZqTR2DK?BNx{!ZS!`hh=%XGAIb+9mvx2%WhP}<5&SE#YbX4)HT?(ON^2dam zqUYkklsSbs$QGq+n5`$iRKzXtk7K|1b5O}E?6-%AvYwA14kXyW$m=^n)Yj6Ub!Z7e zckI})Yv=?kO3%w1kXgc;(p=#)MCeL+E!|vC-|ya~(qG!z|5jh|4-zKKSDsg9*?BHn^TT$$SrC0L|+S; z01oE}E4%z!pKnGIio5^z+n3++|DNSSBpM_s1%eeBUvdAH{ms74dZTs1|6L__C4F%v zPKZ|+E~<<}>quZ0fRrQ>$UI@mV8x=Dfc#WQ@?nP;JT)77$mG;KlP@3iVKiCZi*Tq2 z5)J8WmC~G{7;aAO^z{)db&)x+;z(Xh2i~vYir)-)lV>1kw(JIjW0a9)j0vUN{ur6v zj2>6pcjyoc-6onznXE$W@5B|YW}KEt;$dH%c!7IHA{OhL&?xp!eoPH^WI+eaX z^Iacbll}<##nO=sXOiA`I?Ab6qNjk244$ng=Y;f_s&hnD0iKrC>l-1Ac1g7b`nP~H zT!e+<>jglb-*L#lpLgxA+d4{E9HswJa)yE{J~Cvxy80M;Fbv4IGyFLCwH$-!m=ap{ zCeJ{n}0h|N@lh>@4> zQrSZ1K(g7&{B;Dl==~da`KR=flYe4jqzYpjxGr0tcFTYROZ zI)^%?NyoZ^252KtE-rahb-ckbR{7F?X$W*kkp5H@w7OepiS=%cc0qUZV* zjWIGH=bMqxZr!Mc{057%N=%o?$Yf3$d&2ITjH4%UvLWLX_IK^)kM?=bSO}dAsT$;E zwKuLlH*wk90+;TYV!8@?GcWf~Fqmf`cX5MXP}QWm)K{iuGb|Pt=NpjkRb))gW-@6^ zXg`SlZ3}i6d>(5p`RG-~FLxHeiI?A;^j;~QdMSf$;Ib(e0*4n6uC2uyTTFY!#;P!% zPm(q_Elm6*<6E^gW>_M_(e-sx(jDHj`Gss}+Nru(vu3AjKYysAj}EJX?Atf$K1I=; zEek7yRU(z1IG6ODz^N+UI933N-9x@=#t1@SRRcR;lPSy@pu->!|L<7|AHTc#}QHAVMFS!>;yEDDo6xlY}KK)(|{(K7870y~M3_HpNcvraC0 zFq8p^M%3J(YT|Yft?lIFy;K{>5_xnc(RK-=sk3f!+1;~u9X2AOV4>&y9BHT+n}Y2{ z0#*qXFgx>0__g|GU;&RuHwb)Lo*r5OnWjVvFSx2?P;iiw2!+K@pUx}aNlpIGA8S93 zMB9BRd{4x3M`{Sw+O=!tSMlPsLeeRZ^t$Pnlu5UbWQLtZ%p`pc)L$mq&d)9XCOcX# 
zPVGMre)U^-N`e&(^HHqwd8g^EojMx9fzYLm^3b!L7qGbQ~82)7^uAae)Og48F~tnZr` zn9~#R&quAwozDpSY8IV3Eeu~5=XZrE=9qIf$~NRe)i94av`hH_s;N$kMwSDIX+URo z&%`iGZH?e`O~$`rZG@B&lcZZxZ7q$FX=`f>11s?I!i22b&AzlaZ9DgSBPDr^wpc-= zw{rK($B#9Ou$a-lg4(g8EQ1)Fz?Y<)eQMVKb@I`pM}OfmFEye9(1b%+-_QNX(TWHm zr04$s=dVJ5scVg=RccIoIJH;8=3{evn%hZM{BoHGdk2FY6KWZl;g8L~)KX#jhlIM_ zdDuwp?HTtXZx7k+Qys-WiBrZ>gFZ^6fE+q9(_(g-sU@1he8F)%i`4-v8(W# zbBJ&p{2?R_HX0>r;==nJY%8LNi!;vQ53FB|t@XVxFiLJ*V zu5N|-pkJ;O1v2rVAJV3*+|lBPiSn?u@4pb~MZekJ_vR>uzk~mftJ3vNJL$W@nLXh( z;^oS^ z3sv53tjEL#&7`o_2Ch9@f1{`dl7C8Ep$LzVp}SWq|F8DWJiNwpUE}Gr+l|)Noo>`n z)s;%jO(+=zseipXd9d%9_9TeV^fe?&p4S0)6~<*RlmV0XDg6rdSk&E-0>TeYxk63D$W0Xn(QFP4wbkKIvtB%r3l>?Y*4~ zMy)oRzfo#tT;BDqF?cQTnB~}BV(i%Uw#OX*X*Iq9_X{!xm?z#m@mJEB%e$aepz;Sqwdbk%zee@3#h?7FY#ydN6b-rzuW+i#)tKg5bg z@g(o&1={iDSC~mT`O7QX@g=gszj_*PQT}ijvw@2gTOg0`@pxENboxPh!l}be?T$XA z?USK{<5K#9!Rcs>uWx(N&&zCgYsm++M&T1or-}T(`|j4%f(PVv-+C64{_bqHOmjz*VaQgW*;(>g<7pLfnc41ZCc?Yyq zSMD`(SB>B89sBPkOO_0qVf(J97AUEIi;>CD=XF$775KFK>vpFWQq-jdz3|cO?jfi9 zCc{qu!#6ijA>7&>IuqkEvZ#jBv>q;G$c*}24+?pjOij<`snkhTtxkw|Kh)O-2zj3N zRQ=>EyHaffRXRdPpO(F>kEnj+ooR0G(={&<1RU5le4u2k>Gr+RW=9Bp!@ddAdI7i+ zy_YR1$6vgTQ{%ffhmv7Rum@BWDVl`m+WCCPtfB#n649>@?N>s~ytlSeSHG*TTYtaL zZsV7*wFHz?E{3BOkl&hAL3})dL~NTtL7JYXewi<<74G*^{~8b;d$akAY05FKuy?Wk zVZL4Hbdd-~stoAW@iaA3UUKlli->SW>)iN0WArA%r;O|_y$Vygfjk=kx zu$yO^yTsi@Nbl3hcl$~$Msz+hxUq=64&F#yzJ=-{2pP-H%gqxpvN7CQrTxT&_#27e zP?8qL&yng$ButX9y3e-0U+GrR&VFu?4Iv6!w@^2sHHoxP@XIaK9>(;mqe9YpD?8H+ zOJzC0%aw{2IJapKnY3clPo&}M5SFfNB?p)C?>&z;rJa>Qy)N*W)8!VbVk(q1-b`*& zL>SJUG`x#L1J}HE?rA8EnCc!z#MfNm-d3wrw_Qpd>{a&k@*ANgUjV431<#s@sgd{z z2fC4?V;7Q8Gq@(k*rbwcy;OmHaaZpMM2 z%F|`Shkn-n*WJXo&N}I`!+r5$?f5F z$JCfh`K_c4a$B}5Ao;Wb&7>EUbO?=jtNfqs@ELcHQAZ09pl!Q8tG#^uAN-7`d3LNV zB`}`gW6xeMe=ZKK!uaanOr+?4c+4w!HyGW{IpCb5v3b&dvO;LcDc|?VC0?aLt3#KH z?-O=RP}%mUFqtSj?pLMq7(|n9!ArI|>3@maprtjRo zdWjEQ^M*&x%(rb^o)zvf*Q?R%N8d4h{P9+*D-;o;OzKDNNm)w7XlgLTGU~tJ?n(xS z;mImAk-S=v^+xySD=5+?FA)2XtGyu{D68 
zCAJNvsj|_(W_?}2zl_5&^XUO92+Y52NPDJ`VKS%=x|3bTe3Vnx_{@ZI#9VjiM z$hamuq%#^&Mc_3PRt_LNQR~H~QFl~{DX;zVQ9BLn?dP;KS4-B}QqM$$BDR+ZRQ9?= zh*9}t+m_~;0R~v)h;E~%FMen#0kPRzo15*HS_X~Hya5MPkw;)!>i<)}U<=SCs~b1_ zFwjy&7M1Gk)~#qz2di3QA{Y3FLG!Wx=41ulT#d0iss+vAamk_}*qq*6$(G7$k-#?eba5HtS{!jjzf)jZO&Rg1ztcL5s~zlu`!;8_XbiJjUe^Gpscx4 zdQeJ=l@R-dgUp$CwYa_Zmnu6iHmZ0>&-O0eF1Hw4=T*B({V4;B*GGyelMCvycjcj( z&x|1sG`*c}zMyt4Uk(^e)2!!6GRk(%u{u#^uK+AzC$JnpW)JMvZ2^p!HpCL%`=9rp zh%*qCjKOrSt;gpgwr9P<%=2HZW9K{%1|K!Mi5L^t)~#=Ij+R3+M51+U9b4+f)8F88 z%3sHyUITx4xp;PVe?GzU{`^$)kJtjwnUS=mt_@uUSoYqT)C$Yt`J1!tCKS&$vMa~m zcl`bR_xu0q_tGZJSzJfSs(V?tCH^(`*I9G9?3m;2igy@X3xnjn0CSw{`Z0^I3Dic( zr6+uL$rUjiRI{XTQc~k(oeZivC5nq(nC^%ye2QH zO|?H!v5214jIaw$;cjMowBa~{nHSU10<_Xr`WnHo@x?q(Q$#)5^giRvgFWba5vhXa zv?KRcN@$4~Tq<DO*Lv>#b_adFda?F;S?o4;vF4G zY>$fEV;SW@9Lb#_q9b!x4qs6S;f*U*lEqjGl%yUh0&4R7h;KYvMb2s?t#(q#2x>8v zKN7vldIf}Gl``ne$S0{3M6k)k zi1xsY+`GdR^f!Z|di%YIV@! z5dkcgCWl(ec82U$^D0=uYYm+% zUK{Y8&=nbbeLKo=?8b=jk{Rcy25jV)#9%o;wKYBSqOf(}>iN!Kn>_zE#76UOzjYbN z_d<&E0lSUg@iNZXO*-zguG?ylesleYI-Qrgj&uR_Oft{0BH-Ju&uS{Qwa`K?_7?o0 z_{}fhrnuAk*P}b(v;>W+0tbt26u3R|1Un~{mlU~a!s5Ndah9j47i=1Jro|Vqz7>BC zIk`Zot^!>h3$g}-U}XJlF%Vob9c$QmhcS4`Hkghvo9BbuO+4P*lySJ(W2u2qKH0+O zMwAfrSpDZ!I!G$DQR|`g3NS#fe~>(fF?5{0Min(Vxas=l@q^4tM+{VM{PQC6T$u)U_k}F|7i!l$_{)q zOSvxqq5@|@`ax7{`ph>@a`#kYNVnY?VU7b1|9HPjKS z20K-F0gVatZ{5Aym@ab3@~I(TBhEOozqmE^K0qW)2HD6mfl}>N`q8&V6=r1(alL_G zhBRF4V(8@mTDQigQBiRQuNmQE&A+4SHIlS|V;eWRJBC3puS|+_mGy&PJJvM2XEfEa ze&p12oZ_Ladn)?&H^vYIyXYowc_HZtH`+q*SPDycb5AL!_qro-eAfT3!R*nDCwxFM#L3(K; z@7J#`dO^O)9wiG>O6UgJNZP9LtZ!7Sdg>UZ3(UtZkJTE1677NrJJ38i%{&^?EWrKW_b4F}&uL)9tt2SBO6W<9R#@M}l(mOgo* zb?LjBuYj;}KE2JLB3sn+0nT%Qdrv)V6KJ4ByEQLv_qL@lG6@!>*QG8W5+Hs0+1yE-x0H!0@81gS`)a@bBA$H!Odr>vx3G6q6 zDNBPyb!W9L(0WeL)g3Kw_1hY2Gktn){GgoDVxvEcp0=-vN-yvmku&}H%{SjvB~QgR zNWW3}2a(CTiqA56iGT>EvNSh!`34DL2sJQiD4Z6u7mI~`9LeMy39g7Ogo{xtR_S5F zn)y;QqwFNQz8ivxtORH*Sn_5dd_@te*_9j)2!%EX!_Y|`hrPy+-)TZywl7%B6{!VR ztM(y=QXnI_ikM^6pmuNDGbXjEtmA%^P#Z^J 
zV%D&$#-K8Lv{9>8``!B+HJO5PyjW+UOAokGH_%(KWRdUP>i1pdLPY61(2IIOnPnY` zI1<;7dGz~l@r&a3dA^`U$Of`Fr@B(YjJwi_R--_#Q1SFg z71S8^^ccKtF3+XASV+q*fZl(G=TetELWk+Nkh?4=fo2S)PApMRJ{c~;MBL~XkcWqbC)EmQv8O2;QfPt9EsJ}I!SL4yV|?Y$GegbuOm?uD6vu_-9+NF7%kFB;p) zR=DMxs8mXt3aWcRL>Wl8*a))RG%^SchzM@o3bcQJcrF#MujsmtJu^BM=81?&l6i&! z(-MW^m|20rX683JC0jUZ`g%k^;(2f)sn|-l-8XTjFTF8pWZpuI9rGfR7Mm|Mzm-5! zeGigL)vuwZh_-#tOXmtNLx6~ew2;qG-9wT>x;Mg|D2>%HZ;*&1Vb=a_rZ2P;6T6&?uq*#R#-W<8|wdZlQU@Y1kOv52sP=Kb8^>tT@6KQ z*uVMQ`%}G#7N+g#OqIO{6gf!lhFlwn*%{}P7cYLLR((|47&BYF+4()5e)y#919%0B znx|ZHSE&zNmfhV>|y!FxZ#LchNp4%tK7!FJg|aHzy#p}L*)8?fWMn%h2qGz90_ z^7_V12Pd2pq?fH5Jqj(26OKwi%64VtefXHgUVY@rcfXeg!lO^9Ew>0h7yaoY(r&S2+VN zMcSY~!prer0EZ2eNZFnOWr5;6VgfGkN#234QRREY=3 zyY#KQgJHo*Wh~r2`=5!UU_K7q?$@Yc!!yCjSnf++P4qRb=t>^o%GTXkNK;rCL+(fbx(R&#t~^ z#=}Udi9UmQ*6+-Oyqx5RR5pi~rM0EXph;R;sjG;D(5!Egm(%8UBBZE*?2wcBk4qC@ zjE(R1oPLV?a9nPc9h6b7*UpyA&;O3x8ctCEy1Au+mx-;MV8~xO_w7{tnQMW0SzyFA z!rr*>B#PAgZ~()8!K?OsHValMnDKPEolkP$%otoCb{wK^|8 zHje@$eC>X9IYA=3o}G|!9b(er>}yfemv+@i9Z@v~CYRFj)4hAEW>KX?u-SbVsf!O% zn>n!H+w-v_mv6vn=5goD8>D}V#oW^{H{(%KyXXF{gLV&{CraU9zeXK%3jag$wJpj8 zT}aHQQ_=)>f`qMidz7+M&d{@zRJ8wjKyJ_I%^NoK-TwSnx^rb9l2;Wg2+Ce_5!aGA z*+^Y)RX0ewz!dK?eTuzT)r_?Wbmf}u;JSJqtS|Bb-6B_D((!H{ea}A>8-g`;r zR)V64EY60+!S>gALBEumSImt|Hy&Aq;mSF%zVvDpLc7bPSf-3nUt+lUMWi387+-OL z=P;D+Np#2I7_oCM>nBbqeQ__msc(M{B)8|8156u{pbLHH3jT%T%6%G{d~M#0m^CvW zpAJqZevNUy5eo>Ew=1dcW$p>3wRJQb$YPf69gdTL^B;|qlTtgS1Zl&rOn#cYXkX@R zpoPqbR(2$@JfYU?lq{FFTg++nmrWGk(#%NUxR|E5HsJG#C`2@yL>ZO7^x&FPw7K2@ zRo9msBgRV%_D}UrL9^{Fx;=weYj&;aYLx5MdoLpcY)m5e^G3Frx_=Q z?Q+O?_-=Mh1meuOD;5OREbN?9>aYi+rm+G_7JyL$Qx1|j^j>Hg7?*R>VzC5==6~PF z*#rNE+SP8H(8S+=ZLhRxz{Qz(Le!o*7;Ri5R)?J2x}fwM~i0L%KUR9n#b()i*P;-;;yRi!FJR=+akOA@sLb{;*TAw~s7qS=cM**r$xKN&19+ zO(A;mkw-TxdntR6xK!gI|7bQSz_kEQG+T36uanb@5Wh1fQwR(rGlBBZll}9cPHZ&> zbU-K=kg(okcFPYu;xDgBaDSF|KJSUBtd=6O`$_Q6JW>z$>elUS9VE*M=-hv1@UqtR zv(IymJ%$2Wc5pK2KRn?l;h7Cf4jex7TYACf)qYvAORu-tI&N8NzM##G9<8c$l+ORG 
z=OrItU%o)X{{oyG4d}M4sj_KQ{|66BO7_tLof+Kh9_@H(!8@>S?-VF1s5zkT$whU> zgsdWBvP{}MD^Oz;x`qtmY^%(~&e!HfMc9edsqc*!IOx7PU`x62aZP6Sj*b7e!=P%d|n!F zI!vh#g*AGZQnn z2H|`TWw$PQQeT3n5m{6Pvg3Q_SiZl0Ms#^LrIBB0Yz8XIeKF)3dI ziZ&+2el#Q|XV?+HsZ;?B($liIn}dl6iJ-KEQ^u}NxbrMLO}>>yr> z9Xzn+KzOOcZ&cY)6kB(wOH?Ax_zI(xyjr|>JjedpibH9*i=UZ5FrQDDTe`w+)_u;A`cNYp_ge=% zYKdnh(;7;ICnCGJ=qimfk6f1p4tm_CNs}h2yNsbwAm!65_j`-D=l=f0XXlC^Nvo8N zol^3{@iEH%e4NXp3wsdHbCE8cHKE6(jD#&dUvvb>6>dqc)Na8Ir091aLRB8QS=!0g z6a#s~eIA2$eKSX=KR$EHTrvs(BX6ybM=7RvMA~};HJZa4=GqmgGVd`RDtjOrH-lz9 z<3R4~Y93nS^N7-qI&yRos~+YhdXZp{?A-;I&Q+}!icq~8Nr_`B-%d%7WGBafkeUN; z{M{p~?RAd!RSMLjEh9DQmj~0qSYWE+&q!8szyx`Aoe?Xf)RuG6D2zH&z9C} z25OZ8lvY14=e@cjd~)X-wQ18PsR)lHadQIPE!_UZ683jEAptu1Cj6GVIMXI?$(eY%fCQx>v4skKCdGz4cbOzyDZ{BZ z7io|Y5j~!boAu=1e4C!pJ6a_ob5u)`Xo7Lcja4ROd3+$K3d`ZlG#-L##_F+e6SJxC zrTuM^0KTNMG3gDlff)2J5LEBY9XI7MR_LPSj~sV2YbF3k0Jujgad54TWo!5j`D7kf zktf>rz8}OeFi9)IB$!#bhh}WR80*Kx<4b@E!f|k$f&tKCY<#rOl6J_nw4fv>A$_X! zzy+GIs=jQRF|-TR)=lC@fFpszuJMCv2d>pUjUH;F9ol;lGhSv---oc9Pr|U7r}(DlR(#TtKl99h7K|LJEP4DR1~%NcY3IOVB6swV~Dd6?4)ZYBG7Y58G!i0+136 z<6w%M2N+}s$FXhrSx4@@={>(bc$e&vf)Gx~UIv=dFUD-PF*sx&^$sFko3g{tPb5rI z=qAgLs{Ro6?eG*SfU$S2@1Z|9!p2GpM|> zF+YLBqsdRpOv^|IXdBxTRNwlPTSN2pKU`DpkQhrwfI5{%AhO08!GScQp)uxJ;+4GZOf4D8i zr`yHl3@-mB#y{A<@uz|g6d>#KjQ@K5>9PN_V$c7`3;F+#G~b_f`SF6+&G)MYYZbgc M>)vU5hr!?e7i^Ytp8x;= literal 0 HcmV?d00001 diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/assistant-WTHWm9BcJfvM6YNr9AY1MJ_image_file.png b/sdk/ai/azure-ai-assistants/samples/async_samples/assistant-WTHWm9BcJfvM6YNr9AY1MJ_image_file.png new file mode 100644 index 0000000000000000000000000000000000000000..78fb3ddf9828118b09d44b1a98cb0659d944363b GIT binary patch literal 151993 zcmd43XH=Bg)-8UemvSfXCd*fVX@se+(b;i^Vw=YbGYH_vHVUm{r(_F)d+Y zl0I`v-aep@Zbw4_G%4O_P+l=$+-Tn;(?;NHEX*--f6)KqsNUxrWdVZe9j*)1+0|g7NTO}#Ft%J*E-S+R_-WiybSjKE3vvkr_ZMrMAa|&9WiMSLfXJwVVbm`LdTed_e>y<WKW4Kkv=P1=QqXU|`nF-X~?J9AK zoT85MZVPi6zy0FxO!!C4a%2@n08;6j}%H*Q3}fB#-S`MPvXj 
z+rE154x4~o%YXW5pOBDhMw8XUSH}$B?78^r`iyJqCC}w6SH_(4-a%H(s5Uk#+lJ9Q zkTW=b?v_heS>SYkMoX$r;VO@XxID77c(C5uIRDQt4*G_+nKtJ(bSxO+(O0qY#C|V) z7=x8hpZGpBG$b?esf0aldb~eoes0Df>r0|G^_cEQW@&Qx~*=rA!){S`c5uavaIydOlgYrY@4e|I)zr0+)_;m{D}Dp zUZp@mYsvn`G`?7WQK$6JK8O56C!3NC%DKM0;=h29jo{J9e%?P`_+Z0T{7FrswgkU% zt&Bp9;+W$jCZ=~8iTfA%NpFf)IH-`IN%vU$hk`G!R&7U#ug!R$%G^k~_$y(%`25-73kcs+<>Cvu z57zJh&iFM?XKs4Tapa4DUYcs2W5C?Zgz?mHM|wwZO-u})MN>3fUfQuW7eHA8+g=}*N6<=oE~FDoW8rtpC-pW5wgZD`7UIKN2j`iW1J=idRq$IJm-s!962)Gmr|Z;+pp?7A?P}riC75> zcAHnhK2*OxbqD9Hb394o#q;N3VPU&y4&_0jYFf@e?zZHzX=PcReaOaZt6bnRV>^e3 z!CAk(=FMldneqPWh+x-fjU3yo+;W8dG5yj_7XwaQ@e&>PbDS9HPwJ|wjZ;g*cVeST zclJ9}7d~X;cb>F}5Ll|$(UfJanxg->HdZCnd#6+{o>{RjUL$XP$K(6P+nH`QEaqb_ z>#1Qqm&sNS)x>M3!f8Jt;JLYFe0emguofI#;x5yv%?@3haq3xegZVS@MNWF#%@1ki zHw`UUXh_!EZ#$EO^vK9)ajkAlGbUe+qe!jW?(n?E2mwT8zB@29WRYfAEm>Jv>CXFV zH5*Srj4tO1i+2?%(a1#Wb3{Mn7j4lRdUaJthn%l4rrP_Ve@oZQhyrGXpv7`SmUc_c2DDtdtkm7mELrC%2qfc*R`P&P=l-Ws3)5cV3M810w>sGPYt{|sN*eKn2_gpdS&PTNg zTERVS&D>320tBymiA_h=b%oILU2>XTXG}fbU%s<~g|aX=5u=&sct9&ZmwV&GUHcCm zy0}+1=<5JI2N~=*9#F=hJm`x8v#8Vf*NX!5_CztnLQ+x^d2vrpVSW3)iVz9MABAlE z+V6kzbySfJ7SjlJ8GlKgj*Q$#(*H!0wU;*Q@bAC>G&eWVqKeG5kyq1XljKzUr)Q4S zJrUUCeKm1vb@b{qht8J<>({PrO%$8U%`l@!BOewG)%R3KjLnXe8&`!%yHD)53(6p2 zh3GIXS-WE$TxAY8R?+ptzf6j@@7_s>4{p z!d$tZa=o0oX)r)PUjX(se+1@qh=?)lTbF){@|-w`xH zRjb?HuJ(8f|IyNCj8c3*|MlWusb7CGC<~}W9vH1r(sB^~Ad-T8rCK5Q)p;mh>;h%0 z*E*Ct4dz8mG&_~oX@6VcBgUK=W82@Tx6jsh-<9`qa~j?nPKq%Kq&kUBwXc&fue!5x zjS|=FIQFgkh48w?t_b zxNbRa;^%WnP2Wjm|9)xS+@agZZ_nFKUq@a~>3^ATSiNU4Eg5$J-WZN{S8f3cGz}Ck z&q*;T-|~PS5)#7Y)GU;9YFFFHp5w`Qf$B9)VLkS*@2rrroaHBrU#gru9%jcA>H4JliW z-FUJ`HYoPZ*@vA%ACq*8dgF5jvbT%tUi3fyrB$*7r>qz0&dzaNDgf8@>tTPvpzfZC zU=j=bPU9xP3I`M30|$yx+B7PGpbPj6t9HeyrhQHv2=_O`(RmiIF+0$(F*T~1_m?`9 zBk5;*9Jm#k28_Q(dhe0(d#zLWU~_GPYT9|M%*Yk_AOmsnLlOha<_q!k2)yZWnmK2u zMbJ^b2Xa@hS~sQ71tiIe{P?|fQ_o#t+5G@v+79D=4Xk;!qK4Js7g-D6ZkKSsuG($8 zXWO=u2+1lEfY`}>`}P%AYd6jpk&UD-Op>bglF#5MU0}~1NeK^De&z(OO`9Uuh|gXo zyLI7e*K_1u)SE17 
z5oSvfuTMAi&!6gKr)J|&y8zBOe!st$V->yZjFgmdsoxP+w}kq7<+(2Lg-=)1kxbmX z+DV0Xo1f)K>$r%JzZx!cB-Xe#_GPovplF8V%z>-nkESOE6SAzkj$UMDV%l?(^l9zq zWn>;v1FiwIRd-j0D!e)KOGe}Qmh$=4&4UFC&4*JD-^^9YrWlGY%#N@!_{Pmf?2zw| zakK50`yl1bRE*^@ioD><<8DJ<5Zi6_wHf)BbQClz;2+1)?hxU@-0H-@y{ql)njIpv zc{)>5k39wg)9t6JCF$JN&H~6aLZR}cJw&Tv zpFX+U;OvqxZW5a>;VpshVJV%#>qBVc-Tj4$4Pm95!arak6QBS4x z$Z7rFy#WZ-X4CF){0BXVif7}&%dc-P7S}7xA3Zt!LwDE&i@7lG+{7X7s$PI__0$>J zI8*oL$&)80UG6KHn68zfm4Bj3GBimg3~XBjzNjmjj`!^WsOK+Tl25SH(EHU{*?bGb z9VLxf))A!CBPB*!w7x%pa)Z`}@k4lCHsC}Ek{|=1^ZdDUm(kxm*l;i$H7E*)n@dzN z8wl5^$dmKI{ri;w4<^=z%`Ovkl>uJ>Vyed)24eStmzR>9+|$#iPooZpkj@E}l~pTw zN;sw_8lO#@mjd8Bpqd&Ptq?;#+99mi0SVr;C0_#|BwhFU0fqCVRr~DAJ9sOl*ohz} zBqx^gFZK2H9BD5C0%8M&?RwFsWLvb}Mwcv~YyZQfYmILh078FL)>SOU6=&yMgh(V3 zVl=4`DkF?R*}>D{QO9%c3^|k)49arB} ztTAEywr<@R(qcENT)6^G<^F>Qa;@+0zVP$Ap=PkKFv35>euh-Ds<#&|WQ&PU0hgLM znE-!sIh9Q|0tmB|++EF93*_M&a~xej0zOM!e0J!LDg8%VQ5qV+jPT7*;@41HLZ#Ns zq3ce!X)E$lLJ(+d`6$jCRuB|EGG<(f4l)frC?}#6*;ENgddJS4ror1qMORhetx}yP z%&=>he*E~6tQvPfJu?xFYYZZwF-R((f}XvRL~z@ zOws>R1yF1A{lhO2Gd?&7vZ7&|^*V$D6r(~N55(8!uv zH+mtHALcxX-_#Vixn+0-vh_u5wy^c|7cf6Qdd{Tc^(|A1J32K2J*Qh1<|FX3C;I#q zlAMqzUkTYnBU!vf9u%bS*|Fm^sSt$00QaQtz@yR;{d_9%<>vCe8U?O%%n2&GA0Js3 ztIc#-N+N50IqLHlHTF_$fM>>f>(T4yeB5SYtti-ds{WI;g@Ka*hD_EAr0mn}FdFeTw)F4H)fO&zv;*^Xn;ohZ;+ZgUDw z<9(k-`GxR^acUWgc-zh4HJ1?WoV6Wcbz{g?%%V2PYOz2+7plhCZw};5XiD5FslJB#%TEFS{|t#eiTeA+Mf<$Jp?wX?l*?&FnTNmn3|b(|1K4 zRC}e0;Q7vs^`~ zpaq#q;r2aZON2cAHrCYv8b;#oswLKv%;VD@o3K zBr>PM#&k`LS(lZG$;t#>w0j5GJsR7)Ed2imJ%l?_UU?khLxd}Mafu!?Z9Eon*R01; zHi|T$1V*AuxB8l64IZ_(DJunK+*dC|ExVb$&EGu#3Kl#KPmtmDqLKtK>4EU6DjQ?@ zkeZ%zxlU@6hs&|;|7=_E^{YIZBvqW%YV;IchfVSd?jb9nB|BzPpD44M{*==DbwBz6 znS|Wo^Wj?U*Y)x7y_KOJfWQHU_|o~eUoP*2XbO}JoSh0o=X9d&3R77*twVaw&CI7 zMo62z4Ji@W+Gj;;9m@!9R>-E+-Cz*>#cH6dz5V*dmq*VMPz%bpNvsS_+Gz1kuNye` zsJR9{32VV*zT!9bA>AD+LXJ!9Y^jRnj=ejX>szi{P0;Pw&Ezi(b_DX|S#<5spxgRO zEYU$r!DTESIA_s>^Jz09TEW)WvL+yJui(KanMWVoJGg4Yf%d~0Q-Gb!%k9k*%LmyF 
zN12iIPfJL!6h>n8Rj|~2eQlrovz!a(t-W{I^nU%c)6^H)<>ISjM`CGUCOqRm?cA|r zv|iV1b1kT;N{f--c@caUpUy#K7YQRW%JQ9gaGj2R(aa-co(5vfPs!7pR#a? zoRCU*Nq7eI=LuIrC3^tc%Eqm`0a#+9u0}+IG%?$HFao%LY<^~taer@61#n1;NIBBb z4YahRiyxb2BpehJ6jUlI`(&WObW?DVl@@#oyok!Nr51}_u1nQmJ0M>i;yVUHgZ?Ub z04RVoKU52Q6fs+&53wMuP~l=_#pk|u+|I!707*y_yL;EJ6C$K9xOea8h@yuNA6`W+ z9G1Ot8Da#RTV+!~2=6k5N zq=ZP6t4Xh7WwmXbk$p<2IV?n$e>N?-Ap9Hdq{KAXE7 z6+f;__)@XSZ_7M(*m>_bo7{KG4O}G-BCU07kA{AE7vlf%W?Y<%|yT)p65T+8UQHKLNELxUn9QHwvyJXRKoaOI(wn zCGOFDt7@#y8>czqY#cU{w5zX9SiUJK`aFn~vDL$-;qmyjMOZ|rsK7Z9IwGbT-)W-U z)$^W?62@G@e%z#(V%8e~8Z%vc3iuFhcsGmN9sh%a(&K}3Pbo9~8BW~pIQKae8CgNg z@4r)70tPzM1q5?WeVDf?nRh@_wf!5?{UXfc(DdiP?p@K{t@L1tp=} z%5jP=V>L?XyU&FS6o+r+-S*B-t7Y$=J-u07LDi_Pd~^Q4ySod~ClDF(kc3ilmz+2# zI2XH`>F{3nBf!vtR%nzE>+25&H-;j=*5>G+vBE6#X-q{%sp8(w*A9ar@kWuyrlCM@~UUP1~Tr+8O$dQ zw#m#P<;0UHaBk@m>oftLz_nyk+0+SGApPYEmMugc0=yG)9F5#~NHq*C&_OVveEOwB z-M%5sgkwFt<&?avPj|ECJ<~4_ibJHw*bO$bTI!W5^h~S!Z~8{7%kw;uTQ-SV!z?*Z)h*_=>_|l zFb1{2i$kz#eD=ZP$2H*>{Y4ztoz^UwROOaNqP8<0_3t}Tl7fCO270MkhC-%=HUSz# z+5CE+*N=5qQbWrh2Y}Ri6D=S0282RmEs?c|Btm#fgr>tRUj${&Wft}Im~&^6GB;tr zgzN@l9mjeb#00|LzBPmnvZ&Hs`(vrv5z+}vcZYh7g0r~plnXQ|*mUhzBzz)UqJ4{2 zN(kz2EQitrnuzeSK#N2(h!73Wvph74=n|IMst_%|pI{=;Udb0jenX~Jh*30r|NAfQ zl^0R;Ll@ z_kr-mh9`gvY~=C>Nlh@;-Me=M`|1GuXis8*wT`aOVdpn$PYRKsduxV$HsR@m$YV3m zq=JCX0i@L37k?>OOq8ZGA<*KiTVJG;)cshX*&JqSIV50ZX~F1%#fihVphWj-+i z*(kz7&RLzG)r*R}w)#@$N@`lE9>RUPNy_pNok;*B&c`I=TlsDLh6S&~I~(lVp~AiK z1+SJSX?38MCjhB$87JB(fKoWp?THUTM|>cUu#e}Al%eRC2nufweg+L6&mS5ZI`(ea z8v8!wW}o@oc!d}rkl6=4b|_!F_PVz&VG)V|k_e2Bs)6g(be+Vw|$MsoSgWDgB z`vu0+#^-R7#t<|oMs( z4GDa%f7}Ju^38M#kqw7xLTg z^N~2g>2;xG`{rc6dnV*hf4Nj?Lu zN7iQ~3JhSgc61;fzd_TW9}z-Gm<Kqn3(IP)Mt&=9mHbL}3`=7- z=_Dm7*$c>|1cGujcwttm#OF|oD424gxru^>8tNMHzTqV65z^~i`m@*Br8`(h3F1~i zXVsYU4HBGLS|!#ZZ>rN@W#eWd=K#-ccE2Wz=9{-*_Ng8Xjc8iy>O3hfhcozt13GCONS|PX zMdCI20BAIXxnyR5OM209I37I7G=v&vMv!A~l!?@eNc41)o*XYt^PBeJP{!ez1Y-*r z@j2>dZFnb2mc28YB98)NG@(SjzuusB^uU4hp;BJQM}N;p6?2{aA?4flyYYEn9`cX% 
z8feuzNjHYR{o!s3RL;4|X#O=4>okxfA7}j4Y_ZrWl>M07%U2^Y3)}I~wns$Bywu6m z?yOZaw{cu|D-Yre`RCa_{SOX;N7RT2RXJDJ^-;{?+fCQGF(sFwKUO+I)3&M8 zu`#W%HxeW@Do-2Lr}iv3yuU8dY-0DA!31$ARdO+Allui zO-(xB^sz2{{a*|hoN=07mPnjzxiS3R+B>B+*7}(Na)}%vSsCH7!2_e}!>TcgaU7i+ z%Dcf7k{?I5eTyX54N*l7ia%-jZnS-TY*xSnTqh6CCi&3H<;!~z#I`*na?kc$JVsPA zp_#fpPB0`K`=E@fa?$VbA06csK#Z53i5=48p&n2W7~s!Khd|%ETEDO~4OdYbccPy`2R#j<{USSP#5r{5zr#~Uq9 zrh0cKB1t}0`3P+o!8-;pW{^Dxs)Jc#8hS!wYYDYfK1%NF8qv`w7iiN}X#2kA*qM7| z`KYDdXsu8a;=?Jt{BYwTb+OJ^l@w-yXY2RNok4g0;KB?P0Yjh>cNIMk3UQ(S@nJHakQdZoGEsl)1mm6=tWe7ag zYk&M+1DJ+op$lN<1ZTWx#13@3IA7UK7f#@tfPkt|<3hoh+yF`BUa6j|2VVp!aPS5& zGjb?BI_z=+fi59x!?R=2?I&t}PucFCk__Mxku?V#+k^G4mRKVihTqUp_3WS7I zeA~oN1_Jl$#d(jCbD|J-ZY|9U)Tq>mAa6%u*N%sU-=7!^IP+K$`k(0MrYQd-mYRkz`{-=6m8pE`Al*mGEd|A5}!KRzOyK;mtd zo$3cUg(E3ALG9^;5&CVWARs~=vFOqN$xTO` z+t?P|VLS=UR{e*4>VMjRj>j^90eogsO&a9D=cg~Vb8IbpmLLfK43vw|kgjH_z$|%hUNv1K=Qa8N1C{qUyaY<|I(`V0YhTEk2a+oO++ti z{&gBLyT!*Gh$Ta^>GQ$yx>hvVWwxpjk6_dZ!Dg|~@4;qTGUiVF6_5V{*qXK(Y!_?; z(z?o?l!J4mFzg}I$^ob|pKU^g5;|l{V+;4^H}0%W3S(%=H^W+)F8GO+OC%CjEpCtE zi)^rkRBorot7pAIRt93d1iu;llgCG+FZ%v?$UyspAU0NE-R}}wTu*zyVY5@#IV(Sp zBY`-8VDgw5O<(Rprw3MTHXd=(Hvk{OO7+x_p!8I25p&Me-Zvks%)K#VekDXw*1R6b zo@N<3%q%Pl=vo&YAn9gMOt4w!97?khM}-X~}iI$Udful?o3x}IsalAb{= z$i@ibTxiVbJZneP=ThKmr?eJucpMMYPB8swH(txfmL?s_iIqBO$xu_ z-EI&P77c)uS2d6fziFfV-b(=$AoEilM)aYx(TD6lmEv?p`}u8pj2^E4_5@l*x@Q{T z7C|f^k~q1nZJlMc5`>JaUtHN{6%F!Y8vScvP(lJCys%ZWqSSpZqh*io-?TS9TV+i0 zTi?k2x#DF(*Mbd#{8mK(855!@j?EmFCzRy~kd&R-rn37qvQr7^kvCd(0q(%G<0kd5 z36G33(JG>-&v#aF91#jn3zoh6i;JSVpKA-&8! 
z;nJl`wQ!BFFy_&XbFk1~CUq^E2O;nImQ}9XIuAbTnB9Q-ApldN?Wi`DJwPx(`812< zRpV9J}w+M2vxqrq7AyBV4iyNY-}KyuQeCU(XP zfmPF8c+YV{$;8iTZC}_;a+AlBp zq)>TB&j&O=Wea`?uaZPi)=1VqJ|by?5*ckYPH3nl^irPV7?RoxD|&v!PjKg=Z%@CX z$D^4W2_}JfHXD=m9v(Mu?i-c#0TW^f5Jbc)YTCW_NiLHlWvfvx=lNL&YI4fNH~f9i zF}}#;tdpmjm}A<)tm&6}}7N zV^zl-{m`Wov?3K|*nzD#Ras9CwVG`$c?o6FgX>mVyX}oeqAB3OQ5;Dx1s{t>d-n)R%!S6_wqh9o+)QiPK*W7!>4Mt4qI$Z_}Z z=(;g*Vdl1p*$C%?mRa!C&0QZyS2jDcc*hq+Tqh3V49^#vl^nlK`-43Yj=dl)eZ|Bi z7L85x!u&)5JOvxMm0yG1bs29kaLz=BxNG8>;+eJ)!2LZAUc=}na$S^przP9 zhxR-DeHj?KF~HW|5D%{5Zp(oEfFO|F6PyOJq#%ayh0DBr0|M&H_HwtW4mQ!z3JKWI zKaW}7xpQX&9OuOMjDCgq!w2A!G?=tGf;kR)pMv{lQo*NZc(SmXB|2 zJ6FCfd02iHwik;1AVr}3s5ROGBE6?qJ^=U&N4mX4T!T`L6YkK*(N;@hrM2N?uOT%;RWGa$D0};8sAps(+%bPqaRN8;_vpPu4p#VS1q! z|0KA%wz=W4H=quWi{fYV>O>}{+;X>gjwMX*SPC0fjv%V-M>mxFJFif0jjQ|hlaoWwOa& zA`b7r8Xk%CdE2 z4ZRqvsF~=I7E23@L=rRLG#OCNEiw&tQ02IC8jcEBS>HJ~oxWL17r$U0-s4s1%LZ2f z=kS8$jQVrVnOQVfD&ac?n(jfXBBDHL(QSk>MzI_L16^_K+XM5sXj@Bkk|sy_PU;6} z_}YKBw$V2gZo7YFH4{_k>GDPL4G4nFK!9G>SL^EGJ<~^9y=qnVcK7NF7cY*f(#AzU ziuR-0Rrwv!Wo8gp2!bXG;gO9dQ~_fL8_=vERayBlhCv+E;Y{Rh2^CM;xYnFq zB1F2N8TIAfAp6s#n_=N3E@EQJGb}I6y$nS+56}~hN(M7Sz`QvY69Y55!YM?rTfa&+ zNF-`5w8eckU&zjeJ{V=3_N3Sn|xFHcWaQ|BZ@wwyZ!Ipa*IJzC~>x<^GEETrM^v zL(j3RgEDE#AYYFH;o+}kzh(u~Hn-PgM#yT{zJ2F_?~XG^Wdeg}< z)kIw_3YmQM;=_kC8C^Z8gHa5Vdu9d<&{}b5a5FqRR)eDs)XoCpCU~htBHw^x#d=Q( z_AUySj(O?U-g3lx&~(l;^~ucPU~rLCwqe4xK(ZL1-kbF=)TxArEp#Q9{k!okPcWb-&Qn zFTz^ODuWh4x7ePdUUr|L)(3QeNuWnx;I1|py+$CypJuh*(Nzo6_83?u#f2qLInThpo_TpKUFtEQ5@fg9fq(T_tp77^0@KpLP&D-Rl3SLBk5C~SF4qfM` z47|1$DG-!n^!0FT&f4KAPvHRzzlDbU$-t{*eq_pjUR?z*qdiLvzC#d3#gVp|~$c`nm&MCSx_v`A4t+aCU5EzZd8tdYycB1c(>>}~4s1mX;+sV)YRa%3XzHoucn}p8#T8sv*@9+R z8th=YbBbyl84v?a+O8-JbITEKC5Vl>N0tVmv*9PxHR&BdJ~?x)E2W5c!4+j#B4G-w zz7dQ(mon@WQ=vJ>5N#v04Wz?Fx^4v90GW~sO+O0o`UXym9*|6_aWe>Dj?R&?3*&_u z?Al-b!O?cZv&gr8wE)03@>mZZ_Ok*FRmdt+Fa>{*WyB|p$$&~^dX_p?QpXVAU9~@- z3%aIN8YlPm0hja89~^0E+SC=m?xhv!5y5=T@;SDqXQiar<9|V+jy-i}MMw6#32^9} 
z<6GY{uC8{V62ce>q|v>#88c?!2N^Fyrt&^D7<;AIj3@k9$;vU*Jv6eJoDRqs@EIn> z(O}jqa_V+Ejl7r!{6HIqDZ{2K&@Q{iea^-nxQ63FHxrZnDpAZWp7eadqoKMuF+j&u zjWs>zI!eF0Hq%>%Rv$riakh%F%B{8I$OBfBmxCHA?u=8t^#j;PB_VwXQj>EEr;yUK zTx4dp^N;8z&4Tab=jH{=95)A3-^(`b?2#Bue|WH8JnFh+-~l`z9-5cO7Y=G2rCL*! zX8X;J7hInv72gb4ji?62WTrd;6Q*8c4$bwC&o8g)bc)Vh0sf# zaUi2JGJfA)=w|I``?d2avuL?X6ecI!I{@?z>l3xJj7D&52tH?FVC<}KY-t?a+>68| zME_LO>IAO3pcM8qWdGZ`WInd0>?Y@4;Cez`tjKWjPWKLAVm)D()3(GU0NQGE?TbTd zigt7xrTE4#GB6Qyh_y7x-f3TRwE1~Jy#0^wtPJ!J#(uXCiir_-&akRFvgI*uCu4c{ zCe76iT6{T9oIKJCGc60rChM4(WZx_niMMVG^Q%^^IvP9E8|Y>Ylo3eGTUfYx@tLNkWH!QK-I3s`3E0yHyiKV7lIhcBbz zjww55>A82gCrr{>((wiE&HBV$Q2%_J{sa`H6U^*0;L%UeYh9HYh*!F{y_#J;TTY%r z=0S);65bYYd5C50u~gjk*|6MRmN>epB8|iqQJHoz8|_=uVXK?FNZoNbDlV|u!-oH6ijqc zr%@~%(%2SmE?LH0ScUR0w4TSDGA^(Kd~+1KbygatkSaook_8gJ^at;RtZADmSiR25 z>CkmI&Url5b>R)?B*_dY^$TO%bScv}X)@eTx(zjez@bK99btNM+yksqC$aS!$KCvdL9IF)v&q_v{Ojm!9 zGn%)f+z`y@9lT;l(cze;$-!CTT0doJ)=M+X`SK(fg>cc?C8upm9P|=hZP~GTk{kWUN9W|A^_w^M9PS$=gLTjL z%64X6badBZ^-c(Zq^4g>P~~bL_l0a@<9Lf$+iWa zeMja>7O%h93hGUvqoFc68S9tSAmAQ zA*i#^AsmKfi3W0Vw?SuF3s9dt``ZujSAY7zJ~$}cv`|2$U`g1Dg-AdR=%U^mUNgh% z4PUqemCY^Kd03h%&xy=hgO};IwQL7dJKg?F)=JdG%(V(GXOEzCOlsw6H{`!oBy5{e z&4Ie2vY7W8=rTAY-&tl@Si#Sc2Wrj{5*7HhV{mNd^}B$ zQN4)0z`M_L3;*YkiC7%8fJ)O+b7fvxM}PWQ~}3M@w4xV0%pH@ zhr#_*m&n1+&bZu{R|zb24q!ll^!}9)9AW@(bH2_HCYg{ZWT1dL4N$^BA{)Jt`?3dV zc6+d>#-lGXcrwzhDsFv*-*!HFJXUu45V63QJ72+q!2slWJuv90;d12NU)Nv%yZUMj*h{k}ymknT_ z33E@_0uTg5;O!YQh4LPQf2}`x^c^Sg0}&RF8P{_6H|MDl&Ilr$2{oIz&yoATBCOgE z8w|r<5D^&}$xSnd|CtP)x?spK-9|0LOaW$*8t{j7Xp6+r)_P$aDGVyD49S)0CJk?} zK8^Vz*OCrBrai#!#q>JVllVDDb0COeKjy#ZDOCdOdOX!jNAD&qMf zZBTw6fo7P9);TD$XV zaV)X`5S>k(TrzF@Uy^>h5Ohsc`uRSdoV%ks%0Q!lR*v` zRlVEmCDK$<^jEE3T{4yg)`$$yH274nwi_Qwb$XvS(Hw(R)Qjn^PYS}QP&I5?&LL}e zN=9OY40$HR48|COw&BD~GWCU_jLaQktH@XYbvBdmkNs(NW|{t3#Ck#|_9RzjK`tjV z%y9Na;RUjB&Npy!#sTz6*R8$)gj#_^MyL^w2|xTHafXKs^_k+cdg(bCM98LP6BZt> z3f8N!_OIq1vjgmN^`xmJ>6`_&yoirTjPr*LR4ItB`D=viR_GU(h@sJxK_(x;RjA@g 
z6mh1AFN{Dm&=XH5x5sJ7H$Yih`LXLlNZC44fmHv6Q}9!JO?_ zPj!(+n*v7LFm)V7d{{8U6SlD58gElW#!Jw+lWA+rZzvKWfBnYI4&PNtJ6;3l1xtUd zMvfw}NMKAp4y;YX@R9A?PvHPnEgI?j!BN&Zjdpu?wEV<$vuP20itIon5!0MID`#)T zYHc8abj{7!8$?hwi2LL|2P+KR5=-_=)|~?n5R6*HdGyzK?V>@DTx$Wk^ephcWwsU+ z@KGSdOv)qBTgvE3NweASOm+%YLW4*!aK(H8g@x?bCzr6@oP+`7F8xhhcF^=lxhGRf zpe5dhg`N1If`jFPR6*=rO+uQ}zb0>f$V>){;8YDXYe~|TD&V%gP=(p;5Zp09Wjjsq zQo1RP!@cw(;Wk`gMOMwSdaLaM6Mx?3=$4;<`K1z;pfSwSZCSOS>?XLl-Fgeo1JHni zoQFS&+^^Fh?+BUYno~cSGe)ru@Ej)b@_^1m0$c(2O{ldhJ!R!Macy7Fbr4!-gV}N* zp5Gud^<)m$bjwe$U6R(V(}@MfGtA9bRaHHI{+vRcP%gM4GCM?lh0#v!4<4ZWH4*<` zQEfOSU;H?;Mxke+{8T`ai}{8D42TCgbZ*XQiQs#N`j68<1_9D-dZz3kaB@415mSHw&tN$O<4tFFzqlLQWS z)ZA~4D}HWqoY8!v&E1Hu%yJTY?sW`eXvOj6%389UH*dVrm<+$Q^4+Gmoah0|sYoJc zS3`VW%BxjT7*ioU6`4aD%j6>tK9B@$RTVYOqo{}{nu^zL+_(px1fnlGX@S2}$C!_1 zdeJ^K4CF!#*y8IC*NqaW1TS?rocP|9Ih&A^x0aFyO)W3Kf2^OTLDS+-Zy;iGh(ORDSW(=kvO%PdA*VCYS1RI%5LVcL_Eb8~bkChNV&5b+ znQqn{c_fIraFUu9!wjqd-ysflOcx=@ibt0@!HF}9g<5PW4^S@DIqh)6X=WW+2~+PU zLv0Kjcf%H130&V_yK)!|f+0w@1AsKuMG*hqfRp4d;6_=Fqy{uL|B4MF`4+=atX^b< z2~;_&r4J@<$x;$^D^zzJ=Jxe@%*&Q7TXg>eDN?$V24ujX@&oA-%XNsa^2cUteWfV5 z-NYzUyR2FhPI#DsSvDO$i5eWQR4`Qj819B{j82!08FN0mef#z(J}43_XFhA8DJB^z zF*rr=afFu^pfw%?Gqt6u6&kQki41BxhOR~e05URB&cIC@aNd<@clTqByvq1Y#h*Js zo@WOg9Yr{f8!c#54$-(md!~F9LUKqPfFQ5yT3;3i6EzuI&xQl=D18qM*VV=uXzDQ% zN>ttvl@MW3&xjyL%uEwd3H~O}R>r(}+xJ*vy!T)?oI8t->X`QX-?^>Z(agWxm`$$H z!M`{!J~w(bh%NgqBY4Shwv-tfL{9f_ww;Joa`Q#_@-959D0q!U>QulR#zP}ahM@6P z3ZDU}KCpfJ_L9>X2Y;-@0RpgFq)Ne|M$izR*Qc`tbIPNGvNw{+J7Q--KQ1V$fByXW z@Vf&?AlxVLu8kNwft`&Z9FWE+-Ml5JgKa757%@xKgh;F;7RFEs zO|->VN$KJ@YdS%qx5wkh&6`YkDstC`JSvA3B><760en*hN^A^n0+bz%^A|3ZSOsHn zED}ata&H3hr5lX+DZpk#j34A0jybULFI^2eF-D5Vje@^B>p=brydFl`l>p#K2`1Me zkg|`=j+sM56fj7Q4-zr0 zD3JQB!B5vpdtY_x`A zq8q{|WL2YE+(#_ub5MM7B42=L3c)H6tAVt%w90r;&f4d=cL^eEHOj*%c)1%08rG|( z^{Zh6r@FJ$psR)rrg6~mWanMtF9hV~pFM;Zj6<`&CEXYnXeslP(t{5Em=MBzeQX3X zEJ64VicWTT8UhVS@n zFJmrhkQ)bRLb_={^BBTr;B!jG9TZSW$Pfe3q5)%rq`ZYD&};RA6B}#1C^U|qJQSVK 
zKHQ8zF6BVT5p#n_)NcX5{QPqT@@6bPmW(s73W(&qLGE+}fo;0;HaD&n!M>^ttb@gy zq(1;>;`x++dwzqjR~;-rSV&b&-;*(14tJ&uB|Hc^xzBIA?nK1jB{C38T6w`6nP|GL<5HghTQ7{F&XmY z{$0Dyz$QeJESdtpP~K}Ihvv+!Fnpqc`^bP^7U!kml!vG50ItcX+y3e_gn4EyGtvR@ zI=$_DL580(LyNXn0pW=BNM>k>YZfoP!`wST9Z7L!Vz3T)B?dpMk!xRhTvQUZkn9jJ z=D0iOA58Pa0VCuv9EKnY&&)hQyJ!TR5t6O~glpo|C&OkBXAxJg2v?6P-;K_jTtbHC z4hV=CUilK@)TePEkE6Y|&fdKOC-@6vm>%=vC^D|G*Ur$nvQ`B0C>jJ<10)7X~AFhy*@_rW!XgG=^ zZb z*J>tVGBGCM&dn)!q%M=o4IVro-f3q}Pc7XtGy5&p|Llp|N#bSr*RRDJIk&aMaQyb> z^Y-oetjmU?CnxPeGR_Dwzl-^MSO0Uh{#sSztGSuG3{7Mnks$5D{Oi*&++xwPVyXSw zf0pF7`o9Nn@c*6H`j6W}mpgMpe4|Z$mDloMve!Uc@*jcxACbt7_~~VAyC#NIA`c7G zn>TNuCU4a8XshFv@R%0*shzKOyM#06(toYS$@Kp<0TfScarcPPw#QYMOt}XD9OLp9K5Z?cs*CV*2U!Jd-_A%gmEr+v~}DdyDF*dQ#2Id-Ii>7h6VoX3a&=FWcp^Ra9ToP_pz^J>BzmR2+aRDZT}HkQx}tH zMrr4tqV-2W{%ccFOJB9D&ncg48KUB$#x4IUmWzL{HS$nrf|wg1Z;0?~p@8ABBc*hH!3GgI=WpSoy zs%aRrLLC&9>POPQ3@q?jkHS={KXP~V;ufIon)%=ESn0t+Tz{ZV3mj|M4@adKZp;Io z5f#v0ayaDB`W)@0@L!To68f*pIdXn+6K$0zu$7-NQU^~kp<+@@srU`-Ba4_%DIp$Y zs&GGC3xEGQ94vCk_JSM?n1N!fOwCno3+~rmaO=q6}{awUumD;6s z30mgLmx3c`4Z2z0NgDt5ucz!&>*v6*73(COBT)kE**TijlxWVzl$_Gur?Qw<{?|(S zA8AEIwa;f)OwFs%BBpn0th@IvZXbwg7xG$vAL_%atvLU!ewk#AM%Z!_~(J% zUBzbw30muWaxG&r?}aDw?`&!SIsIuEgzD)NzrAz%ugAHxjp{d(Lu@xhhy?!!j1d8| z)qy`v=L7@e$rSP&ZHPIWTyjdTnpB)2%qHA-1UUh55Y|`j--Ef0rX2yD z(?Ul6NplJ1vlp!g>WUn?7BUG*`a81>+*lxp2{{dNmEm7_pWHo3gl9m-8eByI9GOMu z`RASOZ%aL-lV)fa>9q)fx!1pp4>S00=4VG_BfJ1B$rB;V655^&S&Gi|rI1S-H~jkR z6TKX`J-$N!SsZ$th9?5G${40KV!70G=-5Dr3!b7GH0}%xwaDSHYR{3*2v%=$ucSsc zgGPiq8#E*AVvujNlu5b^pS(f5 zB^^k9WLBQ4i|Kk_SR+8SkTFgB;Y7`=z_N7bkuRjfB3Jg}5y@4{5EQP$^+>o}JPx_O zY8vVcno@D)X7`13mpd)W64y09Q4@5TbYTSYXi7^LS^8% z47TPQ=L=xIJl@>Fj?CFR^i)f)z5~sANnKY$Tv}ie{sHBy4mFjuj$5Q6| zHS>F}=X$Q|`Qz!I!PM!T&*%Mlzwi6C-IC>Rfb}~7fow;^g`_88dvwDEK$Anc9$+Z6 z-twa<@YZ*}4Oo^tI~np;x!%T=u-n+NgYemjS3#(Oafn4EBLCFhI)hPMb@crn(id== z=>yO`0f zR~}q%@7*HOU)riW1g=K6cQO0TZSvOv+04EFY$*%;MgiHeAaduvATUKGRu)mDpdgHP zJL40MNAgThMH6gc+o0lMvY=!nQunfjGhlU21sQ-4N|3Jh_Hqz8x-u@z<WzZo2y&0IPS--9_I?%i+9i| 
zs|n1MwvB&2aqaIH>muv`K?=aUl}SR0H7ir{aY860J)DZklPCL`#-L5*zLRxmzW~$1 z{Up5i84hQ}cif%BT?11e<+d4?hNKy+m=dzG`Py^rct?`rQ~&(401l3=T(8IgrFfjm zJvL^})h@O9dDi^{Zna}t!=dv;sP;cssZVcO*#gVcg%;@&lfe5P#MzGbfLs8{Gb^-Z zk1ypN0hg~WmtC-LVke$DX&HaGzmofVvN*nlf@O7hDv3E@#moKj0ZjUvyV-a=%S~67 zW5L)!HUpn<#$Dwe-`DH{q+h$IHxV2Oe3Wnsv?%ZoM1av;$=$)w7u{9aqa-xStl9`C zAN%o!MgMee9?WgVv!##S%Cqp0%?DO(;|e<$(~;N|XTq*-(hUu~Rq-2agAiq34QklX zgSTbGHAO*kB$>8tl~|CZF21G#K4;U3Qv-zJ)v)F_Ks8`pLwiT8y*eAFjUNSS8kRGn zXy<^+@E!em9IXgPT0l5w?wd3OGFNLW$JB*f}P zh!`HS5D?eyzpFF$-nb%9Cf$7Er`1b~dAUWrPnl*=g5G-mO@+Zx$Ot z(pPFaBkv!saO;y&eNcCrR1CnJcbcwr=|AR9=8#L)Ng2*Ss6wMr!RaCdrMBwzYeCMb zvu8C>bujC2DyZ;vVq@1T7?|Lag!Yn~C@=#mM==SB?QBYD2(3j0X@ixIxD%25Km|(& zmA;kj&GJ{gf`V&vSnOTX#6lJa2J$x@d1U_c?qxa`m>r_JyHIlpa&}sFMxa`eM))6| zaX(ch_2b*=f5HR%JBYrT4{zz5Lefz+0d-Cut^*e)_!eyxk#*B{7fEvv{~B;Oyf zF0{<=VvI~zmzAZPReW^L6ZO`JVqHYhpkznP!m22KpTl(3a11kb0}g*gO-=FvXknpi zKT2L!Li!2%0#28gwt)I4woBKVOt2@A6J71fd9jALhe|9LJ<#vrM*0ry< zo>94!CSr8hOzNjpU6eohIR8X+ldtWChJbjdy|6F#s!&cYlTaocq^2B`g$oz*B9&`^ z^u7hJ_F$?jmo?13ovx1jwfT273+ zM!x6m{ByHqcd<#d1M+DR*C}uHr!Jp>By2?9HJtJL{8wi^3w*@?%FX*_gJjZ}mG=$^ zNjxr^J7-MZyGEsCrHL6H->o45eOS$bg&*g_B?w4v0C?m&*biU_0l<0RWZKoe2Mx3x z<+x_k4y{cjX@(HTF1Xk)TDNyiUQzJ01goFIW7g>YIBAl?idlM_JZBw9IU)MFv*iIH ze=DDl={3qh*hBIH)U8C3f-tAi1ElB;Rs5}DGa2@%sH9R4#P=OyeaM2))N)pl6Ci2z zSMVA%nI=0p!ky*L-8kHqoilMkC;|6f4hQ<#TT3_gPPZ&wFn9-#o>4trPOk=+)@u9K z{fd7S=;0T4z-LHdK8v>pZJxdH?N#tzq6s$_G-^{Nz`);c+^+M5@GQ=w(i0=*xCNb3VTH&kn1Au|iDMpt9!Xb81 zU_5i)ygxg8QSeRhAUYH!PzlD=j80ztTY}gaI#>5zJDW}>JlJU2hS$i(mE9r)-6cpx!&!*Z`to&ji@!huTn4KS zKAVHVCz*#HgkjW%1LAIYq44^TbkMxKpzJl=d5j{^!As?ny#t&j_7D8XAtan4f9a6k z315mOq1hDomP$92mm!Uj`;H3Adr6c6g_206D?1_^6~PSC+2#wXEQp2=B zxbv?GRQ#S^sk=m#}`&Ih-17 zJ7q7dxGxccMn*ucC3aK^S-HVaaa0i(8&A@aUMDDODM&!6mkwjBnagPgo=h@pi?y(* z;w0)e`^pMMeD_o91OyLJ)lGv(_U>N0CxsDuqLnExjbW1(>aF?!LJyd)E2}u;f##s?E3z>W-{H=%trYD)m*S#24(n-l_ zoV_`4X@{W5HTKRUOLu3WPxW3hKi4kRuP%bapMi&Zu(mVY=9xPA;cz+EF zkip`H1a~LGSJ3%Mx%4MeL=!lv>77JeBJHPUEB6>4P4b@Ifx`|Z9tkB^SypeR>>Ls= 
zp;c$D<;^s}c32n6$zR~DGhUuR}F|A!00yjuC6 zaKWbr%27HFJ1;HPq>NnZ2dP*tdpQ7EFcN$6HEyZRrRZ(GAGbi}M>6A@)=m(K6jw;D zBeCVnmuHz!kQ>(GVWe03m`y^)-^d}spM9}CQ zvcm@;k&>W;jS-y!lvSltyU1<>zC!?N986~b0WPx1q*97^$_?v2yE^scq_4|zZJ10$ zi4cArLLQ6X#Mx^1+%17U*nfOhC9YNh^^Bzd0e~lk#Fz8J@{wTwHplCgp=rAhT}LoX zKW|Y}tK)FfJtU2sW)!m9U^H5dhX6Si`WP0Yj@&~^5(cbJS!T~sVK!POq6j7Ef^u1v z{iHPU`wP(NQP+-hEgY^C((UpbBpr4K5l;ZFFOpoen4WT+m`}WqTfdz8DNHXZo>ZTB zKSw)UhCr7)pTzi=;}$=|IY9I&@|vX5d$pl61cKAL{`U-}_Qp;@2V0{kAk<9btby{l zUn8E!aODwfwk!R3qFcU`w?`7iQ}hzT|W%--!gHX z*~s^{Air`v%m7SX@%^C#%!LaZpq8WXh6wK<3i~A$9?1*5Uwx$w7z|T2`_h=*Y?ucE zK@lG&*(Vfgq)z}Bxvo4Ck2W>z1vuam`6nyLSuS7nq-ZWfZyLM>4U-sA^S|46omrjK zdw(MCW<9+PRLzu>4v{V%34Qlb;-owU8ZCe)9@eN2Qi90`W=IvF?hAUq9l# zOGdH@6_^ab+76DDDlBaJC9{#he zY*MR|cnU4{cHCZpNf(T4>=+ATy8j%-Lyb{R{xkl200j1n#C2-GX-$#?5dDr5>JlB9 zxDSgEGY$1a2?z0YG!ZH)?NlOQN=4lE#VE{obB?swtaHwmu)Y`nhRTH>i6)c{O<6vV$zcQ zs@KQpPC~Ka%|u&T{w<~Yccu#0Ru5wo^n}JS#-AQNs+*__D3W-P{y*MH)Gf7zfkWO* zx6a>5X8XLDUg)cz|QdJYYF|h9NH_I;)Xhwgr;lP z4`VEqbbP^kIFWeu&ktwRlmvu7ErnJ|g&m#J6!AU@awsxd&qQ(~y_C=vmCpYWE%&r4 zt(k%+Aadgv*rJK1*ehyAUEOwloQiJw;HkF|QMsOrmi}SD;`kqc?PZWD1w3aCy(EBl;H9C#vFq=G}Nq>APdQ zt`f0v8Js;lb)a8nbN^fH`pAkzC5w5rsYq_csexF&3!bPwDWMI{h*u z7tagRV(;mX=LS^%nZPM@pQM4v%g{wT5 zVE@|y#b|Gy{e32G3sIvEH+K$RVFuG4aWG&69pYI-{&hUA8||K<*M{t=DRcgrcH8$^ zozn)X@Bi>T+VKBn2>*9w2oQci7j$z#%-Zas`5&~aPo6eyA&bU)etovf23BONb@q6R zi?e3Vnp{=2`QwQE@!jHj`QNA$u;)9YjJ0~;g3^HyB6Q9~agjU;1mR+Y%LV>0q-gD* zhvON=cH7atQaHXwj2^EB@32hi5EiiVugjrUwm{;!BskCfaa?H~|Y;(KkYb0(qd|&Scn%|> zEtW3tr!EY?jhHPSTZk5jgvn^aStt?m=o{gkuZIkrCL5xU4?|B~e|fj*6PpNf3sU=r zvxulm3R^-_H|5-rOc_l>J)HKqu=TcF!YU}Lg^n1Ey7OXQTBN>_-WWvy;yim;{swRz z83W041^$vI;!yJDZqtVwKva@Axoq}&nuCW*v;aYDG_R6eKp@c6ah+{I7tmNrpZ<5Q zv$FpPV)Ck)ziGrGbN5b73>l)HdX>B6#o4M8j?dBT5H1Og=QfykBlKMQY`nYr;18kJ z7YG@-hiDL0Q#5WGsrymI^9Jed&$u9%9tTW{d@w{@D{$T7xJ9Q!Xu>_*JWVc!#Qlw~>{tDpF}+XHWw`*$y2@ z{DXH;Q_~>U1}p@s@?S;cj*+aEeIK7uC+#Rw8ZaCW*Om|vSvaiYalMTyga2Am1}F1;bsrRw?W1T8fRVm 
zBz*|J$>#L<@#7=hGRQUY7h^GNGHRpCr3Z+3d;w8&r{%6VR>OeoQ1pXR^k7Nky0E5X z`5W-xbR45hpumwAk7)i8{;~=I;DDxz)s{>IFpI{Tk~wpGf+dGY{>K1uoa|uA*X1ZnsN+n=o0Dd^kSnuK~1|oPG_- zF*&p-F_OE6o-6Wi5!Qqogo%GqWR4R1b;Q~mFVar4*q8mKdD0bDa?9er(TnHEf1$e? zAmWOu8!Fq20iu9xZ3GEtJnygTl@vP&5J!)V>Beb-8|`#qYHr8TNi!?R4XlV+AbJsy zce|rz6XnJO=TidnZeIEZ0s)$G%56nDOJS^;{_a5Oq-@#>e2n~~@ALv=q1|rlJ|dri zZ7pUYZi6>27ca>svR=`b9$diSpmRxNj?t{(I0E)U$Kkd-QXZJM!Vy~ z8C&*4*zay(r_0+$WSF7Nf_Um1a__%=S`^gNOfi(p^JZUumz5Sn<(;OloUgqKPc}kn ztg^X~e@0=ijbtrmxn~s{FJ&DcZR)}w<+FZ0 z2B9@g%;vU&`SCT4h0_IcHs!ny$uxxV_!|0xpuXGp2wZh2R}kZT;hZ#&SG6^Pyt*jZ zK5OuFOCj#591j?)$nik(hu3G2^yx>UFkFTWV8BYAscgi4iq>u$rUuAPyUQn$^V|uJ zt57aHhA%oD;ZM0|va)IoYW*jiYKV`dVpdc)i-lFlw_+;D+?s?#v!F zL=xt&H7AOn08z@VrU{u)DA>Mxd526Sh-BD~hnHqdL0~`{Y#PrBO#;o)VZn)ciJ61| zM3NT;mcxrt>}&=qSsT*a=qHJ~oNYG3c?M)yn7pWf-R=dhR<&5BP%I3*D=scT^C ziS5%1+U&Wcwn1eYl5oh=iJ=v2j88E;Duatas$xTJchlTBKc{P*(Xg!jIo~AyH5ni% z_lUc$VfDngeSG>Me~i3$&3Smn#LrY+Wsl&2KMsE*XC~@}O-9}az|R1B7r}EvfUV+Q z>6=*CVW@lvkp=s*I+R<6XYS?rqT3isOYSO84Wuq)b+9bPjO6E?jv!*Pn#PP7!#!O- zgL@$e9#D4^U6cHsIuB2+uCnd4cnHgkI>rB7=I;NvJ2V{S9<&CXI8&s~-9hmI!edN; z#ky{5;No<25sp}3{0BOhJkGRkw6n8JS8bk#;|-!tvD>b_P7|6fA8<9SZoqE#gw#~Pmt}irsBKK7TfO@@<&kyX9hI>glc1FOfgQF@8{7oI|I{@$JWSsv*5^=9BJtCYMbaA(E zxU0k>JL_Ik*PMmICs$sSi+#v+|K+Z}?St}&Qm?bo2g=rn&EWf(Q##Ug3f$L025GB0 zKUnCy4?ha8n-TMg?Y807n;_cK)X@>NKYwg#-*3hk9!t9G&@qb4KbZt-qx&p3uP_tO@r` zo&_cGi0wLyal6kP`v5zV5zi!x&a*4&Wj-LP4+7t3^Sp#adcSHRTofDT1*TK$lZ7;l z?w*MilIbXJj>*D__zX{}Pm^5TXCXft`Yj;*vMhJIt;j_t4wh21dplS#6y+3Tyz4AJQ}RJ%$=H(k1BtoDDMt3ij}8v?(;ol48*KAVO}Z@ zM;IPOZe!*or=q;B+t-47)@1YcTjh5TMRKlbUh8bI%nNbSiG)(>`jtWWGsR@sBmNX& zUzEob5pZ5ma$a4}l{P?j>UBrl^yr9b>cU{kc#U5W=*r11S}Msu<4439UlHpC>axuD zxP`P)s@ndutCO_tO}{zB0*n35;V0{#ZxVix?WYZqAgXwTH(0B4dbJ=*xGPxo=OmN# zrZ-bKW~t78VkV`r4H4NWg=@o;h$ZK zijqkrSjk^M&YE@j?FlD1qohq%(tAp;j~FVgyNrh-7m2H|>)fVtyJ}7AjbcA2sL0BT ztA-6ixF71nI?!u2;ePO~)nVCPWZr?<9z95oWiQU~5@%R>>_bfYVZ!9+8XW))$EnD| zGz8Kbnyf4bpCmACns?3eCdi+pP)l*5hp+ur#3+|1bSEHsi0;guhr%E&f=oX;c+ 
zC#6-qE-t*lB=bIfJ5>3^No7r^DF0RdXQB5z@-|uy#L*J&(+@}Sy7qG)PiRuJyvJ|Y zQZ}OXH*A7Db^f5Q;=k?hz;ewnB%OxB<6s584IY{1cHe?tf@Ip`ro1mXSwEU$OAut& z4z-smTjRRIFdF2AGTi8V>0!`JSpPS{dc&&wGx_7ujW%0z(dF_43mWjNk@vIC7V<|8 z6a_T#hm{AKB?vqPl~hL(pl+w)f{n)D76eZ+`8!DWn1ou69Yg|0iWfn7Bg6sW<3&*# z#S_1jC=5pHc7M$)2!cM?n}qx;o{;v&FQ*cv5|!|INTj>_M@eK<;hkyVa$1!g0OLu$ z6~)AOGHJOjhG2`rV|Tx6#LiiVcTfOrlu2(A=u;Y27}A8yo*j6ev`~0arZc95{7kD5 zSIdM;8K-Dl?k$ynpE6=+&f0#2T-BotEJE#U4O<>%XHoA3zr7iE0dgq_G6pz+aD_jG z?W=$6>K9d*-7w%HDYIt5Uj&@QU@vD;WiyoX_-{)M#TncHTiD38CNuyIaCKpz?i!4@ zPJLg7R+Ln|OFJ8Bn3_rvg6>i&%L0!D4aBTN7(pR=8EREod1!=#ae<0CF1Xh1n2U12 zJ_1^!`irV^&mnAH%)%ainH1AVcixK3k>^OSzHif@#qGpD6yU=K7+wPfN_KJzZbhaG z8JO)b>V=vLSgGn@V&4^W;2g*m5ehANj*iSFwLNoir`a>xUMpAz2OJh6ew(f0RCvZ$ z93TLJ%!@UBE5(o=K`|(_!}zY-24ort?}{mLB3MMXK*y7?sVc8!Hm)g1IU4H+B8~>H zg54A1Bp`s7U;-rY8FJqsq5lTjW8Ne+ASXnjLe(mHgz5*Jskb5fSe7=V6887P#l|m& z=0*!xMpn4uOEvP^B9KN68(R??%ll{-hzDgu)?5G{h$n4_AYykDNF#OdWi)2x1|EX9 z&;=2O9_orm|3fpZH2L^a^l zwqja?Pr!RGP@KflqW3OD`33sQ)Db-B5v#+FO4#5uL7{kXc0$2ME1EMmTQ(3!6;-+@ zt7@Xx3~e`tx7I6Qv%Cx#2^s<~Z0*|aLnP_diuNyJdhtBwAwJOtyAnl@0zBBLCp6Rb z38Z|BM!0^Az{?JGTg1t^?|gst8`l!-7Ix^7Xim8-7A8Hl5ZqtK?cIi6<7xOL#+8EZ z`>S~H(BGU_|GmbR1$WNcP9X%K{9)PL0_;PReAAzTe}0*ICM}pl`V%v^adQF|@#L}k zVRPk`0_OeBr(SLJnN9l6V@6XD)ShxM0uK5tG*NG~q^F)afe|LL?I%yOndi=(Y9W-f z5vk*Bz!kbgZcO}abJ7V&UGldY0-PW^_)1$tILy9&fqXYun-o2GGZ_ACEIQyZi=+65 z+=7hS;p9vk{A%U}#C-YtdujVM2+H`7366z#`Di2g=QC^Lx8QgS^uTD2XI;JqCbI66 zt0hCF1~EZ-fz|uzqp4#1VlI-9$P2Y#y!_=eO;J?4obPW??(C(uv?&6GV>3}MAdErM zY026~?kLyB`-}8+eo7UY=OT@vddy{;J(E8({V5%iPk$}49_l&uNw00hwU>0jT=}nG z+E9c06~2ExUUo>k_$q%Y4({xxd)~usfQrs#RC#T^g{V|Gr+k;9z00 z(A31>$*Jw^Cl@@BP%EWdGU8V`AuSkH$7NXUfHT&=6AiV^u{4R4=i5$&xjICk)Vr4%lf zG6)4Zz36=sw9o4+l65w2bfbXKBP9~h3aPp6eT#-DT6?HBZf(s413U2YhSN9t7b)i# zK>KzdozqY65`Y5*j~%SV8EX9Nf{K^wEbw?o$%DNK3ALfc-vW`FK>pN)UPVX?E?M?uMF; z4po{tL4G*OblLPf-*w+!vtx<$vXKB8Bl(A!uft`g*IOs=uK6tV%Vidullv-1aC@U3 zlD++le*Nt?5f%y3u?-8gL=Rpj#+mo#Z`?0gkF#Qko@+*Q*X&7^^~)LVWLiPj)=Yl} 
z4mpbMO`Fw@QBG34d-UnvG(X*Y*jAs;>YN@?Mn-U;hX*r6>6dO>lrgNWlE1ftL;T(+ z4?RD0L8Z+8#+Y4pK0joI_*Ue6fqFQT7-9!`({J+!5n_gy;j%@x0W$X6Nfu0VB9Xqe zc>esG8731_p8@t4oIYgByB=ZX5bX%^nqoQbr05nrFJq`}m;UJ|9qe=F$a49YtLVF{ zk&3i-AoZpGy*)jd%g2+HbNP4@M_^e3fWIs7$Ar`i-rn9YNxgciHq2}OE1j*sjVg%L zHx+PI&5+ci{dRN z;XFh2M0jpJ!lN%3U(I~_wBW*AMKTMc#!uThRBn_h8Y++gzg76zv*TyD1-kS#=*NS% zs(Ix(kd9QL4QOx@9({Pt=L9fVV!R>+Z_@D;VUBm)H+)2)hw%=z9MTR$x?8xH!4FgL z_uhE^>2jQx5HXD$Sv@n`3Gr&}l;^l0e-$OIkv?$S+~K03VfGGTS3^U?QyGGb;U+`$ zb0q%=bJ6dwSzoP2IV`^MsWeGz|MZQIxcd(rc+=a1{`m$Xh76624nsC@{!2InE#=D5 z_#vR9zcMKl;-OxY#%*?!1!|^8SPp78F%fODZjRL8W8x}pu`4#gWIGWkw$u z{9cBFfM3VYsI*^TA(Dr&#-R^Oha*e7DeO{#GA0au^hK$ zW@KF8Ohi71pQV?Yl%gu z!m?7OG)hd!yos?u6h%s-`j{-(#tJ5@%DjrWFyv|*dSSiLltwZwp=>>ft?uDVjA0J| zXumLjmEqNpWk7vVO4eifXJ%%yjjy)8fBy{o73Ork#%p;^RvkU2;YsBlZTDg83h*26 z8|BWXKTrAmH2DUkE`FQ;X{+LW8gnC+-O#;(Wznj1r1}opd0uQ7uqFv*b@u-0Zj@*Q z0V^PA8%wQQgZI!?;_2E;12Myj0#-V)8Q=!*TTa2IX z?d=sqr;J~0^s~b8ab%}qQRX(cV83XF_OXB{@}EL=A9e)%}v8}=Kar}W`J4Hjgws08rho^Lj?lN z`o*ScyS20~WBeT5MQ#AaL(QMB$VP?a2(CEjmtN?&lBOa%9V>z+)#~Z#VTGMXT+9vZ zhOoD8OnX@i6W)DD+R^u$;nH~_Y=!i>lreZk+wH6G0}I1$|F2~d%7@#bE6wNxe)AMrFw_+ zf=-!Y>K1Iz#sEV=p=#oPQz+=ur(5Cs=?Uv4=PYgb+%{~^P?*!5qNtlD6<>6wfOM^R z6G2-RtEbU@a)5sRQg`y|g2bdG49ZwYRx>g-;~~W+O#%o+moy;5YkSi``K2MSP0s84 z26m`mEM?a!s{@=+^!Los-Z$M1kve;GpS5bn98e~(0m!#DI4sgBQlgJYX%Pv|PBa6n zJPg8sZhBK+d%Lx=zsycyr*==KG0><(MUj3Wq4g`};LZ;Sf7Vx^u3CpXrVYyINE8Y1 z(28gR%%Qomc!+O4o9BR*hKB(KCeX%*@?grONqe{lP(1c42@3;6GHj?dsY-qX{gyR*7_V>8` z^k~9X$=VJm3u8Vn43G80ly@1KWlkb6o6tefT5bvof``{W!Nv(N!}55qASk)W$aXfNFIk{6xtTm~Hd zo1g$ZGOJJ*4tngX;m@ilivP|!j6ReSS-3DOJw5c<8rSe+N1peOd69>mK--#AnXp_YERM$hyxW+?NQ}U)Y7gC^0-rm}Qy+tM6y29)>i^8GN3wolq($4@0dinV+t;_qN;p0YQed2GTW#z2jsy!kScwI#kpJjLL z0YgJ_W(L0u#TRLT%^a^s7*{sk89i!i)?1bl#(ZpT61Ba# zq+wf9yn{}s8AH^A7$3Nj5@6K|{M7e}ReluI`Z?fB*GDaM9FwEAgh%0LqGLM{MR|Z0{k)TjEz0cA-AAG;w_MbOxG7 zB{nHNh*hm?xvT;AXOqRYjW)Hhwio97|Tr9(-nSH40yshgZgFlYzM882%+f0&GW6jue zZX?+mD&obg(ikJvN9mJ5($Z_-c!j*S+f+SopF``t=Y5;l7dt9G9A$0qa%HS;Z+Q|E 
zfXSodR5!y!%Qs$QwQ8;mH$uu$Ym@r+*2V$LGX@6M%T}rvF4ocj3NNHvS9Niri0;*p z<;kA2Y9%D@zjaCfvA#AGO}ITLI(^0Jj*h}7S&tH0&9}0v9)*gv6>k~vFYCUN6CiKa z>)UF%wWh`K^_4AlTM_lp+S*R%Y@*&tOkosHQEn_Da()vNxVr#H;i0>b(wP#38T?Xq*?y*`0x0iF5X^&KBvTjWE>9d3)N|^CwZ??$X2Paz!p!8R_TfIw;r! zg+pW4mEPT5XyL3U;~S75{$CnXtU(G)Mq!1+8F7X8v5k>~4Ai|{powEgy~solrNBs- zBxk%TJ&|jueqF$(y6qCTTW7_&t5H#Af!t|^$=c0TnMI8cav{uouZE|SuWy9ci-%{& z`(hbhJ)CJ@c@0?+`iBCbXHoFkquMhgf3i zbFU)F`RIjwsj2KyZ0Np1i9c5aM-L)*ftPL~joV(@3G;Uo-C+Ou6;N#MdgJ^Wf|=iAsX<>qN8N#}6%>bjqK82_J~z{{7XZZ^)Dal;(h= z9Jp#gH&fzrQD$UTROqv<(R2{`$Vu)Jq=Zx$s1IFxlL+A+6u*Z9hYB>4G_7BZ>A;4C zE%B1^)vGA|P_~|#_u$N_Q#=B&E-$_ulwI3F223c`Dib#udwBs^rXx8 z2a-UO-3|v-JwiXW0pY2xsR@P#x*pFCr-_np&_cvNbHW~R5NYKkJN5GMAy@KkRF~8m zB9qyM%#&o|HYsn%Cp-)m?L_nE%u%Wz1c4AWQ{*yA68bjkv*@y;%Gw?{{~WbqAbMC( zE8uaZQCp^QuSUD4(`@H@V4|ZQ1z^wughsXYeVLwxk}|Tg9XJeltsqc6Q5M-f5j20n zD1k_8NG=y`_a$vGPO%_dD={?00MzX_o>STmg*vH=ZyBV8yOiVY6q4NZoRG8@B0^}S zC<=zQBX&3(Xz0n)*tm&at5GlT041nyZT=ZhI&eiY)!J{4l+-R`AACBxfDgknL(34WoEqmFW5?zplBn#yjbA(9Jeqw3@A6%7FQ5Z%&GeK<>ilt5S zEzmSxLTL)wPB^qiG*_NPTha1ec-E-!r*8$mg5@cSXAqDlIh|lP)oFC{2YZTUr2ukz zo7a!WBCd$$gp3a%`#kM zJwh~^@N~Tl_E-gbb6_7NQvNqg^+n?&-`Ln#4CA{)Io243w+$Lr3hZrpy}{nmkwK&M1mG6Qg!) 
z`!uT7#&9})vd>>@tJ;m94q^xl(WT$SgvM6o89zmOk^qL4QNlH7WU|RpF0!jDgHQ!> zJD^9^nKAFnOEMVO$6K`#KY0cshpjpV1IA^+&>7} zng-oBVO8*-evf~sOKS$ZP11~QIApfwFR|v5NgodaIdN%5DQdcBtZl3A?pePkc>X?x7KTh*xoJXwecYyhwFBixBhJ=Gn zXfE*RLMo7t9wmbI+}&?4Gbc`*XoJ)#K%wNmV6srqU&Z*!nDwL8R5aS!(MYHgsH%ak z6yl42X&dnglOVrmvGIV@bl>g|AEw;)!va3=^hzMsXHDr41ScgPDE;~K=Q-!=m6x#Hnio*(LNh( zcUQ~5jy0{ue}B3z|6f^7Nt>B%bg%&;CP1mZHA`G<#fs=>YoJRZpE{j)VbEM(m2-+W zX)+!9nW;oYJywl3K{0cbDMA@{G|B_gaW&K_w1S(OX#4^t6=PqfWPZ%85fv8~r&z+) zwl+%RAJ^v#v{$n3BqQz&`Z*DnaDYr5DA#n@COnwbi7z`p(p<0yRBj&%=KlX|>A{l# zW0V-5+F7+cU4n{d=QB&kTP`xXNt5P z?=ZQ8|AP^civJNZZOA$(K7$$w0I9``O+&V-Ov#DbL%xZjU?FLfZOSk|v0=puqWR(w zqr*dUKEHbfm;WG}w>6GdYxc`}3U=Ey70pQr4`p}Z7o*$u>_15cFp=r7LL+?#d$0EA zjgT4u=nRojWW8ZCBQyGtfVho~j)tQBW!NZAO;-0ZYX`mk`7-{UAO|6UqmO6dfD-le z*|ST`X(OU3Or&WCh4LoiJ;Q6?f5O%uk|)>9{QgIiOe}VyDi8OYTusa;tFm?85eWGJ z4r5V#0-Uyh(3{!;57#4Sz{>g?cKr3L3_o$8p_r8Ejcll^p&5QR?-tR-W+c|A1HVq1 zpE{(OiJRemt`S3n13I3r%oUZE-tD$`GyB-FV+FuDNFfdozO}2%J1Qy)vLQUoqsWp) zEAydcF#ZLE2q{JJvl;pM>ID62#0^;yYhQz_$Cl{t++)nWWumXIPxTOZ;L9E!s>hoU zv2{JUugMJI2&9l)ZJ0R)(4Lr>m=d7!`y`y2Wg7R#k3-4RvXhD-D`0-Vfqs|C!p@!K z+}EKki!$NF)Ohh)?Eh5#^`L09L732!<>Lt!M>^s8ABKcAR32!QyXS}#g2Zz`4uCIq z0Cft(!7Wm%4Q;Ppy{g-8DD_oL< ziG18DZ9V(o$0wQv$!!ELz!ArOas(W58FH<E@=~$g3>_4Hg4T@IpfPes| zF)DA{l7)K!Oc`Q2za8NwLi9Ke;x^K+M=dh~z7^Ngn!{_H`N90ifp{ zVYcOF4i53m5|jHwQB&@L=yqL;Nv_{&GsLQYnJx>?TJVpQGn%mUfD5ub zPq@2hEg!Gz($_&(c3_pz_%EV|_f9AM zF^-5v!4+RWFe9bBI0w;hyGOzUJ}4&xa}nC zod4%2wVV?@-HQrxbE)X1p%y67d3^2n_oou4nO3}jP&|BhFyLGPb1ZjZ_ip{Jqw9a=(R-aP z`Nt7{!sV4w-{02x7ePNdVQtxUiq9hy3}hu!cd_244I37rRYC!k6|1XqoV$EH*O`nu z<>sjq&W{`73$8pM2qf-2wi%uWuD`{@C8MIEqSHa;=iV*e$<26=M*j92sS<;<@jrLY zoLA^+iJS)LnTJg)2%RgBg#+scMa00E?a?=I%gEu1#H^pe$47xw6A~H{>yN!>GG8;w;;ErJT{{F(|{kWIr|xJ#PKSI6s|vCM#gjPD?~jB|>f0at^y|%M99S-T^;5+ou3KNKP{vq^mZ)<2*`YF}IH8TU z^YsA{_n;JO2Qw=M5FZ=4XL52fc%)FA_(1cW$98)$`{pt+;oh^o_R8wRCpDVg{oZ?wp8n(6}(U^7#f zLs%q@CQqLfybw^pn;8a*<{&1@k`5Tnd-dyE$Ny~FyxDw6>;GAUhk*b*_0gd4sTpY! 
zmNQl#@`8~U_9wIpIuvcv)CcmM5{ygl?)^H9AZ< z|L%3)Fa6>tNy|9PPwGQFz8JRhiWbKWd3R~dL0~}@!GxFMEWlPa^bOAgQvL8=nZ{5}!1Hp^+PK;l^&4L~ z#uI0vC4L~AoTxTLeR_Ul^ehA~y7QOJra%l09uhvW;)Dp?K{=gqC^j}6QV-W!%B{i`^?qv=6BC2kow{$= zcuy-FtdXxRX3fPk3>hHZp*B%p(~ZlrM#$%kfu2UO+2;_ zAc-fbqcEsyP3{_COB75e!onk(8@opsehloxN^D$fNaU{?U8D2^Km54}gzj&d&JZ0P8%sCa%!PR@QiyC8+TTQ*EPx2+%4!qT1!i{E>? zZ(j<~_cv*@*y=2>_+ZuvK2B}Tt1HZOCspk?7CS$5S$DUz$kw=Ql?>4IKmWwDBL9+o zbNwToo`GbgN1?xZ7`qtUKq;76xI(hkFR*n*AAi;bI%fH+Dr|#Q4@tgBlzS^S*Lgwysv{`fU^t0nC z;_Jsq9WGwZI$?L6&#&P4BMp9!Cp#yW7?aKQtrF zr#>rqAlK^A!lHq=B_2+{>}=5p*E*&ucv12E?Irej`9}-SKbf1Zaru}-g%kaIxyS8@ z+tP-ag=gfHytl+}nS1z;;Vc!={e7)jg?u~Z`yQ`&`2NbWWH)WCE0794k?lFzTKw)( z`o8rg`>uo-$4mVX_Y$z+1K8?Wd5FF0z%*ao2ie$KSA+IuvC%5PRgyc;MGqYvOlgAoT8dM$Y35)6MZ35joomqz{W%G`Du90V4c)#Vmert7+Of z8bV*FurH#_Pdk{M*^~um+&-(wW=HZf3>8scAMv2^Awg?^_;S)a{q={y4&!(aC`#>$AI;ZU9|JO9I7`Og&0C$2sm6JU6d z+ovmFcNRM^^p^&XiEyTqZzw?*$QN(KZ`)i&w_64qnukUb%V6+FK4#}3;#4ay*f^7H zOEA>R7p(zYO;K~Cie$yF3UkCUn8QD0uTNVcIPKvv#JP}0iK62$Ruio-2y@$b1h-mu zHtwVxNeU?sLPZE@ZYP~a76o56N(Ljs&9gis7$1wWN=ezmf|RfnfwgaP1EL^pC>>W0 z)UAlLS4gPLzBD8$@2)70cMm|!fa#s{0L4T>OQ{DIp3uIJA3qWwggMT7^%cpp21#v3?&uoQ>A5PGo-S*Fed-X;QeBh@$*Kdeg0Wj?uxDXt8|4%Mug%Bgt*ts z-Q68~W=*}h+Igs5DPWgP3*G*xxyQsz;eWbOmjLP*gXju=IWf&N?icjT5Rf$C$ou-W zZYNd&fLp!#K8^c|OBAB9#1>&?A=8#xRb8PW=hMRIqNllktA7@}9D=UI34MzT;~Dt+ zix`!*-%7gw_MyL-o@6fl&5R*acUWbh<30G7U%EIRvR(5_FnHED$RW&R+6Ew} zq$Y~qJDpYp-_2oy`;e3I&|lxbOTqqU6sRP{#+aIi{s<{~DEd>11@;>w$0Iy#*{eSf zGSYc?UDBxLKbMo|VG^Tq92_8oaLK^aa^gATyywN*5Mv0!TLo)=`qJG1pj{?!7Wl)- z*3&3FsSvJo;42%2m2g+4xqFyv>+yc%xWw`0uadO;`Qb)qh*t^cUY98qvSQ30AhqYqn z_H=g_x4qLTd?rn{yZU|ZFosx_ z17AeW=PRss1)pMu+pA}vQ9|>#MvM~{8~v5GBm5^-euU@`LoPFJSMB@U%EVETT3-D! 
z$x7+n4<1@dr!I$PVSFm?oe~POad9t)hws}D2bNk{ucf84hCqkS^(CSzrg}@g+Z-!q zObb399yrmB@IBz_UUrfjq;hoCUJ9&Ex9Ty>U&VJs-OGewbQ2bf3srX|u2<2OX>t+nYS=%{5Q z>Y`A_o+gm6?se2c=xV;BOu^#QhuE=U+p?MZdSx-S9M_do3`@C`G7Gp0#RcI|$Q$V& z-rGhy9AH@TE-cu3 zW^PA1Gl_;Mg9b&mwt_;6CYR>@;0I`E1yOW(v*Ad?RyjFc^bx)%91#hi!il{^fc4pV z%1y>sLkPbli7EIHqH!r4W8p93y`DlsABdJfbltigkeS#7p70bzUy%=WJmskp;7XR_ z$@4~SaijgkKXoyM9YKynu`Ij+IGBT^^B|j!G3*zjva-4~00UYF@rUPdk+m-U$4Cc@TSTn6XVKkN;}wzOM3o%sHwtqavuT`H z8ChKsqM{UUZ4n_VpUA8uVGl$-zDB|dJAe7SM^mAsTq|$iT0Ku-Bl9+DF$ajg05#GF zM}x6WV(WDcn#qJGj*K8_nChJegzh$!$-$waG;+yO?RSN`&)b36_2LOBcPN>RH@$!W zzqCVKxU5LU9Adn^aib4{>?L5MfiXV}9c$X5-4KkD%oIuy&@8s}V#pL%V;5jxF(BKB z9VV3N4U$QT4b|`dch49JpEkU^zrvTfrKF@J3}6U(Knxx@p~@i~c@cnd+~5bl%^SJJ z3~imn_?sCGDSFF7{fw{ff(U-ZjS-E%;SQ(ZX`~sercJ1j@lC~md?lMGLnrqxBY|St zn3s;4bOYJm%!bmvY3k%XW)z#ccRC33McaBod|*XGj=+_aNOAsjazyL_`h?edd^n3D zk3)OeX~Ny{J1noT)9Yd=C`OS(0r|}!C2h})y}S0)b1HW76h)(eHU6c^sucE$ zQvCQfMEvgxvkfsWT`8CnNpDie?6?5V|H>DDqTnrh011b|fNN$>&I=qYkYqvKpkVFl zG;;Ll%vH)JWpUd88Kxb&h7c4;qsXHLgN^9=>+LAta2`O{v-3u$WK-nBH!~gGMimRt zF@hrZsDr~6Ho5lce_D2q@Y+Bpys|n{rO20my08-+IpX}T0nx|aN%zX*o2y~u=ZVB3 zz!Wh%OptXH3~AF0w{49bTNp-A2VP_@f&dVK>g%IT8>hqEkD6YD1vQ2B=3}n1tI+)C z1=pFKN#HL)T!sy_0$6mj%o3mf_-dH#mkOmw*Uk@;GgQNvZyRaw0M-s~L>nzlU9JHs z#+VI1c=^p2VNvx1$;w(Yb?8m^pU87h>T4cM*|NJqiv6ob;j)PRS@q3 zLYZ2+0NeF?wvSK9TRW&E|MM`O6j-j1#X$`C#?-alN@nO-I>ZCiuZX) zE3VQvLXg$@=#*38=gh2sI?QV0%2;?PiFIL!u>dnDGTk=N*kvQ{bM zf}`4kMEH?q%QnUdN4hXH&dOgY5Bz)>PUpfqyNfQ@QcMDV5ElLUbpu74(BNrAPS&^{ zu|ip$09d*qEU@lS$Q3Cf1QEMX&673)_YfLSKX^yp3~~ZzVCFglDz5iFZQVB$6OOjr zpdqV1Nm3dX*OQbWr_ z(I?T)3V8GAfe)d+#^ZW<#Qc%GtxLiGW+Iq^Yi|5PVTdi!N&Yz;AVtHC7L-9p=I^z# zEKg70gbjoY_&QVN)ih|P3Pn^M+FsGRyybi$W2iRjv8 zun6)(5G1WaP~wPAJju_`AN%9*&S*e-6cI9R;>6^74n*X6eBz1ML1ReI zl}M38ebSCI2b1MDY|yKH01(#!&EApxCBvJEZo@iEsYAdyD;QRv6om=*Vq)5kRK0o?2%V!Q{2klz`#>0oO#5YAaXK^(?KqgL>vBnji}<9Ii%ef} z@cao-ar9|owV@s?XH5oI!7hfUqP@tNkBSawQLa_K4L;~jDLzIS74W||EES#jZI`IB zf$00-Xy6R2?I0!#6Ybj!Vk=-M!tN}}A`Zjopldn2^&8pNT$9+k<6~eQ?xjph?24B@ 
z^&Ls6BN`2GvhfJS2JqN$(GkH7+J;TYxue_Y;t9N+e~fEuAVi|bG8TJM$324riDyO` zpZW}!s}WSn^^<5E#NHE6p*E9>(kmVhbJdFf4~izj|Qghq}T6AlJ} zq#j!eoKdL*Yw(Xu>fA-&q>9Fe(ql<+d8-eO4uVFI+Vk}V8kXK2zR`AsD^tsw!RXqg zk4C<1RP9okQ5D6Fc;h*&at&&=?Ujz;M@~F)c?kP6K8ma=`Ck73?qyU2B+@ zd_o{8E(lhd=6GVP*NpaB(QxWXCAMl^9#gph2K}~(a_!R9eU{G0s2PG@a`)|hBe!I2 zvu_3u2q=Chn#3(hCkV@x<@`7z&FEdfOsL78J=dvFKB(X|_Vb|-HoXK@@f2$p z71lfMr(lDiXXhl+UfR<{B#MvMS-#QdqP~jv8@Z>H(^$|tsYynd(Ud15^?9qXKHW4D@h4% z&7*-gg-+Am0*OfnBz6dPL~8n8c&zu4fW%BDipHAY76>8QeOJlgAutnycZb(6=srBFGjVv9AqEbLvJaa6z@oVxARaFLBz`RDO0 zE(VHI`NA^ffIW_0O2?;VKJqi8$@3a)vPZ1+6P;Y;e2Te(ERDvAz}u;3BP==XT;Io! zm`W_N>BKlHQZr}aLh*`K5S?YSyBe=2X89Ce7mb<5Xg=F7SR=&SLN?h?78LY-*AQY% z?aHV}6XOzmOQa#PZ7x+{NMqezvcSP31d^2@T1#F_95t+N@i7dqQId8g$Oj1CoPTJT z)~9~!6+c=SCJ(H}F81p6LDzf6&1DazoO9&shnk^aTzLJYj_N)SENh2~ML0i(ZwY58U=1ga2L27a#&Fw9unXd7x#iP-^l}U_wsbyk z`E~F4yBZAD!dGCC@h?!~w*mATD#KGyOZzadLaZPLOEcSjC|Bqc{@ zN^d%MbkwIRACI#pJN|cz+nNRj=xY{FNcP3~y!dEB5~d!1>!%@~M8oThv;P*mKr)^wVe@Vhv*TV@>^l z=ZO%q#W{TB(}!rz_>6b%F~bZY^032j7X@nxZ2LKdbBC$4V23>n6)8P+@(+q*4_6!}_`leC6R4c`_5J^;L_*3gV}?>BWQvNAgvuDDP=;)? 
zQc1~Jgk(+`GbLoI&?u3}kU2xBh{~LZqD=qSoqg7K{nz@Pwa!}S?43N%=ktEw!*yNv zb+-d3!qbL2{~;plbp=vD$WF5VPK%}DAt5`U$*8Mv&=%ph^5ZEPl49A_V%41 z&O{6=xwTXuHQi?L;HTAJ7!`T+@P%TK^{EzCsaw#FNH-=ECBpFDPbUykR~0|4q_wBt z+GcQ+8D#M;xnHCj`VI?nsbvi85UxC=fBsVNQ4l{g_FvBZ#x!m=ov~m@1B+_ew>_ff zg1MPkeF9N9lJbX8ce4{B99*N(Qh;BQ3Tj@5;v^aljrH{vqjpdLi8*&zG}CzR zk=floGaTVEtn(3|^OAr#2D7AZ5?7W_EH zB;yuL3yZqfGp)22FsUtb|3{3+7ANSW{2bhG|A{^U<$Zpy=Cqc=^|(iXQQ6Jt=7co6sj=6 zAMs}uF2AQ&p%u8P;AgQK4dU0y_r$<~p0@4a_1BUgFU7=7-V&frYV{Xj!}k*4WgMmA z&Am%Ir>JCQIt^5KvQE^4C3qn@_wNJHQ6EvuM*zw&5v!ahA~ckM&U6Vf{UMW--DdB9 z!%%i7J2?;^&waUVSvF(Fj69Ptju^<5EM%kg7EwPKh)A2o(V2-#pV=R*9ApQ(Ea$>W z4~5n{?%N39N%85Bl**e9yA+=gjGVw7o2dXMPHCTHEy`^Dm;OEv=Gu2ly$9 z_y1Wj5aeMNcwcRqKUY0D2}^>!q7~E4elf*=lqrP?u}ly-(S8dcPnjr#1hW6;{Q2{@ z)tVXglBheL(_Hge4;_LpefR?Hqz)nr)((ivt!h#!l9h>z-Wt+(Gh#dj$Z|nphLYWA zs?l<{A+tG<;LTg6G!-*w_|JK}k4^@{BrzjiIY;HNllG#R3z-HA*-vR_p++76F6@v)LA zeu}sqM^!uznVZciHalH0yKIc*fN_3UH1bw)ktpu-&ogiWTm$u~j4v!-E+hJt6U9=1 zb(wZK08mz^T1Uq!pGLG-!!8-%WEj}hTDAK zMxJ9lC*WCs%;FdkZUaUJtS-ES*mH={48XuFge^ksqD}9^iM)LMp30z0al;gz9{fEr zwS)~CqjeQ{_cTuR;4q1C0VdLp%YS}eqjIp8eS7-J)l;<1qbkb#nC=UBwbQkUzAWsZ zu6aZ3dwPCp+QEUU_E}x)voLB9uG{y(2NM3h-bgTGoWv76{$YX!a7b(xg;0^LNr1;` zCNuXd7+Ey=l+OKXQvJK6dygI>phSN%NW^fwPiBD9Gi!bDl$A^ldPunzfmr!oZRwxV zyVL&1h;J7?^x!%OhLzV~(cd8-C!*?qqDkQ0ysCB5#>v_XC@Y_tKdolbGTDZS6++pM z7QN=i_x+8N#kTk9jN*$rOCV+rPZ2IkeNr$=ufWsg-Kr)N@5=rV*&|G~?!=2R=m294 z4|o^vNm-;jizFomX5%!pJc~$8gc8oit#7}CjBnMSyDKln;5sviROn*~97GIe?S}K} z8w=p=!Ep+U2E%y6yq&S%4^dPK2>EyVR`CM^fpFhh>IZ%n4@+Het7mR;>eTbZUZIm@ zWRHZ~+jK<6UQXKK_InpvkF@+83{H2FS zEU1cfd;d?$>+6#l``*H^IMVEtw(K?8j4%*p^6U1^k+;k4(r>;$yxTMT%WpAu63H~v zgd6KMif_yC4pDZXAfUi;g4xJX;#q9*eQUXlT0?XWWbg{fuJBN2SzqF&YcVPX^|bxy zp!auQ-)!>=V27%rf9_(Lz(Gd?U*owuppnr!OIJ+O!ml7qp=JQ=o)Fb@pik%;u{WFl zZ{V+gXUrIyy}WsrxTEJKkEHDad?#F6@AdE*F@1h$aA4Xj#$ef=Af`Q%+-GC<8$I+- zJ3=-=SWI-qHB~mONWN_{q`qs|)`J$y)fc75EEl&s?)eyiIiD8}N~><{+#=(<+Gia+ z#V$uei*SgT%Yy+Y-?dIgT~{cHfba7B(QhRFBSK->BA;)w=+VUWBSi-Gr#ng)4O<3^ 
zV)8cU>9fEzaSRYtkOF47Or?OqFx54+PnZ5kL}jL5q$W7S(lND4-Ppb3jH;2$GcS4m z{M6Jne*u8#z`#Juve!2UZyjS=CtDx9#cPg5lREZi{ifXxIsWoQR&DbM48Dn%MOIc; zNx(chJ0u+mI%z@WH<96zV8_54$|6X4b6oPV*cs3=vom#@iQ6%;%y~K|?jd-uB;vt~ zFB$S7%g-f&hY>TuRvXVU_WA!cx%k8^4mx-|@N_2pGipQujier&j;X#>l(4jd=FD#W-fKaA z_U8ILdV8MfUsD?L8Ea~p4dSR40e@U&m7y5`h-1{nEt%|z? z8@{Ew_gnBLP|P#zwh#6$dvfM#IH8Y%b+U35X$%Ld~D9~2@o}m%0%6H6VS4kAcF@kEzI2>0emehp0%#Kb9*Wzsd zMJePYS#wFa1Ee7977`Ud*ZE-f8p`7BbQ@U(2A-qmgDYC@p+mvU@58@)NuIFF)^bR9GHkkTdB7J2q5p z?2oKtHXkf3`f97i!w_VglPoHklYTDg$?WpoYuiGnE&pA0*= zYhU406`C)&4Xxv`=%K3`nPeX5MC-I=Z@{Z&c@2-r>_0GSQlcM6>FDyGMXjrnenB|S z-L$h`y^jXG3t`7)Mp$}0+yd@XMa}ik&ZWkr%*~PIlN7AD5%kG5EkSIM12w<*c*=L^ znl#`hyBs(Ff*QR9)$qZ)8+?w#2=c{u33ZuSuggq0cj*GtBAZ=748EhRdiyH7ze_Di zgUTu@%%+Z71T-z$e4#_{n~E<1z)jw=?E*UUYi}f%mb@0j25Pty;Buhu4nOzhs2Q zM_nGHXqL7OguTU>RhcEmV^74~ym?c3`yNI1N|aABCoXl=1<@9Wf5|8WKu~RqLi~w|8M;8H@qPm20ZMC z4RhqRK4eX)^oFUPy>4xZjGVi>aWZuwuB+Clhu~m`basiig5pn3-}`041M1I)?2?ji zuzB-l>u)6h$u{Vmfhfo!sx|a zK>4mWBU`mly2P^#2Sw_%8#Pyq{~DXP#?T|#RsUJ1 zo^$Bl81@=sHnP&*@%uu$JyT*#v(*DGwlH~kcz6p@?a0rEnBG^y_#2?lfnGzmB%HW7L-Xz_frDtC~o^PjG)4Y9JKvGR!lUB;R>* zrE7f%p-7mRbLdSRNOIs`G_H%cY|~WEcNp;}AG%cF8j&tCY-ykVoQKeDtAWD2uFPun z!Dfijg-xPVY|-L4^=vj(E>b8*%zI@rEiE!Zd76`Om)M7Zk#(J~lz(OFroM*-NWiwR z%R?j2{hfvmAC3`Zy6>C2dg3Z0#*D`YDl5WsD9OI^QyncWe;&(An*Goo4b2J)n6lt& zfjMHF23IW$NM=%n)G!FLS&|}KaJLP!)h4Ssj;3WiF0rKQ531vxzEe^0U5}G0diYTwS?jWY~w&Y3G zCbMd^=z<&78LSqrA;R8KSHR@Qai3*Ovzxok`K*sjIm=OC$mmAmqX)<_Dn4Cm_@2+W zY0=j=FK))e7wjOz8HiXyeDA~#b3y4-C%ME@K#A)n-M2~3d7MT{uZ>btWB~)T4Sizd zEWVJmE**OHdL$IfeL|?+Xe!`Da0~V4M%$$-2j!CZMP$vIy%?lSM3~e-Off%%BrV?2 zCTO(m7_e%V{1@~3c>d>qm{(r&oziPyuLm&5_`^|gD*$WQj2^Iub!|oQRH&eUN8Ni3 zA0ze+bd8~jU3h(ky)Bw7f)bPa8rs%qyRd*B1!tWmBzuYyl)1;6vZ-cGcmc@v*Ak;- z99X9;&cCqyk9%TEL=OO`CU)d2bQj^{L!9H;}AU&15$HWCG0|m4nLm?Jqb*ho+r=5TQCh z!|gH;6aE~0AMb`x#s);o&yo!7Jb(v~4o>q(?^-2eX$wU=(veCD^<;{JB zi@q1Z&)MGE8B^%i>1Ic-A!cXIZo#x3J>feDSRzOTRM|Z$@Szj)}q- zn4yD%)+Z1gjZh7vJ^yLY-y*F&jC^zSZnJsxvaWl-!B-qYmMF8u0z1~zuN4|c;V)1d 
z1@^~xqZDgCV>SV0P=Ru6CXf>6_rC-T1Yn%a*cbr;B2CiC5sGdREanikaxfFY_2 zfkc>td&5^2p<-(eNA@Zj3Rx6t>=d-p-7K3?F+n@p?yCWm#YLJ@aNqlMx?!19=dyLF z)J_dNQ(BPbys2Mx%*m#s^R3NBJz{n*Lb46^MA8jJulVs58Q=e~am2C}OwP+p@y-Lv zS&g-|qd5;o{rWwV&v-@zD>*1&j}c^Nvofk?uPgA6lsOU^z&Bi@mx8QYba&i;+4I6L zKMLPFy~=frHc`|YxQ#E5H}HAtmg04BgqBjJh>Y`gQ*Z(^w>w(AO6#!7pmE!_ZEGi5 z@a%8D5d2^sUf_Fwc-2H*4Rv+dU*|Ds{{Bt^O_SH;@#W2tJWMbYNwL$3YqL7y=^+!H z6#ug3b?=w?NTRW9VHM&Nzl~eZ$U-k!^eDPL?Q;%?#ic+xd~QJmgXt_QPU&rs@sW_R z=l%G#vv!GH7k^~_Efc?^X!FjZQcUQ4SYWJ=Qx zcv@q_KSmoURa{V+Z=KRqF4)9{FI0KTPs*Dp3i~n2Gi|qtLl~)35{~$PVI95NBaIU> z8)!zF9`Sgv%MlYn`s4RVPW6_^fyh47~*6#YFQTA5YrS}K~ za>3#&Af9iK@bJF(Uf>ewCUKRZQ2pgj|Dg~19cg(Zd4H#a>a{XXio0%Q3iVkvfqK51 zl}h$Xj+hv?ib>zzZ$y`=i&^8_XGW8ozLuAlX8S`xVy^_N>-ijL*$#no znOMfg(BTa7T+Fc%wb|d;qOrb_k&FiA6O{65z#5?sP<58Q3{m_@quX9UuP$yMTq?0w zG7u3odi>44N39-HYcI)bO%IiGs3I-sG`)LHuYpRdJYDS2l~8CT`j<_lL%|xy0rjv0 zz&sv&jio+A^bo7@kI`xB;fs~uk=1pMzq|Ah?Z`WpcMI5MXorfZuOt8#gUHY}WcxvZ z>-YFSyT^#JFQtT}vCfaiU!j1VFWXIY>*yHg=Zy*99@^t$l8rp7j_XaxeHKu0l5%2)w zu$t(k?lwL1#*Lv3Ek6bBU0OMA@Vbo7#rf-zC@n2m{^u9+cJNanr|2Rt%te+P#lwEp zVIy*fZF#F7@^@0C)Q{_w186ZjHMunE<5CbWk@*Ct+kMpH64X`*Lzw58;nMjTBjy$b12(axKKQ410 zeQIr@6*(@S^lNo64FFz=sT5;$QfmrFhb(M%S#%ksUN?&Ge|3*zu7 zOF~ILvWVi{W<^?dsvTo#1uPJidkWnW_tb<94bo8)YY;lM8xa}9!omlil~fG73TQ(h zkTL}D-~`@Rsn$qu$O(!+_;hx-b=mJ!cmS{@ zGb=i&rNpHNK-==p!Gy$hqDWcQehQ+3=6(FcG8q;(frZtWGX6ML3T7_Cxt10_HZ%%E z<1rXZ?$PX~*xyw;I?$c=9m>vrt+8s3nV2z=f&Ip1X`$FHPMq6bDJ0t z%tZbu#5u|DAz8)y)amAy1Ih|@2ii&3JM#yJY73!!#b$B~D!P zxv#rs-Q(i`*x8}9c?8I>s|m<_R|86y?|If*QE2%eS}dmpRR8+hoX`nv4wx(ZhvDnN2b36+`pAAl!c2$QQcxE} z0R~6eOH~fsGAPArOYzs&RXVBb=ARvZSJiBPCu~3taOs3Vf>2L=TuSB$5Neav4847d zLsax@eV9rN653qo`d^C}5O@Gz!9JtwDDmMUS(yBYwvw`M+WYrQmn@MtiSA2L`$!o@0# zs@Erd`*Yu4ap%f$ZbTb=OLpaa>@K+2{%C8+Dqm{N%4U2A`|P%EAf zK887ga4=ZH$uG zMam|ryKdTyP2ZJ{1QMTe#=NV2Mn7T5WZ@@Iv1&x_@Qh{l&*~)_?U+)3+7o(813LsIxj=wpK2&nY#$ZDVbU95%+645KsYSc(w^M)p z`X!#wc$>OH4t0(%5KxUt3#L?Ld#%J0tR)q$K@xxnVWwifmn9j8q-Pt(prLl-#)pMv 
zgJlq8Q+@6?$m<%s*+8reXvpW=pHl>$lR*f!xQrop?CQHX>33!F1;UV^W`C7Vi3}76 zIVe{am6gRI{+LA<#~;}&sX?}IlK09hDx9QNCJ!FxCt@|wW(oVlP^j!EG3?f(LH52H zk?{IPcaj{K^|LenyuMdp57!j00m6O2wJ_f(=ByMZ+0K6 zL6L>IaK+qBap!h^EdOS|a`)F!+eCM|f4z*5ByyGLW{IS-_8eu}76Z2hzLOCwiG?KL zO-3uhCo)#a`bkN|VZyJ<9I41r;+}q_0(r!LJs1Cm7qc+7$(RCuG0dSwNwy$Z#PQ5Z zk-z+(dp+SpN*M}$>Sksvw>;*S%QB|pnYBYZ!QX`0TlSnkVlgFAT2`&cd6cjx33hat z5&+3iCO&z{FJv54g%CzCB^ za?qSxt1Dcd3igYF0)3k-^XdQV>sEnhfM%%D!wiQeEUEI5;!0SrR!hG+u)dHUcbs5+ z&|`vRHEE?fDg~9eT^RcXost-!OJQe~3Zk8CerN-dD(`JA|D=bq3pJ2*4+ z32KCAkSvP8?w-uCghj&>G#c(kyl81BVa2UTS)u(Lji1bB;O{>)Ob|jTChzvLXwr^I zaF}Mpytj=a!%1RElEY{jo2H7G_@)v8R!wPA!2%bVypxT}Bbg7T@=57!y8p4yVe51B-t?|Bo~B}U%ZQ}HNC}2s23rX6iNg*3 z6Z=E;$Vp5D@?e}Uty)u&Q7!}4cE_wP7+zK|@MC8)z>#>$_};`~KhUK|5HxLB42N4& zp0Jq=v4oivnEv7rXdf3#{}^I+4bL$B+hfII3sBJLusc?pYqp zEOF=DFE?_N&y}a=7~MX*Gq4Bd3rt=VH@mkq;8fecFmJ%qXB0A$^a0{XpZZJ|U%AJS zKn}&nf0O@8ag4jDBO@`zw$+`D)6v8>HF>`_}SmI7aL89j0D4)**11$2~cBvgspW0u}tUmG&B zvVJ|U5P{A9(_tDsimT*!Z2>#PIWc}VBPVgXRi$Q=1@2Y!$Fx9IVW^FH;DIUMcCi^pD6=ZlZf{8a8rS9*_9A2 zE0$TU$vYG zZx-x7A^kZ$QH}bnPs`MO`&tk3OxX~5-t0nVQs3%ng5JG?PUK4E&AXwy{f=&9+_)wK z_gwa8!N?_MW5nTJaHB+Gmk6GOEuXlw;-RaVpDAfGcTtwbpE@Y}-y-l{;0M_YSuy~; z+ZiV)0`jC2Jzr4{#0Ud}-tfcYd*s4H_YrZCYv@HB>0>+j{ot}c?=`hfkGBU-fY4{0 zWbk)3VJ_+IYTol#K#b$d|NO4n=)Qr651&~hAAEC^118Bl%eam5Z7KT2f`=F+TFmNi zWs~qg236vD(fY8U?IpP(XDz<~fDC+FZ0`lu5CsFA1nzEa(SA?}QlrW0P$`VgW4JBw z5rq+zmCyNu!xYXnxa6?hnAW z(fCd4Ki}*8X`Rv85_q(lCt+yE)yO5{tV;;ahY1oLsC}5Q*lSuXx^}=@Ird5$mGA|k zVM-)=jO8!`J%}ntGQ27-Z7EB^rX;997K?^jKo=1fi|U&(vH9-BR`-ZWBSotu9D*Q6 zs1F^egkzt2g6PNtrjwdHxy>r8@L4h-3vmK^Cxffx)YoZyxS-b7*4nHLB8a+xM?a8J zUGlVOSm-%?e2T3u;x5`!+y=tRC+eDm!v7~Un+@cUdbmfO?B1aT@n;cPi?)?-aigM| z4t)0Fb9@cl)4Pt$BbDVHPM>BxDn*6+B<{gt7bq-85? 
zgI>}A?G&Cy2u_-=(R6&Gjh6pDFC8;_bOQK|2!4AVTZ~lk>uW`!d(FeFk%ivb)${mW zIC$pbWZz8zB8jkQI#5BG1<&sL=jRt$J0Z^vo#`kUF~iYh89onrNit0>DlN9v$S7US zI=XZu>id%3HVQf|GJROUKNfQz$LlBl9dVO|PtVxDSEIy5lN0OU6nOk5TcOagZg%|c zdlT}IlPdxVpwmbliWzNQ&_RJK`0@W@rNL1^W&y8dAB_n^S+A` zJo-+ZK5<2#y5JC;hFE#!($FBjBaVs1JVk;?nqxJzjJ6QUM>6`5!@Cx*eY{!TtZv~s ziuTWAH?XVdY$XDKo0SfJ1h6npmc1Rft`n*M`e{+gP#49t4O6=RQ7FtF5eLNeOZE`~ zWQ7*|{yzjIn*7|mjdjK{qK%V4SOKB!R8Q{Q`K6lM)6?&R%j;To!$+Z0T~PgXmI#?S zhl%O^rexO2Gkpd6<$QkVh-enP^1G=x?^0p(WO8E0YK>xVHfxEO1{>bOOg4nPdzLQq z(1y#GFNfwB01cBX%3&&Skbg=~Y4(Y~Ufy37fyXVDe2-@!o{@CZIgdK!4Sx{!Rj?XLhs1T)%ALCxj z-WX$f$2orAxO;SxqZxt(kZC!pPOMKNHvt3@Q)oGYUZ5@e=mNj zP8&~8E4XzGrAzAcF*Tzwd4uQeK75opBA8qWi%GfC7t@6YTAL}twP?NY=~*Yv4?-aY z#KyTUYjBvBp(a4z**Ee)i;}Ud=M(`sPhaR~T1(kCIz>sfVFP#D6Al0Y1&>m5`Ly?v=Dr|*vKO_B{u zvR_cRz^-?FUTM*!8Et0Cezl;eJQ73cXDJS4NGz=YYB`#(U?qTz!7LOT%`JSQd7Zk` zfCZ@zNFIv{gD4o5!73yz9k#Q3mJ)OSpCMJ7e1C?~1hWlu#ALN*RKx!W)9FIP{VHnx zN2XKu=ol;Zs}XJ~5g%gBDkL~pn07Pu@cVh_?PU_%d5yYu42?Dot$3*?@uBeRE|0lWOC%k$Ww1%eS}paoudC_20sBfm8@2^$IArlcc#K^NW6= z{U9}kR|1IWaW1DQWk4@r*%*3AR6cS=RT1727FmE+yx;=eZ*&;;I(d=ztg)t^ki85Y zxa_)%DXP>1a$QLQ_hI-eP`18f)T6Uco*AUs-e-ygvbL018w( zwf2YehonklDQbR^F;d3N?H?t~7wbufPz%Zw%6_t{n`k0Wy1_vS06-<@InjFOmYUI( z&~xFzyxNm3CoiBZ3j6)%xvC<=o0>mv(R_zxgSP&B>YD0y`Vq7mSlrXTe8DN1? 
zpj+|HMgFNZW@rB^(|iXw1{MYUby{L$j+8b~OW+sMbcxgU9OM{D^`=?yzK;NEz7;!e z&I~QSI0GYJCQ4@w8lrIBLnj!$BDX*(dnG?R^sH%vfZIkMgz(dmPa4pd_y7I5W*Lac zDC4JGdO((-w0oBH3m*I3gA^r2<$Tu8$dlpv8yQ_=5>QpFEJRL%llS9Qvv#feeH{m^E_G{UHp-E!#2#biw!cTk;v6uZffwPJsm>nqq4O5Q z0jGdSnoUqLU@|fH=@pBm)qv$oA69JPTlC=kC_m4may?Ks$%J+SmjHooF~>&+d9 z!&0N*GCm6BfsocXE$?e#TDT2)r_VzV((xfCnvHSLLUkb;U7f#XGIrXAJ3C5voedu$iwl87lj zL+b?k6q0(RP9R&2DelFwy${;#(!as9XDBd_iTV(xcMmg7g)&Kf{pL+a;xt#ieAttH z=!)plgc>4jWLHcCI;_Z_f9EJA{q6cqj7E7MS-YE(>2C%8b@_(^f5$Q3{`CL!y2ZZ@ zRlQdnpY2`v=($Qr?kTBp-$FKQ=)5x6=G}pXaTksy zEcP54R*TM9*K(mIJKYZ5wvDbEcj4PA%{R}|`*5c=@V*a>HoV>WKznsHR-ZLQcL1g|b#>QAM(UQ#54|J1`rHwp zcr92kh_WS7XG=(kV}`q#mXi_W-{T~Y78I4AX-<}0^_V;NE*cH~j@mMwWo;exl1jBX zvt)t!`L5K5N+U*$c)HOhx~EU7E<+3pzkkyE`t|FmxQTtu&CR{KSmksG)wkvPh+T;O z9g5?t+?F=MfxCB4*z&rbUOZ6K0vkhPoj!f~Bz_wQqt(eJUcnBW*=4(J-Poa9pQwrC zq~pFZuIpSTOqhz$5iOxMf}`k@AvTBnUf*s>su*Chy$BUfrql*L9OGPfki)LsyJymv zeP15rJAU-&M#yMXsH|Mm7xlc;Eo;VNirg3P-i1;o&MK~6W4oQ;`x22u{Hap|I8})) z*Vr0%^83eORmG|%DdIxlwS{lg9FbahB=W7i1k6?+uF zHeI`F@&K)#IW++GT*Jqh3JTq|Teo{-9iti%5j1}P{xJ_`TZO^itl1sy8iBuT!kl1z z>;A3SR z|EtC{Xb>kTlhNY5&mkdi{@njMx1ovuh_feBZhZg8sTO(pBDCj=($WoF{*o`DFGF_j zY=*T_C~emPAdm6(c8-<|9bDlkbcH)R!LEYGr59N1T&q^CLXvP6k-~KIX4^9~FKWEA z4|&NA`K=E9;=a!{Gc~ngv7@}YeoL#1^Fr6zI^zp7m0nHLdNS?n4%E-*=c7OE1{?(q z_@W|g3c}3zWxp1EmQ7@iLyq1t$_Gnt*H!ZWJL}3WzQGdla~x{X8ph{%y<9LpPvLdh_<}smH^6ZmmHdajLuf3e*CVIXx+hznLJDd{I=il8a><7EE^_9~qkn z2?*V2^oXaSXVUFvJbv;-uXpd}n66z$a}e6|+E)NQwYqhKG_8yB(ljFsd-UkBUe}@# zczQ5LUzDQgTCT{zXyxyWikU)1>pd?2-IK_Rw}bc6owdv>Smp zWaGxco}MP~6MrPh#3v=0oW{#qok@okgnTuD#w8lu$nF%(KGo z*qrTTVbM%63KUa$)4o3b8jAf#j?|ekx|VZ2Jf_T^&6=nB1_lOgI(4cK(50#< ze)Z}l-#u7US!QVm`x32!hqb#z^xqs7wgSAIA2{}4W5+HI0hKSa>go1d(v-ol%S6-g zvc-cOi1)Smz6_X*Z9JT-yqf;}1qCnisy^%2uTR*vZzkl*~e&BMl4I3gr#Fs>?zO3P|112;!m_eW9i3y1_AO7$ zC?H6roIN|cZL2Nzl+cR84Y@i@9cxh>oG_u#IGk z;$YYFOPbLr4dUb^My?MDv2WYGd!rlMyEt!L&nRsV+W0j{KhK@I1w--)gtV=>c~inQ zsp$=#;<~!J*RWWB0ZMEY{G%3@7uOB^^P^SAU@L^N 
zpFvO$P!nZ8$wP{j#rzNeA}5o@+bcj3E)^8G&52tXgFMCM+`?{hQuNxluS5O)ou;Q7 zT41S(0+bjc@19pYKPM`Fdq%u+3Rxk}CMk3gBQCwQSzH zRi(|mpfnWrUVeV7*R8wJV*BcLIXa|WVyP`!ZvPK_7~A2)S7}H77fC))lKVas*I*sLyz5ED=R9zFiCdiE_b zHPwPDOwpuC6W{lbI^RjswmEx(HzHAt-1nuWSYecZ-W^&%udnjy^vn^_$NAf%e6k)r zY78jV1Ojmb;xH?1)*1l>=>Tp7vmdUbp`;-HaP0jnaJIu0Ru(}ct$ejUvq{-;Sy$?3We0+T+9ef5AoWdBTt-bxCqBe&B7N&yH?|??K|FQU=S$PZz z7ruVIhP0(;XsBxSOj*`o$uCbyN!d>~+!dNo3adF!P8)uFmcEVwrb5;LNVOKYeWfLW zp}K`7iU$O^CXauUniLa1J|F(y^Shdnp+Jb40gkf#*frtA0z=^lHHgD08~q6ET(O zPD%}he|bq3ZEkOCV{tRyxo*gbEEdXE!wkYItk4Af82pw~zkA znq;@_%$Xigy&K8t9p**SX{s~Rk-;s!OAFh%y}{@%JBO|2Ax@$G3aE@*?lg96V{l2W zR;@M?mZnl3e@CnmaA}7xw&Tr;eJuE@4pEiNDvYT)r>O7_E;;A>b~F!AXVLjpt5hZT z&^}zeb*lkQYnS|jf>V@j4!3Y zZL|G`Ua%GLYId8>4%a`a?+VFIBn%W$5fJ^xPK>;A^{TWx_?)iXc3jE8Kir^HP1T5J)Qgfd`r0JED)s|=G`1V8&$@2db_x1O)`dgPn@Q&PusTevwq-q zv-Qst$|emE!lToKr21h7FDP)$@c}rj-{|Nn13T8g_F@iXBg30hiIrSL;{~S#C|I>= zQ$v|-IUH?c^R*q==XY?HQ#0VxXptvlp)pUA}11qVEXR?s8z2 z^U@df&7s&o9Xix;ZsZf{n!Rh+)@Am0EeB&No@qqD)vhL25g2=cr*s;3I=IKL7}u=^ zb@@s|m=N@_OJUH-ou~2tEC(nXDCV$pOz~Dhabg9l)OeDr!wYFsNdsz<~^|djXP0c)Y;YwB#*;p;4!Al_|rY!r_ zo7%n3;>C+Sh8P%Y;WP`mIY8||{+Zvh!rFiD(W4#a(w+m&EkUgKsj~74(1_ml8{Tho z3_m}T}Fa2p%PaEc~H zml^iy*^eKa-?5nY^iR&ki%Mpm`5917rcJ_P!kPb#DOIzviLTwr_kNz-4*LI^aJ^*C zoH?%)oL~f4x4xt*p>AB+@ zlWR~SN(wBIH=J%wd{ zriapo>-3(m*>*dIX9LuNxZ218kwJO?`Ms2Xbkw5RmTFqz*J<~*@j&Q3UQ)ZOCT%gO zYpr&z#76**YtQw9C!gA#@Q%~^CNH^5odSndkEn9x7W#k8zy4(jNPNV;FZ|X^{Iy>? 
zw_HS)0e{i4R(n7+xY_-mh_PN?U1^Iqg6~=``fBw2Go8W#Qwet~HR0_Z{)ZlzHDMwHlwvJCs?0hNOMs+u?X0&v(@M(G7 zCU}&_Hj_6lv$P*{aTqt|#I_@!-dSzT-4s!~z2@fFMvcle8#!1^o-w12UM(`l8d9Q8 z+R_mtGz8%Ni4IcnyUaR)62o@bFuEfMns*zAM>mF~=zx2CD3yXcVJyqDKf)l_v%Ym1 z&Iw`wsamDc(E;N+d$^4K(WsG*jd$OJljqD?2R?Q&D@&Eo{>Z(%eg(1xXN_W)v%a(T zws5C^Y1(V9%YjluJ8QD6H`8%uAKwp;t|vtm(3{jSRiXKV2M_Mrv*)0}gKN|Hlqa4X zccz`3w;2J;_4RY=gZz~g<(BBO?_w%fg?n`x!% ze66TJ?iz2VkE3UcX3f@ekUIR(%W>*t zeV?@jD{FtJU<>bmmAp!CR4axfDn+(0+WLy;_If zMp(|e=>`&;4jvsgOUF2u`XpdF5=9EM_AA4hdW_L*Ts+`*&mOW(p z{WIb{JzO>JbRVo6WOsHhov2#<`Z8o3&s23pW#p!H>n;&C<+{?B`iwQx+M>bN52TY~ zx?&x2J(`mB3AwqsLkN>yfdRkL47G!91P1KJKgvWdr=_hOyryAmYr;5TWaYtwZRsO} zDbZ&4(RVn2^tABB3njf;ojP^;M8$o8xt>8u_0-ifnS$E9ZCh=;(0TGXjbb4`tq6R3|H$!^lk^uXT2S>lKE5kKkT3J}Y4iVU zbP*_o4_$3v6X2=*k|aQ8V483GML8rSW)ykAe98B&q&+Vf2Fn|LI`%80Y1H8K)o;=3 zgiGEyO|31}?0s$|2r4x8I;x*0X-ip#BC*D0+_)EnwdS_;1k_-agx-)4+p8V6eO|uj z<;d^Huc|MKHFC|~7h>x#8G^7yW6zNQvitmGV$Mr-Ev=(N0KKUY1U#iz&`J9R0u|5X z46*SA-Ojs)w$V|Pey^)iVO!@}j%^-SNszUP_KZkqNhK8$I`;A7gcTdTJGF@L7mNdS zhgORg>p%%gcD^02z2M`JL5`1T6G9=crgCOe{L5M~CiDUvgcs7Gf8M-j=Z=<8YRpTf ztk-YX?(pTjp+-+dqq^r*NT>$|UiPm}JqLdV>>otO%sO+7h!f@gyvHqmYn09smaQ+PCU`Y1c>{8V0^ ziPpZ2-RQORP8^+DS$B(x?Z}Z=srU-tzh8$SxDAE5wc0b^c6&a(OI)id=;nXlBgYUD zL_&Y;qzfC7A?@dRa1`gis?P!8Cea8Tq6Qn(ac+RON#7Ckwjx28%-#ZkhAhwHkDG6_ zb&l|xpx(D{-yOSmAEf#rZ#a!H)~|TESFmiG=mqo{ne+}3t1Oo;UCQ#PRxk~0x=}ZF z8ohM=E9PW+>TI<=IPu)VAdc0gJ9ipFSg&+7sk7OCtP{Qw0>s0;SWW1*mdtWQcL~i2 z<>mRQha5&-usQpR^LUABeTs*NK&&;#yBC*UiZnRh6+rt_P#H|y93B#)E`Y$o#f!Jk z9@Qakg6;OBMb)ksA3V@zh$aITRR(@`Ft>4NQdEw+9O96W1W!_jxmTfr;{5TcwzzZN z*1GidkwB_(aa|as45SED1k{sM;^pfbeEfJvp;b;?Y;ChkKJ5}FLnpLccz=g)0j0_( zghTH1%6D^f1rBv}bGt{*vou=g!kFx6+v^bQE%VF|aS`J2LtPoVzN)OB!VeaoX|}9hLrkP~j|G zTwG>$G5H`;X?CbpVacFmx*Ht zAS4PS>V3#-3&01H_|A~gEZ!_@RL-N`*_*3(NJY~rT; z4MrjWayjL$3X;vD80z@%3@pU6qWkt{{XJs%Mbwn#g)WR`B^7DTvT=9UXFN>e0{~}- zeakC4?o3bbO9>{2giQn!T66<~Q=2zl<7>r$p>cms7@L%gq~OJ{_?V%7jBSh;(53sEWmhf2C!Sk{+Dhk`|zugTy7DGS~= 
zMn!Xq2rjVMwC|xIPmN=3Mpo=FR#*FM(Vh6zaE?>}+gGuCY-ev1O2Lk#^pRn1NFU z>F9e<%?Yvr3Y*Y!mF-k$j`f_*fm9Q`OsA?IPktVUl(S!*T zeuDE%YqmSaMObmdnb1wPc6LFDiHS*VsqF>sLrn$V9wl%eJgT38ej;mT0*M?VODr|A zX@2Qf@mmk@i6M#gul!@(23v^+YeOE$Xbfd#CJrJs--=3ARUwS63?ZCIqCp!1j-}bW zc_^+Y9o7|O+=#d#*@t+p$7+F-JJ*J;8*`4B=9`arPn}pf?ej>^Ci1i$Y#(Ht>`P<| z;PfFwu3E18h_6#2g{$GSFS}zpfkYkPmtM=xmWp$Ksku#a!4yehnNObV84pYwcOEr$-aVUF*X+nmkj#DgB7XRG=cr|F~(M7mN_nbO_F;Rgw(!;b(r z%2|a1?W-ZFzIIJ|i%_8|0s?BfsPe((jE zj805U-}R%-*4zJBPv(@)e45Y!hrkdr0HoOXyi*|$j!`B*-Du@FUE&3OZxndA;De|| zR3n#sHEsJ@to0=-ue}qjn?F9Iav79W=g5~_;FM4 z<58&vly09nA@0d-bCM(oP&|y982#?;Tk+eYNpjq{YaTQcnx`NN&e=VT9ZvOK{##dI zUP?a_A<2XmuOZ}Z#5M&cM+e=}V+c=y#z({rY#QUgg-a&ZH|o4~<4(i&^wu$(_w44EC0vEFn$bNVz&&-(D;Luk*C@nHrUXAi4SqdXDOPek4@V1IYg^q{0ZND*P3Y2`& zfSYPd^t^ zy2oV$Jn5Tz`?ioK_EUN7ov;WP zYzERayYttQIjo^Pr{9iTOUQJmA_k*-$d-aNi45yMfp~X(8&GH z+5wgCe{|C(D{E(gjbm&&=%cWbq$VJ$<=SZs(=FdbOMCI!H8s$%dsf!^6^KVJGhG^l z=)-v43BgK)YlXI{NiZ-bwHZE($l5}Ibm{*6e+b2Q$NT9!0k{_ug%An%rrN24uBrxV zL*)*0I?&>M5r5ZJ&X1<1W+3H6$L(i^j~qE<$r4Mx+$I)P5;1n{+&KmP-$IBh#W~kK zt7D>I@r7{r;)Q}hn0~8yr|}7DtPI?2P`A3e z`a6@jg&#leo#A*WF|jB3cnJC09Zmy1j|Ih9H~c?D-q8Q|_qOm0iQnx*o((%y$C2DT z?#*HK8@ahn0O&NWGsxXAE-DI$g8>1zj}O{JA@Tw!<2$Y;q6uLsLThF^8gj6mAKhDP zdW_d0g^F~ySS?X4DAc=HpRpS5m_T4x6ejl7+Oko|H`aiE0u+>ic60xax-_5~sjjh> zLS)LZ;U9lSQxwHtH-6Z#=-xF+t+87zB$*A+CVw+=#%$mdh)1^d7$u%EhMA4ZguV7?utd&C!IU56e0*NqE@Hx>LHn$2qSMepE2FEaDxU_jx8KF1 zwdKMTZA0G&O>wIBQ#fv^DFSJ}NO@?z57Nr-T2PX$BawpI=92J-3@_(jnuuuY zn4YwNKInFiE%9X3XZ$cq;6bMn5^QKQUEUn_+0a8W`}-$n z+Ji{1M#d?JK`dEPOTmk`pqtRECFL7`?V+haR~S%dWMv(4DFtu@!wZ6)e8Kx0%)}=S zxe?E}US4=z1g~DL58CDD=O+?QKgvQyAOPBH_^{3$@T02d`eHu=DbkXonOzAKHaRo6 zZu49A(IW((`|@R_h<~OTDgvf^B|~*0fD?hBvm3asQr5WGKeUagH3kt8QI{;i=E0=j zudib^6t@*vM*ytso`fbAWH)6d^>Lbi7xd^Wm>9^&$SB<4fjC%Fj+9P{$y25Tp{@vb z`ZvnG9!Mw;erW!A>Po3x&;R+oxaVR0(Q)9f?ih5)ZxU;Oxp_$&hVB}(YAebEMb)!B zD3k1~AiMK)KR7^)Dl)h`J;OKKE@dTRJfpbZfBYzd$j^Q~sk0YO^{x3nB<<^FY6waa zm@TFliMX?TX}wSsD#VHg=MJ|&+3~p37NFm3IHjYzVHi;bQ6^8pJ%Jl(m?=u0IsCoC 
z5`1Q00M1*;CbB2BsVT(&3M#yd^jqC`*5{e3fv!?uXfMDfl2BMkw`6I*Vu?JT4gx(@zR93Y@|_Ep%X zrBhG+0GLw*N)2ve;Lri+6_C27Xcb^iFn02~huqKfiZtWw~(k z>nhM~qt^S-6_AUr=c;^W$}eVSuIQm`H$%t2u(rb>gY7rEVYn>K zEY*pk{Lgb*I%}{Ns*qER&aZD_rol-5Kavi#cG^z+x!TS~CzCyn;&v+fXCCsPiI;cn z+*!ld;=w(7a9v=Ku7v6v^Dj9&7$IxF4D{moZl&1!t+|WNsclZElo1>^dC%n+He#0eAX*u}*8R&9$A(IdOr>$!7J8 z9H*quI@Bzxe07i5wG(4)df<8nsi6~48fCtB>(-Stl`kQZAE(Z5&r`plZ|tyEZLM>O zjbji{lsjj~JfPfw?@Qk`zoZ)}SB{5heu=SCCU$OTn0<$;KyjCuBYDoV-}^%$$;}Pc z1SXGvT5UMG{epsFV@`Y;5WP;gWJFY`m89FJ>2hlI19d@sz?bQ2dXuM5uT2sAp7*!i|5L`DsUl=9Wv5J;+NO}AGB$~1OehLP zWDKQ2rYLhrC?Q29iVTH9L_&rPA+sc7oY!6Z?DL$p&RYNVx7PDpd+!H*zn{ObV~^Zx_+ix&X6OMBo>jV{>45hje{ z$HNgY#z+p8u?h}6E0{e1ok3JuLxDwa(w53CBj;xaupAJvfQX4TZtf7IL(7Q>0n9wv z)ARa0z39(W__Sq!j$OdUO(I|7nA{;qreF8s0|RT(@Q5TE?p{FmkL#JHu{FM})0RI)}hVB3$^;toKaf z_&-=8l)m`DTz4||v$Ui9`tBbXDAaS<#lSfD7=VyY*=zL7D$JiZE@e#>eU76whz>QU z(?2%GbF{q~!H#);ASA>kKPGDOitUeT(kntp80_xuenx8=L0uK{nk0Cd{O|YGbfhc% zMJMB`wIya4tdE))dC6>qSfes$oLb&eCVh(1ym@mW91X{UM2t_Yr0_a z^pggN#^p-NdJ`Mp`7ji}efuWikOoEBu)NJgnXGH9wbW-(cYwMo@`oqC!^`fll>7fM zyZ-M$c52=^gchQG;B@OoIkq327l)amb-;Va;aj_?2owh1Nu$|AW4&cZhD~HaGNle( z%Mc!DQp;7AN5F?P)YTPK54;WK^2DN^CMH7w(7+N`0vfyN=x=nV8inL|@!sLG(ToSS zvWr7JB7Z$tR`IzHjUUghHaZK+ZeuR(FQ|*oZtJu6b4Fx_Z)s^cz}?-@%gc+ZRX_-R zHg^xArG}_8CB@-H9a;^lwVHVvv9&5eZ8;XvJ_IshHE@W8+)~^j!*PvX&6+h|QIdex z7CnD{acrV}Cp_z#0@h`R{}p7&BIBt-lz={B;AM2gvCb13sv$FG7(lX~?BZhA*e2Ne z9oZHY#YJc$rRk3+cJ^T(QEi!1Q*(i`b)nTr47IlJkh{BoEkDf$d3Go4$_7QRkw>@d z>ub}aW0DPYk}#irH3<=c$$;$W~VnVZ8{3ZHBi6{bzIj2li} z(i^d{?!UEPPpTrR=U{nBf6!+6iqp^r1+l0awOpGy$1sH#{SlT-5id=!S#b=3*6#ii z90&x8I6P^RAC+g(|1Wxy?OIv@@wX|?rHVIWiY0|Ql;`hj-w9GdXTV+^S&Ot}_onNK zDJhp|3g<$kAx7@uMi{274Gtb?X;~ZXKuSu%Bh6^eUn9P#+j3}9SE#^MSp+)BC0oGzx^w`p+X;!V-*}yPE9Lp60S#$4GZQR;``&Ut->7tKfZyL z{gg_?@0KuLz!8KvEr2l}We-%(V<%47`xdV(__{(rPbrH8Ak6nIxt~4 zxb`h$f~LinSc*t$c8 z40%;jyt@IyDQTc6{GI_#GYh7>$^1QTfPF+|NO8QYx!d{orl6Q9M9B=uV4DNpb8nsi zw+Q+E@!fXZMv~$`^IoYF1GooI-o6c`A>T3?4(oVm`p7QPL^#&Jqxc~o5y=7kiowJU 
zV`#C$J_h4+8ZoSF8uqF-%cdDg0W9R%p0k=#l^n>NXTJa7$xb%HY?VNUKmFmy7f5J_ zIFiI@X2qIGY!2)Qu9mAc%kp3aGZe*-hIJBj@=Q;b^7eC1^{PR&&4@eNlxPG-*S`zP zd_*lllX-z{BO>&nVt%>JS-o}Z^$;X0hkawe9kq|uq{F?aaya_dzuD;s9`Lo8^Pt${ zZcqSEn7TuL8bL(@l}yz{Ci5V*A>KKiS6wvEoX$}OU6Ue0CWe5Sb;$j6nuc{&lSwmS zNUwrBR|_0IlVNZXd5tBGLvl)jA6*5nBieSlIed!l_|XqM;zJgjem(N{_-R!HuHvrO zGWXAz?&7lV`WSO}*1gg6!r&Mcl{ZitIH`CfDrh@V7iZqfEYJ}tOE)fmL)TR)E2n}+I`KWvrPAZ7!a{<-=q4z%=vi z7eb(ogP}dU&GHG~aVE^c3Hx_o3`rsrc1Mp58)9aGIGzWl z0w1P+i7NHYd>ly6vzI$|mu+gvx|%AAn8?AF)uE2d{T%YuovN$yi?{0Al=1oZ`$k6> z3n)ti5Mecq-v1#-L^w_ZsWKVBg0Zg3_TV~;G(gGVMoB|5!?2xAtihQ;f<;p@*>&%$ zkcZH=2Rs_8Z9Jtqh1BHf(*xnFY_XWCn*<|l5Y>g?P<$b@Ld371 zW}B89UyN~bilUW&g>7K+KK0ldlxX&o5y-k=%0SgOgt|bqOtgbBw@24r0k5*&-+G2S zVYbz|Rd(kP`hu`cn?BKmX(lbFFIMI}H+E>sESiZTT+|^v3>l7~Kpw>)ue9WTl96yL zA;Z_hNaYeib1toj>&KAT4E$UmwZ{~VU15l zdmuZbbWdPG0s0ab?0&{1&l-C#u$|#*(zuSUuIkqhk1sKvmXU0DA@0W-baUBne4;|K z>21Jc0xO9@_ty=wZ_~+Tgn98JUcK1li5`=AcSoPaojwdRk6y~?b}-#GFbw0UNy#?QK_*R>l1)o_-RzSA=O*1Pl?T_3EXak(0ZCV=TOX{;>jV!|g>4_3%7O z{F_)O6NIAxjwD5Rp0&N(^~Ln-JI~SIt8%C9?Ccm9wIWX3U-*}KvzO1G`%|BOT5M3# z{kjNuI>If&=^&Hj<=}LE8v|xL_~oP{Fp1T4HNx&NOxZZJOCiqE zHxJ!$Gx+ukAV*=@!fBZ_V@7Sp@zTG~z^5^JpGJEzVJ^;R%lY3j@?Q z

NB?CzW%I&UmYsE;7df)bqcepw$QvQR$%g_}1MGCqG`U<8ig!Zb-l0qUxZQKs$P z5oDQDNs#Qp$Z+Apg#&7hYHFG)Cl{MIB}7=ptX7)T+)gDol^ z_5ncPd{)+rwH_dST?A8PFHNsi;vOvg+qk>t?i###5|u!Juo{5?D-PW`cA zbx1!O->~rT0E$l+Bys{x(?Th2MrxSXUO`PE+60+H!96*lwZ*c8_?G=MRPfHkicuj-LYHNt6`< zlUZ#H&Kkkrw)Nb-|Kx}d`EgCy7@j?_rTyNy(@er^k;QCM4~!}2zGIA|y?bf-wn|#S zUzj$uVBl~2$beb2sHSh?dMfH6$XK8Pu+pU;!_Ak+babb2DUJ_MaZ$)Cjca4enrOp+ zrJMd$bNH|33{koB&?%hN6#?pL4sj zficm*0iS3lG71VDzdH}y{UqMY+xr5Zd?=7FbiHC3-4Pk1uNW7E&!jxNK!%(I5t~cm z)~CLPMtI)H{uTZlCL_nU2DAKs#cfHD){?jD*jOXjdKjJo#6j-jC4?8@-D*r++F53q z%zsx>>xi4`kr;E=c4#eWMb8seOFIPT>~M>&Bcgr!Nuu)JL4i{!?q>G|eiAI<9J8O& z4qs=vx%~yYNm=!B?CX25r>O-xZz>pwm@AcwL+up^1+Wc);_+Pm2O?07iAz4!VEslk zgm>5VnVyCXPEMDvCHSEqzkjAlZ736&FiJu=Y)`48hd>27bMte29H6>X6CAiLHuQHh zAD-&LXRV2lvXEgN8Fw|`M&{o9^03e|^pOz1O;+oTEO}G2ZV1@q;8ZUI={{$tYkEY2YZ?F=6c1m5TFOK3orA~c+3Hay}%Qhx~ zLsvNj2qId1!OVnKGi)P9G8Uda_%C~xI5WmaCKz+$Gec$p{2>yYIH3`oZ4QM)YDFj^ zXFl!Z?{ooWg95}Y|K-PRiV%o`Wmgt7^jQ1&!()HO2PJRgCNPAzB}j>6S8QQhk>HbB zZh>!F83GmyV@|}yohO7c({@4HECCsqR+vSc3v+uv-j`7R6iFRWWY64PO4@shY)Vlm z6PC#O>uW-RgrvYe!kwD{fr)is2z2V!LiTy!o%-?nc~qK7AnURKG2Y z^P1>08j&gJy4L;I!K`lor?p1Oc3!Ix)yNemtUq}0H%guqBAeKrceS*%?+f>{S&2!X zKI7U}XvQ{_XRRZQ9BBH|-`HKAV6e1lxbK+kTu<&!?S1&ccBJ|oW(N7x(ymQntFn%t!7TUa7vxrV@Hm@+ZI$J@8 z#Z>h7Ku1^eb|6`o=)IM+7TRaQFVG%uj%nSnVME~op*9m{Bc1nHxH$UIZCRfi6cWO| zLN#10$Pa7akd6A?pD%yS7k&VauR*-QBB~}-zt0Ab==kQ18#hL+Ak#o8$7yle{QdV3-$(>lJZxdbP>qIv``KaRfta6X{;BiIzLf~K zik%phf^Dqe$}~P19F#58F7fI>OAS~D%H$dQVgSVgTe|_=$f4}m5I;pkrT59V+!(I>ZNUVwIdfPL@ ztP7Q0)Q>$dSx^QL{ys5AZF0IoVi%>oGH1a8$C*8&p5hf|Z`z8r6iz=7qNjPaK8Im5 z543kj?j`sN6rdH>CiT>uMr>^RhQ}%?5w@R-B0Erpb3`3!wEn~Wq&k(jOs57-yX?uh z_c;F4(w{x)mvsCMzU=9*J6!Frzf=@Du%}>Qokowt%}9D)4k7K%6UF>s zP8CuO80Utg1*(%WhZA=msGXsE%tAc6_AG&sF^Nj#z}=;Q-IF1}h}A*@ZZS&dzn}63 zPYGICs1bBxb*9kz8V!BIlEaVyPu(c50iRMI9v<5*sk~6)A3&HColZaXZ9&DpJ|lqf z^GV2@+lIUae@K|ZCsa7FL47@rI;XDWI@PDhfJj|UZZ!;9x3;DlH^;H0-IqLjs`PUo z@IIMmq)*W5Rvpl29c_4%`vnZkh2-&TTc$>yD|~;MFf){~zCGdQb=0RwS*-O;O_s!-EVpGiNpI 
zw0P*LHa&W1W#nvo_&WO42adh_yDgTx$oO@OieG*bPaNy;kXDNyFwvlM*~e`cwwuV> z`!4_OdeV)UT8PYUr0l(^Mdtf& z?-^K4$)JyxuXL1*@;Zbf2=g9-Z;g*$%@40Ef6+-g*5YJmsti1HhsAb-%5sH%gmX)p z6(y_htt~O?5IdAnuR9~Mgh&Om(snoBWaQ2M8#{#{S5sG&xN%3*o0KPZS~RVqbrM~K z&<1O6($u&aCu9ae$3?u1d&|eldDW=1%a)?Ps0X*g@C)0fwZDE{&0`OqLLD5;wwM_O%MzTy2bz!=dU@ ztzXnP+`M*eJ%at{r{%m`2bX5LV(O{KSa=R&3kf915%+n`Pd^Wck1pMr*pq6m`Lmk_)vT5uIfos;i#2Nrrt%2kHRjSpT(MQuJ zHtkg4<2Rn{3wrg*F&*EQ2CYpm5}9Rs0t$_L;rvWJ!vqjBT`ma7jN{C+mH155m~JD2 zvYn`*m5i%bJLG<*_|Lus?7Q??kywcrF-W83U-!aHGxjDoRM-%GeM`;1@0_yxSwTPF z(&|#zfj7&~cnikW0fN-cnY21U#WlsHgb*q!Cu3ybFwefP^^lxRM0x^Nf$d5sg35-M zy#p=B;RRe>r@E*c2oUfp7#Fp{^7%*w$MjQGt_8&YMA(qWCbiuD{rVy~mXYX-_wOeZ z)tdxq49NjeBiX!yp*0o`d0A}@lNc~O;lqoln|=?{#xvWmzPt{KTIf9t=MOQZodyX* zN>^M`u6|BqPfY12da*}i-j1F%bnp9cjiB2d`}Ao}EW8BJRHx`qxk;x`5GBS+lBfaw zp7G!9-Zv4Eb0txN8LL0kp*HlRNKyvC3=><7yZ7&(|4(B+1k?)x8ccogPkw)NUWWdH z1q+6sZKMn2R!uC5^6$q-okgz~nlZ&wHX{82pw1+laW7!d*2>qXrq6mu~kpKAYrANmk z=#Knje(Dglh0FqYC7WNe5MgogGx*2&Ahe%Yhz$GRpisZc(Iyk z#}ptgcnX)B7p9&(DJwt}KPcyu$YiczRQ{#o5~o_7`Pn{9Pl76g`^yW?JpFk{T@^Du zmZpv>`O2Gi9cU9edOp&%L?-+z8j73jd@XL$Mc3mE)v`n<)M*m31UlHX$u9V3AGGY~ znpD!#<_hdLUw{&D2WW4qRcw2zEJzpC0emIE1>? 
zNSbYRRb~YjdAyr{-kx}J&fXizI41jXUZu9AwwB`@?W(UGG=As6`-eu1Xk&PUQk{b8 z;K74;mT6U7DfS{hnVV-yS>aGSB>hjLiJPr^SBtPLY&0}8-8y090x&IO2q=m007ny2 zMXCbjDu~x3#W^M2B{30ydq$!J&iiUSAa%1MyqQ<=Pc>~D#tva}d(5M-r--Kka)%&Q zrh<<#X2%klsV&&34C5H%-ei}<)M?Xnk<@)pQ4}d{j%*oVuZKlVang4Htk@3o;tq{H zKJB07zxxt{j_mapSedJY29xe`qjlxmyLRVJMB5dy=k3yRCqBFt9@BS6cQ;L;&Wo83 z)xOjb^1~^4t@am}UpRmM7|62#+ha#1Fh!QNt~E=7t&f7H3p(cG(^GVYaIOv3S1>e_ z0uAFYNj8jV&t{m4?IEbomP!U<{rbQMzQ5rGA<*S2DheUeerBhC?-X+1w94)#DJsHD z?@(>A&1i93c$yTn9skxfY)L;O{4h?8)I;cDqbo7#){Sp zK2;?%@XzQ^Kq}`rQ9`V}w#C9;4`~WEifV{*2LpwDZXq0Uh!)0p$rF|u-#r7Ug6giL zDA08~*#raSnAKLPRVz?4$)sDiY6?ujm~MzWG&J}jb)0SDqv@b--)RDj)}oRI();N0 zKfe`)>BLSD-W<1}+ZXt#RH&KwRm_Fzs&rmcz$}cYxZI_W#fovpN;FK0Z_rK!1NnZ# z+`Y@B3_p`r-e6DgJ?22;C9;kl+x@U(d@BfdYa~2Owv7q{Dp6Hn_2JeqtQ|e2#cV!` z?)+4!dK9^b-!O@6OxlfURgCAIZK*?_bfko(U(5@$lF`| z`!l7i25sB<#9S*3P*cdLAT&q8sKCH7 zpMyngpxl(drgoh=M&;#3AOW%v4zVjmfqm}x8*qGf>V)l6q zZ0UJJO4lQK>%rDwVe(|&Vf7`02!pTf+Z_smmM4TaCh^7I!rIK8Iw?8#e`lm5&Y4?A+a(8^&p za052Hpp4R>GOA{tUqktb&JR+FzdH81*KS>hWeBi`=bOkf83aI6*|T=b0Q0gt08e(F zt@Zo|P++XMWc}>RGD!}!IrP3Vg=h=PGvM6NpGKxXzm%2HcK!2I_v<(DX^>$tTZctGW~<-nszvcCzC_=?eVM&OEEjnsUFtIer0~xuWT~1}{!Lh*@_?FlpRoZBhlWi6IQzjRzkUpec3dAmcwn`i1g2Ri-lrfDmVVyctBSA$qLW9g(~-#mIc*gV)6nxnnL;Hn_tIF7qv){^6@ zS|7fe^!vcf8VUb_S!S9V>BTglpMSkADfyn|vj3C&SuRXDy{8)3Q*9mUoBx}9Y4fQ6 z`n78t0j~xTlxzbroB2WA>40=>42>U8ZQK~$nKVzt?ehT?o+#jGA7W-1kpTr$Y zjvxWTX!fx$uWnW&L*E*`BuclvXI**ClpfjlmWy739@U1;hv7pU4}D%;13BD8(6Ov_ zEh}9R+I!(`wwD4idq-U|f;sq)Vh}%9gp=e(p7e-upMpbN0Z@gu$DmE+dB-_tml=tl z(bj&8d(x!Y9MBH7HUgytD8pT^I@+U+SYW_K3Wqr<^JXTxg3XSl+5;}Ht5Y2(>md}eag1nr%y}g;`WDdZyg1lNK=#vM+bbq^7rlx{L^?e z*A^_BH?KPsLG*Jq7`$19jbfAg&c42yR1~f)cgL862SH`!bB-yZfV^z&2XD{@OPd>zB`m=`NG!&Sdpx79t=RP4xr zqFQ23ba-q-zW)U*55`SBjmDsTN+QJ$ixx5@CK^Yn*7mWdO{Z_d|@4D45GSsFM{I-bBAWLZzlWS6AzfWp-tDnb}XjqBul#G7>5 z_;l=pu6!b@1#IcUvte{@UUgJuk7y5lddHY{L>4x_bTmnl!(Y_tS$(^k3W-LX9%{h3?^%WpeBb&t*eJmh&1yuq( zXg*Yw)vH(QyT{Ks_$m;X(bA59tO!#~-<=v!_)1LbQ!y3B8K%Cs_h-$=bI|odzfk;W 
zmW9A%W|FQ26fBZGiXYv6{nE!2wDt7#Y&LALxp_bWvh^ubbU_l&Y9=|>O-@csv~|Hr zLx`2xJ%|XT&?0Ip`3V2Ajrd02ltC1$clqE>OdRdd-lT-}VlUGVZT;U*TyQ$Yw)DG?T?PLPAq6FgUH}-1(}9Ude!icY6UlXo?D7(+g`p^F~e_q>5Jjh&SSmU^sC$EU-xs z`4)b9T0_aeLeGVmwx&|VKEyN@$wP+?yGUimf95HHC1^)u{SuXO zPlQv@EU7bfaJ2bP2F%OrkInNF_AulL?{Dac5m{n~#E4CIO^c9cpq(5>_}&4C85}3* z?z+QG5d%CbE}767Q*Y7Zct!Sel4^vHAl~fZhM$tVYS{R8fj3=QO}FYNvJJ5jl)*Zp ztsA#;`nTp9x9HZ0=TMSUr9h0fzMIHNb*hW%97SU_X5Kfk6VWwXK|+~=)gJ(% z!-(IB$Op-o(6bJvnPS_+;Ho+}I{k2RJS0OvnvAn5|5h6x80Itu(;^@o{pE6m831_K zi__bDC@r2pe{N7`6(Ev`3E-4TJ`agNkV!v0iFk(o(a@$fNVNM1yv{R|a5)q{E#gcx zYc7$xd$5^=L;$yAJ@-*~3S{0@R8z}-l>ChDd>K7Ab8#6tW}kIGDJpW zSZmv`14~BK-uNNSap>x4)22yer|-3G$E^@e_91mav4q(xU8Jew?bdIwN1DQ{?!niv z>;U9ilPK8e4{Y5Ur%ai4`%NxNl2h4G7VY0rn36VCU_c7_p2o+)4x2U_tPzpBwN{md zM7{QfiS&ahnr3lZTUjme4Au)7ka|DU>HQ5ZGR}+jxOMnmijIH;y8km>FZrw{+!Y@Ueonl7u_he=L}i#fol&*WIf=wN#wuGPl3~DFpMtvo9kEN4NW=i z;$6aDKH=5j0wUrwuI+iCfYb*q!kYuWnA11;ZTawozfs7%+4}t-K71%e&&iYjh(-~# z)vPvjzA0D>g2JdOxHFF5(RtOYwWw$8JH3dFBE<=Z*7Wx`7jcd8sL%`_Jo&@tm1iQf zdd!J$)8elA{X0O4P=aa`Q7zy30{NSURo4d1Zc{PeQ%mH;r8v;?-l6CxODck5>NJK;s%IQvICk)^d>TBcF8G=f@Gg_a(wQ$|jX(ZtEp ziOB4PGqRFl;vV1;X{iLS&vYDUPh)Ueb$90(;i?V>o?R$GCo{sJ-!pPB)t$fa)=%eM zU?v{IwBcLr+_fu#-Plu7P{J8f#{)$9Q~%FsqhnK|)qP>?KXG%?LU!Maj+3UG4GzLy zr8mjLfP;{1awz(pg0qm69*vV?&vV2_nc^yC4D(5?-$eMFqHu@GQk`hWTDy972Zl+z zli%Ar!g5!m%va3HiooFEgE4U?(~Iu|Ep-j13BW>6324HsAh>e#r0@AwRE-EG>J>0o z%sQ%n^vcRNdAy{WtRZOG+kQvqjkUoY@EeJq3xAzXWYkeZJLeD|=la{{jUTw`Q%Iiy z>kD2)=g8M#RU$!g7*03*Qp@CRAdO3}Pe)4o!($Ru0jmbULEzN|;?Yvc zM8$JK7KDgr6bLo8Q-Rvurr{-I>K7brb>{PKgju4Hg@#ApX+hu@mv7g(R1?)HQO_Yf z5pPlGOBw*doxG00=m{TzGl!6{3HcE*74-hNJOfo53e4n=hjQ2~j^*-5WrHR;mcw%ZksZUByx zXj{d}hGK5?tKXw2{$;QSDOGwS2F=2M`1tA53g&sxOZt%vq_D^25o+xPDrvx^=&nl~ zphvWZ@6@8-qDFiWTmb%K?~1vWaP^64i<(0htOM^~kVMhvqAVpvw7S2JKyALcvVg>2 zi85L#)%uMoc=-BTd3mFN(o_ri^$a*)IQ`C+QMPQ98^o&sxjF`?*{sgP_{3w!G6*9# zsZrpjX~=_NpsS!sP-qDHh z49%%3F+%EkLU&EhW{>^<7SiYGs4h~At7~YiVEh6S-xkKl{$#V-Z&_V_;jdupEKokW 
z397K{&r1M(!`DND{F5~$C2Bm1ZH6V`VfZ>KA|5c*vY6liv*zf%I$OPbaWTjC=eW6shAXf<$)gE#;e@E9O8=PdB#bpAiNxVnq#uo~5-cx4ju zdteRxLY)p{V<-rT)l)7@Gt8OIi*pw_;My_;4zIjkkkP%#%c?B+52@4FuYx&_09EVs z5j`vSid7?{J)usYWN#2P018!5s^VvmV}yZ|Q4U~KO<8wJZ`Xml`;r(n>br}Ip*ptG z@ZP?!>%p)Z0D%n!R_*N|Jni|R`HZLmBZzsSKdBjx#^jkZ>j0<vhCm@#|3gI~>V_rIT(tu4xMr zk)69Z(>uf)K2A=tAmFu_fQ4(o-&*!aQ^R2n5JoLU056CNE1Ww)=7+#48UPzwZF~=O zxKYeOWR6dRp(s##zCGVGa`P(Nc#t%jBuXlAbCb-?WJ-L}+&=4TiZZnrt;Z#?7cJg$ zoKSlKLn1J+Kt>*1J#lOPzcnN0GdoZF81E_(6_QpjzcquCq(odhReEu9~)GTe;;qLb1+pn-}Xzle5^RVI^N*dWQ zv4n)oEi&VId_p_4kJfhftcHw&f&GY^^ahDLjPMWs>RY+fHAIA+I>K(LQ+113{DZ~N71{*l!K>@~So+2<_cKlJ+bIw7xU zLKdMl+6~sc5Z@S`tc7vYTIOJGE5|V35TgwME+I@q<`3i*mVOf*BW`5>#}jG$5^eK< zr&on+!6ZOMA*D*WTIZp?4r^t#WaKwx%9O}8*2+R8HVKSnWk5vqs*$`Lz7^0eD&>tF-XW_90j9M{DcEWvKHL;7o1IDo{1L; zpEdFBa3{&~;WPhJ@vfj<5bwdOeD%i*89q!9J126Y>`#(;_M%0Lq#=_mLYgDaMND>z zdfL42!b+!VA?{L)i8mNsm5_Ht*$x)Fx~kXsgn6QvD)WS)=!SZYeJNv1tj~RaJS4(Y zYN-a0fKHUXe&u0%_pQhLYa#@`_OJ0LZ?HI%2AhyetAg9+_`}Ia0t7ffd=%{G=hrv$ zJ=IkPr({d#iCNI?Z@0tyM1a~l>P?yi zvN0jm znnHdT8@6|iD$))?h!ZD;NYkZG=jWNf#_@qC4!0M>5lyTY_Tc?uZf^}nFG{II*Br4E z*1^W@a!-j*EUc$Ho=~J#GrrT@5z)h0Y$2YlGyt+LOJv&cX~ngY@^eZK^gkLz;R96e zg=+rUQ>xC`OxLY~{aCJZvn5=;eZSbC!;f0tU(HR6iauS)TlT$Ng z5>Ce)!1v8_ui*rUI%MUZJ$sfcE*Ki-&)XY@l3d1EbOKQ~AMjXD2n~bjH^*fV@gU*M z;%QZEmF#5`*z(;?F(@y6W&XNdbV0l|f_TJHEhI`y*NuqkUlFe>_L=Rc*V?{)ySq^5 z(+6W8L9;~yS`yj(oLSJ4{<7W+?G+!sV6eUJTGkJDktqqsyos;p|Iio@?25cgu4I)5 z9p4hCnZ0JYe~|l=U+OF0<7ZA-id~xUb@0@@ImU-F>OR=d=AS;x87j%v2z$@dlg_Dv zY&L`V2)Phl+!gP%<$r$5!ZLah@p+m*4$KQ62%f@02v5mQV_wI~7 z%{JcWl*mpEQLG4~m={_(n|na9NNi(r*pcfb%$GetdN?4Nn*(g@^Q!(U&SYgB!zIky zRm8hVM1@DD>gV}G^Q{5!X`(zg@^AERu%7L;P+Fb8|8;Tnr8MtjWJXc%c}QR!W^o!ZE6%W}W; zv^1Uw+rR#&;C9(xMj@w9pT=lOYaEvxW}2E1N2gD(X7FoT!qaq5e!DC)Ldex&5vb%j zF0&*mSL4%+5}Nj3)>jq~(yO9eC>C8}<|4VB`RejAUYw6Ti}OKC}WEc~?z8jxOmRcmCS7 z;j69$h4f@8Gles>M^wps7`4pq`!#>;L!!1Uf@74i`{hn(<5Ip5r|`do=?RE&yxm0D z16q?oB}IPqs84qqbcjj9G!nut1T?AB8NmYvtmB4Jz_xdMEw+_R2DTl{#o1~S$^e|Y 
z7$`C(F31|%R<)9O?#;FP9&#_F|M^TU78M;GW4^yJMr({Yzyij8j~rb*vOI2RmP7o6 zmS9+BB(R(|#u^&!*=o^%RZLsFf=h`kjQTq~_60d;+Q&ztlcG~ygn@J*9rfDT?q-95 zi1Z+~gz4?Y)btycUeE`xnIv1`@{jqISBW#ops4;yMxBr}>1z_^8MsZxK7u2Z>DigJ z0*iF>B51MOB92CXc%@s;&-r0`jN}HUrhLs5X+5RO^#DpWn-W2mpX^RYido-ErQ5e>NVNiz|rXS2aVW@q!j3 ztwNreZwE!x67@UN;_tY7#W404O(?+}s{~bGG4awS2Me=}FZ5FTgcx;jEg*t5p$E>- z8meQch1iWclM1*)ZeE2&$?e!pHSILCI)Zd&k;JRqdaO56d|j z9m>Cs)E%#hN=&CZ9p30yzH}v`djDA3Dw_=|Wm(?zp}~lwp_z$V(Q)Pk0-Gsyiu?e6 zv%*1 zt;`wrhD>|$ExJU4%`wo~_(F)YmFfqKhww&|ef2GR&#TQj6bGO9FILktQJ0bwku9f| zrqXurqlm><&TPMR+QY%cfm047Cne3_`?7t_&nlR=YKVI4)pwX`7XeGChF0YTGHptd zVTcgrDFmNFr4-J-*Z9Jm(#VirH_+cHBy!#_^#LPZ^LxJeE|)@iB(7aa_Aj2FW~8cxgh8E zz0cwdMS4q1Y-!`$xOwwBm0P$Kwt<9Hm(W}%5M9p2X87*1BA}L(^A<6jWcEVp|d1{ z3>2iIRi*nAkOu|M=;3a`w>gQw&+|r^OtFtnU>^R2*5c7^&~P{cWQY=*d=MT}K|obe z07ztJHX70^Ln_{Ohk16Xvin_pMqy&l?)wsy0zTzxuksYb$3|MK#$3C0Ex#c=bnEB5 z2E~uc`1p;6$7Uf_~a?Ds5Op-C?AcH8KhLZn{52&@)!PSu985qMF+aMJ%0u z`0*2K?#DNmt5r>b(;cxr3?``+WMCCVVAx_k^X(VJJoAYFJcJ{%odxf*%Ic{;<@gC~ zx`>v~E>Fp`;{QqDcrlg?m7h1A|DB9?3eVl(>zKBA79ss6^qKd{58BX3@i&a|N3R>q z`=b&SLqSF|qf_5unc2Erw~CiDS^acl8vh8xU8D?{@Jg zcr-wpy19qnN2%SVW0U*}+{~`mti*;IHaBVIW!6xH7`Jjp#yrv{KyPwPQBdE z{U%o0KyuxNnp*F9!it{^(15=?E^Vf*ErL&xhm?M9@t#D9$|m*;Amq6!RLvolqs?_7|r`tcy$zKvuAB$wE!Q-1sbTL0IJDG z>rl;xS`!hN%w;DhdPTQBYBsT7N^aF6L0^>f$<((Dw-iXSe!F(Hq<}~uABvz%@at1?Cbg2bvAPZ8O73lQbyP?pDix-eBLp1@LDG~QDOO-We|D#U2bg8Y>Nh5XwFF2k#OR+n%zP0|Y zO8PdwvJ5lDR0Uukd!m?&&_QIcc2%xzL3ubP$vwRETUvfab@izM1TjZlGGDFS~>^Uy{Dnv0U{5N+~Fo{zn~!T z23OKLbQl2)K!fn=-tmJyDYX=XEe#W~cA0rLV&lXUXCy0>^<6_M9Z2`B$@|ZMKxo!yq6aG4U#BGAb zcNT~v!*SwLnzhYK3XG|bARbZ-j^DU+e5#j4s_Jk+&zndj)=s6<&S$|)19oQDksXJ^ z=fWP|#;r)x##dMGQGiaAR|i5e@qZ8;0c8(k*~+b^_)^#(+Pu8)NSOMe`~FAgRz!>3HxFwj1ceNJR96?=;U18%l(f62?ARvU-U zjK4lDZ!=MLhOC^vLyQwzx>5Y;I(B>su@@rEcUm>o+ybU{=TebPUbt|f8dFZ;&A=_Y z!PYHgNr%$ZYB?IW8D9$UDT`g1nFyb8FDpBV5mGc#_jLSdn)8OPZzVlUlZ+hwyo1Ac z9JqIbM$#a&$2BY~KJv5MGUj9$kwxDm&kgK8Zpp-*1s(t0>2ob7rw%qa#Bj<>VL!lu 
zcAk0~B_H=w4Ojwd5z|>&rP00h));l_V|B9`hwpZjl_H15*)N6EU4)*)AI66Gfz6&B zkChY{0rvG2KDE`<%rICL>D8ty*!!&}%by4FfJA9QC`dK$U(zTuDb>*WRHnbW+5U5F zKrPJmzx?P?FMeE%?$fR>5UoN+ud9fpjA$Nf|ElJMPH)n1-V$p(dJC})mGOX6ISKs& zuoAe=MG9cyE*95q5ANV`iM0)yd`D?p^5eD;7{V=Wyv_a9vzuq_9LZJ3C&uRai}`gN z%q3E$Hb&Q8=}03Us<2@wGsMb5IFk6{cT<5|s4q45=i^ljEl9y*4lnyBZ!4GRHsLfP zuAX3Bsh4IpThzP~eZF6HwVbz@(_php^srEYgoBytzM?ld)81pLhFi!K|4(n)uGPB4 zUlWv_hk?e4mMi$jnA;1751L)AlFHMqXHWJk1`cai$+n?Qn4hI<7+zL3k6bmgPfeR` zc8!w8KB)4e17*>5sQmFxz%CoMZ+D8)aED;e7z=6?n|i911cj`(FkLi`35uIgvSSUH znG9RDo^7Halk(>&e9u-^vq#9#0tRTZrNZr2JK2|Jm;zMV4OEB*hg$J4p0$U@o|+fS zY9Ed8-@opU|HbxG;Z@+$_1B=)bs>Bq$rcwfM!wH<3%N=4 zIFVJB5rFx$^&Z-Km0!s4F@dy=|BY86-Qo9VWw{?-9+j7kM;kZUUmZNdF?BpNJv}S7 zPn$A`>LC~sUHrQ$Rk3Wy77Crxd3Hz-+?qst0ks5bL#=;D4vglQkuf+s2lfM#+h4sY zSRt+Nkzz+A|D0;m&6!s{6{$9}me;be5>XPkB_DP$HP}Z$+PDWka3>Ht)P^0TLGMVJ zUu8IXa|isg_O+hIt}Luu<^7hOjsrPm7K(YZ;vJqL>(7wKurI1b2-+UXC6ZrS7ojD~ zE@(^&xHMWb#~lWPl8j(hn?hgVA?J%vhHliKhIEPCbx}bxaU1O8lbr35dQH8gYrjex z^p1Q>JOYHQn_b*!%?~66f!H2x4Uq98zYHVDkE#j`fDngQHT{5CglMhI`BCPUG+lgt z4KGu<(F!yWIET9wKng1{{So&kXr4o*)4HuZNO6O#l>WhrW%g6K`|MSBny3G55i7o- zVCo&)wbNbnGi_0~F=B30q`E#l>kX0~4q+k6UTQhI>opH?!Vr-TbiPdfYmW`=ehxB- zhcr?|f9vpj7KWiZ)hlgM{BXwMA2za&sDpbQok@r?V1RjKwzJh<^RVpC-P0dl#T=@r zc$Pb&woZVe60K)nlHOjwyGFtaKYIvY2}SvJ7FA4Wq?LKJQJpA*pc*CVK|$Sh%0?K3 zZ1x|Z*(N$H^7GGC6JKarSx#=ZX~WlL7+vyz%`%s;6pN;w>T9jil_e4Y6*y)36`t&`jK zA3T_o8IWnAk}(#R;TQULt5deme>3%&M>F+it;wSsyjy)MuEZuixw8;Ue?Xx6B1xu4 z4+oB7J}2eF;co+uezdBVG5p%4OG9u}*?Xg&5^(WLM0)m{(y<*5)=-q4PhM+}{J!yK z=gvQ;s@AyHD(CIN{rgK&Uq&7u)Jz$$tb$}}ZDqC5crXm%O8=>MgDf9CdW7z5hx7)+ z!FIBm3bZ8x7nOrGOIL4u$agYO*B+_%@=LQh$3bSc7!PI+a|>|(^8*};K5g&mu|3pS zt$ux<0cIJ-q~rZ;Uw2MA!c)Un^l6cK#@hDdk`Hsx=7G*E`I7s}5nG`?2UOp_i9HyZ zN2{3+A$!ReYS;qr+vBSwmtOL`qgCkBa<(3}+PB3b|qDyP-`LWx(X{gp{})J9cc^q&Nz{ zFifvWlO{`Ho%c#M$+{czXl#Y0quL{?q6ITNiw1xB@c41hpHLO7oX#xqJ^F)6V;Fff zLNE2ysTXeTZT9Tx_v6*R#12CN%y01paKhQ$3 zndc=xd(-ds^WtKCtJFskwdd5V{KSBv)~8DQ%Vg(8?1z>M9!&nZ`v-l|+nsfBFFV5_ 
z)wg0;IoDHtKpy?Zd}fH+F5;>|PteD)0*kOs??%G()w6=rlLxi1lk)b+SJi9wBIn#HQ;vK`1D=b01(ID zEhki4*&-yWmZwsFE>d0zxq=VOV5`Gdt<+Xb!I*v9!){YkQ}Y^S##Kk6WF=DhQKL3= znvfO)B8RSzT?UR25o`dUOmatcz@Np9!utmXgi}MWd_J?yukGRSr|h_a|to@Tyxdgx8W_($`afl2u{lv%m*w_*}jT2 zc3CagEcBJ)7spu15!^F5@L)IiT^D`QxDw=w58KaB7^g5#)OV2~&+}I?1J2mItvM>% zwZrbGFJEqn|E)dpSh4FLtnyvh4P!4dWw^PC9>=L~mU22vzkV%P{^#dJddkyEL)Bh0 zX;qU8ux8j0J8EG+NC6?Cq4r33I^={I52jyDaU9m>F=xJwl^1RX1nGBOr-ADVQ534o z4C0fIcEdTF0T10dIEza5Z|U061$5K30`G$4fM=pBZe)`2)UI9}~{B)0?!#OqQ$x3ojX?}k`Y?%FlVcLtcR9m=n24Kl-MEnp- z%r&GRU=T=ByMDV9|4cI;riW9D1qHX)VCOakzbQK1VWM`;x}4Y{6+#N&p!>s9Nvzg1 zB$16jvt%sqJf9Pmgs8EwUNPhZutHk`+xndqJ(V4ATiv~UZB+|L@40iaw@8|Yjg#c* zP0R`UJykK$V^dj}eZmxr)9V)JbWE`fqima6Fjr;B>j$aFr6>m~s!^jx38MIr_3POU zyv1=U<+ZHS^Y#T{+*1w&lOYLvmz#X|@r50;7mGN%W+uIcm@I z8R?f%*A%KfY+(9r4NA#5=S+jAp$?b>iXa#p8H$#i9QTve|go_xHnYa2Au zC)L@L+;+9BHm5cNHK?LP<%h$V=d^ERAw zYtQ52)3Wzq_FSV@t(m}za%fgW zU@OTT9dc}W2Hlz9)u>+mpFUIoy8)UG!99D8GFq)cgDq>;tf7nddH7fD0h7-kSWT8e z9&>lv31%!47?ECeILk|)W*A-euCPGoyRsOIPk-uJ*Rc^G;0Mv=N+|60zVJTc>%vdG z&Xz43UQVIQw+!RWlwqs%GWG+kZ;wKo=jDH@`E|qK*@lOF8;0hqw+0#Ya!!7l&*|aF zW5r;Vy2H4>Vz7`zc9Wi!n0VBk#NV-1Zam$xA3<~IT|U4Hnm-^is$GEK2)r>b!3^LVL|*jksVy>@45V$pGs)UXY_03cxDC#7ni}Ej|R&n zTx@ALHg}I&IYvC%-e9#O-hI0@vVT&}R=gO;Mm(z)kWZ1e|6t@IYcRRwr3Gn^a%nSqU=5sJ zQC@g`UH!YyQ^qX)QlX^`ctT-!fYFmXW&0uq`k!y}KabIuT=?>IP3PVQEhn|U`nKjA zyBPb}FckVRGyv{Y=)ITyT9`cX6s=aA0EsLSB4u|hiXQODwL=ax?00lVV>EmB>UGa^ zo3eOu(TZ9p7!=j0S@SifZiW-jEIImf0*j}p@w#R)nVnF7*k5-Gs9lrTut6rc7m>ym zuH1y(#v&s#^79{1P9>#>9CKGM@4-m!QAZsw!%OVt;xrh}HvE^(#)I)z?eI0!>;v`J zZ5Z1wJG9=scyWMN`Z~GLa}&FRJOUEH-2l7CJDpvYPOu>&pZWfxxkJpTfn1)Q#~;@W zN#%F^(??awI43UgQgYEllBa!NkK=U@-__e8tLc|_g^tw5^M_foe}$)W3!pQf+P*vV ztbo<4-HC*fm_6c8sA(C-fYHJ!_V%%FGt$}lXorhoJ5()4M@2>HC8TvzOW|I-VZp}( zJvm{?CyQ%ajRz6cuO>{S?13S&KRQ}3Kh|!DK(c-(GS>FO-K{cR^IUx2&x}vsxT3j3 zl=)v$HU0Ro{EtyiV%hzf@zi}*Bl~_bkG!jKfv^45B`69;XhB+}(-eplV(Bc`hMd z0^JP0f2vby&uT*hqPj>~Mu=%+Wj%a&{*KvaVo9+S@ecfJy60oM8@S147~1565O3bT 
z-OweJHe`?Gmdtb2Qas`9ibxawQhx?8K;X%)rbIm4v`xBpA=7y z+{Osw!6^@Yc2HZGOrvKEi)OGOX@EQ$*d6M)lx0*<5U_Wn#g^)iIoCV>ECpD&>sYDT z(aD~m^lPy^7a*WT#c01-9FkX{54ND)dL`@nyuTp@5Y5by3F-Hf`}#UN?2C6x&raL;p<_p7h@4$g62Es*357veG$-4QD=-%L`6~j zjF^GChF$H*Uxe0`zxFhLxskPRN?Rix&2@tLodem-eeP`U^Z)tzJEcdGk(;zw|KKV+W@&@{R6a zMRiebh`Fbp6;nHhqrcN(m)Hng#mokrhFVbg-9v`d@U$F}WAd`xX++MzYS$Be-Z@!0 zmiIpMV{-JHSm)G4jBa8%W(IJbCCX79w$N^P`FK~?>{rZ0Zqs|se{&?|{`Ki!zJA^P zQeryl*OJQGWUX%7XN%o3L$p5U_jlQ$`#W-9mo;nG0%{$(jUcDrm)mx}ultZ!O?j5b=VvmA!}^^gY7)rKyN`bXThLyg^UMg^ci@P3$^nQ0+Cw4_}L zguj1%U*b7%uUtJUol(3~2ZpsGOyk~JSXi_NGvk*bm>=pEiu`f&U)>l;8RhLWt4%Hh zrG1RYUz6R5Yf|TB13U~E=#=WUpSEmj=uO6DU}CalkNtZ*`4h~93MYI~{8nQAtIrQ8 zUYz?bbQv(ov55v5*A0BCos1R(TRjj+Tt010xEImJS_~ znN>pb04M7GZXK^Qcrc3!^Z=!xL6QzF4e*YR zeo#bCw?Cif^BIzmCeQ!y=;5{m73GhlKE3gFSFev5J>pbiJqw?%A-az7uT>iL6fSk^ zqG|c`&Q^lYJX-SkTHh}%O+0scVpn9(gBqzU^$v>-mLg~_D}x!1rPW0C}T;+ z5HB`@+eU%6Ja%HsmKXpqeCyMvt5WnQ=kEG%AeP&%Pk;N+(2C;pa`N)5#1wE#uK~R? zOaCkNt4Z}Pk?q%fS>*XfB`@l>2QE>M6erO0o!>l7Z z#0IunbomJ|3(;zm-uLg{i{q$d&gzu_HT&dLS1zCK^Zw)@I=@YN6w+@%R|@C|juuX* zNTrk6QW1Gvug zMm%&Rg`qt{CSm90l~{>Hbw2ZL%9%Am0|s6mv$0+L@l)NVmZ8Z$F|M65;J0`vN#x@N z({V_dClE8BMmisO^3MmFj3$wK?CR^Acc*}iLpAL)VPTslTpmT)GYWL)7TK12sxzK= z3Eop$ULFC-OHKvxot;h_^EjXEvb2K%JpI6DNw_H~DKHAZ(7{*sm_RRG{}*R(9+z|e zzkPqQj%AoJAxp-LqR3K~N|uJPoY}IKlq^X|sZ_`o-!Y6y!%2}fWQ|fOEtY1Cq!5vo zvj~N>jU_wX&)1px{;unKJg)n3KkoC7S*Y{;oS)D8{aTLWcpdNfcNjCZ?$oIl3kn8d zOU&9q$88S?-m<7iJH&P1cfIWGdd-G7qyIA{t%bQA5;?NoR>XVziw00k}-&^ z{fG}hY$cIvsI&~8*Vb2z8c=gYGuHPs%x&L9Hmyl!HidfM*74Dy&yx<5mB;& zbwSNa+JRWGK)VAY?y~ScDA*EKA&WEu*iaoH1>Da4nHbn)?Z+lxeRUH^&X-U81cWAt zCxLOQ3VMekzfnfe0o^Z0tZ2lp^*-;nOoZq>s-rH@csO8x!hA=(B;!lq12r!K$Gc6 zxE=3VHj1^UL{4d>C?un$sqNptzb3)ZX1lmhunF}T_=r~~*Lx?GfTFBVV};kyXVd)t zaHT^UT9N6@@esIOJwPUs?9$gh7QMcCRveMCV0)dNQ>37PdY|1oR?sQ_5fS zm%~Qd*v{!(T}5Hyfyp36fPGy^+@s&b>lo#!HSS^Aq!zH8HkwWS(6F#re-CXl!`S}DLoidMa_BP-s%q?ZlmgTyrFl9IDdj8A#McM z%cH1T^@?iFMor$1hI){WDl_Ij&1ek>PoDG3Eu9qIA@mV<(J4*q-`xD`y6=SS^K54G 
zZ(M>1LzP>;I(?A=kuq@iGp&=$Ie018dBr%6<2j0RdDMdaptw?s0hZpT&NR-9MObJd z^MNp_CdIF;t0YTk-03l{Y$`;XXO`+qDU-{05J2;2@{u~ zs-!2KxQJ^_%A^w-CY9=sosX98S$oiv-pyw0Sh`Q*Vh+$nXBw47nv}%EW0*)ZWgm9= zW6(kVPOd@TlNO|DFNEi^4Mutl;Tz5JIW$lq zkuf_L0Cm?;2z56y8clw&C6-cq9t5kxh zQGF$F&N^Wh*E@Oc=ycX(4D6Q{Us0>)22H%GHjK9V{Tm|_9Kd_41kNY2c^3_PlSA3? ziuln3V36ylX&!vpQ+GP!5i$Dry|3u=qf6$rPc zBA7o4J6@U7a{k!`w@0#U&d<}dAuNpQ(`NEzMOWwZ=g&#bX_k+&raKpOaM5kmN?pCu zdN#e4}*aFChBv)w1OugZ|h?cP3pED4;q5 z>Qs2#*TKy!er7jHO6Td7fjH+UfTu^{-*AAFb2D;s;-P7*pVQ(d)O$BLY44W$y8Y() z*EjNsdJ`0Enr2sW<9ghn*LjWG87-pOmN@NKSumAZznfGgCC{FnfN4w5zZJMB#Kd6| zU+7&gHMY?CX*x}NV7_VCt(#QuY+8%vaWUR;VaQY?dN>{vBJ*8$-!b-fhu#6~2s=nJ zcc;tOO_?Z1<}BnK-u7m5H%h07L&XfxnYzqrx4voXJJ{@K+(hp zU#Fz4;OHYgH{~v$adO?|o?d-lzJ7gII5NtB1%ldBg9r;&G0i$uq9QUIBGZ$|H6t%l zau)P$Gs_;kl1ZaSZ^Qv)uYNokoGfHhw5Hk`mge6_r_a#aGr_r$F#Xw3YV)GBWZ%bo1H0^C_I~ zdXRM2-O$i*LT1&`hzL3E%`ELHvCMatO>vqW^-ZFaH5+$GQG6aeTlIMVvWHDlg5H5_ z%om<1f@KyWTPkt3N$=o?!2+Nk0AsijtmrzNy%!KEE1mFXWz}@csZpJl|16syTVZWC z1G|0Q@4nDHlzULOw+IpsQ@5vXE`#HcTntgyqhG)64igpIbk~)uu{J$OLY}w7l*#=l z`qF1O+k#9-&q!+^pakT?9(^6J?h%6?QwQ%h3iEk*Ee=PFLogr)jbwvqh)S`V5V2zml9x_F@LkC&wlJ2pN`Gc+G zUv&RcwzF4PVd`sg>;urv$1^J}-$epH91~GI?vYN5B~*n{q);$Pzm1xSYC?-1gl4x{O!peeP8lJtQ{tyF4h zAt^l~SH61z|2rl4k)Df@?!nP=h%W=Q$I#wp`rn8(X{QZw$16z{eiZT|q>+A`)3Y?| zf}1%V;vS=gk#_SH4rzHFh4jM?_&^AgB*qXao10nuB`ht#?cpOwx}QC}wWzRAXbg=< z^a^m9^4arP%;sBpEZ|it0Qg6Qc@|9aCmyzD}mhkyK4D7yV(@6-t;hluY3CQjV}7du>d#PRO7NLlp0*ljThK z_|YYZKUfTjxk9Vtf$l5Ic(NU7%~PPHi5!CzTNwveBK!b_01ars%j@Pj%szCgVC}RR zp+&oTxg=$Myy1NQP0ArY9fm@<_s>qo)9i__tk1hA!}KTB z3zm7sx`LG=V?K+zyF+Y=-v_x@TzEF^Toe_OQP-{`DWvb>y6b3tTqk^4^48zXk}lu% zGPwlJ^`Vl)^w^2$*PXZc9Y>4Sd#+J68 zy1BOlGqDRHw=e@e1xRMu$8d`k{Z=-(hoOqfFKNh!intD)Ub2v~`VFz2Mx-9Eo`UuA zxP5~xuMAr%r*9+tP*E#LKPfmpfu(qs`M`;djGomk0C<8lXe(m3D@#=!L3i|&J}qlW zEx_;s0D95;oIj+qoQBXm#CY;ZBtX+~=o7*XtgK|kIR-|@v35fZKtClhoUxyhT18P} z>=GoH49GqLDv&W@%LL)1BJ)D5lA2@Wd&uGtc!<0zMCS;_0i5{mS#@P)PZb3BSvQl; zGm#?4%%5>eM@Z 
zvj!a^A&3SbV`?pFrkr(m7hVylxwA`<^rKW|78aIXE@q#DpPZehudly%K}X(xlr}&p zO@E$0fBunNitv2rlyB|-Xr>rV1I?NI^UuE^&Dk0KSwL=sklr)z?TZOq`SPECZX!61 zMkgdG3mFuxd^U-{&t-eb7*t+mS4dAXd+tzy$oe8_hPv{r;P}S z#b4Ya(YraGPmMik@_k;)`@Z9nd`h)M4N8s4^Z?CF`g(O~)S}}tMg>Qr7kN*eh|koK z9nJw|$(pgqh^1WI#euyBS95Zv3L|wa9gF5>&70o=0Hw_84r&-S$GmAzZSz+T24>0I z>RD+$_FT`(>aEGwF2}4vFM|XR;CajkL=+4ijQG?HKx0{7qW+^_ zf5wOi+otsI66<@QGZRDtDd*U;xBQ+>={fDRwSZ0ZVkOnpy}!s^1j6K;UDDqP3?Mbb zJIgPDjF(rOt*4~_NBCRxGJ;2VG(`PURljL77t>WdwWBY|{juS*C#312U$R)VQc0~L zk%+e^E*qf+z_hj^(GZJnx>rqTHwu_sj+r=v5?M3pCRnooIbhX>GmMSutz zr1)>;fih%UPVo@8YUX+8m`Bg5;|2%3y2&5(Xxnzjx{XbhL>+Ke&UFbBe3K9{CfUDq zu+&=!Fm7yYT(Xf`cUW}i&{3vo^D+KvQ9Ib$A8=g%vlX#7aEWm6AlB4F&kEur=nC3 zixwRjk{#06lA35*ahQ(c5N9$*%qFC`MK9h_vts%3d^_t|Z8|Hu&Jc3k0cj~oJuaF# z<Kb(F-DAK{GhH*bd)qk`Cv(9@ugvD18)%cZ?W(ShjuO~qb4SZb;5{J-NHEv) zXgP33bY4#tGlar2O7YR3=71sZS(Z+^0c?|9bXXqy?Tufy`dE9eH6GX}1Q+B%^}4x7 z2`u~#G!o8Rpe5Rs(bW52Bu+fyf8iw}%}KDm^)!-K`mOoAvZwR8X#k7rs85UTaWu+c z?4q907*GrE`S!c?@7P^U2hTfKPYK*Dr_B-EayXa^g=EoqY~1Q)p_FTNqlKbahH-!6y6`K7r^%Bx+5I!O%sjut6=*CpEYcsI(n z1gtRzHfTr39stku>aOEr`sp#EpF&`PVs0wDM3GNs&l|yC$-DW&D#ycNUB{dS-IeJ7 zBCd__WC0efdQ&7UZKo9#rtMtJRt;^qsj1wI^MbnxXRi{p8Ph9r!{g+ZV-_7aosn0c z@u42Km$R`o-czP*m)g^{R=7tl?LBKjzilh8a721AWj;58k)bGs64=A3wB3n{n>KAy zZ?73rC=EF4KkM#yiN?6*9GAt`_%@FL2Y4|eE%qUqhz;hs?NF}j}6Fs{SSPZ3-3?cr| zSssIuu(E2u#T^he;NfrGd|y4CsLEY!T7OeAamI+u!gO|)?*-m1;Sq8480>HEE5sLP zD^G>ym_4zceGATuP>Jk_$TB4B6LO--P%4|b($uTep(1yc$6UZ^O!&g~J(Ymx&Jhtl zkgONxM(q{cmCoksZ@*DPuAq5={`KvBn1)9dH8uB9s#Ysg{o-lzHH?0tCzE!P&Swm> zU3OApaS5`-C#QfOoUgg-h`8}q|I9%hT<)A*JoEg!$Gg!P^-O(beeVmJm#aH~h<_M{ z6NAv}DoSd+Bx6`5N5xb0_B-IRt3^dK$TjaF7i;od{y08tebcZ4=p=Jwl8_nT;zsFt z)@vWl?xuL{6Q+>d$Hz_#{g5&bqyM|`Gk&x*?7B$|8|Mm)N=n*&&|I+yL-y#*Sw~;% z58OUDaiWPVm+&p`SUolOYd_ng+sbuHVcNC^H>~5xC^rQM+o4LtW6jhd6dmxXy{-;a zN>YlG@#66ab#L0udbXUGTF`VkfcZ@k9iv>1MEZRrxKZ9XQJ{j6l2gZ1e$`RNv_;7P z_tMD-4Z`O|V=Ed(Ma9tFz~Jw@-9Vp3U4Y3DG|h0gbFCGlQ0aom#Uk3v4!QUM8WjNH z4#TA@7o=Hu1!NU-a;WD* 
zYSwKAlp=SNep15qRHH}Vy!g2W3xC7>c}w1)i=tNXjk++rlZBTxpBhgEh1|yD9I@c} zKdltqEmGqF5iu;0gh)M{^A9y$(>NZ4d+jPUhqT#9e~vLeiqP!%w#oHhQ|?-CZQH8V zK?!+~LP-Vpdq=t^Mmc|O=M~Du3;oVb;G1jdGLBK!)PTH|L#o}PvJz?utTU1w4lnO| zWbiFvu?3hSy#vzT5|44MH-Z}~m(!#&Lx1Q<0oQ%14NqVaDAtmq zi`ZI7EiKdnuo~cvN@VdeCCnZyT#M$){v|pcIX?`iHj2slU?bX!v)*O{foIVo=HmAt z_KXan%)}en+taJKQM(@IAwMWa!X=8ni;3JZY9XKHKppuS1Y3{HZH#=C)3y{`Sd~Ip z1iPsHIBIiyLk@XVWfRNx)(lWo*%D@C>7?mNMFL}ybVNpW7n$oL;sEgiurFJ5C5Y`s zSI0DTgk*dSNn2#LEZj(J!Y*a9J6%`g7a0*XR(Eo{`@>yZaC$*J8~rdu}ziqKZq+*d5X+}fqRc9eTT$# zaM`|ZA51N4=s4ZBWsmM#dYfEp89o}-!+Koog~dsH)IxNvGNd7Q&2cxy=ra;PF2)j3 z93;Ah>+hpK|^RBT+I6K`Py=uBW^xC*b8`$#sx;^9(q|9yLM9!-sP|G~YMzX@bcZ z+2ta!kzhRxOmyJ1SzP?Ez1uggQxq z`3&eHn_+Y~iG>?{v150d5wx0+QDOF&g|oow9u1V>(}jtIr{mPPpn#z`4Ie;zaSV1q z{rT))ljzsvZ1TY%KBCc~)ih+XCjaWywdF2ZFbgcu4|K|i8J0|amC3Oa2cQRzgV)Js z5jQc?pj98^Epd>fW0K7}d3j z6Jh0|Oe9v!w~hiDpBGTcv`}SyhX;=zcjerITg& zEhVU|swxW5SvxEATt``N&(+Ed1%Yoo_clk``PH8Sklp1C(NN7;SsHE`Or^c)Owg*~ zU^QlYFTPJ({C}yYmCh=(8mpoTn0q&d5LkS^CiGrFc~5Y(V~n_&rIqC6>K(#8x5MV9 zC63_#9<_FxW#sb4i(Tj6zJD)5cwwRfUOkQm#*jXrtw(D zPY%Mi7!Mp+08+1Xyn@h2N*pORk)N}cM#x|xQNE;NeyX2#UjS70px~vI3%juc$*ccL zP?AN96I?51?=cCU_lUzQADj7 z(!Hc?}O{v#xe@R{8ki#Ng;TID4%j**!r;^B8NstM8GA?>CRnLoXjC-!YI0~Sj9tS zJ~2M?#NgtwJhml*PNS6D7w!J8b-Q-QNwrcWt0T~ePZ9(xDRkeys+&1m8F zXEwpNx!|QDZXk2shBTGV6X|>A|9h<(Q`yZ()pF23qD}GIOmcTemz3+V&Aac9hZ1#k z8?t*L-U@wbs{3PayEXl2w|T)Jq@WzM2$ zps=|FB+Hp{O5e} zz};yS0sT3avz-fC_^OMwtH16vXqrq_0uD|&*dVL0zV+;f)z#If2dDI8g0QQmiS+2N zQ5o{D_4kOr{W)T|GB0L0=X!`_m=p@rZax^*OYX@k9mOk{N=SNbqAPO10JNDr62(iO zN7h1rKPy!6>L+-iVq_}?qsPn#IH0FzWmP9v0;wNE50x2va_pFg^R8JxfAvbMR;AUj z>eX*nV?HY`(?Z*EYg|oI@1fE<2!vb&%ph`P@Gixt&*FQ7Hq-`_=?8vX$~S)ayL_b5 z6ywETKmqE;E+Z%?(l1{&)V#)l=O)tVAIjETu3XdR9N(F-BnZAkkc_OAsv=_+Uy& z6#4?0c_S&7LDis?+-6`*+al(atiX;R8U=RPzJJ(KyOsgU`l(2S&^GuKUBp3{S6(w( zhV{-`2;RQ;(hphg=06^F_0>_#@!w^4t^g-a_P{#pI7EQsfoeL;92$>>D|)Ix269l6 z3Cu37wJZw3pVj(_6Z!e8Q?20gXxRX+q!i!9DWpNB@6q1nFqhW&Xamo&^rMjeC+T+K 
zwB=&L^=6^yT<@Hw-k%-6Vv6&s%8{qv7%I9!xLXRLPE(%4eUH*4P-rI{-X+{MS$A%J zmB@}cY6X3)a7{punyMrnH)xeDv}OlE-?SniA;l$FCJ+@N{snXvJcu14D~OlZcP=wb zl@Y(XI^_4dALd;-aY-}`}k$EN0)v1Xsl+)~-`!g*?d7qMWCImqv-Y<{gnB}^cTP%@Yx-&EiRGs1EPGse>KN@22}L?UFU)QBBF zwFz9=q0cPqD9sTJGr(GeF z|E%VDnb4~qD{eixoAaTq{fQ|mQJ-dn_H=(TSNr+Oh;6^earF1*bfYcX0a*YauTQ=& zf@U^YsDVe`=|ADamHc*tmYoKVG1RVv(X1?wvUsa9Irtw15E&nhm`5*rDZasr@vt94HPfKjde2z?ypP^BmVrWxRIbj#!U39_diJ3dIH2Oj5+@tsT zS85p$LRHViT6bgPt9B(9%A->HQCXlv8hpiD&XNN1P^me=xjVr0s8Nkyh;$*z(-A^k z;3Bdcs$>6I-L;<_crbIC{e(k0fj=IB@U#ImVN3S&e)#vGH{XNX&^vUaX4fAaa7U+! zj$#>OG6u*DNP`k#`Siz@WCsd9b(C)788c?+xBgD?3K5ai$EEmRWjwg3eU`RoOyg)V zLe=UYRm=C~?ET@;Om<j&SoZ!abH z0(hl?OCAT&P={d59C?Af+NjY$iyv4fT2WOt4=z$h1dwjb{DdW#p^E1a<7UEFEI#ri zef`6ZGe$R7N;ru?B{e_bz|obTuAepe{o)e>%LDCF`#*KU7Y6*FI$_@IaW=*kNCy;) zJThcFPjJJKLdXfPL)!P?*>IM_5Fv7bBLh$kEh$>ce}SWd+=3`AMQ*GFoBsCb3T?1@bFgyqu>%EXkgF(_DrD}Nze9hhy>>3V(JUqopF zJ+{R_Vx34c5u_w^{2)OeS;<6n#6hM4L(tnkeu`sGxZXQ@c0Z<`RN0_Mkz{n|+w*WU zs`F}bhOeQTaLEngra81D->?!PsP`>@-0uAA0{0uie9bL}Lg)u}xCNrfQBRXYT?NsT z`Cl-caQtXYG#g-jMZ1OGXVLnq6;(+ah7ny)M@nlVflF9JuqbxM^!$W)lOFa%vCj}` zv|V5|wTZ4h`W~kJv$7^oc6j^4?*}f539=0Qv6}2O1)B{~oyas5lefYtQ$|jqP^YVW z#!{kJ6J?&k)r9*~_#06iS@a!(6t2lCl$8mOMA&9%<5U~D!?dgYhuN#)^pn{?o)$## z)EZ2&Y}8^8la$zJCWVBqvv@iz|35^a{)gU@p9kVZdnCdB{?DGbk8I{d&J#fTy7QfR zeoLC6aUllQ(daaewB=r7y38`&900$J$*;mQcGo{3{()oZ1p|3j2}O`!gN-bvv`4gXa63P?XX`(SSDDZQV{UrzZJY)Kp z7=M&H??tLUp<+9OUh;eXR1T>8YHB!&lmn=Ia>lDYLYMja`hMn=2c_gM;C(|C)!-Bm z?09gGA|QJaaZ9&K#>&*&_K3yb)cSj*RJ$I?j-2FJmT{0!i5+^I5Q*0PJA} zFh53QM)efzn6zZ#MmizSVeQ7O>}p_8l9xdFOR?k#>>+)#_AX+GUA=2g0<%FaTG(+$ z*S@raBHQUm+svvUdWzT`o~MVQ;bs4O$R_sp19PLzn+l01N*T;@+RSxQtb3t*aG?8_ zuF~&&1R{;Q*7G5$S_&aU#z!;R?vV`H3RI!Yy&QYeX>ZED!fT&-NQvKSS3keqp3#eS z>(=>vI-Gg?wnUd(z^BOHP4$PYoW`p1@tUMqkEfXyJ%3Nm#le0iYe~#ay=_mzx(1$? 
zA!c#upe3suSjF>TXCU3J7}$%IK2+PEe9LpG^A4$=S^ItmMTkLJhVvvKVte8eV^HE` z>8gpwy=2!wzY_)$AGtbcKE`llq?d=Lelf(ofsqNV(_wJIr%>BcwoAEv$)ytnH{=mw zc$ma9!zBn;eDM$X_k%sKtAI~n3Q&epq_ILKXd{*iQJS-q6C!tqMdZ##(f%swBZ;tL z6mb1yg`kM|wTtL{l3C|!${PTQm<3ZVr=2^ zIKgzJD9ccvIdUtr*}?~;k@*qlh#iP19qTZUR)#iD31)Xd{sbU)8&&_sNv=dLN_>Y{ zuE)SJC6>Jye2Q+_dPcjZFHS0LqY}Hs z3he{}Em&mb+uwU%xC!ZR?0YDk(`Iin#E%4Z$SYh~@9=9c`ipQX@+PpvsfvIK{~m2FrL`Y2F$T6(QK-aVFBzkWd^E-S#azsAN-MIEh7%&{!-l0eikU z$B6~g?g7FrO-b?haJxeGy4+k8;nM*tL?+aK=BGyw9~!8b2d$e{qvTgjB1P@p)8ODrVn>(~?LaNIqfo7|$HB@Ey`wYkmDw(z}8t#8uARZ+a+&$hixf zB?4gSwX@7;ZxRW18E+X^6h zkEN1o37mS6%9>{Ua{fdmb;Sqie?e$5 zfq}ege58R=B2(BTzX(pmdRAc+I0HS@aJJ5ES?mmq330DpG-g}v?Hu#1{COk~YM2c7 zBBB@LEhW%k0rlt?DebC}3;st+dp%*`IMO08qw8+lE&n$Stx>k1VQjx2x z{DRbVl_OsQLYJj(2l5~EBE5^4RuH#*8FF>1T+m#%HTg7~NKwNo>+*FbC&p>9 z9_F%X45dxu$uX1qznM35TJzDH2Ii+UJ?ZR)b86Pvlr<0~HS7z9NTReOpYKtAPGpgY zAe^W#94=G{EkeM9^=*ZIk7OGN z=SWG_gQhJP$VxC?3Sy}+lB!lPpvMy(4=I9D{ghzQ(rbn%qE;lAy+3dW1vDcjn$P<_)S>xF?>~43iTv`C52;PL{1d&b7zGMH zL=k#zNv+I20NdpDJX?4WQyPoTG!&_=EAr(dX+=d4C(suX=S5HYHtkZoGU_PtD1h&S z@5Rw|K}|>koU(Xi&g+~t7Y`je)cU*cwms4db&WBUZ#t_n`BfdUGCFqqe?_rMEI@PW zxI^q^af5q>4zP?blKZ7vLLWpOdqRpX_m5QPe2j4)=4_Ay0?snA!e0m0Mk6m4rEY>}(Fu$vLX5V4 z(4>R%un90kI@4Ak?S^MdJZE12fB``yd zhAc0veI+m`TY@g{+QM&m#e#i|Nsb+M1(@tdSgLN_8yyghGq6q1V|miKQ2i7i@nezv?_;x`WG)6Il(~Q2zduu} z8W8rtN16sDJZ}ZXy_h;()$5{?t-?7xr|40ouYX+QCc$qJsNOs{OGTgbQ>l$#S{Zwj63!jH-e@2 z)m_2JY5O576NO1^=dx%-MTOI<-AOzi$!DbxA5QvaqLh$nN5OB`Gk{&TbyO!Al`tEu z`k1vJ5haKX*Hhv+y zGF^gQ!}tT>;ksYR9M=nJo#@t#sa0z~ZhKr+<>_eG!`QfN*9Do66iyI51+$k*`d=g; z@#%=4e`^?uLNDztRqX#iN(KdA@TU;B4U)7L!L}@~N~yjJi0*2&l)8Q)=Or$rZGsc= zc)}S#GS(%UI!dMABYI3Q$OR!m2E@^2j%%MZR-9t+c9VftAB@syX9VeB5|r=H%QihG zw0Yo zYE({DrpLFklDR-$fx|YrUL(KqdAGrjrtk&CZE$&&oD@f+;v(8V3>2P9G%M7j+>Yp> z86&4+qOvhd8?B}!?S%q@R=@idZ)6CFkB7&7xdTQWZ7i(l%{Ef`s3yTjzs$La)LUq1 z%Be+J)0DtL1)y-J5_?|tU*r&qDzkORj(a3hxwe;8YWX3N2^aMX~p78;{MGKjoJT*~R2BFDO^ z)pL5R?BG)J=uww1Vrqs>!=hwbQQAG!oOrc{aQ4)K3i>>EN>Zj1e){u*F)?kjkGH?H 
zM9?vJ;JAoVVoyufC^^A5{?HHVUL}ecV_&(Wit=#{t#K~!Sq@{8u+?xlBLpXbQPaZ0 z4I3^1^yQXhe^J&7yTxWkZG*e>dGVyu$GcPvF`QSwbA_SC3XS|Q7;oW4_S}k!b%4#A z&JoGai=oZq-HRnFij+oVTtcGKS-bBbB5{#27bq$&D1?5X>>`fiv&OIdGMuqK6Uu}8 zJXExX0tX>!jO+M;b02I)T`qWl_}5Xqo}JN*j+Ad)XFf2b_Tv&xoVc=9I3V4|+8UCi zepVk8NQ%{jYrtMWaa$pom?U73&11#FJZc7RIcquNIVyV;l}~pH0ME-d-RZnXA@_*# zYlG99uc^;~zkqa6LSUmbD`0+rqZ| zH+8N`EtNO;u4Na}0jnSk^2q?B>XFM)#!RBQ5VINC_2jlCN9HVLd`B=D>8{Z)$Gd9_ zK%urvO3zqd9vU4N-VjuYPE@Rt&md&bBn4g6I1FAX?s0;0kcXthAlOWr#epjDpgDCq z6B-ltHJ>bNhChl*x=R%*9|Rz4&a4J^frio(q)AZ3)7v=5Snd)1#Pyp0h(4tKK;L6b zl7x%@6V~}K(f;)90%~#|VOJH^)x`tejF&`PJ}EEnp+e%Gd?1EPFpy1bv1ZL0!9DY? zGLfU(zI}Iuw83cm?0NU9u_HbhOlB5NL z8LeA%;U%9i*Go1RB@=2wQ5b3G6;BEF&)doTp+m2ktK*6Mkzh^gzb~xeX9M*MAydiO zBvbtBcE7mu1~>MbkQPu45OL8-t10VMwYwkC-gtE(oDtQ^WI|dCUr|QyDQ&)`d;z!< zqz~w1<;G>at#ynKK$A+%pP_1!8G|P$?sFBQ&C!%IEHp29AA<^{eWPPmeTZ)fjmPXeMd&AWSni_z=4xB za2g(ki4(COImoG7hiEsh>K#JeRZdB8_0DU)-07OdMEAAl!79C8&Ow7@9?x|wWJ<08 z839kOl$u(k^B_txXX=r8ub#4}9Gpw* z3wHKDzHlG{Ar?r%+6YN`?mMFld)M4_F<+(Ttnh^&LsWvZR9Q z&54ttt_#6UBp0hJDu^3?_eFR|R%JFE$BQM}+MPkjN5NLUSM-;TR{!P0U5{PVO93AymcXAk`-<;j*Q=vtBP=I7I3Dh8_j z37Z=P4nvsMMI9lN4d80I9@vXda-5yQX-NT<@wsNLlMRo9m57Y9vW8?{u}4qY(pDy+ zz{j+PVsA?$&En?OAD@pp z|Ic5|EsD&n zz-*0Oud4(u;R1fbA2WlEEqhrQ3Oq20f zK2SQz?k=6M$fPg|`OCl8Bb_@zIXj|;?D*l)PdSkquFj&KKzQq5>LCMb*zof-T_N=v zx9E6!)mBl5)KT@xu?>1D;a&@Hkpe3$snFwBtz)SEyNai_fZ+){KV>b+4*BcN&}cr` zVv~@))}C{(We*`f$x!B^I1D(jnJTzo%e>`~E8bbSg=zap(Z9+|*MlLDyW&6(vx&K7 zdY3=uGZG>b->0q<5=7xHo^i6Lh%KGLEpQf*i8*rgZSut4w-goD-YAx=w~Fbu<~bHD zQAwUye|~&1MpG`Ap18)HsYyUAFmnj4iqO`|mZhS$WJOayykA*!ADNCvbGab%$I=85 z9Z;{xG_61+)HhogusgN+;>C+11>^pf$LBgvrll2IVm9~rk1sL@u6%o1I{yFlf;i+< zrjOMEQY>I7PUJu`uP?}vbfj9A-(i3|?i`lENsPT}oQu$|csngqtb-*eOYmngzI%3& zJE@UJpm<&b%raCV_~5%uiF&^bgA*YOsiEJQC?Y+M~G88UchF-A6vr zi8BGjaYO{bG61(?xOh^sRF?(2^qO`$)qf?8yH!^U`6RIJC3U9_fLn5sh--e=lvPRh zmpu}uuzT08BWbq%=8okDB}xR-O}JB%#ZnngXp|W%cIBYQ<3Rs%^~HebeR%h7rCW>p~I9@xu(wJl$0jvyW~*SWON 
zdjo(dlayY}2p$9B7E8)I9MIA6inySl#4x~qJKo)1L;H5nmtjv>+)VhjY<{Gf7 zW?vDKjQrQ=P1Od*{rcT8{gc1_d{H|Bx14s&N)R;|T7<4B{PF3Y5cdvSx^d@GN~j}R z8NQwTl`_JCt_MVja*nC2ZBX#^OEcL=f|PM(W1rCd&$s%_x0pf_GWoEdf>TW$UwJx+ zA8jSOai{4>5b!Mf`)h`TH3yPl94zZ~0hYPZD?Tlxe-$%JabII9fQ}9AQ9%!>KWrCC z%g9&Ay>`u$8K}x-RBb|FM3r?w?M1w|5ete(dEo-)XvfwA>*4!x%*6f?LJJV=yR=s? z`$w6MM8tLeR$cWb(5!giBOuH4y+DYS%!j00COW8^Pf*k+l%r*rJuqd8Zp%aUl)!q7 zad1HmZ^WJ)v^PtIOVumdsS_*!^H!4Y1Ldd>G1-U4zf0D@;VdY?=TceCY{f^7oOE)4_=jdoJm2nXq%Q%tv`bQ>ij){-tT%6V|?5X{E++G#|;fK z6^Wuq8##p>;A(F~xWVYTN(W7Nd z3ry394Bch)Iee#HcuV?;*HkSb-A?@VZ zRm_)55evruzKndPjiO}!_hnnM-Ah~hckV5tjRN~Y9N9w;N*N;##dWxGVLt}?Jg(Fh zMq8QFrE_%Pj_S|6Ns_qx%7uZ0)-7xB@ZxXR<-M;Xrh6ojmW_p9&584 z9`9?~xfrN~X!@necZkogI=%Uxl0(kt0gA^%=-5u)7@KVV`9xB`47pPqbGV0j<^Zvu zK!Y{c;cow-qOTJ3tO1gly}rwQd-6O?m3Y`eP^a5Vw=(L>2#ry?y(( zc+{&92{GSwlh#UHm9&ai8MgreIA^^?00r~Nps*tK8E`ToB}N5z^q%u03)7uKmw(-M zE$kPGK7d=krNkuPlgN}ypR)K7G3Fm?w?^tKGnmrbhgZYtK~GG zQC~_3o`G1ivRStph)*e%Z7zJrUO5(~SO#M~Z5Y|x&yO9ZKrrTnON~F!kJeri4oP>` zK!{;ZXi}+9&Hpq3BxQ$zV0M5n*L+Vgeog%yLWf3%>A34VZjX_t7&x(Ip3#wP^>rO_ z7{&br8A&I+ZM)6zwAKb`cQy<63N=pApD9U~X2v7kb zTr?63}p+?39uXV0Fu<7HAmnEGV5Yb}B*?TG>%+X5~qa+cpw9+p5v z>Ig6dz=hpSAUaZw$!v+nT*n%Z0{to|HIdSDn8??Ox(g> z?=>Jjot`_AoISCH>gS)P2!0v2cPI|~IB6M`+5C_KGvV-hC=&`0^B^OSF3D}b>_l_K z)OS-EdCv~1)H>ury`^2fDkT*@jn*+Y(cB2{MRGd3w{O~fq)uYE@E!Nd0M_xRg!75C zKZ9k4UFHcn4YM~VJQI>BtFB(ZUb*W6BF%}=p<2MF;V#J$Mp00zcwC#wz_&VrgC51V z{(aV(&xh!Kx*cV&UT(<7x%?NAf@|wct6y+~8%$e2j3@8IstDBDrn7z4;WtWWP$d(Y zlHlbWlJjpL{_gO42EA3~Tj}doPUBM*wE>NWW`IvSktM(m4>U!LV>Dh$3ShNXVs9%x z#~`6t$4`p-@au*RkyP{vhcj2LT)AlGU(#{{I9M3;RXQ)08brov@J2ypoUk7y)&gcK z!|a}<&vFQr1_uxHil@{yKV6YXz3Ds)Z$53j%%ev1h&#t>Y&xuo2*C=c5iW@S6r98^ zD7}4=tA)UZ!C+?0SS4Ot;-erj8TMIBrx_Zq#|p5aAn`A@LT{5sdr-rqM>#*R2^}iZ z`Ki#Ji!@jM{yT5pO577%9lnvgwPk6$$X=y(hyi3WLmZyP%KkqLL@9IUojkXCp^nmR z8Ew8;OeQ5J&Swh&RZI$Ah0m{KEXHDg4s~S_+DOkX`ec;|L`31-&PA;0C2e@Wk~V{v z(JQeBRCQVHCU4U|F#?nTw!Czk?vDY-s6)`jZN2TTEyFK>H zESoy-7LxBB|JODVKzEvgSd^$Fbt^M1Nf&mgiyBDYT|q4nKAI8Q3nZQ+$aF-FFEd$3 
z%9%3DCc2%XV^vKerOMbi4anoJ2lW9T zoJ8xP{Z%4*i1uJH(Rwh3N6Y#$*@a)3!s90q^6gDFp2gcBm@=^}=1Ql@3l}a-xLmPj z-?k{-3l0Y)X`j9*(1F%kD)ssk5JKBL;lm7?>kphFBeF864@4sqhG9`|*B&$5#pxh- z$ZCRDEV{z~FFyDD*GfM|I6H>wE80;)cfgZ`-Sedfl0_z?O$bNT5iq^Rc&ZSxZlTeW zT_!>UUI7qpcL;@U7lf5AK4L})7RmrQqO&vT7r&IIC+cnfgM<`j=OUFT1=ya}L_FQT zy~I)@=>hn(h%^Zrao3Ae-j5PWDoUh?#0gnx*h=m$orde9aQ=%{BnK>8HXoX=;cEc7 z-1jWUI=1-LwZ2(TMFqnXE~%{aaL3CsnlB=rq=Np4zXEGdJaSP^#maarm0Gt8Qim5m z7C|tK#OUmhAK!=sjSf#vxw|`$=O}ww_8|qyPr|{mZ5ZK{CYM{2Z&$_>@zNC<)J()j z#9YCtr{jIDJ)n=qLb_1Vk{MAv$v~G3OOZPrL`WmtF_rpMItRj)&~LJ{W-NcWjKyeK z#R219C)p@AYMMIfQJiX?qnZ*?EQN%eZ$ZDi9TU@NN6)0oTPMoq*nJNuK%hQ?4<=r6 z)<}Lr4dDhVk~7)Nh4~e0fg15uMbe$Szmi{jkvVPAnR?+HsW7%P%7^fIciBf88$loP z=6&_~cFvf6_sPS2$LTtTjg+~ZYCHg#${x56vC$ySJCUl8MtJfT78aQo%h{h&PLW2m z?l;Kq?0*_5@z!%7J(vJ8Q^7)Eb+TSteteeo1Vq%~H6~_9IFh3W1vRLIjHUNVhnf!G zIh(ng5m;JR_g~Lo1|^orNysp+DJ4l(zN9}NRkX(TQ3fE!Ob^QJzJ+<@GVWsttsyQcPSLwSeomPwm(Zr!FAi?n(cP6h{ z=1%GmsH@2$M}jR7Q(jR?Gr%tVKLExd=6{ZXxQglH+r|vCjg5}J7iWc40`l2wu`_{q z4QAv!N*GDqd^K-i)qckYa8=pbP+ zakHFuLxvS%9SI9{G|%r$K+Qr#j`vks@wzaeT!7IY4*vig4243WUNoEWJHUasY_KfK z_J2QfFy_U?!TBIgKHjq*(8(#Oon*vuFTW>^(LJimvAHAv=yZyTJKxz&_d?$xkvKO$ zMYtb#Dw~C&P56A&++WtOms3RlJy?vO92lsOH`A~?AsRbln}P0RUBJ-ApFqEUDCJKjU=HL*8{7tD9#RXl~Pb4G+yC~29i z#6$rOMovW##xNeKA7~b*p>ZP;vp-&iN-wW1L(XoQKB_P z7QEe{r&FRW*hBA~H?^OntWOp53Nccb(hehF*()IG;)Ek30t60-3i0Vb&IL+)O-D*C z{})Guk6;|`36M9CVBEP2;P#s5BJ(Vpjy6J!3sl)qv2~3p%W@0Cpouuw)Zn`_pJGzZ z0ObfEE9u|`vqQ}wc1*%lvkUB~z%g%uSJT?JwE6z~-m2~l*M;VuMHVfC0Xdwr7FxE8 zd8;Cw}G8|4lB*#hu zA)rnYSqX+jCqbj;QwoY_nn|XQ{~{LOHLcnTsmSQ$*gfxtQud6K;jHbI`z8Uys-v#G zsM{l{@sbL`#H=a0!2MPo<^Z|L2tvRqDB#4xUbt$SmGpc{6>C%NHZLA2guAp{b@E^_ zh{*8wpdd_mL@_@vzZNZzmesQyV5D^3*05Qd`8>XS=Re3J`rO(fSDr_CLLe!rrZ$`& z!UH@(ub8~-`9Z>+%%x_@cGS*OSZ}?Hs!)zZVERA`*jLo!W7KkLDBV0QI;;8wq0!-W z%ce~M&-&NST=7PK#?+9DYd%^UN9g>ZqdWSWzc;?>K53ni(Yo+sXKYOZAGOt;xT(*8 zj*Z&{ZSK>rUBiaoju^fD$Ik2ib4M8vU~f}T_0>1o!$(_Q9Qw9&{IG(r4J?!XS$*pA zt|eLdEAD3{CC$6}q~bh+#7if16bgzFoH)&{7&#V0 
ze7@TKdVq#TFo@5t^Zbd;TjyAVXB=6|YDnJqgJq1HObLbAZ-(Otsjl zHt%PqR$^>ISraMK1sG13lEYn(6Gz&(?i_nc$LqUkFniFkQBXouzu3Y<=t+A4Rui!A zHS159QedZUl!|g%m=F==u@b@+1#?zJ?KIY0%3fyo3_kTP9?*O^3s=rUeTDfM$983A zNQci|EOY|fz{lT+c*qgfZSs1d8l|k=W+3N`kXl`*N+x9qC8;Ub_@M5*wJ4}6SH4~| zFbchj2rEcD8Jt&f!i;x)(fPk=!%;kzkyxIPnmgI2bW=oa!@_%<*^a-C*k-}kIXXyK zT&sPc)Gan@vB{tXYLg=8rofdNit%rctJ&JCP%`>U+WU2OvqWYi(e%50Ih?PjZw@Id z@I~P14uQinKktaeRY2gC%T+^}DHE$*v*zMiaM<>Ty07zY`=|R;wJ}&O2BdTP!RqJ8 zwRSue6v8rT+6+aRw%Kj9_NP-%RgoX${9BuqPJfYpZFh0Obyrl}#%}s|I34Yt# z@Rw)LpdV9-k-+7Ux*yqo3i-Xm3^2CTuYUl$9HYTO#^7phZtg=-2lL9X1Cz)xTNQ;{HLvK5Z1}IE|Wv<9O;4kCNK+7g;f)E{fvM3U|@S7?wS=K@OySd44UQA_oT> zfT-V<=Vh0E?V8N%h$}HSn8Xx0=F2tNj?9bW-By%r9d$+yk3(x378d5?@1Kxt0C@^~ zIkPw(BS@;C3I3b6VJHbOgIQUo_EAA=B}QauV+P;;M!O>mcGXPdYwXxx|55j#bXyFGZejZ%AARNmtpwDXPoN0A%0z*Y}!lGPnhf*+|J9 zWhZK1jW6+u|MS}LXs+LXcL{&2+qSM#EPRJ-O%7BP6^RV~+1O07v$Na!srjXw(0du- z!@j(ST^9*`)=A#fHbR_nijrf@hh0aEFZHoG(EZB~m3})LT0w9WCh|l}LUBk$9wSO8 zxE-W?CwsM2w)_E*C$@Ovio0Ofh=i;b36%@#Dq=vJ;UeDO$|FGXF`uy&=~~HOFVA@U z``ZBnyKI%sl0!eK1^QL4rXJI{Keq!9vlT_Y>m5V*m_82?-gBVu?9p2P8V?2#HN9w z`w+u}Tbx+h^-`ZZ$R0j%=y<4s0Q+KU*Zh(#O0uXSU}IvaEB44lm<6H^hTkAb*L~jU zb=z_pf*J_c!qCi%21O`juCDvlZ6%5?e@x^A zP;nXd9n9d8_@PT(i9>6)i4a`8tSyI;-XCn%GaW|jua4?9`Lm2M3Db;Zg~5ghp*&gB;UlHf}Qde&hIm1O4{ivMCDare;~~L%t2b=4pSCVVeci z?g_cSbWvuBG*>J}Mn%139i7pid2U%u>Sw-ksKH(ilGQ!&Zz%5ct?VF+* ze#*%ULXFUE&fRr{>UD-q>v~GX12MgaAiL^!0RSxF5h7OIyjoF9Mu^MuFu%I{u4HtP zG*hXqZu#C$_O&FnG4E%NuBV(%q<=Gx)P0d9$DbmgogPh$GoY88uQt^81$`YDqApG1Wk|2J^+Rw30z{<{894v|E7@Q zW!i_q{UZRgxBp3g@AgK9=vARa#Uq$RsO;?v(%UUtv22;}xZGi?RB_M3%;9HYf5lSd z{hR7&`oBAOFR)u(ouOZ}FZV<#AqqEs5$cN_7!#tN&Bj!Gi*qCp%hf*35osNKg(g$C zj1w+J(n~QWL#8qZ?eBrkQZO2pPzdvDNF&==q%prab!6**w?})1LM_g4a{C>B1V_Eg z8EEmZvEq|N-mWPJS>L|QLs32unB6f3na<;dCfQX!oi^hPr(wl=7EM30;PJaIDzt$z zRDaS)zwYV-M>OBI*;B_N5z%ECZ_8H~bAAqoh@|XhimT5A4FN!mT*Nruw=|=*cJ%xe zh63CD@);ldhvcw8AY@Kn;3dif9JM=5LdB4C_)C;Ea*-u^4{%<_E=jO|pu0I<_%qK#F^=p1ZrrH1W2_Y+P_?*P1W4fJXH#53x{JrW>%XS7Ez;!*7Vy+tAN# 
zz1(Q{whsCH)@5G(e|Ny7rKP3fp$RxWns8~iqp@=O5~P^ulfImdlR>ROvOL-A zOXRk;{7Hlxfj+K46=JLau;B4z?CtHd^%w$7&w0&`0D;(TW@hF~0WVXXt}}XlxrE=f zOG?+C)FB8kk+wT1D-m6EMp8ckhn!eieUJ2iI(aOH2X6_wQGle=vvUGdj6_3%L`{B` zQ!m9Ek1%9XDK)#iw%6zA?_ygS%92?IP@U6zGxt^QIK`6y`Q9bxSN0&XI~^4b91%G7 z{Av!{Lil)0CPwf%es`lHG=ix{Ibn^^MvJ*m?8rl1|6Q*eC7cJIm$*v*EpuN9`Kxn3 zM{5mE1;IozyhybdlFJG%3ihLiaD{itvEpkiG_GhW53H(RzkX?1nXw9q_Ne2zP3x{d zd?k%X@i&a;oFoKv@6zScfZsK=_>@j}W$am!9!Vy_;T|=a&?206FweV`@?a%9IpO~E z?^ph_7Q$oJ(wS~Q6yL3V$EXG5$0-YRu2j=fv0W|4qVw^zXSq3Am;)5JL~ZilH8zr@ z_Je3w)s$xAfQOIWOdya>qNXEQqY1KYLoMZL%R&+vZ27P6Q<9QGrj4uno=S$f1m_>v zj=#=+%DijOSdSb2)IEo1v{%9#pkNy%r})-{kQJ{hjQ#JE!k{B2@C7Jk4QB#0%PY|5H>t$?jGL^c9ljsk^HLul zcKP7kBbjPv5(?q8Oc?GQ2n{gn^=?%B$-pODzdd6rSD zg|?t3=zggW%-AXV)X;hVc1{`J@-^x5gi4WAKKAw$ASA)yfLYLl5162Vl5i)63H>Ig z6KcA5k_kXe>@$;nwO6euZA{1BecP~DLF!473yyt^Vx(>UXTl#w=9;;BzoLAt4EO`I zuV{W4S!&j9hA2r5{JB|4~x zAOUv)FF4=#{r&xQp6i;qB*4p4?&scnuf5jVp!bPKQSlqcYG4EY z5s6Y@6+2ePEJwCq`pq1zeSoQL(Ox<%sD{$Pv7E??>N) zXXpsaHz0zE$A~ZH)d({6q|S7MN1xvO(x^b3>2ulV zM^5S6W7sJ7IM1sD{}pY{I9~RgT=(HtM&`rq@wD^(>4WAUTEbg^ecfgH^N`%4EI*q1 zQ=J!E{g*d;d61aAIL;0JcPe2(FV@S8V|lSo$Tb4q1ww+-Qum_7*6ZeYT7^|;luks% z6!&p7ae@~th@5TQ8h$hFuV);GW!-CQ72VmWPzQyxv?0qpIT7?>+&e9-T$43xf*!3Y z6H8&4r1u^E9hjS;Jo74+Aehpj-PMLXcBbM|=f)vDmY0GY%&OLyW`3&!O9p1BXlK?* ziB(lpnP!DnPPS}yb2UWGpf zl|bEf=MUt`MTAhtCdc`(d6n{-X$~Eiq1PLNtT}roci4LTchoMjE5sMT5SdBcw!yLs zN4Kdf1~++tCYR5rR~IA`p-#VE7t&0Wyx=%YIKa8L0Y!9#70<3)^Z9*@k$J-3Vcfb* zxx|HCJb(V_^DFH@>GlF8=j^g^tTlbv$&D&}@cdsaB?S>7z$kKhFDXJi)7d?9a^JSCl`t>^=K`k+5 z&g|smLqwe!#^>zd`?=lyXV0D;_sEii&J_XSKE7Z8x>f&u4ZvxUyZ7^_{~;xU?wFMld(l! 
z*l78+G$(De<+uQlJ<;G9gLV7?hgWcYl>`;e-iGO$MXq3)eZZh%O(A2xG8LJ+D=_}Y zc3oQkEUR=I4+C!-<}3C>&LF$Ju5l0H5j-S0SwmgS>HZ4HKAILwsE#*gRBh%XcC zKc>+57_hD(;WTHBk2yH$%(A`wk28dPxc~z&ZI#@aD{;@&`5>mt!bVKYx&V*E)7WnbF9n&Rf;!`)Gb*2etp14=j_66(iKo+OZO)EyE1bvNmEgsNgA?_tgVK=~{1ELDRu zevwqLt`HkHf9pF7OF#B*o#iNBIQk~Tse&EWZ?0b{x`SQ?%#~-iY6+@yT?*u`A zxytFp2^M(+V?XoLueY7M6nv6o^KE9;Be^tX)Nd_2fL@DY{(|m|J|8djXZkp%&`mdA z!SMPyYxo{{jXU!z7CkL#L6bpwY)}CVaCxZEg1Cl7oqMm?zB= z%JZtadyfNld*h7}w<;_*fp9aouBhE}dRyMj;CC&59oo%b*dCr8*BPNwdk`u|>SlqrOTOKc<|0!psYbrsl# z4I9QxapFXqH+Op(0K#%a+W`3;ZC4E^Sduq98YcDi-W;5Cv&yn057fTlm(Jh?0V>3$*)SoFv9L9a|KPx_g1gf2z z3XZCUw~chAiR?^&`5Ai+aoH)Z_v1qB!n0gJOGB`LTET>Yws#uNGoZ7vn-QjL4TW;I zY2SKD-;glcyjmq$ECKewVubg%u zgTF|h0oW($FMUxtz5wYB7!tr(QZXrWsrd*i3kKo|UKFH6V;2m5@psPhcaFAW1o6KX zY@KuE0S(>1H?n{{(y~DJ#iU&v3`5JJqD>>N zrB)+_8v);4L;`4ec6eZFGzWwQ-{Pk4_RHN-j~S7!S9Bhl<#@mW#Mb@OPnT%`1ujGi zhNw{Dy;2y8%3auH>{{P0eD+<8SOn8)!A%|0wf?HChFIx!Y=LhVSr+rIF@EfnlPFml zvy-$@#nl9s(t>34<5!n9c9}iWF$Dcq3X}7+r;Yp^h>SnnK zjbSoU8VsokrIR_Nrhmidxpq|#>w~vSkI@`x?Du_PMf{($8a}{vwvoX>n6i%_M%x4o zZYPM(rkK4@230l_DArqo33mzptI&{eO|lYi6?Ck9`M8F^<^1Yo2zakO1W3sg00zx8 zmI$cIUpVN*X8dhoI>P@mMffY=34&!wCiaU_??HgNkpO&#t;wM)Y(46y*=V;PylTGPBOnV|+*7i4DJUV@+64ojeS@U~zB zu`e0Y@ONFqjlTrhtcH&*YH3^sdDr#TEfAe)(}QBUw35+jIVg=Z zwcebLRxy#Mv>>clGOZ9e4fH2WM@s<~cwxT=;g zbXL*|*mdn1e<4Z!V~`TyGo9+XM>M&*!*-Pb#&eFtgw{_L#m256zDvDxF-ze6Y=Oz)A;p9NHv15H?y2ilm%9{=G`>3_z*q@Q6!&x~t@B!u%>f{+eiN#No*oOP+?4i6bfY`2n zgmCNoKUoq@*1*cPwtp&erA0|aR>4E`!Lg6{wF%t%Ks_AI^-Kk)2^%yYu?|ubhb+gb zn`7eR<2Bt%95(nsZ}*za`DDs3#mb4-;V6 zQX0+T7S)h-S@r$5djh3+wSf2+hpQ(i{o+{8C%yRU|3-xVKYh~xn)s_>%+rt)q9Ay-S~HH9MdVP_jxra1+Q}o_Y3ng67T30qN)t7QgTt z|GP6<-k{1*lG?h5uPaT0wD>9cr?Gn*eZ=3(q&^q7$-54-4eQ6tqz|5YOJ3RF+Vcu_?`0@P{YYkG32coWF4X>AU2UoPi`?i`}xCiqTL)vaGefLB57A$1w%Qt z1OER#8qU;$->mTL)4y%oft0H2ZKSHBDHSd=T2V1{MeKg|kPO8~mOlkdYcTBDs}ivA z4t;d4MW6YC%r6W}-k7QY8?BJ8cIVg#D111N5KwAsD%E=FAUc zCu@IlUeq+paain1EfK5yeD<{p9?|vjQg~d*_+tSD${;z?62+SM7GuUFtqmky>!f2n 
z?ms64AkyqbDi)vm3Hg5F*VdGb-3u`)scqS%w6u{R$G!ue;XCoT-~5ROw>VV^p5iHp#W8;VO>2|8(|Tmr zdq|s}R$h7P;d|q_2f;4*x+D&8Ph@AfVm~t*KLbIWwLP|#XX_x$-;h<9mcINPFMqXT z!5rDcWSGYf{g_%QkEJH=%DcBwfr?xzlCMI_d6*hlf@B0Ins1fG6XDefQ)>Q?e_VB{eL6hl=k(W-y*xeD zw9)Jhl4cE;O#j}g)7`~O=EBNzWB`x&Quva{eij5?xE(bmLmEDhnXUk(iJuy}*|xA2 zh#k^PIBQgXE|i*dRJ_XgP?2=T(!7-}f}S#Cvdv|rbHJb9`yo;*C+dPx$1<}((Qf;;Xw_l~PlZ;GY#>{Dzd zBwmqh*@2I5n-~C*)%dC?*XHK6sO!sBTEM?lAsv@ciIoUz;xA6*%{sEbLGOS@;Dozf9 zqo7iVO3E%Obwm5koqtVsG7vipXE!35R>rxYd{UP;-&;&5JNEShi}@-WM|im>p;>k( z;!cF`M3^t)kqkk%Pp-w}2_AkH&odSV_dbfqBp;TARPl_LvA=t7_I0(pr3j+=G8%Tk z_&ASLg0Dd2L|C4b=P(NjdxWHPhFy@!b5LfCilci@DA8m(d>Fk!Hq+r1E`WLPq}gX% zw``Hxf;3#tBBXPdvp%x7A13d2y~0zcOu4hF=MCXzp0{UT5C84w)-(5r&d_D@r`s}u zN&S>AV7R5(&Y^fVzuj%x(6}DpScW_QH{aNrTp~NR>w14Y6Vl6jG257}dhjnEo`6s} zXeT~2i9jL+3mEzMoU!uY>EXLk!bTor>A0EPF7ddPWwdJ5N>C)OWuCWBz)AR=KsSQ! z0vEI1$Kx4g$gpkw@_H0MTWd+OA?8WT`nF?mh@7z|21mmIRem(IlIOD<60TN*QT(+*z2f#-+wn7!r|$i4k` z<78Xa(B$}|SnoC_iEW93alFD}T?a>|#1RP@Ht+%6X;UWPg>Qm)lR5B((5?=~LH)v< z3H%jJ#gi%HLZ>xb)pcxrD8(tPnl&Hb%7wM}`=4y`b{1(FdYz;b zH&7)Ym$}mRy!}wH$@1K7ClYL@sU2EG#jd<0pSq_n=h1%9J7?>))yvb> z&cQ@x58{Mc)OqOP;}=y;}|Qu zG3;Z<#+wt`1kJjg|N3$Mp!Lh4d2Gac)hHv8U2C&+>C(K@+0{o=Qc7P;R4m;~f+Dz&2;u@$2s*{FPf~oP1fzJdyL)fwqx-GWb7Ugxdmb+D z;n<(cSm7ZBi)(A7Hld-$L~chi?ymLo791? 
z(W7Z6bjc|L%y{!1r@r|crhM1MD*`%J?&${*XIflOLSSO;@O0~CJnw^iuXHWXa~b5) ze(t6Rz7%5)@x!3=T=}rHG<+1;v|O6hDAOFuRjpgM_Hj?y^3zuXC>?yy=X~<#b<&^8 zT~GxyDajqu?BIcqz*@`XRA>WlF7V-vg1EP5o&~}4?Y19nXB>o}ySQ3ARQ4ir@E#kc zZ7ELULayU?=mBun{_wE$hx_^$mTxJ{u3j`=0j0bb*e%(M+q%FZBLRDN#>Te(`{2QK z1@BRZy2>_eB(`R@@Pc!vHS=EXXUA}kWfgKMz^y-z{d~WMy+j+ou6=ArN{o=vU zg%J2zKD9Qs^^5@(9Hzeiv+8*d@~4+Y(VV-T>$cFn++$O;U1h&tjpFg?q-o;m zSVdoxy$uSI@VS9cQPg!Sy4-4Ei;uihe4;!$ts#XfpW&Vx3N}6+FNUNXN*{LwOpV)I zypfl`>l__1_&J%F)G}n)meu(=Uh&~Jp$rm8TAK)VC#K-0Z2Hk3YP%6aNt!2xH0VKI zws@r-+~Xo_diUa&|^L``+FjEj}u(nGiC8`ua!vrJ*zw-+Ftl%Q2@9 zW7X;js%Edg9pV|8keq-bB|YhQ6`SS+_yIG6D$Hcievu1_3r(6O?|S^Go81t)S$^kX zSv7Z4tyJV@fqCr$*YvL(8~1DC(PwR1#Lz=enjA9d{FZX%fnO#@L_#q*6BktXc9fR3 zHIYdt71MfptG8Ra?coEk;nx_Fa2s_~Mf}HPc9+;In#cGXP6?4#LS0eS z+%Mtuob=TN7!G25?RuvrCnW!hC*>2y>xL5IIvu`D9tlZfk-3cW<97146Saw>e~Z6E zF|DFK?uX29%>8$ej=Cg&kwuv)FMV@`02|1+k=J_g8X#dCBA3>~s>PnYRqDMKq#+|C zqjiV)rZkT~y>a6PB>$|F(1ccY&>aovU}-c*`~&uIyPS^qWEibczA3eAp}G}5B>l(I zoqkPyNoI65UIkHqyjT1CXG8U2>6x`Qa;U%!BFRS~V_!#E4(7ah zukzA^^yk}#xJNl%xpb-Hp>maATb&j^|9M5nmx!2umYY@xHvz|t>D1%O!prlq?%3dy+-JH*PAT-{pNCwo}#_%a@T(adqE#f7~2^eGS>THlLCS^TYg$vrc5+0 zFlRM9)aCLkFTb2d!Wk|vq*d?s{{kA)vFkP;eWd*G58!}{m#;+I$s3Kd_QSjP(q2WU zmO)){kkoad`TWm6&){_z#Tt6?xX4{v1V{gAbR+p!(Jv!lJ{}l<;tQy^R~176*o6Kn z-4VkYU?GhuNIY2~dt?I}z5nIC=%O1Xz6)i4CQ1ielN_7jl&Xr}1CS?qFV%Ns2di41 zQ;b;o_i7>|l={h`mJ$#iejlhoV$!O>rkce5G(5JPbra>?6gdfkfB~Qzpj>bixR_JI zJ}E5Acn!ojAcEMK#9xN^U()s1sU7S!V-Zqc4)n(_rumpBPJ#i*dIWf$jjL3~jk%P3 z%}kjT8N1nwD0kg7GMcXUZqNx~HcUy;*wFzuxeE{Y2fII z@9~rPl|?s=RgWqlp?<6B&1h9sazLQR1$2ut4^2r-G65ir9DoU|M2>HaM&2~1yvl1W zo1^r?f3RC720xj$boufQuo@_m-H3y~rM~pKSoid`JA9MU(L=B%OI56Sg+^{LDvGN# zA5@p|V$2B&{gkRD88`nOke%ts24DzujE( z_yW+D<}S%vNZSiEM~y%NgdsEC2~-`6>_q60_?B!6d67$m;fIw?LhAE{S6#>Nf*&?P zwC&c}@H9F=vcI)r|K!avYLMcHY|6w5HEMT}**lFLwlLgJ^GK1$c z7W(0%ZS{TUU|81rhgDjlu<4*`x`G!wH*FGYg#U;ZmKI5LzU%=EN4JqQ`MLvh)rAtP zh!zeKxawFd5}#D*w9@9@I{PZG?YSVov;Y0Z<42EvJk&=&;v8I0I89r%XOErGhv4)h 
z#3-_|!_k&gs;B0r?V{tqhnEK=F-kZPUyg;x#u447i-jzquZ$z?Rz2`<`41~rzG$AY p$pu(m$GY#nw)}s|H%n{(W9-D0`MDF1oY9q=27mtdxPcSD{vU_19c=&r literal 0 HcmV?d00001 diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py new file mode 100644 index 000000000000..526502bca468 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py @@ -0,0 +1,110 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import asyncio + +""" +DESCRIPTION: + This sample demonstrates how to use azure function assistant operations from + the Azure Assistants service using a asynchronous client. + +USAGE: + python sample_assistants_azure_functions_async.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. 
+ Please see Getting Started with Azure Functions page for more information on Azure Functions: + https://learn.microsoft.com/azure/azure-functions/functions-get-started +""" + +import os +from azure.ai.assistants.aio import AssistantsClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.assistants.models import ( + AzureFunctionStorageQueue, + AzureFunctionTool, + MessageRole, +) + + +async def main(): + + async with DefaultAzureCredential( + exclude_managed_identity_credential=True, exclude_environment_credential=True + ) as creds: + async with AssistantsClient.from_connection_string( + credential=creds, + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as assistants_client: + + storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPONT"] + azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_service_endpoint, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_service_endpoint, + ), + ) + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="azure-function-assistant-foo", + instructions=f"You are a helpful support assistant. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. 
Always responds with \"Foo says\" and then the response from the tool.", + tools=azure_function_tool.definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + # Create a thread + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What is the most prevalent element in the universe? What would foo say?", + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Get messages from the thread + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + # Get the last message from the sender + last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + # Delete the assistant once done + result = await assistants_client.delete_assistant(assistant.id) + if result.deleted: + print(f"Deleted assistant {result.id}") + else: + print(f"Failed to delete assistant {result.id}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py new file mode 100644 index 000000000000..cb4b6a2aab4a --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py @@ -0,0 +1,72 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a asynchronous client. 
+ +USAGE: + python sample_assistants_basics_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. +""" +import asyncio +import time + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ListSortOrder +from azure.identity.aio import DefaultAzureCredential + +import os + + +async def main() -> None: + + async with DefaultAzureCredential() as creds: + assistant_client = AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) + + async with assistant_client: + assistant = await assistant_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistant_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistant_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID: {message.id}") + + run = await assistant_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await assistant_client.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + await assistant_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistant_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git 
a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..29cbdc66d7a7 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py @@ -0,0 +1,89 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a asynchronous client with Azure Monitor tracing. + View the results in the "Tracing" tab in your Azure AI Foundry project page. + +USAGE: + python sample_assistants_basics_async_with_azure_monitor_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry aiohttp + + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Foundry project. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" +import asyncio +import time +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ListSortOrder +from azure.ai.assistants.telemetry import enable_telemetry +from azure.identity.aio import DefaultAzureCredential +from opentelemetry import trace +import os +from azure.monitor.opentelemetry import configure_azure_monitor + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + + +async def main() -> None: + + async with DefaultAzureCredential() as creds: + assistants_client = AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) + + # Enable Azure Monitor tracing + application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] + configure_azure_monitor(connection_string=application_insights_connection_string) + + # enable additional instrumentations + enable_telemetry() + + with tracer.start_as_current_span(scenario): + async with assistants_client: + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + await 
assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py new file mode 100644 index 000000000000..538407082a9d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py @@ -0,0 +1,89 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a asynchronous client with tracing to console. + +USAGE: + python sample_assistants_basics_async_with_console_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry aiohttp + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Foundry project. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" +import asyncio +import time +import sys +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ListSortOrder +from azure.ai.assistants.telemetry import enable_telemetry +from azure.identity.aio import DefaultAzureCredential +from opentelemetry import trace +import os + + +tracer = trace.get_tracer(__name__) + + +@tracer.start_as_current_span(__file__) +async def main() -> None: + + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistant_client: + + # Enable console tracing + # or, if you have local OTLP endpoint running, change it to + # assistant_client.telemetry.enable(destination="http://localhost:4317") + enable_telemetry(destination=sys.stdout) + + assistant = await assistant_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistant_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistant_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID: {message.id}") + + run = await assistant_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await assistant_client.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + await assistant_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistant_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"Messages: {messages}") 
+ + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py new file mode 100644 index 000000000000..90c8ce2eacbc --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py @@ -0,0 +1,109 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use code interpreter tool with assistant from + the Azure Assistants service using a asynchronous client. + +USAGE: + python sample_assistants_code_interpreter_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. 
+""" +import asyncio + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ( + CodeInterpreterTool, + FilePurpose, + ListSortOrder, + MessageRole +) +from azure.identity.aio import DefaultAzureCredential +from pathlib import Path + +import os + + +async def main() -> None: + + async with DefaultAzureCredential() as creds: + + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + # Upload a file and wait for it to be processed + file = await assistants_client.upload_file_and_poll( + file_path="../nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS + ) + print(f"Uploaded file, file ID: {file.id}") + + code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + messages = await assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"Messages: {messages}") + + last_msg = 
messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for image_content in messages.image_contents: + print(f"Image File ID: {image_content.image_file.file_id}") + file_name = f"{image_content.image_file.file_id}_image_file.png" + await assistants_client.save_file(file_id=image_content.image_file.file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + for file_path_annotation in messages.file_path_annotations: + print("File Paths:") + print(f"Type: {file_path_annotation.type}") + print(f"Text: {file_path_annotation.text}") + print(f"File ID: {file_path_annotation.file_path.file_id}") + print(f"Start Index: {file_path_annotation.start_index}") + print(f"End Index: {file_path_annotation.end_index}") + file_name = Path(file_path_annotation.text).name + await assistants_client.save_file( + file_id=file_path_annotation.file_path.file_id, file_name=file_name + ) + print(f"Saved image file to: {Path.cwd() / file_name}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py new file mode 100644 index 000000000000..c3f2b74b7d6d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py @@ -0,0 +1,84 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with code interpreter from + the Azure Assistants service using an asynchronous client.
+ +USAGE: + python sample_assistants_code_interpreter_attachment_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. +""" +import asyncio +import os +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ( + CodeInterpreterTool, + FilePurpose, + MessageAttachment, + ListSortOrder +) +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + # Upload a file and wait for it to be processed + file = await assistants_client.upload_file_and_poll( + file_path="../product_info_1.md", purpose=FilePurpose.ASSISTANTS + ) + print(f"Uploaded file, file ID: {file.id}") + + code_interpreter = CodeInterpreterTool() + + # Notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message with the attachment + attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions) + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) + print(f"Created message, message ID: {message.id}") + + run = await 
assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + await assistants_client.delete_file(file.id) + print("Deleted file") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py new file mode 100644 index 000000000000..7a95a1df6352 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py @@ -0,0 +1,80 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with code interpreter from + the Azure Assistants service using an asynchronous client. + +USAGE: + python sample_assistants_code_interpreter_attachment_enterprise_search_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project.
+""" +import asyncio +import os +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ( + CodeInterpreterTool, + ListSortOrder, + MessageAttachment, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + async with DefaultAzureCredential() as credential: + async with AssistantsClient.from_connection_string( + credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + + code_interpreter = CodeInterpreterTool() + + # Notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # We will upload the local file to Azure and will use it for vector store creation. 
+ _, asset_uri = await assistants_client.upload_file_to_azure_blob("../product_info_1.md") + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + + # Create a message with the attachment + attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions) + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py new file mode 100644 index 000000000000..a4791dc9c8c7 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py @@ -0,0 +1,109 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_assistants_functions_async.py + +DESCRIPTION: + This sample demonstrates how to use assistant operations with custom functions from + the Azure Assistants service using an asynchronous client.
+ +USAGE: + python sample_assistants_functions_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. +""" +import asyncio +import time +import os +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput +from azure.identity.aio import DefaultAzureCredential +from user_async_functions import user_async_functions + + +async def main() -> None: + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + # Initialize assistant functions + functions = AsyncFunctionTool(functions=user_async_functions) + + # Create assistant + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=functions.definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + # Create thread for communication + thread = await assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create and send message + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, what's the time?" 
+ ) + print(f"Created message, ID: {message.id}") + + # Create and run assistant task + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, ID: {run.id}") + + # Polling loop for run status + while run.status in ["queued", "in_progress", "requires_action"]: + await asyncio.sleep(4) + run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + await assistants_client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = await functions.execute(tool_call) + tool_outputs.append( + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + await assistants_client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print(f"Current run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + # Delete the assistant when done + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py new file mode 100644 index 000000000000..34e62f9ca963 --- /dev/null +++
b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py @@ -0,0 +1,114 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistants with JSON schema output format. + +USAGE: + python sample_assistants_json_schema_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity pydantic + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import asyncio +import os + +from enum import Enum +from pydantic import BaseModel, TypeAdapter +from azure.ai.assistants.aio import AssistantsClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.assistants.models import ( + MessageTextContent, + MessageRole, + ResponseFormatJsonSchema, + ResponseFormatJsonSchemaType, + RunStatus, +) + + +# Create the pydantic model to represent the planet names and there masses. +class Planets(str, Enum): + Earth = "Earth" + Mars = "Mars" + Jupyter = "Jupyter" + + +class Planet(BaseModel): + planet: Planets + mass: float + + +async def main(): + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as assistants_client: + + # [START create_assistant] + assistant = await assistants_client.create_assistant( + # Note only gpt-4o-mini-2024-07-18 and + # gpt-4o-2024-08-06 and later support structured output. 
+ model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Extract the information about planets.", + headers={"x-ms-enable-preview": "true"}, + response_format=ResponseFormatJsonSchemaType( + json_schema=ResponseFormatJsonSchema( + name="planet_mass", + description="Extract planet mass.", + schema=Planet.model_json_schema(), + ) + ), + ) + # [END create_assistant] + print(f"Created assistant, assistant ID: {assistant.id}") + + # [START create_thread] + thread = await assistants_client.create_thread() + # [END create_thread] + print(f"Created thread, thread ID: {thread.id}") + + # [START create_message] + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content=("The mass of the Mars is 6.4171E23 kg; the mass of the Earth is 5.972168E24 kg;"), + ) + # [END create_message] + print(f"Created message, message ID: {message.id}") + + # [START create_run] + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + + if run.status != RunStatus.COMPLETED: + print(f"The run did not succeed: {run.status=}.") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # [START list_messages] + messages = await assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + # We will only list assistant responses here. 
+ if isinstance(last_message_content, MessageTextContent) and data_point.role == MessageRole.ASSISTANT: + planet = TypeAdapter(Planet).validate_json(last_message_content.text.value) + print(f"The mass of {planet.planet} is {planet.mass} kg.") + + # [END list_messages] + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py new file mode 100644 index 000000000000..2473637752ac --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py @@ -0,0 +1,82 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with toolset from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_run_with_toolset_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. 
+""" + +import os, asyncio +from azure.ai.assistants.aio import AssistantsClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.assistants.models import AsyncFunctionTool, AsyncToolSet +from user_async_functions import user_async_functions + + +async def main() -> None: + + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as assistants_client: + + # Initialize assistant toolset with user functions and code interpreter + # [START create_assistant_with_async_function_tool] + functions = AsyncFunctionTool(user_async_functions) + + toolset = AsyncToolSet() + toolset.add(functions) + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # [END create_assistant_with_async_function_tool] + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = await assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == 
"__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py new file mode 100644 index 000000000000..ec2b570f0978 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py @@ -0,0 +1,97 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler in streaming from + the Azure Assistants service using a asynchronous client. + +USAGE: + python sample_assistants_stream_eventhandler_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. +""" +import asyncio +from typing import Any, Optional + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, +) +from azure.ai.assistants.models import AsyncAssistantEventHandler +from azure.identity.aio import DefaultAzureCredential + +import os + + +class MyEventHandler(AsyncAssistantEventHandler[str]): + + async def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: + return f"Text delta received: {delta.text}" + + async def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: + return f"ThreadMessage created. 
ID: {message.id}, Status: {message.status}" + + async def on_thread_run(self, run: "ThreadRun") -> Optional[str]: + return f"ThreadRun status: {run.status}" + + async def on_run_step(self, step: "RunStep") -> Optional[str]: + return f"RunStep type: {step.type}, Status: {step.status}" + + async def on_error(self, data: str) -> Optional[str]: + return f"An error occurred. Data: {data}" + + async def on_done(self) -> Optional[str]: + return "Stream completed." + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: + return f"Unhandled Event Type: {event_type}, Data: {event_data}" + + +async def main() -> None: + + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID {message.id}") + + async with await assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + async for event_type, event_data, func_return in stream: + print(f"Received data.") + print(f"Streaming receive Event Type: {event_type}") + print(f"Event Data: {str(event_data)[:100]}...") + print(f"Event Function return: {func_return}\n") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + 
asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py new file mode 100644 index 000000000000..c6bf104b05fb --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py @@ -0,0 +1,138 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler and toolset from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_eventhandler_with_functions_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. 
+""" +import asyncio +from typing import Any + +import os +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ( + AsyncAssistantEventHandler, + AsyncFunctionTool, + MessageDeltaChunk, + RequiredFunctionToolCall, + RunStep, + SubmitToolOutputsAction, + ThreadMessage, + ThreadRun, + ToolOutput, +) +from azure.identity.aio import DefaultAzureCredential +from user_async_functions import user_async_functions + + +class MyEventHandler(AsyncAssistantEventHandler[str]): + + def __init__(self, functions: AsyncFunctionTool, assistants_client: AssistantsClient) -> None: + super().__init__() + self.functions = functions + self.assistants_client = assistants_client + + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + async def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + async def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. 
Error: {run.last_error}") + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = await self.functions.execute(tool_call) + tool_outputs.append( + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + await self.assistants_client.submit_tool_outputs_to_stream( + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self + ) + + async def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + async def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + async def on_done(self) -> None: + print("Stream completed.") + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +async def main() -> None: + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + + # [START create_assistant_with_function_tool] + functions = AsyncFunctionTool(functions=user_async_functions) + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, + ) + # [END create_assistant_with_function_tool] + print(f"Created assistant, ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, 
+ role="user", + content="Hello, send an email with the datetime and weather information in New York? Also let me know the details.", + ) + print(f"Created message, message ID {message.id}") + + async with await assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler(functions, assistants_client) + ) as stream: + await stream.until_done() + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py new file mode 100644 index 000000000000..a00f401c1ac6 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py @@ -0,0 +1,104 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler and toolset from + the Azure Assistants service using a asynchronous client. + +USAGE: + python sample_assistants_stream_eventhandler_with_toolset_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. 
+""" +import asyncio +from typing import Any + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun +from azure.ai.assistants.models import AsyncAssistantEventHandler, AsyncFunctionTool, AsyncToolSet +from azure.identity.aio import DefaultAzureCredential + +import os + +from user_async_functions import user_async_functions + + +class MyEventHandler(AsyncAssistantEventHandler): + + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + async def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + async def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + async def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + async def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + async def on_done(self) -> None: + print("Stream completed.") + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +async def main() -> None: + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + + # Initialize toolset with user functions + functions = AsyncFunctionTool(user_async_functions) + toolset = AsyncToolSet() + toolset.add(functions) + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + toolset=toolset, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details", + ) + print(f"Created message, message ID {message.id}") + + async with await assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + await stream.until_done() + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py new file mode 100644 index 000000000000..5d7e0398ce4e --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py @@ -0,0 +1,81 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with interation in streaming from + the Azure Assistants service using a asynchronous client. + +USAGE: + python sample_assistants_stream_iteration_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. 
+""" +import asyncio + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import AssistantStreamEvent +from azure.ai.assistants.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun +from azure.identity.aio import DefaultAzureCredential + +import os + + +async def main() -> None: + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID {message.id}") + + async with await assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + async for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py new file mode 100644 index 000000000000..e1e5c27e47ee --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py @@ -0,0 +1,108 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to override the base event handler, parse the events, and iterate through them. + In your use case, you might not want to write the iteration code similar to sample_assistants_stream_iteration_async.py. + If you have multiple places to call create_stream, you might find the iteration code cumbersome. + This example shows how to override the base event handler, parse the events, and iterate through them, which can be + reused in multiple create_stream calls to help keep the code clean. + +USAGE: + python sample_assistants_stream_with_base_override_eventhandler_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. 
+""" +import asyncio +import json +from typing import AsyncGenerator, Optional + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models._models import ( + MessageDeltaChunk, + MessageDeltaTextContent, +) +from azure.ai.assistants.models import AssistantStreamEvent, BaseAsyncAssistantEventHandler +from azure.identity.aio import DefaultAzureCredential + +import os + + +# Our goal is to parse the event data in a string and return the chunk in text for each iteration. +# Because we want the iteration to be a string, we define str as the generic type for BaseAsyncAssistantEventHandler +# and override the _process_event method to return a string. +# The get_stream_chunks method is defined to return the chunks as strings because the iteration is a string. +class MyEventHandler(BaseAsyncAssistantEventHandler[Optional[str]]): + + async def _process_event(self, event_data_str: str) -> Optional[str]: + + event_lines = event_data_str.strip().split("\n") + event_type: Optional[str] = None + event_data = "" + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + if event_type == AssistantStreamEvent.THREAD_MESSAGE_DELTA.value: + + event_obj: MessageDeltaChunk = MessageDeltaChunk(**json.loads(event_data)) + + for content_part in event_obj.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + if content_part.text is not None: + return content_part.text.value + return None + + async def get_stream_chunks(self) -> AsyncGenerator[str, None]: + async for chunk in self: + if chunk: + yield chunk + + +async def main() -> None: + + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: 
+ assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID {message.id}") + + async with await assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + async for chunk in stream.get_stream_chunks(): + print(chunk) + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py new file mode 100644 index 000000000000..981002f22710 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py @@ -0,0 +1,116 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations to add files to an existing vector store and perform search from + the Azure Assistants service using a synchronous client. 
+ +USAGE: + python sample_assistants_vector_store_batch_enterprise_file_search_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity azure-ai-ml aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. +""" +import asyncio +import os + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ( + FileSearchTool, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + + async with DefaultAzureCredential() as credential: + async with AssistantsClient.from_connection_string( + credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + # We will upload the local file to Azure and will use it for vector store creation. + _, asset_uri = assistants_client.upload_file_to_azure_blob("../product_info_1.md") + ds = VectorStoreDataSource( + asset_identifier=asset_uri, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + vector_store = await assistants_client.create_vector_store_and_poll( + file_ids=[], name="sample_vector_store" + ) + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Add the file to the vector store or you can supply file ids in the vector store creation + vector_store_file_batch = await assistants_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + 
name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What feature does Smart Eyewear offer?", + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + await assistants_client.update_assistant( + assistant_id=assistant.id, + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Updated assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What feature does Smart Eyewear offer?", + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + await assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py 
b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py new file mode 100644 index 000000000000..2c1eabc34e87 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py @@ -0,0 +1,110 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations to add files to an existing vector store and perform search from + the Azure Assistants service using an asynchronous client. + +USAGE: + python sample_assistants_vector_store_batch_file_search_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. 
+""" + +import asyncio +import os +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import FileSearchTool, FilePurpose +from azure.identity.aio import DefaultAzureCredential + + +async def main() -> None: + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + # Upload a file and wait for it to be processed + file = await assistants_client.upload_file_and_poll( + file_path="../product_info_1.md", purpose=FilePurpose.ASSISTANTS + ) + print(f"Uploaded file, file ID: {file.id}") + + # Create a vector store with no file and wait for it to be processed + vector_store = await assistants_client.create_vector_store_and_poll( + file_ids=[], name="sample_vector_store" + ) + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Add the file to the vector store or you can supply file ids in the vector store creation + vector_store_file_batch = await assistants_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, file_ids=[file.id] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, role="user", 
content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + await assistants_client.update_assistant( + assistant_id=assistant.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) + print(f"Updated assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + await assistants_client.delete_file(file.id) + print("Deleted file") + + await assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py new file mode 100644 index 000000000000..c506935c77c8 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py @@ -0,0 +1,76 @@ +# pylint: disable=line-too-long,useless-suppression +# 
------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to add files to an assistant during the vector store creation. + +USAGE: + python sample_assistants_vector_store_enterprise_file_search_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity azure-ai-ml aiohttp + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. +""" +import asyncio +import os + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + async with DefaultAzureCredential() as credential: + async with AssistantsClient.from_connection_string( + credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + # We will upload the local file to Azure and will use it for vector store creation. 
+ _, asset_uri = assistants_client.upload_file_to_azure_blob("../product_info_1.md") + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + vector_store = await assistants_client.create_vector_store_and_poll( + data_sources=[ds], name="sample_vector_store" + ) + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + await assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py new file mode 100644 index 000000000000..a6ac7c88b062 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py @@ -0,0 +1,87 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to add files to assistant during the vector store creation. + +USAGE: + python sample_assistants_vector_store_file_search_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. 
+""" +import asyncio +import os + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import FileSearchTool, FilePurpose, MessageTextContent +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + async with DefaultAzureCredential() as credential: + async with AssistantsClient.from_connection_string( + credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + # Upload a file and wait for it to be processed + file = await assistants_client.upload_file_and_poll( + file_path="../product_info_1.md", purpose=FilePurpose.ASSISTANTS + ) + print(f"Uploaded file, file ID: {file.id}") + + # Create a vector store with no file and wait for it to be processed + vector_store = await assistants_client.create_vector_store_and_poll( + file_ids=[file.id], name="sample_vector_store" + ) + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + await assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + + for message in reversed(messages.data): + # To remove characters, which are not correctly handled by print, we will encode the message + # and then decode it again. + clean_message = "\n".join( + text_msg.text.value.encode("ascii", "ignore").decode("utf-8") for text_msg in message.text_messages + ) + print(f"Role: {message.role} Message: {clean_message}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py new file mode 100644 index 000000000000..294980d31c04 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py @@ -0,0 +1,82 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations to create messages with file search attachments from + the Azure Assistants service using a asynchronous client. 
+ +USAGE: + python sample_assistants_with_file_search_attachment_async.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity aiohttp + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. +""" +import asyncio + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import FilePurpose +from azure.ai.assistants.models import FileSearchTool, MessageAttachment +from azure.identity.aio import DefaultAzureCredential + +import os + + +async def main() -> None: + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) as assistants_client: + # Upload a file and wait for it to be processed + file = await assistants_client.upload_file_and_poll( + file_path="../product_info_1.md", purpose=FilePurpose.ASSISTANTS + ) + + # Create assistant + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
+ attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) + message = await assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What feature does Smart Eyewear offer?", + attachments=[attachment], + ) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_and_process_run( + thread_id=thread.id, assistant_id=assistant.id, sleep_interval=4 + ) + print(f"Created run, run ID: {run.id}") + + print(f"Run completed with status: {run.status}") + + await assistants_client.delete_file(file.id) + print("Deleted file") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/user_async_functions.py b/sdk/ai/azure-ai-assistants/samples/async_samples/user_async_functions.py new file mode 100644 index 000000000000..ba75eb3d7231 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/user_async_functions.py @@ -0,0 +1,67 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +import asyncio +import os +import sys +import json +import datetime +from typing import Any, Callable, Set, Optional +from azure.ai.assistants.telemetry import trace_function + + +# Add parent directory to sys.path to import user_functions +current_dir = os.path.dirname(os.path.abspath(__file__)) +parent_dir = os.path.abspath(os.path.join(current_dir, "..")) +if parent_dir not in sys.path: + sys.path.insert(0, parent_dir) +from user_functions import fetch_current_datetime, fetch_weather, send_email + + +async def send_email_async(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Email address of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + await asyncio.sleep(1) + return send_email(recipient, subject, body) + + +# The trace_func decorator will trace the function call and enable adding additional attributes +# to the span in the function implementation. Note that this will trace the function parameters and their values. +@trace_function() +async def fetch_current_datetime_async(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. 
+ :rtype: str + """ + await asyncio.sleep(1) + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +# Statically defined user functions for fast reference with send_email as async but the rest as sync +user_async_functions: Set[Callable[..., Any]] = { + fetch_current_datetime_async, + fetch_weather, + send_email_async, +} diff --git a/sdk/ai/azure-ai-assistants/samples/countries.json b/sdk/ai/azure-ai-assistants/samples/countries.json new file mode 100644 index 000000000000..58d3df70d28d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/countries.json @@ -0,0 +1,46 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "RestCountries.NET API", + "description": "Web API version 3.1 for managing country items, based on previous implementations from restcountries.eu and restcountries.com.", + "version": "v3.1" + }, + "servers": [ + { "url": "https://restcountries.net" } + ], + "auth": [], + "paths": { + "/v3.1/currency": { + "get": { + "description": "Search by currency.", + "operationId": "LookupCountryByCurrency", + "parameters": [ + { + "name": "currency", + "in": "query", + "description": "The currency to search for.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Success", + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + } + } + } + } + }, + "components": { + "schemes": {} + } + } \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/fix_sample.sh b/sdk/ai/azure-ai-assistants/samples/fix_sample.sh new file mode 100644 index 000000000000..067c4b55317b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/fix_sample.sh @@ -0,0 +1,20 @@ +#!/usr/bin/bash +fix_samples(){ + for fle in `ls $1/*.py | grep agent`; + 
do + new_name="`echo "$fle" | sed "s/agent/assistant/g"`" + echo "$fle - > $new_name" + sed "s/gent/ssistant/g" "$fle" \ + | sed "s/azure-ai-projects/azure-ai-assistants/g" \ + | sed "s/ai.projects/ai.assistants/g" \ + | sed "s/AIProjectClient/AssistantsClient/g" \ + | sed "s/project_client.assistants/project_client/g" \ + | sed "s/project_client/assistants_client/g" > $new_name + rm -f "$fle" + done +} + +#fix_samples async_samples +#fix_samples . +#fix_samples multiagent +fix_samples ../tests \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py new file mode 100644 index 000000000000..fbf4c296a569 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py @@ -0,0 +1,434 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import os +import yaml # type: ignore + +from opentelemetry import trace +from opentelemetry.trace import StatusCode, Span # noqa: F401 # pylint: disable=unused-import +from typing import Any, Dict, Optional, Set, List +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FunctionTool, ToolSet, MessageRole, Assistant, AssistantThread + +tracer = trace.get_tracer(__name__) + + +class _AssistantTeamMember: + """ + Represents an individual assistant on a team. + + :param model: The model (e.g. GPT-4) used by this assistant. + :param name: The assistant's name. + :param instructions: The assistant's initial instructions or "personality". + :param toolset: An optional ToolSet with specialized tools for this assistant. + :param can_delegate: Whether this assistant has delegation capability (e.g., 'create_task'). + Defaults to True. 
+ """ + + def __init__( + self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None, can_delegate: bool = True + ) -> None: + self.model = model + self.name = name + self.instructions = instructions + self.assistant_instance: Optional[Assistant] = None + self.toolset: Optional[ToolSet] = toolset + self.can_delegate = can_delegate + + +class AssistantTask: + """ + Encapsulates a task for an assistant to perform. + + :param recipient: The name of the assistant who should receive the task. + :param task_description: The description of the work to be done or question to be answered. + :param requestor: The name of the assistant or user requesting the task. + """ + + def __init__(self, recipient: str, task_description: str, requestor: str) -> None: + self.recipient = recipient + self.task_description = task_description + self.requestor = requestor + + +class AssistantTeam: + """ + A class that represents a team of assistants. + + """ + + # Static container to store all instances of AssistantTeam + _teams: Dict[str, "AssistantTeam"] = {} + + _assistants_client: AssistantsClient + _assistant_thread: Optional[AssistantThread] = None + _team_leader: Optional[_AssistantTeamMember] = None + _members: List[_AssistantTeamMember] = [] + _tasks: List[AssistantTask] = [] + _team_name: str = "" + _current_request_span: Optional[Span] = None + _current_task_span: Optional[Span] = None + + def __init__(self, team_name: str, assistants_client: AssistantsClient): + """ + Initialize a new AssistantTeam and set it as the singleton instance. 
+ """ + # Validate that the team_name is a non-empty string + if not isinstance(team_name, str) or not team_name: + raise ValueError("Team name must be a non-empty string.") + # Check for existing team with the same name + if team_name in AssistantTeam._teams: + raise ValueError(f"A team with the name '{team_name}' already exists.") + self.team_name = team_name + if assistants_client is None: + raise ValueError("No AssistantsClient provided.") + self._assistants_client = assistants_client + # Store the instance in the static container + AssistantTeam._teams[team_name] = self + + # Get the directory of the current file + current_dir = os.path.dirname(os.path.abspath(__file__)) + # Construct the full path to the config file + file_path = os.path.join(current_dir, "assistant_team_config.yaml") + with open(file_path, "r") as config_file: + config = yaml.safe_load(config_file) + self.TEAM_LEADER_INSTRUCTIONS = config["TEAM_LEADER_INSTRUCTIONS"] + self.TEAM_LEADER_INITIAL_REQUEST = config["TEAM_LEADER_INITIAL_REQUEST"] + self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS = config[ + "TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS" + ] + self.TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS = config["TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS"] + self.TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS = config["TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS"] + self.TEAM_LEADER_MODEL = config["TEAM_LEADER_MODEL"].strip() + + @staticmethod + def get_team(team_name: str) -> "AssistantTeam": + """Static method to fetch the AssistantTeam instance by name.""" + team = AssistantTeam._teams.get(team_name) + if team is None: + raise ValueError(f"No team found with the name '{team_name}'.") + return team + + @staticmethod + def _remove_team(team_name: str) -> None: + """Static method to remove an AssistantTeam instance by name.""" + if team_name not in AssistantTeam._teams: + raise ValueError(f"No team found with the name '{team_name}'.") + del AssistantTeam._teams[team_name] + + def add_assistant( + self, model: 
str, name: str, instructions: str, toolset: Optional[ToolSet] = None, can_delegate: bool = True + ) -> None: + """ + Add a new assistant (team member) to this AssistantTeam. + + :param model: The model name (e.g. GPT-4) for the assistant. + :param name: The name of the assistant being added. + :param instructions: The initial instructions/personality for the assistant. + :param toolset: An optional ToolSet to configure specific tools (functions, etc.) + for this assistant. If None, we'll create a default set. + :param can_delegate: If True, the assistant can delegate tasks (via create_task). + If False, the assistant does not get 'create_task' in its ToolSet + and won't mention delegation in instructions. + """ + if toolset is None: + toolset = ToolSet() + + if can_delegate: + # If assistant can delegate, ensure it has 'create_task' + try: + function_tool = toolset.get_tool(FunctionTool) + function_tool.add_functions(assistant_team_default_functions) + except ValueError: + default_function_tool = FunctionTool(assistant_team_default_functions) + toolset.add(default_function_tool) + + member = _AssistantTeamMember( + model=model, + name=name, + instructions=instructions, + toolset=toolset, + can_delegate=can_delegate, + ) + self._members.append(member) + + def set_team_leader(self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None) -> None: + """ + Set the team leader for this AssistantTeam. + + If team leader has not been set prior to the call to assemble_team, + then a default team leader will be set. + + :param model: The model name (e.g. GPT-4) for the assistant. + :param name: The name of the team leader. + :param instructions: The instructions for the team leader. These instructions + are not modified by the implementation, so all required + information about other team members and how to pass tasks + to them should be included. + :param toolset: An optional ToolSet to configure specific tools (functions, etc.) + for the team leader. 
+ """ + member = _AssistantTeamMember(model=model, name=name, instructions=instructions, toolset=toolset) + self._team_leader = member + + def add_task(self, task: AssistantTask) -> None: + """ + Add a new task to the team's task list. + + :param task: The task to be added. + """ + self._tasks.append(task) + + def _create_team_leader(self) -> None: + """ + Create the team leader assistant. + """ + assert self._assistants_client is not None, "assistants_client must not be None" + assert self._team_leader is not None, "team leader has not been added" + + self._team_leader.assistant_instance = self._assistants_client.create_assistant( + model=self._team_leader.model, + name=self._team_leader.name, + instructions=self._team_leader.instructions, + toolset=self._team_leader.toolset, + ) + + def _set_default_team_leader(self): + """ + Set the default 'TeamLeader' assistant with awareness of all other assistants. + """ + toolset = ToolSet() + toolset.add(default_function_tool) + instructions = self.TEAM_LEADER_INSTRUCTIONS.format(assistant_name="TeamLeader", team_name=self.team_name) + "\n" + # List all assistants (will be empty at this moment if you haven't added any, or you can append after they're added) + for member in self._members: + instructions += f"- {member.name}: {member.instructions}\n" + + self._team_leader = _AssistantTeamMember( + model=self.TEAM_LEADER_MODEL, + name="TeamLeader", + instructions=instructions, + toolset=toolset, + can_delegate=True, + ) + + def assemble_team(self): + """ + Create the team leader assistant and initialize all member assistants with + their configured or default toolsets. 
+ """ + assert self._assistants_client is not None, "assistants_client must not be None" + + if self._team_leader is None: + self._set_default_team_leader() + + self._create_team_leader() + + for member in self._members: + if member is self._team_leader: + continue + + team_description = "" + for other_member in self._members: + if other_member != member: + team_description += f"- {other_member.name}: {other_member.instructions}\n" + + if member.can_delegate: + extended_instructions = self.TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS.format( + name=member.name, + team_name=self._team_name, + original_instructions=member.instructions, + team_description=team_description, + ) + else: + extended_instructions = self.TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS.format( + name=member.name, + team_name=self._team_name, + original_instructions=member.instructions, + team_description=team_description, + ) + member.assistant_instance = self._assistants_client.create_assistant( + model=member.model, name=member.name, instructions=extended_instructions, toolset=member.toolset + ) + + def dismantle_team(self) -> None: + """ + Delete all assistants (including the team leader) from the project client. 
+ """ + assert self._assistants_client is not None, "assistants_client must not be None" + + if self._team_leader and self._team_leader.assistant_instance: + print(f"Deleting team leader assistant '{self._team_leader.name}'") + self._assistants_client.delete_assistant(self._team_leader.assistant_instance.id) + for member in self._members: + if member is not self._team_leader and member.assistant_instance: + print(f"Deleting assistant '{member.name}'") + self._assistants_client.delete_assistant(member.assistant_instance.id) + AssistantTeam._remove_team(self.team_name) + + def _add_task_completion_event( + self, + span: Span, + result: str, + ) -> None: + + attributes: Dict[str, Any] = {} + attributes["assistant_team.task.result"] = result + span.add_event(name=f"assistant_team.task_completed", attributes=attributes) + + def process_request(self, request: str) -> None: + """ + Handle a user's request by creating a team and delegating tasks to + the team leader. The team leader may generate additional tasks. + + :param request: The user's request or question. 
+ """ + assert self._assistants_client is not None, "project client must not be None" + assert self._team_leader is not None, "team leader must not be None" + + if self._assistant_thread is None: + self._assistant_thread = self._assistants_client.create_thread() + print(f"Created thread with ID: {self._assistant_thread.id}") + + with tracer.start_as_current_span("assistant_team_request") as current_request_span: + self._current_request_span = current_request_span + if self._current_request_span is not None: + self._current_request_span.set_attribute("assistant_team.name", self.team_name) + team_leader_request = self.TEAM_LEADER_INITIAL_REQUEST.format(original_request=request) + _create_task( + team_name=self.team_name, + recipient=self._team_leader.name, + request=team_leader_request, + requestor="user", + ) + while self._tasks: + task = self._tasks.pop(0) + with tracer.start_as_current_span("assistant_team_task") as current_task_span: + self._current_task_span = current_task_span + if self._current_task_span is not None: + self._current_task_span.set_attribute("assistant_team.name", self.team_name) + self._current_task_span.set_attribute("assistant_team.task.recipient", task.recipient) + self._current_task_span.set_attribute("assistant_team.task.requestor", task.requestor) + self._current_task_span.set_attribute("assistant_team.task.description", task.task_description) + print( + f"Starting task for assistant '{task.recipient}'. " + f"Requestor: '{task.requestor}'. " + f"Task description: '{task.task_description}'." 
+ ) + message = self._assistants_client.create_message( + thread_id=self._assistant_thread.id, + role="user", + content=task.task_description, + ) + print(f"Created message with ID: {message.id} for task in thread {self._assistant_thread.id}") + assistant = self._get_member_by_name(task.recipient) + if assistant and assistant.assistant_instance: + run = self._assistants_client.create_and_process_run( + thread_id=self._assistant_thread.id, assistant_id=assistant.assistant_instance.id + ) + print(f"Created and processed run for assistant '{assistant.name}', run ID: {run.id}") + messages = self._assistants_client.list_messages(thread_id=self._assistant_thread.id) + text_message = messages.get_last_text_message_by_role(role=MessageRole.ASSISTANT) + if text_message and text_message.text: + print(f"Assistant '{assistant.name}' completed task. " f"Outcome: {text_message.text.value}") + if self._current_task_span is not None: + self._add_task_completion_event(self._current_task_span, result=text_message.text.value) + + # If no tasks remain AND the recipient is not the TeamLeader, + # let the TeamLeader see if more delegation is needed. + if not self._tasks and not task.recipient == "TeamLeader": + team_leader_request = self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS + _create_task( + team_name=self.team_name, + recipient=self._team_leader.name, + request=team_leader_request, + requestor="user", + ) + # self._current_task_span.end() + self._current_task_span = None + # self._current_request_span.end() + self._current_request_span = None + + def _get_member_by_name(self, name) -> Optional[_AssistantTeamMember]: + """ + Retrieve a team member (assistant) by name. + If no member with the specified name is found, returns None. + + :param name: The assistant's name within this team. 
+ """ + if name == "TeamLeader": + return self._team_leader + for member in self._members: + if member.name == name: + return member + return None + + """ + Requests another assistant in the team to complete a task. + + :param span (Span): The event will be added to this span + :param team_name (str): The name of the team. + :param recipient (str): The name of the assistant that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the assistant who is requesting the task. + :return: True if the task was successfully received, False otherwise. + :rtype: str + """ + + +def _add_create_task_event( + span: Span, + team_name: str, + requestor: str, + recipient: str, + request: str, +) -> None: + + attributes: Dict[str, Any] = {} + attributes["assistant_team.task.team_name"] = team_name + attributes["assistant_team.task.requestor"] = requestor + attributes["assistant_team.task.recipient"] = recipient + attributes["assistant_team.task.description"] = request + span.add_event(name=f"assistant_team.create_task", attributes=attributes) + + +def _create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: + """ + Requests another assistant in the team to complete a task. + + :param team_name (str): The name of the team. + :param recipient (str): The name of the assistant that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the assistant who is requesting the task. + :return: True if the task was successfully received, False otherwise. 
+ :rtype: str + """ + task = AssistantTask(recipient=recipient, task_description=request, requestor=requestor) + team: Optional[AssistantTeam] = None + try: + team = AssistantTeam.get_team(team_name) + span: Optional[Span] = None + if team._current_task_span is not None: + span = team._current_task_span + elif team._current_request_span is not None: + span = team._current_request_span + + if span is not None: + _add_create_task_event( + span=span, team_name=team_name, requestor=requestor, recipient=recipient, request=request + ) + except: + pass + if team is not None: + team.add_task(task) + return "True" + return "False" + + +# Any additional functions that might be used by the assistants: +assistant_team_default_functions: Set = { + _create_task, +} + +default_function_tool = FunctionTool(functions=assistant_team_default_functions) diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team_config.yaml b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team_config.yaml new file mode 100644 index 000000000000..af1c9b50fba1 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team_config.yaml @@ -0,0 +1,43 @@ +TEAM_LEADER_MODEL: | + gpt-4 + +TEAM_LEADER_INSTRUCTIONS: | + You are an assistant named '{assistant_name}'. You are a leader of a team of assistants. The name of your team is '{team_name}'. + You are an assistant that is responsible for receiving requests from user and utilizing a team of assistants to complete the task. + When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request. + You will use the provided _create_task function to create a task for the assistant that is best suited for handling the task next. + You will respond with the description of who you assigned the task and why. 
When you think that the original user request is + processed completely utilizing all the talent available in the team, you do not have to create anymore tasks. + Using the skills of all the team members when applicable is highly valued. + Do not create parallel tasks. + Here are the other assistants in your team: + +TEAM_LEADER_INITIAL_REQUEST: | + Please create a task for assistant in the team that is best suited to next process the following request. + Use the _create_task function available for you to create the task. The request is: + {original_request} + +TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS: | + Check the discussion so far and especially the most recent assistant response in the thread and if you see a potential task + that could improve the final outcome, then use the _create_task function to create the task. + Do not ever ask user confirmation for creating a task. + If the request is completely processed, you do not have to create a task. + +TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS: | + You are an assistant named '{name}'. You are a member in a team of assistants. The name of your team is '{team_name}'. + {original_instructions} + + - You can delegate tasks when appropriate. To delegate, call the _create_task function, using your own name as the 'requestor'. + - Provide a brief account of any tasks you assign and the outcome. + - Ask for help from other team members if you see they have the relevant expertise. + - Once you believe your assignment is complete, respond with your final answer or actions taken. + - Below are the other assistants in your team: {team_description} + +TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS: | + You are an assistant named '{name}'. You are a member in a team of assistants. The name of your team is '{team_name}'. + {original_instructions} + + - You do not delegate tasks. Instead, focus solely on fulfilling the tasks assigned to you. 
+ - If you have suggestions for tasks better suited to another assistant, simply mention it in your response, but do not call _create_task yourself. + - Once you believe your assignment is complete, respond with your final answer or actions taken. + - Below are the other assistants in your team: {team_description} diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py new file mode 100644 index 000000000000..0f8ee1d66f0b --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py @@ -0,0 +1,63 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import os +import sys +from typing import cast +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.telemetry import enable_telemetry +from azure.monitor.opentelemetry import configure_azure_monitor + + +class AssistantTraceConfigurator: + def __init__(self, assistants_client: AssistantsClient): + self.assistants_client = assistants_client + + def enable_azure_monitor_tracing(self): + application_insights_connection_string = os.environ.get('AI_APPINSIGHTS_CONNECTION_STRING') + if not application_insights_connection_string: + print("AI_APPINSIGHTS_CONNECTION_STRING environment variable was not set.") + print("Please create AI_APPINSIGHTS_CONNECTION_STRING with the Application Insights,") + print("connection string. 
It should be enabled for this project.") + print("Enable it via the 'Tracing' tab in your AI Foundry project page.") + exit() + configure_azure_monitor(connection_string=application_insights_connection_string) + enable_telemetry() + + def enable_console_tracing_without_genai(self): + exporter = ConsoleSpanExporter() + trace.set_tracer_provider(TracerProvider()) + tracer = trace.get_tracer(__name__) + provider = cast(TracerProvider, trace.get_tracer_provider()) + provider.add_span_processor(SimpleSpanProcessor(exporter)) + print("Console tracing enabled without assistant traces.") + + def enable_console_tracing_with_assistant(self): + enable_telemetry(destination=sys.stdout) + print("Console tracing enabled with assistant traces.") + + def display_menu(self): + print("Select a tracing option:") + print("1. Enable Azure Monitor tracing") + print("2. Enable console tracing without enabling gen_ai assistant traces") + print("3. Enable console tracing with gen_ai assistant traces") + print("4. Do not enable traces") + + def setup_tracing(self): + self.display_menu() + choice = input("Enter your choice (1-4): ") + + if choice == "1": + self.enable_azure_monitor_tracing() + elif choice == "2": + self.enable_console_tracing_without_genai() + elif choice == "3": + self.enable_console_tracing_with_assistant() + elif choice == "4": + print("No tracing enabled.") + else: + print("Invalid choice. Please select a valid option.") diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py new file mode 100644 index 000000000000..66981207c671 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py @@ -0,0 +1,62 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use multiple assistants using AssistantTeam with traces. + +USAGE: + python sample_assistants_assistant_team.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from assistant_team import AssistantTeam +from assistant_trace_configurator import AssistantTraceConfigurator + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + +if model_deployment_name is not None: + AssistantTraceConfigurator(assistants_client=assistants_client).setup_tracing() + with assistants_client: + assistant_team = AssistantTeam("test_team", assistants_client=assistants_client) + assistant_team.add_assistant( + model=model_deployment_name, + name="Coder", + instructions="You are software engineer who writes great code. Your name is Coder.", + ) + assistant_team.add_assistant( + model=model_deployment_name, + name="Reviewer", + instructions="You are software engineer who reviews code. 
Your name is Reviewer.", + ) + assistant_team.assemble_team() + + print("A team of assistants specialized in software engineering is available for requests.") + while True: + user_input = input("Input (type 'quit' or 'exit' to exit): ") + if user_input.lower() == "quit": + break + elif user_input.lower() == "exit": + break + assistant_team.process_request(request=user_input) + + assistant_team.dismantle_team() +else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py new file mode 100644 index 000000000000..4fcda33d7269 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py @@ -0,0 +1,115 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to multiple assistants using AssistantTeam with traces. + +USAGE: + python sample_assistants_assistant_team_custom_team_leader.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
+""" + +import os +from typing import Optional, Set +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from assistant_team import AssistantTeam, AssistantTask +from assistant_trace_configurator import AssistantTraceConfigurator +from azure.ai.assistants.models import FunctionTool, ToolSet + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + + +def create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: + """ + Requests another assistant in the team to complete a task. + + :param team_name (str): The name of the team. + :param recipient (str): The name of the assistant that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the assistant who is requesting the task. + :return: True if the task was successfully received, False otherwise. 
+ :rtype: str + """ + task = AssistantTask(recipient=recipient, task_description=request, requestor=requestor) + team: Optional[AssistantTeam] = None + try: + team = AssistantTeam.get_team(team_name) + except: + pass + if team is not None: + team.add_task(task) + return "True" + return "False" + + +# Any additional functions that might be used by the assistants: +assistant_team_default_functions: Set = { + create_task, +} + +default_function_tool = FunctionTool(functions=assistant_team_default_functions) + + +if model_deployment_name is not None: + AssistantTraceConfigurator(assistants_client=assistants_client).setup_tracing() + with assistants_client: + assistant_team = AssistantTeam("test_team", assistants_client=assistants_client) + toolset = ToolSet() + toolset.add(default_function_tool) + assistant_team.set_team_leader( + model=model_deployment_name, + name="TeamLeader", + instructions="You are an assistant named 'TeamLeader'. You are a leader of a team of assistants. The name of your team is 'test_team'." + "You are an assistant that is responsible for receiving requests from user and utilizing a team of assistants to complete the task. " + "When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request. " + "You will use the provided create_task function to create a task for the assistant that is best suited for handling the task next. " + "You will respond with the description of who you assigned the task and why. When you think that the original user request is " + "processed completely utilizing all the talent available in the team, you do not have to create anymore tasks. " + "Using the skills of all the team members when applicable is highly valued. " + "Do not create parallel tasks. " + "Here are the other assistants in your team: " + "- Coder: You are software engineer who writes great code. Your name is Coder. " + "- Reviewer: You are software engineer who reviews code. 
Your name is Reviewer.", + toolset=toolset, + ) + assistant_team.add_assistant( + model=model_deployment_name, + name="Coder", + instructions="You are software engineer who writes great code. Your name is Coder.", + ) + assistant_team.add_assistant( + model=model_deployment_name, + name="Reviewer", + instructions="You are software engineer who reviews code. Your name is Reviewer.", + ) + assistant_team.assemble_team() + + print("A team of assistants specialized in software engineering is available for requests.") + while True: + user_input = input("Input (type 'quit' or 'exit' to exit): ") + if user_input.lower() == "quit": + break + elif user_input.lower() == "exit": + break + assistant_team.process_request(request=user_input) + + assistant_team.dismantle_team() +else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py new file mode 100644 index 000000000000..bf6142d2468c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py @@ -0,0 +1,99 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use multiple assistants to execute tasks. + +USAGE: + python sample_assistants_multi_assistant_team.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
+""" + +import os + +from user_functions_with_traces import * +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ToolSet, FunctionTool +from azure.identity import DefaultAzureCredential +from assistant_team import AssistantTeam +from assistant_trace_configurator import AssistantTraceConfigurator + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +user_function_set_1: Set = {fetch_current_datetime, fetch_weather} + +user_function_set_2: Set = {send_email_using_recipient_name} + +user_function_set_3: Set = {convert_temperature} + +model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + +if model_deployment_name is not None: + AssistantTraceConfigurator(assistants_client=assistants_client).setup_tracing() + with assistants_client: + + functions = FunctionTool(functions=user_function_set_1) + toolset1 = ToolSet() + toolset1.add(functions) + + assistant_team = AssistantTeam("test_team", assistants_client=assistants_client) + + assistant_team.add_assistant( + model=model_deployment_name, + name="TimeWeatherAssistant", + instructions="You are a specialized assistant for time and weather queries.", + toolset=toolset1, + can_delegate=True, + ) + + functions = FunctionTool(functions=user_function_set_2) + toolset2 = ToolSet() + toolset2.add(functions) + + assistant_team.add_assistant( + model=model_deployment_name, + name="SendEmailAssistant", + instructions="You are a specialized assistant for sending emails.", + toolset=toolset2, + can_delegate=False, + ) + + functions = FunctionTool(functions=user_function_set_3) + toolset3 = ToolSet() + toolset3.add(functions) + + assistant_team.add_assistant( + model=model_deployment_name, + name="TemperatureAssistant", + instructions="You are a specialized assistant for temperature conversion.", + toolset=toolset3, + can_delegate=False, + ) + + assistant_team.assemble_team() + + 
user_request = ( + "Hello, Please provide me current time in '%Y-%m-%d %H:%M:%S' format, and the weather in New York. " + "Finally, convert the Celsius to Fahrenheit and send an email to Example Recipient with summary of results." + ) + + # Once process_request is called, the TeamLeader will coordinate. + # The loop in process_request will pick up tasks from the queue, assign them, and so on. + assistant_team.process_request(request=user_request) + + assistant_team.dismantle_team() +else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/user_functions_with_traces.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/user_functions_with_traces.py new file mode 100644 index 000000000000..5c1df046571d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/user_functions_with_traces.py @@ -0,0 +1,111 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime +from typing import Any, Callable, Set, Optional +from opentelemetry import trace + + +tracer = trace.get_tracer(__name__) + + +# These are the user-defined functions that can be called by the assistant. +@tracer.start_as_current_span("fetch_current_datetime") # type: ignore +def fetch_current_datetime(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. 
+ :rtype: str + """ + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +@tracer.start_as_current_span("fetch_weather") # type: ignore +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. + mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +@tracer.start_as_current_span("send_email_using_recipient_name") # type: ignore +def send_email_using_recipient_name(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Name of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. + print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +@tracer.start_as_current_span("convert_temperature") # type: ignore +def convert_temperature(celsius: float) -> str: + """Converts temperature from Celsius to Fahrenheit. + + :param celsius (float): Temperature in Celsius. 
+ + + :return: Temperature in Fahrenheit. + :rtype: str + """ + fahrenheit = (celsius * 9 / 5) + 32 + return json.dumps({"fahrenheit": fahrenheit}) + + +# Example User Input for Each Function +# 1. Fetch Current DateTime +# User Input: "What is the current date and time?" +# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" + +# 2. Fetch Weather +# User Input: "Can you provide the weather information for New York?" + +# 3. Send Email Using Recipient Name +# User Input: "Send an email to John Doe with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" + +# 4. Convert Temperature +# User Input: "Convert 25 degrees Celsius to Fahrenheit." + + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_current_datetime, + fetch_weather, + send_email_using_recipient_name, + convert_temperature, +} diff --git a/sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv b/sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv new file mode 100644 index 000000000000..e02068e09042 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv @@ -0,0 +1,502 @@ +name,NSE_code,BSE_code,sector,industry,revenue,operating_expenses,operating_profit,operating_profit_margin,depreciation,interest,profit_before_tax,tax,net_profit,EPS,profit_TTM,EPS_TTM +3M India Ltd.,3MINDIA,523395,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,"1,057",847.4,192.1,18.48%,12.9,0.7,195.9,49.8,146.1,129.7,535.9,475.7 +ACC Ltd.,ACC,500410,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"4,644.8","3,885.4",549.3,12.39%,212.8,28.9,517.7,131.5,387.9,20.7,"1,202.7",64 +AIA Engineering Ltd.,AIAENG,532683,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,"1,357.1",912.7,382.1,29.51%,24.5,7.4,412.5,88.4,323.1,34.3,"1,216.1",128.9 +APL Apollo Tubes Ltd.,APLAPOLLO,533758,METALS & MINING,IRON & STEEL
PRODUCTS,"4,65","4,305.4",325,7.02%,41.3,26.6,276.7,73.8,202.9,7.3,767.5,27.7 +Au Small Finance Bank Ltd.,AUBANK,540611,BANKING AND FINANCE,BANKS,"2,956.5","1,026.7",647.7,25.59%,0,"1,282.1",533.4,131.5,401.8,6,"1,606.2",24 +Adani Ports & Special Economic Zone Ltd.,ADANIPORTS,532921,TRANSPORTATION,MARINE PORT & SERVICES,"6,951.9","2,982.4","3,664",55.13%,974.5,520.1,"2,474.9",759,"1,747.8",8.1,"6,337",29.3 +Adani Energy Solutions Ltd.,ADANIENSOL,ASM,UTILITIES,ELECTRIC UTILITIES,"3,766.5","2,169.3","1,504.6",40.95%,432.1,640.8,369.9,84.9,275.9,2.5,"1,315.1",11.8 +Aditya Birla Fashion and Retail Ltd.,ABFRL,535755,RETAILING,DEPARTMENT STORES,"3,272.2","2,903.6",322.9,10.01%,388.8,208.4,-228.6,-28.2,-179.2,-1.9,-491.7,-5.2 +Aegis Logistics Ltd.,AEGISCHEM,500003,OIL & GAS,OIL MARKETING & DISTRIBUTION,"1,279.3","1,026.5",208.3,16.87%,34.1,26.6,192,42,127,3.6,509,14.5 +Ajanta Pharma Ltd.,AJANTPHARM,532331,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,049.8",737.8,290.7,28.26%,33.7,2.3,275.9,80.6,195.3,15.5,660.2,52.3 +Alembic Pharmaceuticals Ltd.,APLLTD,533573,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,605.1","1,386.7",208.2,13.06%,67.6,15.7,135.1,-1.9,136.6,7,531.7,27 +Alkem Laboratories Ltd.,ALKEM,539523,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"3,503.4","2,693.4",746.7,21.71%,73.9,30.3,648,33.1,620.5,51.9,"1,432.9",119.9 +Amara Raja Energy & Mobility Ltd.,ARE&M,500008,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,988.6","2,556.9",402.5,13.60%,115.7,6.2,309.8,83.5,226.3,13.2,779.8,45.7 +Ambuja Cements Ltd.,AMBUJACEM,500425,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"7,9","6,122.1","1,301.8",17.54%,380.9,61.2,"1,335.7",352.5,793,4,"2,777.9",14 +Apollo Hospitals Enterprise Ltd.,APOLLOHOSP,508869,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"4,869.1","4,219.4",627.5,12.95%,163.4,111.3,376.9,130.2,232.9,16.2,697.5,48.5 +Apollo Tyres Ltd.,APOLLOTYRE,500877,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER 
PRODUCTS,"6,304.9","5,119.8","1,159.8",18.47%,360.3,132.8,679.9,205.8,474.3,7.5,"1,590.7",25 +Ashok Leyland Ltd.,ASHOKLEY,500477,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,"11,463","9,558.6","1,870.4",16.37%,226.6,715.1,924.4,358,526,1.8,"2,141.5",7.3 +Asian Paints Ltd.,ASIANPAINT,500820,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"8,643.8","6,762.3","1,716.2",20.24%,208.7,50.9,"1,621.8",418.6,"1,205.4",12.6,"5,062.6",52.8 +Astral Ltd.,ASTRAL,532830,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,"1,376.4","1,142.9",220.1,16.15%,48.7,8,176.8,45.1,131.2,4.9,549.7,20.4 +Atul Ltd.,ATUL,500027,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,215.8","1,038.5",155.2,13.00%,54,1.9,121.5,32.5,90.3,30.6,392.3,132.9 +Aurobindo Pharma Ltd.,AUROPHARMA,524804,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"7,406.4","5,846","1,373.4",19.02%,417.5,68.2,"1,074.7",323.7,757.2,12.8,"2,325.5",39.7 +Avanti Feeds Ltd.,AVANTIFEED,512573,FOOD BEVERAGES & TOBACCO,OTHER FOOD PRODUCTS,"1,312","1,184.5",94,7.35%,14.3,0.2,113,30.5,74.2,5.5,336.4,24.7 +Avenue Supermarts Ltd.,DMART,540376,RETAILING,DEPARTMENT STORES,"12,661.3","11,619.4","1,005",7.96%,174.4,15.6,851.9,228.6,623.6,9.6,"2,332.1",35.8 +Axis Bank Ltd.,AXISBANK,532215,BANKING AND FINANCE,BANKS,"33,122.2","9,207.3","9,166",33.43%,0,"14,749","8,313.8","2,096.1","6,204.1",20.1,"13,121",42.6 +Bajaj Auto Ltd.,BAJAJ-AUTO,532977,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"11,206.8","8,708.1","2,130.1",19.65%,91.8,6.5,"2,400.4",564,"2,02",71.4,"6,841.6",241.8 +Bajaj Finance Ltd.,BAJFINANCE,500034,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"13,381.8","2,851.5","9,449.7",70.63%,158.5,"4,537.1","4,757.6","1,207","3,550.8",58.7,"13,118.5",216.7 +Bajaj Finserv Ltd.,BAJAJFINSV,532978,DIVERSIFIED,HOLDING COMPANIES,"26,022.7","14,992.2","9,949.9",38.24%,208.8,"4,449.1","5,292","1,536.5","1,929",12.1,"7,422.6",46.6 +Bajaj Holdings & Investment Ltd.,BAJAJHLDNG,500490,DIVERSIFIED,HOLDING 
COMPANIES,240.1,33.5,191.2,85.08%,8.4,0.2,197.9,73.9,"1,491.2",134,"5,545.1",498.3 +Balkrishna Industries Ltd.,BALKRISIND,502355,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"2,360.3","1,720.5",532.7,23.64%,160.4,23.9,455.5,108.1,347.4,18,"1,047.5",54.2 +Balrampur Chini Mills Ltd.,BALRAMCHIN,500038,FOOD BEVERAGES & TOBACCO,SUGAR,"1,649","1,374.6",164.9,10.71%,41.2,17.2,215.9,56.6,166.3,8.2,540.5,26.8 +Bank of Baroda,BANKBARODA,532134,BANKING AND FINANCE,BANKS,"35,766","8,430.4","9,807.9",33.52%,0,"17,527.7","6,022.8","1,679.7","4,458.4",8.5,"18,602.9",35.9 +Bank of India,BANKINDIA,532149,BANKING AND FINANCE,BANKS,"16,779.4","3,704.9","3,818.8",25.35%,0,"9,255.7","2,977.4","1,488.6","1,498.5",3.6,"5,388.7",13.1 +Bata India Ltd.,BATAINDIA,500043,RETAILING,FOOTWEAR,834.6,637.5,181.7,22.18%,81.7,28.4,46.1,12.1,34,2.6,289.7,22.5 +Berger Paints (India) Ltd.,BERGEPAINT,509480,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"2,782.6","2,293.7",473.6,17.12%,82.9,21.1,385,96.7,291.6,2.5,"1,032.6",8.9 +Bharat Electronics Ltd.,BEL,500049,GENERAL INDUSTRIALS,DEFENCE,"4,146.1","2,994.9","1,014.2",25.30%,108.3,1.5,"1,041.5",260.7,789.4,1.1,"3,323",4.5 +Bharat Forge Ltd.,BHARATFORG,500493,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"3,826.7","3,152.8",621.4,16.47%,211.3,124.3,336.1,121.8,227.2,4.9,783.7,16.8 +Bharat Heavy Electricals Ltd.,BHEL,500103,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"5,305.4","5,513",-387.7,-7.56%,59.9,180.4,-447.9,-197.9,-238.1,-0.7,71.3,0.2 +Bharat Petroleum Corporation Ltd.,BPCL,500547,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"103,72","90,103.9","12,940.5",12.56%,"1,605.3",973.2,"10,755.7","2,812.2","8,243.5",38.7,"27,505.3",129.2 +Bharti Airtel Ltd.,BHARTIARTL,532454,TELECOM SERVICES,TELECOM SERVICES,"37,374.2","17,530.1","19,513.7",52.68%,"9,734.3","5,185.8","3,353.7","1,846.5","1,340.7",2.4,"7,547",13.2 +Indus Towers Ltd.,INDUSTOWER,534816,TELECOM SERVICES,OTHER TELECOM 
SERVICES,"7,229.7","3,498.8","3,633.7",50.95%,"1,525.6",458.6,"1,746.7",452,"1,294.7",4.8,"3,333.5",12.4 +Biocon Ltd.,BIOCON,532523,PHARMACEUTICALS & BIOTECHNOLOGY,BIOTECHNOLOGY,"3,620.2","2,720.7",741.6,21.42%,389.3,247.7,238.5,41.6,125.6,1.1,498.4,4.2 +Birla Corporation Ltd.,BIRLACORPN,500335,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,313.2","1,997",288.9,12.64%,143.5,95.4,77.1,18.8,58.4,7.6,153.1,19.9 +Blue Dart Express Ltd.,BLUEDART,526612,TRANSPORTATION,TRANSPORTATION - LOGISTICS,"1,329.7","1,101.8",222.7,16.82%,110.6,19.5,97.9,24.8,73.1,30.8,292.4,123.2 +Blue Star Ltd.,BLUESTARCO,500067,CONSUMER DURABLES,CONSUMER ELECTRONICS,"1,903.4","1,767.7",122.7,6.49%,23,17.6,95,24.3,70.7,3.6,437.7,21.3 +Bombay Burmah Trading Corporation Ltd.,BBTC,501425,FOOD BEVERAGES & TOBACCO,TEA & COFFEE,"4,643.5","3,664.7",859.2,18.99%,74.7,154.6,697.1,212.6,122,17.5,"-1,499.5",-214.8 +Bosch Ltd.,BOSCHLTD,500530,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"4,284.3","3,638.8",491.3,11.90%,101.3,12.2,"1,317",318.1,999.8,339,"2,126.9",721 +Brigade Enterprises Ltd.,BRIGADE,532929,REALTY,REALTY,"1,407.9","1,041.8",324.8,23.77%,75.7,110,180.3,67.8,133.5,5.8,298.2,12.9 +Britannia Industries Ltd.,BRITANNIA,500825,FMCG,PACKAGED FOODS,"4,485.2","3,560.5",872.4,19.68%,71.7,53.4,799.7,212.1,587.6,24.4,"2,536.2",105.3 +CCL Products India Ltd.,CCL,519600,FOOD BEVERAGES & TOBACCO,TEA & COFFEE,608.3,497.7,109.9,18.09%,22.6,18.4,69.7,8.8,60.9,4.6,279.9,21 +Crisil Ltd.,CRISIL,500092,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,771.8,544.2,191.7,26.05%,26.5,0.8,200.3,48.3,152,20.8,606.3,82.9 +Zydus Lifesciences Ltd.,ZYDUSLIFE,532321,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"4,422.8","3,222.7","1,146.1",26.23%,184.2,8.7,"1,007.2",226.4,800.7,7.9,"2,807.1",27.7 +Can Fin Homes Ltd.,CANFINHOME,511196,BANKING AND FINANCE,HOUSING FINANCE,871,49.7,749.2,86.01%,2.8,548.4,198,39.9,158.1,11.9,658.8,49.5 +Canara Bank,CANBK,532483,BANKING AND 
FINANCE,BANKS,"33,891.2","8,250.3","7,706.6",28.24%,0,"17,934.3","5,098","1,420.6","3,86",20.9,"13,968.4",77 +Carborundum Universal Ltd.,CARBORUNIV,513375,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"1,166",978.8,167.5,14.61%,45.9,4.9,136.4,43.7,101.9,5.4,461.3,24.3 +Castrol India Ltd.,CASTROLIND,500870,OIL & GAS,OIL MARKETING & DISTRIBUTION,"1,203.2",914.4,268.6,22.70%,22.9,2.4,263.5,69.1,194.4,2,815.5,8.2 +Ceat Ltd.,CEATLTD,500878,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"3,063.8","2,597.2",456.1,14.94%,124.5,71.7,270.4,68.3,208,51.4,521.7,129 +Central Bank of India,CENTRALBK,532885,BANKING AND FINANCE,BANKS,"8,438.5","2,565.4","1,535.4",20.81%,0,"4,337.7",567.2,-41.5,622,0.7,"2,181.4",2.5 +Century Plyboards (India) Ltd.,CENTURYPLY,532548,FOREST MATERIALS,FOREST PRODUCTS,"1,011.4",852.5,144.3,14.47%,23.4,6.1,129.4,32.2,96.9,4.4,380.7,17.1 +Cera Sanitaryware Ltd.,CERA,532443,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,476.2,387.2,76.5,16.49%,8.9,1.4,77.2,19.8,56.9,43.8,232.4,178.7 +Chambal Fertilisers & Chemicals Ltd.,CHAMBLFERT,500085,FERTILIZERS,FERTILIZERS,"5,467.3","4,770.5",615,11.42%,78.4,45.8,572.6,200.2,381,9.2,"1,137.7",27.3 +Cholamandalam Investment & Finance Company Ltd.,CHOLAFIN,511243,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"4,695.2",987.6,"3,235.1",69.99%,38.5,"2,204.2","1,065",288.8,772.9,9.4,"3,022.8",36.7 +Cipla Ltd.,CIPLA,500087,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"6,854.5","4,944.4","1,733.8",25.96%,290,25.8,"1,594.2",438.4,"1,130.9",14,"3,449.1",42.7 +City Union Bank Ltd.,CUB,532210,BANKING AND FINANCE,BANKS,"1,486.1",333.9,386.6,29.65%,0,765.6,330.6,50,280.6,3.8,943.8,12.7 +Coal India Ltd.,COALINDIA,533278,METALS & MINING,COAL,"34,760.3","24,639.4","8,137",24.83%,"1,178.2",182.5,"8,760.2","2,036.5","6,799.8",11,"28,059.6",45.5 +Colgate-Palmolive (India) Ltd.,COLPAL,500830,FMCG,PERSONAL PRODUCTS,"1,492.1",989,482.1,32.77%,44.3,1.1,457.8,117.8,340.1,12.5,"1,173.2",43.1 +Container Corporation of 
India Ltd.,CONCOR,531344,COMMERCIAL SERVICES & SUPPLIES,WAREHOUSING AND LOGISTICS,"2,299.8","1,648.4",546.5,24.90%,153.1,16.5,481.8,119,367.4,6,"1,186.2",19.5 +Coromandel International Ltd.,COROMANDEL,506395,FERTILIZERS,FERTILIZERS,"7,032.9","5,929.4","1,058.7",15.15%,54,46.2,"1,003.3",245,756.9,25.7,"2,024.2",68.8 +Crompton Greaves Consumer Electricals Ltd.,CROMPTON,539876,CONSUMER DURABLES,HOUSEHOLD APPLIANCES,"1,797.2","1,607.8",174.5,9.79%,32.1,21.5,135.8,34.9,97.2,1.5,432,6.7 +Cummins India Ltd.,CUMMINSIND,500480,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,"2,011.3","1,575.4",346.2,18.02%,38.3,6.8,390.9,99.6,329.1,11.9,"1,445.5",52.1 +Cyient Ltd.,CYIENT,532175,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,792","1,452.7",325.8,18.32%,65.8,27,240.3,56.7,178.3,16.3,665.6,60.1 +DCM Shriram Ltd.,DCMSHRIRAM,523367,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"2,73","2,593.9",114.1,4.21%,74,14.7,47.5,15.2,32.2,2.1,617.6,39.4 +DLF Ltd.,DLF,532868,REALTY,REALTY,"1,476.4",885.3,462.4,34.31%,37,90.2,464,112.2,622.8,2.5,"2,239",9 +Dabur India Ltd.,DABUR,500096,FMCG,PERSONAL PRODUCTS,"3,320.2","2,543",660.9,20.63%,98.3,28.1,650.8,144.3,515,2.9,"1,755.7",9.9 +Delta Corp Ltd.,DELTACORP,532848,COMMERCIAL SERVICES & SUPPLIES,MISC. COMMERCIAL SERVICES,282.6,170.5,100.1,36.99%,16.9,2.7,92.4,23,69.4,2.6,273.3,10.2 +Divi's Laboratories Ltd.,DIVISLAB,532488,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,995","1,43",479,25.09%,95,1,469,121,348,13.1,"1,331.8",50.3 +Dr. Lal Pathlabs Ltd.,LALPATHLAB,539524,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE SERVICES,619.4,423.5,177.8,29.57%,35.9,7.8,152.2,41.5,109.3,13.2,301.4,36.1 +Dr. 
Reddy's Laboratories Ltd.,DRREDDY,500124,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"7,217.6","4,888.8","2,008.3",29.09%,375.5,35.3,"1,912.5",434.5,"1,482.2",89.1,"5,091.2",305.2 +EID Parry (India) Ltd.,EIDPARRY,500125,FOOD BEVERAGES & TOBACCO,OTHER FOOD PRODUCTS,"9,210.3","8,002","1,057.5",11.67%,101.2,74.2,"1,032.8",246.8,452.3,25.5,991,55.8 +Eicher Motors Ltd.,EICHERMOT,505200,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"4,388.3","3,027.4","1,087.2",26.42%,142.5,12.7,"1,205.7",291.1,"1,016.2",37.1,"3,581",130.8 +Emami Ltd.,EMAMILTD,531162,FMCG,PERSONAL PRODUCTS,876,631.2,233.7,27.02%,46.1,2.2,196.4,15.8,178.5,4.1,697.8,16 +Endurance Technologies Ltd.,ENDURANCE,540153,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,560.5","2,226.7",318.3,12.51%,118.4,9.8,205.6,51.1,154.6,11,562.8,40 +Engineers India Ltd.,ENGINERSIN,532178,COMMERCIAL SERVICES & SUPPLIES,CONSULTING SERVICES,833.6,691.3,98.5,12.47%,8.3,0.4,133.6,32.2,127.5,2.3,472.7,8.4 +Escorts Kubota Ltd.,ESCORTS,500495,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,"2,154.4","1,798.6",260.7,12.66%,40.8,3.1,311.9,79.7,223.3,20.6,910.5,82.4 +Exide Industries Ltd.,EXIDEIND,500086,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"4,408.9","3,872.4",499.1,11.42%,141.5,29.7,365.3,95.2,269.4,3.2,872.7,10.3 +Federal Bank Ltd.,FEDERALBNK,500469,BANKING AND FINANCE,BANKS,"6,548.2","1,603.8","1,400.3",24.18%,0,"3,544.1","1,342.7",342.6,994.1,4.3,"3,671.4",15.6 +Finolex Cables Ltd.,FINCABLES,500144,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,229.3","1,041.3",146.1,12.30%,10.8,0.4,176.7,52.3,154.2,10.1,643.9,42.1 +Finolex Industries Ltd.,FINPIPE,500940,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,944.5,780.2,103,11.66%,27.4,12.5,124.5,35.4,98,1.6,459.3,7.4 +Firstsource Solutions Ltd.,FSL,532809,SOFTWARE & SERVICES,BPO/KPO,"1,556.9","1,311.2",228.8,14.86%,65.4,26.1,154.3,27.8,126.5,1.9,551.7,7.9 +GAIL (India) 
Ltd.,GAIL,532155,UTILITIES,UTILITIES,"33,191","29,405.5","3,580.2",10.85%,837.3,199.6,"2,748.7",696.3,"2,444.1",3.7,"5,283.8",8 +GlaxoSmithKline Pharmaceuticals Ltd.,GLAXO,500660,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,985.2,667.5,289.5,30.25%,18.1,0.4,299.2,81.7,217.5,12.8,647.8,38.2 +Glenmark Pharmaceuticals Ltd.,GLENMARK,532296,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"3,209.1","2,745.1",462.3,14.41%,141.5,121.5,-124.4,55.9,-81.9,-2.9,-196.3,-7 +Godrej Consumer Products Ltd.,GODREJCP,532424,FMCG,PERSONAL PRODUCTS,"3,667.9","2,897.8",704.2,19.55%,60.9,77.3,619.4,186.6,432.8,4.2,"1,750.1",17.1 +Godrej Industries Ltd.,GODREJIND,500164,DIVERSIFIED,DIVERSIFIED,"4,256.9","3,672.1",265.5,6.74%,89.3,333.1,162.4,75.9,87.3,2.6,880,26.1 +Godrej Properties Ltd.,GODREJPROP,533150,REALTY,REALTY,605.1,404.7,-61.7,-17.98%,7.4,48,145.1,38.8,66.8,2.4,662.6,23.8 +Granules India Ltd.,GRANULES,532482,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,191",976.5,213,17.90%,52.5,26,136,33.9,102.1,4.2,393.9,16.3 +Great Eastern Shipping Company Ltd.,GESHIP,500620,TRANSPORTATION,SHIPPING,"1,461.5",585.6,643.4,52.35%,186.7,77.1,611.9,17.3,594.7,41.6,"2,520.1",176.5 +Gujarat Alkalies & Chemicals Ltd.,GUJALKALI,530001,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"1,042.3",926.1,45.2,4.65%,95.2,10.8,10.2,-0.1,-18.4,-2.5,82.7,11.3 +Gujarat Gas Ltd.,GUJGASLTD,539336,UTILITIES,UTILITIES,"4,019.3","3,494.5",496.6,12.44%,117.9,7.8,399.1,102.9,296.2,4.3,"1,254.3",18.2 +Gujarat Narmada Valley Fertilizers & Chemicals Ltd.,GNFC,500670,FERTILIZERS,FERTILIZERS,"2,232","1,911",169,8.12%,78,1,242,64,182,11.7,932,60.1 +Gujarat Pipavav Port Ltd.,GPPL,533248,TRANSPORTATION,MARINE PORT & SERVICES,270.4,102,150.6,59.64%,28.8,2.2,141.1,53.4,92.3,1.9,341.8,7.1 +Gujarat State Fertilizer & Chemicals Ltd.,GSFC,500690,FERTILIZERS,FERTILIZERS,"3,313.2","2,881.4",237.3,7.61%,45.7,1.6,387,78.1,308.9,7.8,"1,056.2",26.5 +Gujarat State Petronet 
Ltd.,GSPL,532702,UTILITIES,UTILITIES,"4,455.9","3,497.2",913.7,20.72%,165,14.5,779.2,198.7,454.6,8.1,"1,522",27 +HCL Technologies Ltd.,HCLTECH,532281,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"27,037","20,743","5,929",22.23%,"1,01",156,"5,128","1,295","3,832",14.2,"15,445",56.9 +HDFC Bank Ltd.,HDFCBANK,500180,BANKING AND FINANCE,BANKS,"107,566.6","42,037.6","24,279.1",32.36%,0,"41,249.9","20,967.4","3,655","16,811.4",22.2,"54,474.6",71.8 +Havells India Ltd.,HAVELLS,517354,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"3,952.8","3,527",373.4,9.57%,81.2,9.3,335.3,86.2,249.1,4,"1,177.7",18.8 +Hero MotoCorp Ltd.,HEROMOTOCO,500182,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"9,741.2","8,173.5","1,359.5",14.26%,187.1,25,"1,355.6",353.1,"1,006.3",50.3,"3,247.6",162.5 +HFCL Ltd.,HFCL,500183,TELECOMMUNICATIONS EQUIPMENT,TELECOM CABLES,"1,128.7",978.9,132.6,11.93%,21.4,34.8,93.5,24,69.4,0.5,305.5,2.1 +Hindalco Industries Ltd.,HINDALCO,500440,METALS & MINING,ALUMINIUM AND ALUMINIUM PRODUCTS,"54,632","48,557","5,612",10.36%,"1,843","1,034","3,231","1,035","2,196",9.9,"8,423",37.9 +Hindustan Copper Ltd.,HINDCOPPER,513599,METALS & MINING,COPPER,392.6,260.2,121.2,31.77%,45.6,4.1,82.6,21.9,60.7,0.6,320.5,3.3 +Hindustan Petroleum Corporation Ltd.,HINDPETRO,500104,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"96,093.4","87,512","8,24",8.61%,"1,247.3",590,"6,744.1","1,616","5,827",41.1,"16,645",117.3 +Hindustan Unilever Ltd.,HINDUNILVR,500696,FMCG,PERSONAL PRODUCTS,"15,806","11,826","3,797",24.30%,297,88,"3,59",931,"2,656",11.3,"10,284",43.8 +Hindustan Zinc Ltd.,HINDZINC,500188,METALS & MINING,ZINC,"7,014","3,652","3,139",46.22%,825,232,"2,305",576,"1,729",4.1,"8,432",20 +Housing and Urban Development Corporation Ltd.,HUDCO,540530,BANKING AND FINANCE,HOUSING FINANCE,"1,880.8",82.7,"1,809.6",97.04%,2.4,"1,216.8",606.4,154.7,451.6,2.3,"1,790.7",8.9 +ITC Ltd.,ITC,500875,FOOD BEVERAGES & TOBACCO,CIGARETTES-TOBACCO 
PRODUCTS,"18,439.3","11,320.2","6,454.2",36.31%,453,9.9,"6,656.2","1,700.3","4,898.1",3.9,"20,185.1",16.2 +ICICI Bank Ltd.,ICICIBANK,532174,BANKING AND FINANCE,BANKS,"57,292.3","23,911","15,473.2",39.74%,0,"17,908","14,824.2","3,808.8","11,805.6",15.6,"41,086.8",58.7 +ICICI Prudential Life Insurance Company Ltd.,ICICIPRULI,540133,BANKING AND FINANCE,LIFE INSURANCE,"17,958.1","17,612.3",-229.6,-1.32%,0,0,340.2,32.5,243.9,1.7,906.9,6.3 +IDBI Bank Ltd.,IDBI,500116,BANKING AND FINANCE,BANKS,"7,063.7","1,922.3","2,175.3",36.02%,0,"2,966.1","2,396.9","1,003.7","1,385.4",1.3,"4,776.3",4.4 +IDFC First Bank Ltd.,IDFCFIRSTB,539437,BANKING AND FINANCE,BANKS,"8,765.8","3,849","1,511.2",20.54%,0,"3,405.6",982.8,236,746.9,1.1,"2,911.1",4.3 +IDFC Ltd.,IDFC,532659,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),36.7,6,30.6,83.56%,0,0,30.6,6.6,223.5,1.4,"4,147.1",25.9 +IRB Infrastructure Developers Ltd.,IRB,532947,CEMENT AND CONSTRUCTION,ROADS & HIGHWAYS,"1,874.5",950.4,794.6,45.54%,232.7,434.6,256.9,85.8,95.7,0.2,501,0.8 +ITI Ltd.,ITI,523610,TELECOMMUNICATIONS EQUIPMENT,TELECOM EQUIPMENT,256.1,299.3,-52.8,-21.42%,13.3,69.3,-125.8,0,-126,-1.3,-388.4,-4 +Vodafone Idea Ltd.,IDEA,532822,TELECOM SERVICES,TELECOM SERVICES,"10,750.8","6,433.5","4,282.8",39.97%,"5,667.3","6,569","-7,919",817.7,"-8,737.9",-1.8,"-30,986.8",-6.4 +India Cements Ltd.,INDIACEM,530005,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"1,272.4","1,26",4.4,0.35%,55,60.4,-103,-17.4,-80.1,-2.6,-261.1,-8.4 +Indiabulls Housing Finance Ltd.,IBULHSGFIN,535789,BANKING AND FINANCE,HOUSING FINANCE,"2,242.3",190.6,"1,779.2",79.88%,22.9,"1,349.8",421.6,123.6,298,6.5,"1,146",24.3 +Indian Bank,INDIANB,532814,BANKING AND FINANCE,BANKS,"15,929.4","3,599.1","4,327.7",31.44%,0,"8,002.6","2,776.7",768.6,"2,068.5",16.6,"6,893.3",55.3 +Indian Hotels Company Ltd.,INDHOTEL,500850,HOTELS RESTAURANTS & TOURISM,HOTELS,"1,480.9","1,078.4",354.8,24.75%,111.2,59,232.2,72.3,166.9,1.2,"1,100.3",7.7 +Indian Oil Corporation Ltd.,IOC,530965,OIL & 
GAS,OIL MARKETING & DISTRIBUTION,"179,752.1","156,013.1","23,328.4",13.01%,"3,609.6","2,135","18,090.2","4,699.7","13,114.3",9.5,"38,614.3",27.3 +Indian Overseas Bank,IOB,532388,BANKING AND FINANCE,BANKS,"6,941.5","1,785.1","1,679.8",28.84%,0,"3,476.6",635.5,8.3,627.2,0.3,"2,341.9",1.2 +Indraprastha Gas Ltd.,IGL,532514,UTILITIES,UTILITIES,"3,520.2","2,801.6",656.9,18.99%,102.2,2.5,613.9,151.4,552.7,7.9,"1,806.2",25.8 +IndusInd Bank Ltd.,INDUSINDBK,532187,BANKING AND FINANCE,BANKS,"13,529.7","3,449.9","3,908.7",34.75%,0,"6,171.1","2,934.9",732.9,"2,202.2",28.4,"8,333.7",107.2 +Info Edge (India) Ltd.,NAUKRI,532777,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,792,421.2,204.7,32.70%,25.9,8.2,382.8,68.7,205.1,15.9,-25.6,-2 +InterGlobe Aviation Ltd.,INDIGO,539448,TRANSPORTATION,AIRLINES,"15,502.9","12,743.6","2,200.3",14.72%,"1,549","1,021.3",189.1,0.2,188.9,4.9,"5,621.3",145.7 +Ipca Laboratories Ltd.,IPCALAB,524494,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"2,072.5","1,712.7",321.3,15.80%,90.3,44.1,225.4,87.9,145.1,5.7,492.2,19.4 +J B Chemicals & Pharmaceuticals Ltd.,JBCHEPHARM,506943,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,889.4,638.2,243.5,27.62%,32.2,10.4,208.7,58.1,150.6,9.7,486.6,31.4 +JK Cement Ltd.,JKCEMENT,532644,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,782.1","2,285.8",467,16.96%,137.1,115,244.2,65.7,178.1,23.1,444,57.5 +JK Lakshmi Cement Ltd.,JKLAKSHMI,500380,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"1,588.5","1,357.3",217.3,13.80%,56.6,33.6,141,45.1,92.7,7.9,357.6,30.4 +JM Financial Ltd.,JMFINANCIL,523405,DIVERSIFIED,HOLDING COMPANIES,"1,214",407.9,662.6,55.34%,13.2,388.1,277.9,72.4,194.9,2,608.1,6.4 +JSW Energy Ltd.,JSWENERGY,533148,UTILITIES,ELECTRIC UTILITIES,"3,387.4","1,379","1,880.4",57.69%,408.7,513.7,"1,085.9",235.1,850.2,5.2,"1,591.7",9.7 +JSW Steel Ltd.,JSWSTEEL,500228,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"44,821","36,698","7,886",17.69%,"2,019","2,084","4,609","1,812","2,76",11.4,"9,252",38.1 +Jindal 
Stainless Ltd.,JSL,532508,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"9,829","8,566.5","1,230.6",12.56%,221.9,155.6,985.7,229.1,774.3,9.4,"2,600.2",31.6 +Jindal Steel & Power Ltd.,JINDALSTEL,532286,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"12,282","9,964.5","2,285.7",18.66%,603.7,329.4,"1,384.5",-5.8,"1,387.8",13.8,"4,056",40.4 +Jubilant Foodworks Ltd.,JUBLFOOD,533155,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,"1,375.7","1,091.4",277.2,20.25%,141.9,56.8,85.5,23.3,97.2,1.5,235,3.6 +Just Dial Ltd.,JUSTDIAL,535648,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,318.5,211.8,48.8,18.71%,12.2,2.4,92.1,20.3,71.8,8.4,314.1,36.9 +Jyothy Labs Ltd.,JYOTHYLAB,532926,FMCG,PERSONAL PRODUCTS,745.6,597,135.4,18.48%,12.3,1.2,135.1,31.1,104.2,2.8,326.9,8.9 +KRBL Ltd.,KRBL,530813,FMCG,PACKAGED FOODS,"1,246.5","1,018.9",194.5,16.03%,19.9,0.8,206.8,53.6,153.3,6.5,671.4,29.3 +Kajaria Ceramics Ltd.,KAJARIACER,500233,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"1,129.9",941.9,179.7,16.02%,36.1,4.3,147.7,36.6,108,6.8,397.8,25 +Kalpataru Projects International Ltd.,KPIL,522287,UTILITIES,ELECTRIC UTILITIES,"4,53","4,148",370,8.19%,113,137,132,42,89,5.5,478,29.9 +Kansai Nerolac Paints Ltd.,KANSAINER,500165,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"1,978.6","1,683.3",273.2,13.97%,47.4,7.6,240.3,64.8,177.2,2.2,"1,118.8",13.8 +Karur Vysya Bank Ltd.,KARURVYSYA,590003,BANKING AND FINANCE,BANKS,"2,336",616.4,637.9,31.94%,0,"1,081.7",511.5,133.1,378.4,4.7,"1,364.2",17 +KEC International Ltd.,KEC,532714,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"4,514.9","4,224.7",274.3,6.10%,46.5,177.8,65.8,9.9,55.8,2.2,187.9,7.3 +Kotak Mahindra Bank Ltd.,KOTAKBANK,500247,BANKING AND FINANCE,BANKS,"21,559.5","9,681","6,343",46.24%,0,"5,535.5","5,888.3","1,465.5","4,461",22.4,"17,172.7",86.4 +L&T Finance Holdings Ltd.,L&TFH,533519,DIVERSIFIED,HOLDING COMPANIES,"3,482.1",935.3,"1,882.4",58.57%,28.3,"1,324.9",797.4,203.2,595.1,2.4,"2,080.8",8.4 +L&T Technology Services 
Ltd.,LTTS,540115,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"2,427.7","1,910.9",475.6,19.93%,68.1,12.6,436.1,120.2,315.4,29.8,"1,239.7",117.5 +LIC Housing Finance Ltd.,LICHSGFIN,500253,BANKING AND FINANCE,HOUSING FINANCE,"6,765.9",250.6,"6,095.7",90.10%,13.2,"4,599.9","1,483",291.2,"1,193.5",21.7,"4,164.5",75.7 +Lakshmi Machine Works Ltd.,LAXMIMACH,500252,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,"1,355.5","1,184.5",136,10.30%,23.6,0,147.4,32.3,115.1,107.8,416,389.5 +Laurus Labs Ltd.,LAURUSLABS,540222,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,226.2","1,036.6",187.9,15.34%,93.4,42.4,53.9,14.6,37,0.7,367.8,6.8 +Lupin Ltd.,LUPIN,500257,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"5,079","4,120.8",917.8,18.21%,247.8,80.6,629.7,134.3,489.5,10.8,"1,331.2",29.2 +MMTC Ltd.,MMTC,513377,COMMERCIAL SERVICES & SUPPLIES,COMMODITY TRADING & DISTRIBUTION,-167.2,-180.1,-30.4,14.42%,0.8,1.1,12.1,1.5,52,0.3,174.1,1.2 +MRF Ltd.,MRF,500290,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"6,287.8","5,060.2","1,156.9",18.61%,351.5,85.5,790.6,203.9,586.7,1383.3,"1,690.9",3988 +Mahanagar Gas Ltd.,MGL,539957,UTILITIES,UTILITIES,"1,772.7","1,250.1",478.9,27.70%,65.8,2.5,454.3,115.8,338.5,34.3,"1,147.8",116.2 +Mahindra & Mahindra Financial Services Ltd.,M&MFIN,532720,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"3,863.5","1,077.5","2,109.3",55.03%,67.1,"1,703.4",369.1,96,281.1,2.3,"1,982.5",16 +Mahindra & Mahindra Ltd.,M&M,500520,AUTOMOBILES & AUTO COMPONENTS,CARS & UTILITY VEHICLES,"35,027.2","28,705.9","5,729.6",16.64%,"1,138.6","1,835.2","3,347.5","1,083.7","2,347.8",21.1,"11,169.4",100.2 +Mahindra Holidays & Resorts India Ltd.,MHRIL,533088,HOTELS RESTAURANTS & TOURISM,HOTELS,672.2,519.3,136,20.76%,83.8,33.3,35.8,14,21.3,1.1,66,3.3 +Manappuram Finance Ltd.,MANAPPURAM,531213,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"2,174",555.6,"1,481.3",68.68%,62.5,689.4,746.7,186.1,558.4,6.6,"1,859.8",22 +Mangalore Refinery And Petrochemicals Ltd.,MRPL,500109,OIL & 
GAS,REFINERIES/PETRO-PRODUCTS,"22,904.7","20,705.6","2,138.2",9.36%,296,311.2,"1,592",546.2,"1,051.7",6,"3,784.9",21.6 +Marico Ltd.,MARICO,531642,FMCG,PERSONAL PRODUCTS,"2,514","1,979",497,20.07%,39,20,476,116,353,2.7,"1,41",10.9 +Maruti Suzuki India Ltd.,MARUTI,532500,AUTOMOBILES & AUTO COMPONENTS,CARS & UTILITY VEHICLES,"37,902.1","32,282.5","4,790.3",12.92%,794.4,35.1,"4,790.1","1,083.8","3,764.3",124.6,"11,351.8",375.9 +Max Financial Services Ltd.,MFSL,500271,BANKING AND FINANCE,LIFE INSURANCE,"10,189.1","10,024.6",143.9,1.42%,0.8,9.4,158.2,-12.1,147.9,4.3,506.4,14.7 +UNO Minda Ltd.,UNOMINDA,532539,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"3,630.2","3,219.8",401.6,11.09%,125.4,27.2,257.9,73.3,225,3.9,742.4,13 +Motilal Oswal Financial Services Ltd.,MOTILALOFS,532892,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,"1,650.7",724.1,904.5,55.18%,17.3,241.1,657.6,124.2,531.2,35.9,"1,449.3",97.8 +MphasiS Ltd.,MPHASIS,526299,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"3,325.5","2,680.9",595.6,18.18%,89,34,521.7,129.7,391.9,20.8,"1,605.6",85.1 +Muthoot Finance Ltd.,MUTHOOTFIN,533398,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"3,631.9",723.4,"2,801.6",77.69%,22.2,"1,335","1,470.2",374.9,"1,059.6",26.4,"3,982.9",99.2 +Natco Pharma Ltd.,NATCOPHARM,524816,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,060.8",573.4,458,44.41%,43.6,4.2,439.6,70.6,369,20.6,"1,127.4",63 +NBCC (India) Ltd.,NBCC,534309,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"2,129.1","1,957.7",95.5,4.65%,1.3,0,104.6,22.9,79.6,0.4,332.2,1.8 +NCC Ltd.,NCC,500294,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"4,746.4","4,415.9",303.7,6.44%,53.2,153.5,123.8,38.8,77.3,1.2,599.4,9.5 +NHPC Ltd.,NHPC,533098,UTILITIES,ELECTRIC UTILITIES,"3,113.8","1,173.9","1,757.4",59.95%,294.9,104.8,"1,618.3",-75,"1,545.8",1.5,"3,897.8",3.9 +Coforge Ltd.,COFORGE,532541,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"2,285.1","1,935.3",340.9,14.98%,77.2,31.9,240.7,52.8,187.9,29.6,696.2,113.2 +NLC India 
Ltd.,NLCINDIA,513683,UTILITIES,ELECTRIC UTILITIES,"3,234","2,143",834.6,28.03%,455.1,213.9,"1,700.6",614.7,"1,084.7",7.8,"1,912.3",13.8 +NTPC Ltd.,NTPC,532555,UTILITIES,ELECTRIC UTILITIES,"45,384.6","32,303.2","12,680.2",28.19%,"4,037.7","2,920.5","6,342.9","2,019.7","4,614.6",4.8,"19,125.2",19.7 +Narayana Hrudayalaya Ltd.,NH,539551,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"1,323.6",997.1,308.1,23.61%,55.3,22.9,248.4,21.7,226.6,11.2,737.5,36.1 +National Aluminium Company Ltd.,NATIONALUM,532234,METALS & MINING,ALUMINIUM AND ALUMINIUM PRODUCTS,"3,112","2,646.9",396.5,13.03%,186.2,4,275,68.7,187.3,1,"1,272.4",6.9 +Navin Fluorine International Ltd.,NAVINFLUOR,532504,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,494.9,373.4,98.3,20.84%,24.2,20,77.2,16.6,60.6,12.2,365,73.7 +Oberoi Realty Ltd.,OBEROIRLTY,533273,REALTY,REALTY,"1,243.8",579.2,638.2,52.42%,11.3,56.5,596.8,142.1,456.8,12.6,"1,961.3",53.9 +Oil And Natural Gas Corporation Ltd.,ONGC,500312,OIL & GAS,EXPLORATION & PRODUCTION,"149,388.5","118,618.4","28,255.3",19.24%,"6,698.1","2,603.3","21,564.9","5,633.6","13,734.1",10.9,"43,072.5",34.2 +Oil India Ltd.,OIL,533106,OIL & GAS,EXPLORATION & PRODUCTION,"9,200.1","5,293.3","3,523.2",39.96%,499,278.9,762,67.6,420.7,3.9,"5,874.5",54.2 +Oracle Financial Services Software Ltd.,OFSS,532466,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,509.6",886.4,558.1,38.64%,19,8,596.2,178.8,417.4,48.2,"1,835.1",211.9 +PI Industries Ltd.,PIIND,523642,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,"2,163.8","1,565.5",551.4,26.05%,80.3,7.8,510.2,31.7,480.5,31.7,"1,495.8",98.4 +PNB Housing Finance Ltd.,PNBHOUSING,540173,BANKING AND FINANCE,HOUSING FINANCE,"1,779.4",158.8,"1,574.1",88.54%,11.3,"1,057.3",507.1,124.1,383,14.8,"1,278.7",49.3 +PNC Infratech Ltd.,PNCINFRA,539150,CEMENT AND CONSTRUCTION,ROADS & HIGHWAYS,"1,932.4","1,511.6",399.8,20.92%,40.9,161.3,218.6,70.7,147.9,5.8,614.3,23.9 +PVR INOX Ltd.,PVRINOX,532689,RETAILING,SPECIALTY 
RETAIL,"2,023.7","1,293.1",706.8,35.34%,308.6,200.3,221.7,55.5,166.3,17,-232.5,-23.7 +Page Industries Ltd.,PAGEIND,532827,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,"1,126.8",891.6,233.5,20.76%,24.6,11.2,199.4,49.1,150.3,134.7,510.7,457.9 +Persistent Systems Ltd.,PERSISTENT,533179,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"2,449","2,006.5",405.2,16.80%,74.4,12.3,355.8,92.5,263.3,35,981.5,127.6 +Petronet LNG Ltd.,PETRONET,532522,OIL & GAS,OIL MARKETING & DISTRIBUTION,"12,686.2","11,317.9","1,214.7",9.69%,194.8,74.7,"1,098.8",283.9,855.7,5.7,"3,490.3",23.3 +Pfizer Ltd.,PFIZER,500680,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,611.3,392.6,182.6,31.75%,15.4,2.7,200.5,51.6,149,32.6,522.8,114.3 +Phoenix Mills Ltd.,PHOENIXLTD,503100,REALTY,REALTY,906.6,361.2,506,57.82%,65.9,96.5,375.2,71.4,252.6,14.2,923.6,51.7 +Pidilite Industries Ltd.,PIDILITIND,500331,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"3,107.6","2,396.3",679.7,22.10%,75.2,13.1,623,163.1,450.1,8.8,"1,505.5",29.6 +Power Finance Corporation Ltd.,PFC,532810,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"22,403.7",315.4,"22,941.9",102.46%,12.7,"14,313.1","8,628.8","2,000.6","4,833.1",14.7,"17,946.4",54.4 +Power Grid Corporation of India Ltd.,POWERGRID,532898,UTILITIES,ELECTRIC UTILITIES,"11,530.4","1,358.7","9,908.4",87.94%,"3,277","2,341.3","4,393.4",573.7,"3,781.4",4.1,"15,344.4",16.5 +Prestige Estates Projects Ltd.,PRESTIGE,ASM,REALTY,REALTY,"3,256","1,643.9",592.5,26.49%,174.1,263.9,"1,174.1",256.4,850.9,21.2,"1,714",42.8 +Prism Johnson Ltd.,PRSMJOHNSN,500338,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"1,846","1,745.4",92.4,5.03%,95.2,43.5,210,30.4,182.7,3.6,154.2,3.1 +Procter & Gamble Hygiene & Healthcare Ltd.,PGHH,500459,FMCG,PERSONAL PRODUCTS,"1,154.1",853.5,284.9,25.03%,14.3,1.9,284.5,73.8,210.7,64.9,734.4,226.3 +Punjab National Bank,PNB,532461,BANKING AND FINANCE,BANKS,"29,857","6,798.1","6,239.1",23.23%,0,"16,819.8","2,778.3","1,013.8","1,990.2",1.8,"5,904.8",5.4 +Quess 
Corp Ltd.,QUESS,539978,SOFTWARE & SERVICES,BPO/KPO,"4,763.5","4,584.8",163.6,3.44%,69.7,28.1,79.3,8.3,71.9,4.8,240.9,16.2 +RBL Bank Ltd.,RBLBANK,540065,BANKING AND FINANCE,BANKS,"3,720.6","1,422.6",765.4,25.45%,0,"1,532.6",125,-206.1,331.1,5.5,"1,173.9",19.5 +Radico Khaitan Ltd.,RADICO,532497,FOOD BEVERAGES & TOBACCO,BREWERIES & DISTILLERIES,925.7,803.8,121.2,13.10%,26.1,12.5,83.3,21.4,64.8,4.8,237,17.7 +Rain Industries Ltd.,RAIN,500339,CHEMICALS & PETROCHEMICALS,PETROCHEMICALS,"4,208.9","3,794.3",366,8.80%,192.5,241.7,-19.5,46.2,-90.2,-2.7,270.4,8 +Rajesh Exports Ltd.,RAJESHEXPO,531500,TEXTILES APPARELS & ACCESSORIES,GEMS & JEWELLERY,"38,079.4","38,015.8",50.1,0.13%,10.7,0,53,7.7,45.3,1.5,"1,142.2",38.7 +Rallis India Ltd.,RALLIS,500355,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,837,699,133,15.99%,26,3,110,28,82,4.2,98.4,5.2 +Rashtriya Chemicals & Fertilizers Ltd.,RCF,524230,FERTILIZERS,FERTILIZERS,"4,222.1","4,049.3",105.9,2.55%,56.1,44,72.8,21.1,51,0.9,523.6,9.5 +Redington Ltd.,REDINGTON,532805,COMMERCIAL SERVICES & SUPPLIES,COMMODITY TRADING & DISTRIBUTION,"22,296.6","21,738.7",481.4,2.17%,43.7,105.8,408.3,96.7,303.5,3.9,"1,242",15.9 +Relaxo Footwears Ltd.,RELAXO,530517,RETAILING,FOOTWEAR,725.9,623.8,91.5,12.79%,36.9,4.7,60.4,16.2,44.2,1.8,193.9,7.8 +Reliance Industries Ltd.,RELIANCE,500325,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"238,797","193,988","40,968",17.44%,"12,585","5,731","26,493","6,673","17,394",25.7,"68,496",101.2 +REC Ltd.,RECLTD,532955,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"11,701.3",275.1,"12,180.5",104.21%,6.1,"7,349.8","4,837.6","1,047.7","3,789.9",14.4,"12,738.6",48.4 +SJVN Ltd.,SJVN,533206,UTILITIES,ELECTRIC UTILITIES,951.6,172.2,706.2,80.40%,101.9,124.2,567.7,129.2,439.6,1.1,"1,016",2.6 +SKF India Ltd.,SKFINDIA,500472,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,"1,145.5","1,003.7",121.5,10.80%,19.3,0.5,122,31.7,90,18.2,484,97.9 +SRF Ltd.,SRF,503806,CHEMICALS & PETROCHEMICALS,SPECIALTY 
CHEMICALS,"3,206.5","2,551.2",626.2,19.71%,161.2,79.3,414.8,114,300.8,10.2,"1,733.4",58.5 +Sanofi India Ltd.,SANOFI,500674,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,726.4,506.1,208.5,29.17%,9.9,0.3,210.1,57.9,152.1,66.1,596.3,259.3 +Schaeffler India Ltd.,SCHAEFFLER,505790,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,879.2","1,506.3",342,18.50%,55.6,1.6,315.7,80.7,235,15,922.6,59 +Shree Cements Ltd.,SHREECEM,500387,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"4,932.1","3,914.1",886,18.46%,411.7,67,539.2,92.6,446.6,123.8,"1,826.8",506.3 +Shriram Finance Ltd.,SHRIRAMFIN,511218,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"8,893","1,409.4","6,334.3",71.30%,141.4,"3,798","2,404.2",614.9,"1,786.1",47.6,"6,575.4",175.2 +Siemens Ltd.,SIEMENS,500550,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"5,953.2","5,107.5",700.2,12.06%,78.6,4.9,762.2,190.5,571.3,16.1,"1,960.9",55.1 +Sobha Ltd.,SOBHA,532784,REALTY,REALTY,773.6,665.8,75.4,10.18%,19.3,63.9,24.7,9.7,14.9,1.6,107.4,11.3 +Solar Industries India Ltd.,SOLARINDS,532725,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"1,355.2","1,011.3",336.1,24.95%,33.7,24.9,285.3,75.5,200.1,22.1,808.2,89.3 +Sonata Software Ltd.,SONATSOFTW,532221,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,935.8","1,715.2",197.3,10.32%,33.3,20.7,166.5,42.3,124.2,9,475.7,34.3 +State Bank of India,SBIN,500112,BANKING AND FINANCE,BANKS,"144,256.1","58,597.6","22,703.3",21.14%,0,"62,955.2","21,935.7","5,552.5","17,196.2",18,"69,304.1",77.7 +Steel Authority of India (SAIL) Ltd.,SAIL,500113,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"29,858.2","25,836.7","3,875.4",13.04%,"1,326.6",605.2,"1,674.7",464.2,"1,305.6",3.2,"3,219.5",7.8 +Sun Pharma Advanced Research Company Ltd.,SPARC,532872,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,29.7,112.7,-91.5,-431.87%,3.2,0.3,-86.4,0,-86.4,-2.7,-253.6,-7.8 +Sun Pharmaceutical Industries Ltd.,SUNPHARMA,524715,PHARMACEUTICALS & 
BIOTECHNOLOGY,PHARMACEUTICALS,"12,486","9,013","3,179.4",26.08%,632.8,49.3,"2,790.9",390.1,"2,375.5",9.9,"8,548.5",35.6 +Sun TV Network Ltd.,SUNTV,532733,MEDIA,BROADCASTING & CABLE TV,"1,160.2",320.6,727.8,69.42%,218.8,1.7,619.1,154.4,464.7,11.8,"1,861.8",47.2 +Sundram Fasteners Ltd.,SUNDRMFAST,500403,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,429.1","1,191.1",230.7,16.23%,54.5,7.4,176.2,43.1,131.9,6.3,502.9,23.9 +Sunteck Realty Ltd.,SUNTECK,512179,REALTY,REALTY,36.2,39.1,-14.1,-56.70%,2.2,15.8,-20.9,-6.4,-13.9,-1,-46.5,-3.3 +Supreme Industries Ltd.,SUPREMEIND,509930,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,"2,321.4","1,952.5",356.2,15.43%,71.9,1.6,295.4,76.3,243.2,19.1,"1,028.2",80.9 +Suzlon Energy Ltd.,SUZLON,ASM,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"1,428.7","1,196.4",225,15.83%,51.2,43.7,102.4,0.1,102.3,0.1,561.4,0.4 +Syngene International Ltd.,SYNGENE,539268,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,931.7,656,254.1,27.92%,104.6,13,150.7,34.2,116.5,2.9,498.3,12.4 +TTK Prestige Ltd.,TTKPRESTIG,517506,CONSUMER DURABLES,HOUSEWARE,747.2,648.6,80.8,11.08%,15.9,3.1,79.5,20.5,59.3,4.3,224.3,16.2 +TV18 Broadcast Ltd.,TV18BRDCST,532800,MEDIA,BROADCASTING & CABLE TV,"1,989","1,992.2",-198.1,-11.04%,50.1,33.8,-87.1,-6.5,-28.9,-0.2,92.2,0.5 +TVS Motor Company Ltd.,TVSMOTOR,532343,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"9,983.8","8,576.9","1,355.9",13.65%,237.1,483.3,686.4,259.8,386.3,8.1,"1,457.6",30.7 +Tata Consultancy Services Ltd.,TCS,532540,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"60,698","43,946","15,746",26.38%,"1,263",159,"15,33","3,95","11,342",31,"44,654",122 +Tata Elxsi Ltd.,TATAELXSI,500408,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,912.8,618.2,263.5,29.89%,25,5.8,263.9,63.8,200,32.1,785.1,126.1 +Tata Consumer Products Ltd.,TATACONSUM,500800,FMCG,PACKAGED FOODS,"3,823.6","3,196.7",537.1,14.38%,93.9,27.6,490.9,131.7,338.2,3.6,"1,275.2",13.7 +Tata Motors Limited (DVR),TATAMTRDVR,570001,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL 
VEHICLES,,,,,,,,,,,, +Tata Motors Ltd.,TATAMOTORS,500570,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,"106,759","91,361.3","13,766.9",13.10%,"6,636.4","2,651.7","5,985.9","2,202.8","3,764",9.8,"15,332.3",40 +Tata Power Company Ltd.,TATAPOWER,500400,UTILITIES,ELECTRIC UTILITIES,"16,029.5","12,647","3,091",19.64%,925.9,"1,181.8",979.2,213.3,875.5,2.7,"3,570.8",11.2 +Tata Steel Ltd.,TATASTEEL,500470,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"55,910.2","51,414.1","4,267.8",7.66%,"2,479.8","1,959.4","-6,842.1",-228,"-6,196.2",-5.1,"-6,081.3",-5 +Tech Mahindra Ltd.,TECHM,532755,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"13,128.1","11,941.1",922.8,7.17%,465.7,97.5,623.8,110,493.9,5.6,"3,600.7",40.9 +The Ramco Cements Ltd.,RAMCOCEM,500260,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,352.1","1,935",405.6,17.33%,162.8,116.5,137.8,37,72,3.1,348.9,14.8 +Thermax Ltd.,THERMAX,500411,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"2,368.3","2,097.8",204.6,8.89%,33,19.8,217.7,58.9,157.7,14,498.8,44.3 +Timken India Ltd.,TIMKEN,522113,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,692.1,546.5,135.5,19.87%,21.1,0.9,123.6,30.6,93,12.4,358.3,47.6 +Titan Company Ltd.,TITAN,500114,TEXTILES APPARELS & ACCESSORIES,GEMS & JEWELLERY,"12,653","11,118","1,411",11.26%,144,140,"1,251",336,915,10.3,"3,302",37.1 +Torrent Pharmaceuticals Ltd.,TORNTPHARM,500420,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"2,686","1,835",825,31.02%,201,91,559,173,386,11.4,"1,334",39.4 +Torrent Power Ltd.,TORNTPOWER,532779,UTILITIES,ELECTRIC UTILITIES,"7,069.1","5,739.5","1,221.4",17.55%,341.7,247.2,740.7,198.1,525.9,10.9,"2,176.8",45.3 +Trent Ltd.,TRENT,500251,RETAILING,DEPARTMENT STORES,"3,062.5","2,525.8",456.6,15.31%,152.2,95.5,288.9,86.3,234.7,6.6,629.4,17.7 +Trident Ltd.,TRIDENT,521064,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"1,812","1,557.3",240.3,13.37%,89.4,35,130.4,40.1,90.7,0.2,458.1,0.9 +UPL Ltd.,UPL,512070,CHEMICALS & 
PETROCHEMICALS,AGROCHEMICALS,"10,275","8,807","1,325",13.03%,657,871,-185,-96,-189,-2.5,"1,856",24.7 +UltraTech Cement Ltd.,ULTRACEMCO,532538,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"16,179.3","13,461.2","2,550.9",15.93%,797.8,233.9,"1,686.2",409.4,"1,281.5",44.5,"5,694.1",197.2 +Union Bank of India,UNIONBANK,532477,BANKING AND FINANCE,BANKS,"28,952.5","6,189.3","7,265",29.38%,0,"15,498.2","5,492.3","1,944","3,571.8",5.1,"11,918.9",16.1 +United Breweries Ltd.,UBL,532478,FOOD BEVERAGES & TOBACCO,BREWERIES & DISTILLERIES,"1,902.1","1,705.8",184.3,9.75%,50.9,1.4,144,36.9,107.3,4.1,251.3,9.5 +United Spirits Ltd.,MCDOWELL-N,532432,FOOD BEVERAGES & TOBACCO,BREWERIES & DISTILLERIES,"6,776.6","6,269.8",466.7,6.93%,65.3,26.2,446,106.3,339.3,4.8,"1,133",15.6 +V-Guard Industries Ltd.,VGUARD,532953,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,147.9","1,041.3",92.5,8.16%,19.8,9.3,77.5,18.6,59,1.4,215.2,5 +Vardhman Textiles Ltd.,VTL,502986,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"2,487","2,192.1",205.4,8.57%,103.7,22,169.2,41.7,134.3,4.7,531.9,18.7 +Varun Beverages Ltd.,VBL,540180,FOOD BEVERAGES & TOBACCO,NON-ALCOHOLIC BEVERAGES,"3,889","2,988.4",882.1,22.79%,170.8,62.5,667.3,152.9,501.1,3.9,"1,998.7",15.4 +Vinati Organics Ltd.,VINATIORGA,524200,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,464.4,337.3,110.8,24.73%,13.7,0.3,113,28.9,84.2,8.2,408.2,39.7 +Voltas Ltd.,VOLTAS,500575,CONSUMER DURABLES,CONSUMER ELECTRONICS,"2,363.7","2,222.5",70.3,3.06%,11.7,11.4,118.1,49.3,36.7,1.1,199.5,6 +ZF Commercial Vehicle Control Systems India Ltd.,ZFCVINDIA,533023,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,015.8",846.2,145.5,14.67%,27.1,1.3,141.2,35.5,105.7,55.7,392,206.7 +Welspun Corp Ltd.,WELCORP,ASM,METALS & MINING,IRON & STEEL PRODUCTS,"4,161.4","3,659.9",399.5,9.84%,85.7,75,340.8,79,384.7,14.7,809.2,30.9 +Welspun Living Ltd.,WELSPUNLIV,514162,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"2,542.4","2,151.1",358,14.27%,98.5,33.8,258.9,58.7,196.7,2,526.1,5.4 
+Whirlpool of India Ltd.,WHIRLPOOL,500238,CONSUMER DURABLES,CONSUMER ELECTRONICS,"1,555.5","1,448.4",73.2,4.81%,49.2,5.6,52.3,14.1,36.6,2.9,198.8,15.7 +Wipro Ltd.,WIPRO,507685,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"23,255.7","18,543.2","3,972.7",17.64%,897,303.3,"3,512.2",841.9,"2,646.3",5.1,"11,643.8",22.3 +Zee Entertainment Enterprises Ltd.,ZEEL,505537,MEDIA,BROADCASTING & CABLE TV,"2,509.6","2,105",332.8,13.65%,77.2,23.4,184.2,54.4,123,1.3,-102.2,-1.1 +eClerx Services Ltd.,ECLERX,532927,SOFTWARE & SERVICES,BPO/KPO,735.9,517,204.7,28.37%,30.3,6.1,182.4,46.3,136,28.2,506,105 +Sterlite Technologies Ltd.,STLTECH,532374,TELECOMMUNICATIONS EQUIPMENT,TELECOM CABLES,"1,497","1,281",213,14.26%,85,95,36,12,34,0.9,203,5.1 +HEG Ltd.,HEG,509631,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,642.2,512.3,101.9,16.58%,38.5,8.5,82.9,21.7,96,24.9,439.5,113.9 +SBI Life Insurance Company Ltd.,SBILIFE,540719,BANKING AND FINANCE,LIFE INSURANCE,"28,816.2","28,183.8",609.9,2.12%,0,0,621.5,43.9,380.2,3.8,"1,842.2",18.4 +General Insurance Corporation of India,GICRE,540755,BANKING AND FINANCE,GENERAL INSURANCE,"13,465.9","11,574","1,464.6",11.20%,0,0,"1,855.4",243.7,"1,689",15.2,"6,628",37.8 +Tube Investments of India Ltd.,TIINDIA,540762,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,005.4","1,718.2",251.4,12.76%,34.6,7.7,244.8,63.4,181.4,9.4,717.5,37.1 +Honeywell Automation India Ltd.,HONAUT,517174,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,144.3",965.9,138.3,12.52%,13.8,0.7,163.9,42,121.9,137.8,443.4,503.9 +Indian Energy Exchange Ltd.,IEX,540750,BANKING AND FINANCE,EXCHANGE,133,16.6,92,84.73%,5.1,0.7,110.6,27.9,86.5,1,327.8,3.7 +ICICI Lombard General Insurance Company Ltd.,ICICIGI,540716,BANKING AND FINANCE,GENERAL INSURANCE,"5,271.1","4,612.4",743.5,14.16%,0,0,763.6,186.4,577.3,11.8,"1,757.1",35.8 +Aster DM Healthcare Ltd.,ASTERDM,540975,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE 
FACILITIES,"3,325.2","2,939.4",377.3,11.38%,227.2,101.9,2.1,10.2,-30.8,-0.6,284.3,5.7 +Central Depository Services (India) Ltd.,CDSL,CDSL,OTHERS,INVESTMENT COMPANIES,230.1,77.9,129.4,62.40%,6.5,0,145.6,35.8,108.9,10.4,320.2,30.6 +Graphite India Ltd.,GRAPHITE,509488,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,884,823,-30,-3.78%,19,4,992,190,804,41.1,856,43.9 +Grasim Industries Ltd.,GRASIM,500300,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"30,505.3","25,995.9","4,224.8",13.98%,"1,245.2",397.8,"2,866.4",837.7,"1,163.8",17.7,"6,624.9",100.6 +KNR Constructions Ltd.,KNRCON,532942,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"1,043.8",806.9,231.6,22.30%,39.2,20.6,177.1,34.6,147.4,5.2,537.5,19.1 +Aditya Birla Capital Ltd.,ABCAPITAL,540691,DIVERSIFIED,HOLDING COMPANIES,"7,730.4","4,550.1","2,821.9",36.55%,48,"1,827",956.8,284.1,705,2.7,"5,231.9",20.1 +Dixon Technologies (India) Ltd.,DIXON,540699,CONSUMER DURABLES,CONSUMER ELECTRONICS,"4,943.9","4,744.3",198.9,4.02%,36.4,17.1,146.1,35.2,107.3,19,308.7,51.8 +Cholamandalam Financial Holdings Ltd.,CHOLAHLDNG,504973,DIVERSIFIED,HOLDING COMPANIES,"6,372.2","2,495.1","3,404.8",54.05%,52.1,"2,209.4","1,215.8",324.6,420.9,22.4,"1,532.3",81.6 +Cochin Shipyard Ltd.,COCHINSHIP,540678,TRANSPORTATION,MARINE PORT & SERVICES,"1,100.4",820.5,191.2,18.90%,18.9,9.6,251.4,69.9,181.5,13.8,429.9,32.7 +Bharat Dynamics Ltd.,BDL,541143,GENERAL INDUSTRIALS,DEFENCE,694.1,481.8,134,21.77%,17.4,0.8,194.1,47,147.1,8,425.4,23.2 +Lux Industries Ltd.,LUXIND,539542,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,643.6,584.2,55,8.61%,5.9,5.4,48,12.1,37.1,12.3,103.1,32.9 +Zensar Technologies Ltd.,ZENSARTECH,504067,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,277.1","1,009.9",230.9,18.61%,36.6,5.7,224.9,51,173.9,7.7,525.8,23.2 +PCBL Ltd.,PCBL,506590,CHEMICALS & PETROCHEMICALS,CARBON BLACK,"1,489.4","1,248.6",238.1,16.02%,48.2,21,171.6,48.8,122.6,3.2,431.6,11.4 +Zydus Wellness Ltd.,ZYDUSWELL,531335,FMCG,PACKAGED 
FOODS,444,423.1,16.8,3.82%,5.8,6.5,8.6,2.7,5.9,0.9,281.2,44.2 +Linde India Ltd.,LINDEINDIA,523457,GENERAL INDUSTRIALS,INDUSTRIAL GASES,729.9,537.7,173.6,24.41%,49.7,1.2,141.3,34.6,108.7,12.8,417.9,49 +FDC Ltd.,FDC,531599,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,513.6,409.9,76.4,15.71%,9.9,1.1,92.7,22.9,69.8,4.2,251.2,15.4 +The New India Assurance Company Ltd.,NIACL,540769,BANKING AND FINANCE,GENERAL INSURANCE,"10,571","10,773.4",-246.5,-2.33%,0,0,-242,-46.7,-176.1,-1.1,947,5.7 +Sundaram Finance Ltd.,SUNDARMFIN,590071,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"1,710.6",322.5,"1,332.1",77.98%,43.6,820.3,470.6,142.8,365.4,33.2,"1,506.7",135.6 +TeamLease Services Ltd.,TEAMLEASE,539658,COMMERCIAL SERVICES & SUPPLIES,MISC. COMMERCIAL SERVICES,"2,285.6","2,240.8",31.8,1.40%,12.9,2.5,29.4,1.8,27.3,16.3,106.6,63.5 +Galaxy Surfactants Ltd.,GALAXYSURF,540935,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,985.8,858.2,124.9,12.70%,24.7,5.4,97.5,20.1,77.4,21.8,349.3,98.5 +Bandhan Bank Ltd.,BANDHANBNK,541153,BANKING AND FINANCE,BANKS,"5,032.2","1,400.2","1,583.4",35.25%,0,"2,048.6",947.2,226.1,721.2,4.5,"2,541.1",15.8 +ICICI Securities Ltd.,ISEC,541179,BANKING AND FINANCE,CAPITAL MARKETS,"1,249",433.5,810.2,64.87%,25.8,215.1,569.4,145.7,423.6,13.1,"1,238.1",38.3 +V-Mart Retail Ltd.,VMART,534976,RETAILING,DEPARTMENT STORES,551.4,548.8,0.7,0.12%,53.2,35.9,-86.4,-22.3,-64.1,-32.4,-103.1,-52.1 +Nippon Life India Asset Management Ltd.,NAM-INDIA,540767,BANKING AND FINANCE,ASSET MANAGEMENT COS.,475.4,156.1,241.4,60.73%,7.2,1.7,310.4,66.1,244.4,3.9,883.3,14.1 +Grindwell Norton Ltd.,GRINDWELL,506076,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,690,536,131.4,19.69%,16.9,1.8,135.3,33.1,101.9,9.2,378.3,34.2 +HDFC Life Insurance Company Ltd.,HDFCLIFE,540777,BANKING AND FINANCE,LIFE INSURANCE,"23,276.6","23,659.3",-508.1,-2.20%,0,0,-373.1,-657.5,378.2,1.8,"1,472.8",6.9 +Elgi Equipments Ltd.,ELGIEQUIP,522074,GENERAL INDUSTRIALS,INDUSTRIAL 
MACHINERY,817.8,663.4,142.7,17.71%,18.7,6.6,129.2,38.8,91.3,2.9,401.9,12.7 +Hindustan Aeronautics Ltd.,HAL,541154,GENERAL INDUSTRIALS,DEFENCE,"6,105.1","4,108.1","1,527.6",27.11%,349.6,0.3,"1,647",414.8,"1,236.7",18.5,"6,037.3",90.3 +BSE Ltd.,BSE,BSE,BANKING AND FINANCE,EXCHANGE,367,172.8,189.2,52.26%,22.7,8.5,163,63.6,120.5,8.8,706,52.1 +Rites Ltd.,RITES,541556,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,608.8,444.5,137.8,23.67%,14.1,1.4,148.8,40.1,101.2,4.2,488.1,20.3 +Fortis Healthcare Ltd.,FORTIS,532843,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"1,783.5","1,439.8",330.2,18.65%,84.1,31.8,231.4,48.8,173.7,2.3,547.6,7.3 +Varroc Engineering Ltd.,VARROC,541578,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,893.5","1,692.6",194.3,10.30%,84.9,50.3,65.9,18.2,54.2,3.5,146.5,9.6 +Adani Green Energy Ltd.,ADANIGREEN,ASM,UTILITIES,ELECTRIC UTILITIES,"2,589",521,"1,699",76.53%,474,"1,165",413,119,372,2.2,"1,305",8.2 +VIP Industries Ltd.,VIPIND,507880,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,548.7,493.2,52.9,9.68%,23.8,12.4,19.3,6,13.3,0.9,110.9,7.8 +CreditAccess Grameen Ltd.,CREDITACC,541770,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"1,247.6",248.8,902.3,72.36%,12.3,423.9,466.8,119.7,347,21.8,"1,204.2",75.7 +CESC Ltd.,CESC,500084,UTILITIES,ELECTRIC UTILITIES,"4,414","3,706",646,14.84%,303,305,461,98,348,2.6,"1,447",10.9 +Jamna Auto Industries Ltd.,JAMNAAUTO,520051,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,608.7,528.2,79.1,13.03%,10.9,0.8,68.7,18.6,50.1,2.4,189.3,4.7 +Suprajit Engineering Ltd.,SUPRAJIT,532509,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,727.6,639.1,69.8,9.85%,25.7,13.6,49.2,14.5,34.8,2.5,146.9,10.6 +JK Paper Ltd.,JKPAPER,532162,COMMERCIAL SERVICES & SUPPLIES,PAPER & PAPER PRODUCTS,"1,708.8","1,242.8",407.3,24.68%,83.5,42,340.6,34.9,302.4,17.9,"1,220.6",72.1 +Bank of Maharashtra,MAHABANK,532525,BANKING AND 
FINANCE,BANKS,"5,735.5","1,179.4","1,920.5",37.90%,0,"2,635.7",935.7,16,919.8,1.3,"3,420.8",4.8 +Aavas Financiers Ltd.,AAVAS,541988,BANKING AND FINANCE,HOUSING FINANCE,497.6,123.5,367.8,74.03%,7.6,203.6,157.4,35.7,121.7,15.4,465.4,58.8 +HDFC Asset Management Company Ltd.,HDFCAMC,541729,BANKING AND FINANCE,ASSET MANAGEMENT COS.,765.4,162,481.1,74.81%,13,2.3,588.1,151.6,436.5,20.4,"1,659.3",77.7 +KEI Industries Ltd.,KEI,517569,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,954.2","1,742.7",203.9,10.47%,15.6,7.5,188.4,48.2,140.2,15.5,528.3,58.5 +Orient Electric Ltd.,ORIENTELEC,541301,CONSUMER DURABLES,CONSUMER ELECTRONICS,570.3,546.2,20.7,3.65%,14.2,5.2,23.4,4.9,18.4,0.9,95.3,4.5 +Deepak Nitrite Ltd.,DEEPAKNTR,506401,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"1,795.1","1,475.8",302.3,17.00%,39.4,2.7,277.2,72.1,205.1,15,797.9,58.5 +Fine Organic Industries Ltd.,FINEORG,541557,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,557.6,409.4,131.1,24.25%,14.4,0.7,133.1,28.9,103.4,33.7,458.8,149.6 +LTIMindtree Ltd.,LTIM,540005,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"9,048.6","7,274.1","1,631.3",18.32%,208.2,47,"1,519.3",357,"1,161.8",39.3,"4,427.5",149.6 +Dalmia Bharat Ltd.,DALBHARAT,542216,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"3,234","2,56",589,18.70%,401,101,172,48,118,6.3,"1,041",54.8 +Godfrey Phillips India Ltd.,GODFRYPHLP,500163,FOOD BEVERAGES & TOBACCO,CIGARETTES-TOBACCO PRODUCTS,"1,412.5","1,151",223.6,16.27%,36.5,6.6,218.5,55.5,202.1,38.9,802.9,154.4 +Vaibhav Global Ltd.,VAIBHAVGBL,532156,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,708.4,641.5,63.5,9.01%,22.6,2.9,41.4,12.4,29.4,1.8,121.3,7.3 +Abbott India Ltd.,ABBOTINDIA,500488,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,549.7","1,113.3",380.9,25.49%,17.8,3.1,415.4,102.5,312.9,147.3,"1,081.4",508.9 +Adani Total Gas Ltd.,ATGL,ASM,UTILITIES,UTILITIES,"1,104.8",815.7,279.9,25.55%,37.6,27.3,224.2,57.2,172.7,1.6,571,5.2 +Nestle India Ltd.,NESTLEIND,500790,FMCG,PACKAGED 
FOODS,"5,070.1","3,811.9","1,224.9",24.32%,111.2,31.4,"1,222",313.9,908.1,94.2,"2,971.1",308.2 +Bayer Cropscience Ltd.,BAYERCROP,506285,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,"1,633.3","1,312.3",304.9,18.85%,11.6,3.7,305.7,82.8,222.9,49.6,844.4,188.1 +Amber Enterprises India Ltd.,AMBER,540902,CONSUMER DURABLES,CONSUMER ELECTRONICS,939.8,867.5,59.6,6.43%,45.2,36.6,-9.5,-3.8,-6.9,-2.1,156.8,46.5 +Rail Vikas Nigam Ltd.,RVNL,542649,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"5,210.3","4,616",298.3,6.07%,6.2,132.7,455.4,85.2,394.3,1.9,"1,478.8",7.1 +Metropolis Healthcare Ltd.,METROPOLIS,542650,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE SERVICES,309.7,233.7,74.8,24.25%,22.2,5.7,48.1,12.5,35.5,6.9,133.4,26 +Polycab India Ltd.,POLYCAB,542652,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"4,253","3,608.8",608.9,14.44%,60.3,26.8,557.2,127.4,425.6,28.4,"1,607.2",107.1 +Multi Commodity Exchange of India Ltd.,MCX,534091,BANKING AND FINANCE,EXCHANGE,184,193.8,-28.7,-17.38%,6.6,0.1,-16.4,1.6,-19.1,-3.7,44.8,8.8 +IIFL Finance Ltd.,IIFL,532636,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,"2,533.7",788.3,"1,600.8",64.66%,43.3,932.1,683.5,158,474.3,12.4,"1,690.7",44.4 +Ratnamani Metals & Tubes Ltd.,RATNAMANI,520111,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"1,141.9",886.3,244.9,21.65%,23.6,10.8,221.1,56.8,163.9,23.4,622.6,88.8 +RHI Magnesita India Ltd.,RHIM,534076,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,989.7,839,147.9,14.98%,44.2,8.5,97.9,26.3,71.3,3.5,-502.2,-24.3 +Birlasoft Ltd.,BSOFT,532400,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,325.4","1,102.7",207.1,15.81%,21.5,5.7,195.5,50.4,145.1,5.2,378.4,13.7 +EIH Ltd.,EIHOTEL,500840,HOTELS RESTAURANTS & TOURISM,HOTELS,552.5,387.6,142.9,26.94%,33.2,5.6,126.1,36.2,93.1,1.5,424.1,6.8 +Affle (India) Ltd.,AFFLE,542752,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,441.2,344.1,87.2,20.22%,18.4,5.5,73.2,6.4,66.8,5,264.3,19.8 +Westlife Foodworld Ltd.,WESTLIFE,505533,HOTELS RESTAURANTS & 
TOURISM,RESTAURANTS,618,516.5,98.2,15.98%,43.9,27.4,30.2,7.8,22.4,1.4,107.7,6.9 +IndiaMART InterMESH Ltd.,INDIAMART,542726,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,329.3,214.7,80,27.15%,8,2.3,104.3,23.9,69.4,11.4,321.1,53.6 +Infosys Ltd.,INFY,500209,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"39,626","29,554","9,44",24.21%,"1,166",138,"8,768","2,553","6,212",15,"24,871",60.1 +Sterling and Wilson Renewable Energy Ltd.,SWSOLAR,542760,COMMERCIAL SERVICES & SUPPLIES,CONSULTING SERVICES,776.7,758,1.5,0.19%,4.3,64.3,-50,4.6,-54.2,-2.9,-668.4,-35.2 +ABB India Ltd.,ABB,500002,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"2,846","2,330.7",438.5,15.84%,30.3,0.9,484.2,122.2,362.9,17.1,"1,208.7",57 +Poly Medicure Ltd.,POLYMED,531768,HEALTHCARE EQUIPMENT & SUPPLIES,HEALTHCARE SUPPLIES,351.4,253.1,84.2,24.97%,16,2.2,80.9,18.8,62.2,6.5,233.7,24.4 +GMM Pfaudler Ltd.,GMMPFAUDLR,505255,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,946,795.5,142,15.15%,32.2,21.5,96.8,26.5,71.1,15.8,183.2,40.8 +Gujarat Fluorochemicals Ltd.,FLUOROCHEM,542812,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,960.3,783.7,163.1,17.23%,67.5,34.2,74.8,22.1,52.7,4.8,915.2,83.3 +360 One Wam Ltd.,360ONE,542772,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,617.1,235.6,317.8,57.31%,13.7,139.9,226.8,40.8,186,5.2,696.8,19.5 +Tata Communications Ltd.,TATACOMM,500483,TELECOM SERVICES,OTHER TELECOM SERVICES,"4,897.9","3,857.1","1,015.5",20.84%,605.1,137.4,298.3,77.9,220.7,7.7,"1,322.3",46.4 +Alkyl Amines Chemicals Ltd.,ALKYLAMINE,506767,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,354.5,303.9,48.3,13.71%,12.5,1.7,36.4,9.2,27.2,5.3,171.3,33.5 +CSB Bank Ltd.,CSBBANK,542867,BANKING AND FINANCE,BANKS,835.8,317.5,174.6,25.41%,0,343.6,178,44.8,133.2,7.7,577.7,33.3 +Indian Railway Catering & Tourism Corporation Ltd.,IRCTC,542830,DIVERSIFIED CONSUMER SERVICES,TRAVEL SUPPORT SERVICES,"1,042.4",628.8,366.6,36.83%,14,4.4,395.2,100.5,294.7,3.7,"1,061.2",13.3 +Sumitomo Chemical India Ltd.,SUMICHEM,542920,CHEMICALS & 
PETROCHEMICALS,AGROCHEMICALS,928,715.5,187.9,20.80%,15.8,1.2,195.5,52,143.4,2.9,367.7,7.4 +Century Textiles & Industries Ltd.,CENTURYTEX,500040,COMMERCIAL SERVICES & SUPPLIES,PAPER & PAPER PRODUCTS,"1,114.9","1,069.2",33.8,3.07%,59.2,17,-30.5,-3.3,-30.4,-2.8,117.7,10.5 +SBI Cards and Payment Services Ltd.,SBICARD,543066,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"4,221.4","2,018.8","1,327",32.47%,46.8,604.9,809.4,206.4,603,6.4,"2,302.2",24.3 +Hitachi Energy India Ltd.,POWERINDIA,543187,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"1,228.2","1,162.6",65.3,5.32%,22.5,10.7,32.4,7.6,24.7,5.8,82.5,19.5 +Suven Pharmaceuticals Ltd.,SUVENPHAR,543064,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,250.9,133.1,98,42.40%,11.9,0.5,105.4,25.8,79.6,3.1,431.8,17 +Tata Chemicals Ltd.,TATACHEM,500770,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"4,083","3,179",819,20.49%,234,145,627,120,428,16.8,"2,06",80.8 +Aarti Drugs Ltd.,AARTIDRUGS,524348,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,642.2,565.1,76.4,11.92%,12.6,8.2,56.3,16.7,39.6,4.3,180.2,19.6 +Gujarat Ambuja Exports Ltd.,GAEL,524226,FMCG,EDIBLE OILS,"1,157.7","1,012.2",103.3,9.26%,30.5,5.9,109.1,26.3,82.8,3.6,305.1,13.3 +Polyplex Corporation Ltd.,POLYPLEX,524051,COMMERCIAL SERVICES & SUPPLIES,CONTAINERS & PACKAGING,"1,595.7","1,451.5",120.6,7.67%,75.1,9.9,59.1,10.9,27.9,8.9,71.1,22.6 +Chalet Hotels Ltd.,CHALET,542399,HOTELS RESTAURANTS & TOURISM,HOTELS,318.2,188.6,126,40.04%,35,50.1,44.5,8,36.4,1.8,266.7,13 +Adani Enterprises Ltd.,ADANIENT,512599,COMMERCIAL SERVICES & SUPPLIES,COMMODITY TRADING & DISTRIBUTION,"23,066","20,087.2","2,430.1",10.79%,757,"1,342.8",791,397.8,227.8,2,"2,444.3",21.4 +YES Bank Ltd.,YESBANK,532648,BANKING AND FINANCE,BANKS,"7,980.6","2,377.1",810,12.06%,0,"4,793.6",304.4,75.7,228.6,0.1,836.6,0.3 +EPL Ltd.,EPL,500135,COMMERCIAL SERVICES & SUPPLIES,CONTAINERS & PACKAGING,"1,011.2",820.6,181,18.07%,83.6,30.6,76.4,25.4,50.5,1.6,251.9,7.9 +Network18 Media & Investments 
Ltd.,NETWORK18,532798,MEDIA,BROADCASTING & CABLE TV,"2,052.2","2,083.8",-218.3,-11.70%,56.8,66.2,-154.5,-6.5,-61,-0.6,-144.2,-1.4 +CIE Automotive India Ltd.,CIEINDIA,532756,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,299.4","1,934",345.4,15.15%,78.3,31,256.1,69.1,375.4,9.9,298.4,7.9 +Vedanta Ltd.,VEDL,500295,METALS & MINING,ALUMINIUM AND ALUMINIUM PRODUCTS,"39,585","27,466","11,479",29.47%,"2,642","2,523","8,177","9,092","-1,783",-4.8,"5,202",14 +Rossari Biotech Ltd.,ROSSARI,543213,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,484.8,419.9,63.6,13.15%,15.1,5,44.8,11.9,32.9,6,116.8,21.2 +KPIT Technologies Ltd.,KPITTECH,542651,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,208.6",959.2,239.9,20.01%,48.1,13.6,187.7,46.3,140.9,5.2,486.9,18 +Intellect Design Arena Ltd.,INTELLECT,538835,SOFTWARE & SERVICES,IT SOFTWARE PRODUCTS,631.7,497.2,121.9,19.69%,33.7,0.8,96.5,25.7,70.4,5.2,316.6,23.2 +Balaji Amines Ltd.,BALAMINES,530999,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,387.3,326.8,53.8,14.13%,10.8,1.8,48,11.6,34.7,10.7,197.3,60.9 +UTI Asset Management Company Ltd.,UTIAMC,543238,BANKING AND FINANCE,ASSET MANAGEMENT COS.,405.6,172.5,231.5,57.30%,10.4,2.8,219.8,37,182.8,14.4,562.9,44.3 +Mazagon Dock Shipbuilders Ltd.,MAZDOCK,543237,TRANSPORTATION,SHIPPING,"2,079.2","1,651.1",176.6,9.66%,20.2,1.3,406.6,102.8,332.9,16.5,"1,327.6",65.8 +Computer Age Management Services Ltd.,CAMS,543232,BANKING AND FINANCE,CAPITAL MARKETS,284.7,153,122.1,44.39%,17.4,2,112.4,28.6,84.5,17.2,309.2,62.9 +Happiest Minds Technologies Ltd.,HAPPSTMNDS,543227,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,428.8,324,82.6,20.32%,14.6,11.2,79.1,20.7,58.5,3.9,232,15.6 +Triveni Turbine Ltd.,TRITURBINE,533655,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,402.3,313.4,74.3,19.17%,5.1,0.6,83.2,19,64.2,2,233.1,7.3 +Angel One Ltd.,ANGELONE,ASM,BANKING AND FINANCE,CAPITAL MARKETS,"1,049.3",602.6,443.4,42.31%,11.2,26.4,407.2,102.7,304.5,36.3,"1,020.2",121.7 +Tanla Platforms Ltd.,TANLA,532790,SOFTWARE 
& SERVICES,INTERNET SOFTWARE & SERVICES,"1,014.9",811.8,196.8,19.51%,22.6,1.8,178.7,36.2,142.5,10.6,514.7,38.3 +Max Healthcare Institute Ltd.,MAXHEALTH,543220,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"1,408.6",975.8,387.4,28.42%,57.9,8.5,366.4,89.7,276.7,2.9,990.1,10.2 +Asahi India Glass Ltd.,ASAHIINDIA,515030,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,122.6",934,185.6,16.58%,43,34.4,111.3,30.2,86.9,3.6,343.5,14.1 +Prince Pipes & Fittings Ltd.,PRINCEPIPE,542907,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,660.4,562.3,94.2,14.35%,22.5,0.7,92.8,22.2,70.6,5.2,219.8,19.9 +Route Mobile Ltd.,ROUTE,543228,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"1,018.3",886.5,128.1,12.63%,21.4,6.5,103.8,15.5,88.8,14.2,365.3,58.3 +KPR Mill Ltd.,KPRMILL,532889,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"1,533","1,212.9",298,19.72%,46,18.1,256,54.2,201.8,5.9,788.8,23.1 +Infibeam Avenues Ltd.,INFIBEAM,539807,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,792.6,719.7,70.2,8.89%,17.1,0.5,55.2,14.7,41,0.1,142.2,0.5 +Restaurant Brands Asia Ltd.,RBA,543248,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,628.2,568.7,56.2,9.00%,78.6,31.5,-50.7,0,-46,-0.9,-220.3,-4.5 +Larsen & Toubro Ltd.,LT,500510,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"52,157","45,392.1","5,632",11.04%,909.9,864,"4,991.1","1,135.5","3,222.6",22.9,"12,255.3",89.2 +Gland Pharma Ltd.,GLAND,543245,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,426.6","1,049.3",324.1,23.60%,81.3,6,289.9,95.8,194.1,11.8,698.8,42.4 +Macrotech Developers Ltd.,LODHA,543287,REALTY,REALTY,"1,755.1","1,333.5",416.1,23.78%,29.3,123.1,269.2,62.4,201.9,2.1,"1,529.2",15.9 +Poonawalla Fincorp Ltd.,POONAWALLA,524000,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),745.3,178.9,531.7,71.98%,14.7,215.5,"1,124.6",270,860.2,11.2,"1,466.4",19.1 +The Fertilisers and Chemicals Travancore Ltd.,FACT,590024,FERTILIZERS,FERTILIZERS,"1,713.6","1,530.8",132.4,7.96%,5.3,61.2,105.2,0,105.2,1.6,508.4,7.9 +Home First Finance Company India 
Ltd.,HOMEFIRST,543259,BANKING AND FINANCE,HOUSING FINANCE,278,53.7,211.6,77.43%,2.8,117,96.4,22.1,74.3,8.4,266.2,30.2 +CG Power and Industrial Solutions Ltd.,CGPOWER,500093,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"2,019","1,692.9",308.6,15.42%,22.9,0.4,329.9,86.2,242.3,1.6,"1,1",7.2 +Laxmi Organic Industries Ltd.,LXCHEM,543277,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,660.5,613.3,38.9,5.97%,27.5,2.1,17.5,6.8,10.7,0.4,100.6,3.8 +Anupam Rasayan India Ltd.,ANURAS,543275,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,395.6,284.7,107.5,27.41%,19.8,20.4,70.7,22,40.7,3.8,178.9,16.6 +Kalyan Jewellers India Ltd.,KALYANKJIL,ASM,TEXTILES APPARELS & ACCESSORIES,GEMS & JEWELLERY,"4,427.7","4,100.9",313.7,7.11%,66.9,81.7,178.1,43.3,135.2,1.3,497.9,4.8 +Jubilant Pharmova Ltd.,JUBLPHARMA,530019,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,690.2","1,438.5",241.8,14.39%,96.6,66.1,89,35.9,62.5,3.9,-44.6,-2.8 +Indigo Paints Ltd.,INDIGOPNTS,543258,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,273.4,228.7,41.8,15.45%,10,0.5,34.3,8.2,26.1,5.5,132.4,27.8 +Indian Railway Finance Corporation Ltd.,IRFC,543257,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"6,767.5",33.5,"6,732.4",99.50%,2.1,"5,181.5","1,549.9",0,"1,549.9",1.2,"6,067.6",4.6 +Mastek Ltd.,MASTEK,523704,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,770.4,642.5,123,16.07%,20.9,12.6,90.3,25,62.8,20.5,269.7,88 +Equitas Small Finance Bank Ltd.,EQUITASBNK,543243,BANKING AND FINANCE,BANKS,"1,540.4",616.8,330.2,24.30%,0,593.4,267,68.9,198.1,1.8,749.5,6.7 +Tata Teleservices (Maharashtra) Ltd.,TTML,532371,TELECOM SERVICES,TELECOM SERVICES,288.6,159.3,127.5,44.45%,36.3,403.2,-310.2,0,-310.2,-1.6,"-1,168.3",-6 +Praj Industries Ltd.,PRAJIND,522205,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,893.3,798.4,84,9.52%,9.1,1,84.8,22.4,62.4,3.4,271.4,14.8 +Nazara Technologies Ltd.,NAZARA,543280,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,309.5,269.4,26.7,8.98%,15.1,2.7,21.2,-1.3,19.8,3,60,9.1 +Jubilant Ingrevia 
Ltd.,JUBLINGREA,543271,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,028.5",902.3,117.7,11.54%,33.9,12.5,79.8,22.4,57.5,3.6,258.9,16.4 +Sona BLW Precision Forgings Ltd.,SONACOMS,543300,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,796.9,567.5,223.3,28.24%,53.4,6,164.1,40.1,123.8,2.1,462.8,7.9 +Chemplast Sanmar Ltd.,CHEMPLASTS,543336,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,025",941.8,46,4.65%,35.3,38.6,9.2,-16.8,26.1,1.6,35.3,2.2 +Aptus Value Housing Finance India Ltd.,APTUS,543335,BANKING AND FINANCE,HOUSING FINANCE,344.5,50.6,277.5,83.18%,2.6,96.1,189.6,41.5,148,3,551.1,11.1 +Clean Science & Technology Ltd.,CLEAN,543318,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,187.1,106.3,74.8,41.32%,11.1,0.3,69.5,17.3,52.2,4.9,275.5,25.9 +Medplus Health Services Ltd.,MEDPLUS,543427,HEALTHCARE EQUIPMENT & SUPPLIES,HEALTHCARE SUPPLIES,"1,419","1,323.5",85.1,6.04%,55.5,23.5,16.4,1.9,14.6,1.2,58.3,4.9 +Nuvoco Vistas Corporation Ltd.,NUVOCO,543334,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,578.9","2,243",329.9,12.82%,225.6,139.9,-29.6,-31.1,1.5,0,141.8,4 +Star Health and Allied Insurance Company Ltd.,STARHEALTH,543412,BANKING AND FINANCE,GENERAL INSURANCE,"3,463.2","3,295.8",165.7,4.79%,0,0,167.1,41.8,125.3,2.1,725.4,12.4 +Go Fashion (India) Ltd.,GOCOLORS,543401,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,192.8,132.2,56.6,29.98%,25.8,8.9,25.8,5.7,20,3.7,85.4,15.8 +PB Fintech Ltd.,POLICYBZR,543390,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,909.1,900.7,-89.1,-10.98%,22.3,7.2,-21.1,-0.3,-20.2,-0.5,-127.9,-2.8 +FSN E-Commerce Ventures Ltd.,NYKAA,543384,SOFTWARE & SERVICES,INTERNET & CATALOGUE RETAIL,"1,515.6","1,426.4",80.6,5.35%,54.6,21.3,13.3,4,5.8,0,19.8,0.1 +Krishna Institute of Medical Sciences Ltd.,KIMS,543308,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,655.4,475.2,177.3,27.17%,32.6,8.9,138.6,37.3,92,11.5,342.1,42.7 +Zomato Ltd.,ZOMATO,543320,SOFTWARE & SERVICES,INTERNET SOFTWARE & 
SERVICES,"3,06","2,895",-47,-1.65%,128,16,21,-15,36,0,-496.8,-0.6 +Brightcom Group Ltd.,BCG,532368,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"1,690.5","1,172.3",518,30.65%,72.3,0.1,445.8,124.3,321.5,1.6,"1,415.2",7 +Shyam Metalics and Energy Ltd.,SHYAMMETL,543299,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"2,978.9","2,633.6",307.1,10.44%,176.5,35.4,133.4,-348.6,484.1,18.9,"1,049.9",41.2 +G R Infraprojects Ltd.,GRINFRA,543317,CEMENT AND CONSTRUCTION,ROADS & HIGHWAYS,"1,909.2","1,415.7",467.1,24.81%,61.7,144.6,287.1,69.9,217.2,22.5,"1,240.3",128.3 +RattanIndia Enterprises Ltd.,RTNINDIA,534597,UTILITIES,ELECTRIC UTILITIES,"1,618.1","1,392.8",1.5,0.11%,4.3,28.8,142.2,1.7,140.9,1,147.6,1.1 +Borosil Renewables Ltd.,BORORENEW,502219,CONSUMER DURABLES,HOUSEWARE,406.3,369.2,32.5,8.09%,31,9.6,28.9,-1.1,25.1,1.9,32.1,2.5 +HLE Glascoat Ltd.,HLEGLAS,522215,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,227.8,198,26.5,11.79%,6.1,5.8,16.1,5.3,10,1.6,54.4,8 +Tata Investment Corporation Ltd.,TATAINVEST,501301,DIVERSIFIED,HOLDING COMPANIES,125,10.1,113.8,91.88%,0.2,4.7,110.1,-1.3,124.4,24.6,326.1,64.4 +Sapphire Foods India Ltd.,SAPPHIRE,543397,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,650.1,527.5,115.1,17.91%,76.8,24.5,21.4,6.2,15.3,2.4,208.5,32.7 +Devyani International Ltd.,DEVYANI,543330,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,826,665,154.4,18.84%,86.3,41.7,19,-16.8,33.4,0.3,177.5,1.5 +Vijaya Diagnostic Centre Ltd.,VIJAYA,543350,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE SERVICES,145.6,81.5,57.4,41.31%,13.7,5.9,44.6,11,33.3,3.3,103.4,10.1 +C.E. 
Info Systems Ltd.,MAPMYINDIA,543425,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,99.3,50.1,41,44.98%,3.7,0.7,44.7,11.1,33,6.1,122.9,22.7 +Latent View Analytics Ltd.,LATENTVIEW,543398,SOFTWARE & SERVICES,DATA PROCESSING SERVICES,172.7,124.9,30.8,19.78%,2.3,0.8,44.7,10.6,34,1.7,153.6,7.5 +Metro Brands Ltd.,METROBRAND,543426,RETAILING,FOOTWEAR,571.9,400.3,155.4,27.96%,57.2,19.7,94.7,27.5,66.7,2.5,340,12.5 +Easy Trip Planners Ltd.,EASEMYTRIP,543272,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,144.6,76.9,64.8,45.71%,1,2,64.7,17.7,47.2,0.3,146,0.8 +Shree Renuka Sugars Ltd.,RENUKA,532670,FOOD BEVERAGES & TOBACCO,SUGAR,"2,564.7","2,491",63.7,2.49%,64.1,216.8,-207.2,-1.6,-204.9,-1,-286,-1.3 +One97 Communications Ltd.,PAYTM,543396,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"2,662.5","2,749.6",-231,-9.17%,180.1,7,-279.9,12.7,-290.5,-5,"-1,207.9",-19 +MTAR Technologies Ltd.,MTARTECH,543270,GENERAL INDUSTRIALS,DEFENCE,167.7,130.7,36.1,21.64%,5.8,5.5,25.7,5.2,20.5,6.7,103.3,33.6 +Capri Global Capital Ltd.,CGCL,531595,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),557.4,229.3,304.8,54.70%,23.1,195.8,86,20.8,65.2,3.2,231.2,11.2 +GMR Airports Infrastructure Ltd.,GMRINFRA,ASM,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"2,185","1,336.8",726.7,35.22%,373,695.8,-252,54.9,-91,-0.1,-370.9,-0.6 +Triveni Engineering & Industries Ltd.,TRIVENI,532356,FOOD BEVERAGES & TOBACCO,SUGAR,"1,629.7","1,554.5",62.9,3.89%,25.8,10.2,39.3,10.1,29.1,1.3,434.3,19.8 +Delhivery Ltd.,DELHIVERY,543529,TRANSPORTATION,TRANSPORTATION - LOGISTICS,"2,043","1,957.3",-15.6,-0.80%,171.2,19.6,-105.2,-2.1,-102.9,-1.4,-546.7,-7.5 +Life Insurance Corporation of India,LICI,543526,BANKING AND FINANCE,LIFE INSURANCE,"202,394.9","193,612.5","8,445",4.18%,0,0,"8,696.5","1,083.9","8,030.3",12.7,"37,204.8",58.8 +Campus Activewear Ltd.,CAMPUS,543523,RETAILING,FOOTWEAR,259.1,234.2,24.5,9.46%,18.1,6.5,0.4,0.1,0.3,0,103.1,3.4 +Motherson Sumi Wiring India Ltd.,MSUMI,543498,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS 
& EQUIPMENT,"2,110.2","1,856.5",248.1,11.79%,36.4,7.4,210,54.1,155.9,0.3,523.6,1.2 +Olectra Greentech Ltd.,OLECTRA,532439,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,310.3,266.6,40.5,13.20%,8.8,9.7,25.2,8,18.6,2.2,78.5,9.6 +Patanjali Foods Ltd.,PATANJALI,500368,FMCG,EDIBLE OILS,"7,845.8","7,426.6",395.3,5.05%,60.1,24,335.1,80.5,254.5,7,875.2,24.2 +Raymond Ltd.,RAYMOND,500330,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"2,320.7","1,938.8",314.6,13.96%,65.4,89.3,204.2,50.7,159.8,24,"1,514.2",227.5 +Swan Energy Ltd.,SWANENERGY,503310,REALTY,REALTY,"1,230.1",966.3,257,21.01%,27.1,58.3,178.4,12.8,84.6,6.7,308.4,11.7 +Samvardhana Motherson International Ltd.,MOTHERSON,517334,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"23,639.2","21,585","1,888.8",8.05%,867.4,487.9,449.5,229.2,201.6,0.3,"1,910.3",2.8 +Vedant Fashions Ltd.,MANYAVAR,543463,RETAILING,SPECIALTY RETAIL,233.4,125.5,92.8,42.51%,32.5,10.7,64.8,16.1,48.7,2,399.9,16.5 +Adani Wilmar Ltd.,AWL,543458,FMCG,EDIBLE OILS,"12,331.2","12,123.5",143.7,1.17%,95.7,220.2,-161.8,-31.5,-130.7,-1,130.1,1 +Mahindra Lifespace Developers Ltd.,MAHLIFE,532313,REALTY,REALTY,25.7,52.7,-34.9,-196.45%,3.1,0.2,-30.3,-10.8,-18.9,-1.2,10.5,0.7 +Tejas Networks Ltd.,TEJASNET,540595,TELECOM SERVICES,OTHER TELECOM SERVICES,413.9,383,13,3.28%,41.7,7,-17.7,-5.1,-12.6,-0.7,-61.3,-3.5 +Aether Industries Ltd.,AETHER,543534,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,178.3,118.2,46,28.00%,9.7,1.6,48.7,12.1,36.7,2.8,139.1,10.5 +JBM Auto Ltd.,JBMA,ASM,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,238.8","1,091.3",139.7,11.35%,41.2,47.9,58.3,11.3,44.2,3.7,136.8,11.6 +Deepak Fertilisers & Petrochemicals Corporation Ltd.,DEEPAKFERT,500645,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"2,443.2","2,138.1",286.1,11.80%,81.2,107.1,116.8,53.3,60.1,4.8,674.5,53.4 +Sharda Cropchem Ltd.,SHARDACROP,538666,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,604.3,559.6,21.2,3.65%,74,4.6,-33.8,-6.3,-27.6,-3.1,191,21.2 +Shoppers Stop 
Ltd.,SHOPERSTOP,532638,RETAILING,DEPARTMENT STORES,"1,049.7",878.2,160.9,15.49%,108.2,54.9,3.5,0.8,2.7,0.2,94.2,8.6 +BEML Ltd.,BEML,500048,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,924,855.3,61.5,6.70%,15.8,10.8,42.2,-9.6,51.8,12.4,200.8,48.2 +Lemon Tree Hotels Ltd.,LEMONTREE,541233,HOTELS RESTAURANTS & TOURISM,HOTELS,230.1,125.3,101.9,44.84%,22.6,47.3,34.8,8.6,22.6,0.3,130.1,1.6 +Rainbow Childrens Medicare Ltd.,RAINBOW,543524,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,340.5,215.1,117.6,35.34%,26.8,13.3,85.2,22.1,62.9,6.2,215.4,21.2 +UCO Bank,UCOBANK,532505,BANKING AND FINANCE,BANKS,"5,865.6","1,581.5",981.9,18.81%,0,"3,302.3",639.8,238.1,403.5,0.3,"1,84",1.5 +Piramal Pharma Ltd.,PPLPHARMA,543635,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,960.6","1,645.7",265.6,13.90%,184.5,109.9,20.4,34.5,5,0,-133.6,-1 +KSB Ltd.,KSB,500249,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,572.2,493.4,70.3,12.47%,12.3,2,64.5,17.1,50.1,14.4,209.7,60.3 +Data Patterns (India) Ltd.,DATAPATTNS,543428,GENERAL INDUSTRIALS,DEFENCE,119.2,67.5,40.8,37.63%,3.1,2.3,46.3,12.5,33.8,6,148.3,26.5 +Global Health Ltd.,MEDANTA,543654,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,864.7,631.1,212.9,25.22%,42.9,20.1,170.6,45.4,125.2,4.7,408.9,15.2 +Aarti Industries Ltd.,AARTIIND,524208,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,454","1,221.2",232.8,16.01%,93,58.2,81.6,-9.1,90.7,2.5,446.2,12.3 +BLS International Services Ltd.,BLS,540073,DIVERSIFIED CONSUMER SERVICES,TRAVEL SUPPORT SERVICES,416.4,321,86.7,21.27%,7.3,1,87.2,5.2,78.7,1.9,267.6,6.5 +Archean Chemical Industries Ltd.,ACI,543657,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,301.7,195,95.5,32.86%,17.5,1.9,87.3,21.3,66,5.4,394.4,32.1 +Adani Power Ltd.,ADANIPOWER,ASM,UTILITIES,ELECTRIC UTILITIES,"14,935.7","7,819.2","5,171.4",39.81%,"1,004.5",888.4,"5,223.6","-1,370.6","6,594.2",16.5,"20,604.8",53.4 +Craftsman Automation Ltd.,CRAFTSMAN,543276,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & 
EQUIPMENT,"1,183.8",941.6,237.5,20.14%,66.8,41.6,133.8,29.6,94.5,44.1,298.3,141.2 +NMDC Ltd.,NMDC,526371,METALS & MINING,MINING,"4,335","2,823.6","1,190.4",29.66%,88.8,18.6,"1,404.1",379,"1,026.2",3.5,"5,862.2",20 +Epigral Ltd.,EPIGRAL,543332,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,479.1,370.2,107.9,22.57%,31.5,21.3,56.1,17.9,38,9.1,223.4,53.8 +Apar Industries Ltd.,APARINDS,532259,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"3,944.7","3,576.2",349.8,8.91%,28.2,103.1,237.3,62.9,173.9,45.4,783.9,204.8 +Bikaji Foods International Ltd.,BIKAJI,543653,FMCG,PACKAGED FOODS,614.7,521,87.7,14.41%,15.6,2.9,75.2,15.4,61.2,2.5,173.6,6.9 +Five-Star Business Finance Ltd.,FIVESTAR,543663,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),522.4,133.2,375,72.28%,5.7,105.9,267,67.6,199.4,6.8,703,24.1 +Ingersoll-Rand (India) Ltd.,INGERRAND,500210,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,282.8,210.7,65.7,23.76%,4.6,0.6,67,17.2,49.7,15.8,218.5,69.2 +KFIN Technologies Ltd.,KFINTECH,543720,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,215.3,115.3,93.7,44.82%,12.6,3.2,84.2,22.3,61.4,3.6,215.1,12.6 +Piramal Enterprises Ltd.,PEL,500302,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"2,205.2","1,320.1","1,117.9",50.97%,38.3,"1,038.9",-11.8,10.7,48.2,2,"3,906.5",173.9 +NMDC Steel Ltd.,NSLNISP,543768,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,290.3,349.6,-72.2,-26.04%,74.5,40.8,-174.7,-43.6,-131.1,-0.5,, +Eris Lifesciences Ltd.,ERIS,540596,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,508.8,324.2,181.1,35.85%,42.1,16.3,126.2,3.9,123.4,9.1,385.6,28.3 +Mankind Pharma Ltd.,MANKIND,543904,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"2,768.1","2,025.5",682.6,25.21%,96.5,8.6,637.5,129.8,501,12.5,"1,564.8",39.1 +Kaynes Technology India Ltd.,KAYNES,ASM,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,369.8,312.1,48.8,13.52%,6.5,11.8,39.4,7.1,32.3,5.5,143.2,24.6 +Safari Industries (India) Ltd.,SAFARI,523025,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & 
ACCESSORIES,372.9,306.6,63.5,17.15%,12.2,2.2,51.9,12.1,39.8,16.7,162.3,68.2 +Saregama India Ltd.,SAREGAMA,532163,MEDIA,MOVIES & ENTERTAINMENT,185.6,111.5,60.9,35.32%,8.2,0.2,65.6,17.6,48.1,2.5,193.4,10 +Syrma SGS Technology Ltd.,SYRMA,543573,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,720.6,662.7,49,6.88%,11.6,8,37,6.4,28.3,1.6,132.4,7.5 +Jindal Saw Ltd.,JINDALSAW,ASM,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"5,488.9","4,662",804.2,14.71%,142.5,188.7,495.6,139.6,375.7,11.8,"1,135.8",35.5 +Godawari Power & Ispat Ltd.,GPIL,532734,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"1,314.2",929.6,361.4,28.00%,34.8,10.2,339.6,86.1,256.9,20.6,785.5,63 +Gillette India Ltd.,GILLETTE,507815,FMCG,PERSONAL PRODUCTS,676.2,530.8,136.7,20.48%,20.1,0.1,125.2,32.5,92.7,28.4,361.6,111 +Symphony Ltd.,SYMPHONY,517385,CONSUMER DURABLES,CONSUMER ELECTRONICS,286,234,41,14.91%,7,2,43,8,35,5.1,114,16.5 +Glenmark Life Sciences Ltd.,GLS,543322,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,600.7,428.3,167.1,28.06%,13.1,0.4,158.9,40.2,118.7,9.7,505.5,41.3 +Usha Martin Ltd.,USHAMART,517146,METALS & MINING,IRON & STEEL PRODUCTS,806,640.4,144.3,18.39%,18,6.4,141.2,35,109.5,3.6,399.4,13.1 +Ircon International Ltd.,IRCON,541956,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"3,136.3","2,771.2",215.7,7.22%,27.1,36.9,301.2,77.6,250.7,2.7,884.6,9.4 +Ujjivan Small Finance Bank Ltd.,UJJIVANSFB,542904,BANKING AND FINANCE,BANKS,"1,579.8",528.6,483.4,34.75%,0,567.8,436.4,108.7,327.7,1.7,"1,254.5",6.4 +Procter & Gamble Health Ltd.,PGHL,500126,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,311,216.3,88.7,29.08%,6.5,0.2,88,22.5,65.6,39.5,231.4,139.4 +Allcargo Logistics Ltd.,ALLCARGO,532749,TRANSPORTATION,TRANSPORTATION - LOGISTICS,"3,336.3","3,188.8",118,3.57%,106.7,36.7,14.2,1.3,21.8,0.9,361.9,14.7 +Sheela Foam Ltd.,SFL,540203,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,637.6,547,66.2,10.80%,21.9,8.6,60.2,15.6,44,4.5,192.4,17.7 +Alok Industries Ltd.,ALOKINDS,521070,TEXTILES 
APPARELS & ACCESSORIES,TEXTILES,"1,369.3","1,323.1",35.9,2.64%,78.6,142.2,-174.6,0,-174.8,-0.3,-948.4,-1.9 +Minda Corporation Ltd.,MINDACORP,538962,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,197.9","1,064.5",131.3,10.98%,41.4,14.9,77,18.7,58.8,2.5,278.2,11.6 +Concord Biotech Ltd.,CONCORDBIO,543960,PHARMACEUTICALS & BIOTECHNOLOGY,BIOTECHNOLOGY,270.5,143.2,119.2,45.43%,13.3,0.8,113.2,28.7,81,7.7,, \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/product_info_1.md b/sdk/ai/azure-ai-assistants/samples/product_info_1.md new file mode 100644 index 000000000000..041155831d53 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/product_info_1.md @@ -0,0 +1,51 @@ +# Information about product item_number: 1 + +## Brand +Contoso Galaxy Innovations + +## Category +Smart Eyewear + +## Features +- Augmented Reality interface +- Voice-controlled AI assistant +- HD video recording with 3D audio +- UV protection and blue light filtering +- Wireless charging with extended battery life + +## User Guide + +### 1. Introduction +Introduction to your new SmartView Glasses + +### 2. Product Overview +Overview of features and controls + +### 3. Sizing and Fit +Finding your perfect fit and style adjustments + +### 4. Proper Care and Maintenance +Cleaning and caring for your SmartView Glasses + +### 5. Break-in Period +Adjusting to the augmented reality experience + +### 6. Safety Tips +Safety guidelines for public and private spaces + +### 7. 
Troubleshooting +Quick fixes for common issues + +## Warranty Information +Two-year limited warranty on all electronic components + +## Contact Information +Customer Support at support@contoso-galaxy-innovations.com + +## Return Policy +30-day return policy with no questions asked + +## FAQ +- How to sync your SmartView Glasses with your devices +- Troubleshooting connection issues +- Customizing your augmented reality environment diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py new file mode 100644 index 000000000000..e5fc84cba49e --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py @@ -0,0 +1,132 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with the + Azure AI Search tool from the Azure assistants service using a synchronous client. + +PREREQUISITES: + You will need an Azure AI Search Resource. 
+ If you already have one, you must create an assistant that can use an existing Azure AI Search index: + https://learn.microsoft.com/azure/ai-services/assistants/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search + + If you do not already have an assistant Setup with an Azure AI Search resource, follow the guide for a Standard assistant setup: + https://learn.microsoft.com/azure/ai-services/assistants/quickstart?pivots=programming-language-python-azure + +USAGE: + python sample_assistants_azure_ai_search.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AI_SEARCH_CONNECTION_NAME - The connection name of the AI Search connection to your Foundry project, + as found under the "Name" column in the "Connected Resources" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import AzureAISearchQueryType, AzureAISearchTool, ListSortOrder, MessageRole + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# [START create_assistant_with_azure_ai_search_tool] +conn_id = os.environ["AI_AZURE_AI_CONNECTION_ID"] + +print(conn_id) + +# Initialize assistant AI search tool and add the search index connection id +ai_search = AzureAISearchTool( + index_connection_id=conn_id, + index_name="sample_index", + query_type=AzureAISearchQueryType.SIMPLE, + top_k=3, + filter="" +) + +# Create assistant with AI search tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=ai_search.definitions, + tool_resources=ai_search.resources, + ) + # [END create_assistant_with_azure_ai_search_tool] + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What is the temperature rating of the cozynights sleeping bag?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Fetch run steps to get the details of the assistant run + run_steps = assistants_client.list_run_steps(thread_id=thread.id, run_id=run.id) + for step in 
run_steps.data: + print(f"Step {step['id']} status: {step['status']}") + step_details = step.get("step_details", {}) + tool_calls = step_details.get("tool_calls", []) + + if tool_calls: + print(" Tool calls:") + for call in tool_calls: + print(f" Tool Call ID: {call.get('id')}") + print(f" Type: {call.get('type')}") + + azure_ai_search_details = call.get("azure_ai_search", {}) + if azure_ai_search_details: + print(f" azure_ai_search input: {azure_ai_search_details.get('input')}") + print(f" azure_ai_search output: {azure_ai_search_details.get('output')}") + print() # add an extra newline between steps + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # [START populate_references_assistant_with_azure_ai_search_tool] + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for message in messages.data: + if message.role == MessageRole.ASSISTANT and message.url_citation_annotations: + placeholder_annotations = { + annotation.text: f" [see {annotation.url_citation.title}] ({annotation.url_citation.url})" + for annotation in message.url_citation_annotations + } + for message_text in message.text_messages: + message_str = message_text.text.value + for k, v in placeholder_annotations.items(): + message_str = message_str.replace(k, v) + print(f"{message.role}: {message_str}") + else: + for message_text in message.text_messages: + print(f"{message.role}: {message_text.text.value}") + # [END populate_references_assistant_with_azure_ai_search_tool] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py new file mode 100644 index 000000000000..ea0ca27d79d0 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py @@ -0,0 +1,103 @@ +# pylint: disable=line-too-long,useless-suppression +# 
------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use azure function assistant operations from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_azure_functions.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. + Please see Getting Started with Azure Functions page for more information on Azure Functions: + https://learn.microsoft.com/azure/azure-functions/functions-get-started +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import AzureFunctionStorageQueue, AzureFunctionTool, MessageRole +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(exclude_managed_identity_credential=True, exclude_environment_credential=True), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +with assistants_client: + + storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPONT"] + + # [START create_assistant_with_azure_function_tool] + azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + 
input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_service_endpoint, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_service_endpoint, + ), + ) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="azure-function-assistant-foo", + instructions=f"You are a helpful support assistant. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. Always responds with \"Foo says\" and then the response from the tool.", + tools=azure_function_tool.definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + # [END create_assistant_with_azure_function_tool] + + # Create a thread + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="What is the most prevalent element in the universe? 
What would foo say?", + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Get messages from the thread + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + # Get the last message from assistant + last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + # Delete the assistant once done + result = assistants_client.delete_assistant(assistant.id) + if result.deleted: + print(f"Deleted assistant {result.id}") + else: + print(f"Failed to delete assistant {result.id}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py new file mode 100644 index 000000000000..46f0fc863398 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py @@ -0,0 +1,86 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_basics.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os, time +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ( + ListSortOrder, + MessageTextContent +) + +# [START create_project_client] +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) +# [END create_project_client] + +with assistants_client: + + # [START create_assistant] + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + # [END create_assistant] + print(f"Created assistant, assistant ID: {assistant.id}") + + # [START create_thread] + thread = assistants_client.create_thread() + # [END create_thread] + print(f"Created thread, thread ID: {thread.id}") + + # [START create_message] + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + # [END create_message] + print(f"Created message, message ID: {message.id}") + + # [START create_run] + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + # [END create_run] + print(f"Run status: {run.status}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # [START list_messages] + messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. 
+ for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + # [END list_messages] + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..2272df5ef505 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py @@ -0,0 +1,82 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a synchronous client with Azure Monitor tracing. + View the results in the "Tracing" tab in your Azure AI Foundry project page. + +USAGE: + python sample_assistants_basics_with_azure_monitor_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity azure-monitor-opentelemetry + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" + +import os, time +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.telemetry import enable_telemetry +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# [START enable_tracing] +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor + +# Enable Azure Monitor tracing +application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] +configure_azure_monitor(connection_string=application_insights_connection_string) + +# enable additional instrumentations +enable_telemetry() + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: + # [END enable_tracing] + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a hilarious joke" + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"messages: {messages}") diff --git 
a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py new file mode 100644 index 000000000000..ebf991cda895 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py @@ -0,0 +1,81 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a synchronous client with tracing to console. + +USAGE: + python sample_assistants_basics_with_console_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" + +import os, sys, time +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.telemetry import enable_telemetry +from azure.identity import DefaultAzureCredential +from opentelemetry import trace + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# assistants_client.telemetry.enable(destination="http://localhost:4317") +enable_telemetry(destination=sys.stdout) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py new file mode 100644 index 000000000000..dd974c97278f --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py @@ -0,0 +1,109 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations from + the Azure Assistants service using a synchronous client with tracing to console and adding + custom attributes to the span. + +USAGE: + python sample_assistants_basics_with_console_tracing_custom_attributes.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" + +import os, sys, time +from typing import cast +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ListSortOrder +from azure.ai.assistants.telemetry import enable_telemetry +from azure.identity import DefaultAzureCredential +from opentelemetry import trace +from opentelemetry.sdk.trace import SpanProcessor, ReadableSpan, Span, TracerProvider + + +# Define the custom span processor that is used for adding the custom +# attributes to spans when they are started. +class CustomAttributeSpanProcessor(SpanProcessor): + def __init__(self): + pass + + def on_start(self, span: Span, parent_context=None): + # Add this attribute to all spans + span.set_attribute("trace_sample.sessionid", "123") + + # Add another attribute only to create_message spans + if span.name == "create_message": + span.set_attribute("trace_sample.message.context", "abc") + + def on_end(self, span: ReadableSpan): + # Clean-up logic can be added here if necessary + pass + + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# enable_telemetry(destination="http://localhost:4317") +enable_telemetry(destination=sys.stdout) + +# Add the custom span processor to the global tracer provider +provider = cast(TracerProvider, trace.get_tracer_provider()) +provider.add_span_processor(CustomAttributeSpanProcessor()) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + 
+ message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + print(f"messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py new file mode 100644 index 000000000000..3443cd4da0a6 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py @@ -0,0 +1,90 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with the Bing grounding tool from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_bing_grounding.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+ 3) AZURE_BING_CONNECTION_ID - The connection ID of the Bing connection, as found in the + "Connected resources" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import MessageRole, BingGroundingTool +from azure.identity import DefaultAzureCredential + + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# [START create_assistant_with_bing_grounding_tool] +conn_id = os.environ["AZURE_BING_CONNECTION_ID"] + +print(conn_id) + +# Initialize assistant bing tool and add the connection id +bing = BingGroundingTool(connection_id=conn_id) + +# Create assistant with the bing tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=bing.definitions, + headers={"x-ms-enable-preview": "true"}, + ) + # [END create_assistant_with_bing_grounding_tool] + + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, + role=MessageRole.USER, + content="How does wikipedia explain Euler's Identity?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Print the Assistant's response message with optional citation + 
response_message = assistants_client.list_messages(thread_id=thread.id).get_last_message_by_role( + MessageRole.ASSISTANT + ) + if response_message: + for text_message in response_message.text_messages: + print(f"Assistant response: {text_message.text.value}") + for annotation in response_message.url_citation_annotations: + print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py new file mode 100644 index 000000000000..5d4e5b4c2cb5 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py @@ -0,0 +1,105 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with code interpreter from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_code_interpreter.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import CodeInterpreterTool +from azure.ai.assistants.models import FilePurpose, MessageRole +from azure.identity import DefaultAzureCredential +from pathlib import Path + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # Upload a file and wait for it to be processed + # [START upload_file_and_create_assistant_with_code_interpreter] + file = assistants_client.upload_file_and_poll( + file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS + ) + print(f"Uploaded file, file ID: {file.id}") + + code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + + # Create assistant with code interpreter tool and tools_resources + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, + ) + # [END upload_file_and_create_assistant_with_code_interpreter] + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + 
assistants_client.delete_file(file.id) + print("Deleted file") + + # [START get_messages_and_save_files] + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + for image_content in messages.image_contents: + file_id = image_content.image_file.file_id + print(f"Image File ID: {file_id}") + file_name = f"{file_id}_image_file.png" + assistants_client.save_file(file_id=file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + for file_path_annotation in messages.file_path_annotations: + print(f"File Paths:") + print(f"Type: {file_path_annotation.type}") + print(f"Text: {file_path_annotation.text}") + print(f"File ID: {file_path_annotation.file_path.file_id}") + print(f"Start Index: {file_path_annotation.start_index}") + print(f"End Index: {file_path_annotation.end_index}") + # [END get_messages_and_save_files] + + last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py new file mode 100644 index 000000000000..0ca5b3b4637c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py @@ -0,0 +1,82 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with code interpreter from + the Azure Assistants service using a synchronous client. 
+ +USAGE: + python sample_assistants_code_interpreter_attachment_enterprise_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + CodeInterpreterTool, + MessageAttachment, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # [START create_assistant] + code_interpreter = CodeInterpreterTool() + + # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + # [END create_assistant] + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # [START upload_file_and_create_message_with_code_interpreter] + # We will upload the local file to Azure and will use it for vector store creation. 
+ _, asset_uri = assistants_client.upload_file_to_azure_blob("./product_info_1.md") + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + + # Create a message with the attachment + attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions) + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) + # [END upload_file_and_create_message_with_code_interpreter] + + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py new file mode 100644 index 000000000000..b61aec56d587 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py @@ -0,0 +1,75 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to add files to assistant during the vector store creation. 
+ +USAGE: + python sample_assistants_enterprise_file_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity azure-ai-ml + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # [START upload_file_and_create_assistant_with_file_search] + # We will upload the local file to Azure and will use it for vector store creation. 
+ _, asset_uri = assistants_client.upload_file_to_azure_blob("./product_info_1.md") + + # Create a vector store with no file and wait for it to be processed + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + vector_store = assistants_client.create_vector_store_and_poll(data_sources=[ds], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + # [END upload_file_and_create_assistant_with_file_search] + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py new file mode 100644 index 000000000000..29c5909cb7b1 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py @@ -0,0 +1,79 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_assistants_fabric.py + +DESCRIPTION: + This sample demonstrates how to use Assistant operations with the Microsoft Fabric grounding tool from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_fabric.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import FabricTool + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# [START create_assistant_with_fabric_tool] +conn_id = os.environ['FABRIC_CONNECTION_ID'] + +print(conn_id) + +# Initialize an Assistant Fabric tool and add the connection id +fabric = FabricTool(connection_id=conn_id) + +# Create an Assistant with the Fabric tool and process an Assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=fabric.definitions, + headers={"x-ms-enable-preview": "true"}, + ) + # [END create_assistant_with_fabric_tool] + print(f"Created Assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="", + ) + print(f"Created message, ID: {message.id}") + + # Create and process an Assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the Assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py new file mode 100644 index 
000000000000..a12f4e0ee2d6 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py @@ -0,0 +1,96 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with file searching from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_file_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + FileSearchTool, +) +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # Upload file and create vector store + # [START upload_file_create_vector_store_and_assistant_with_file_search_tool] + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") + + vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating assistant + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + 
name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + # [END upload_file_create_vector_store_and_assistant_with_file_search_tool] + + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + # [START teardown] + # Delete the file when done + assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + assistants_client.delete_file(file_id=file.id) + print("Deleted file") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + # [END teardown] + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + + # Print messages from the thread + for text_message in messages.text_messages: + print(text_message) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py new file mode 100644 index 000000000000..fcd0d98408b6 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py @@ -0,0 +1,102 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with custom functions from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_functions.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import os, time +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput +from user_functions import user_functions + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +# Initialize function tool with user functions +functions = FunctionTool(functions=user_functions) + +with assistants_client: + # Create an assistant and run user's request with function calls + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, + ) + print(f"Created assistant, ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, 
assistant_id=assistant.id) + print(f"Created run, ID: {run.id}") + + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + assistants_client.cancel_run(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + print(f"Executing tool call: {tool_call}") + output = functions.execute(tool_call) + tool_outputs.append( + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + assistants_client.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print(f"Current run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..f8ac75ffe5e6 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py @@ -0,0 +1,149 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
DESCRIPTION:
    This sample demonstrates how to use basic assistant operations with function tools from
    the Azure Assistants service using a synchronous client with Azure Monitor tracing.
    View the results in the "Tracing" tab in your Azure AI Foundry project page.

USAGE:
    python sample_assistants_functions_with_azure_monitor_tracing.py

    Before running the sample:

    pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry

    Set these environment variables with your own values:
    1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your
       Azure AI Foundry project.
    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
       the "Models + endpoints" tab in your Azure AI Foundry project.
    3) AI_APPINSIGHTS_CONNECTION_STRING - The Application Insights connection string used to export
       telemetry to Azure Monitor.
    4) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
       messages, which may contain personal data. False by default.
"""
from typing import Any, Callable, Set

import os, time, json
from azure.ai.assistants import AssistantsClient
from azure.ai.assistants.telemetry import trace_function, enable_telemetry
from azure.identity import DefaultAzureCredential
from azure.ai.assistants.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput
from opentelemetry import trace
from azure.monitor.opentelemetry import configure_azure_monitor

assistants_client = AssistantsClient.from_connection_string(
    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
)

# Enable Azure Monitor tracing
application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"]
configure_azure_monitor(connection_string=application_insights_connection_string)

# enable additional instrumentations if needed
enable_telemetry()

scenario = os.path.basename(__file__)
tracer = trace.get_tracer(__name__)


# The trace_function decorator will trace the function call and enable adding additional attributes
# to the span in the function implementation. Note that this will trace the function parameters and their values.
@trace_function()
def fetch_weather(location: str) -> str:
    """
    Fetches the weather information for the specified location.

    :param location: The location to fetch weather for.
    :type location: str
    :return: Weather information as a JSON string.
    :rtype: str
    """
    # In a real-world scenario, you'd integrate with a weather API.
    # Here, we'll mock the response.
    mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"}

    # Adding attributes to the current span
    span = trace.get_current_span()
    span.set_attribute("requested_location", location)

    weather = mock_weather_data.get(location, "Weather data not available for this location.")
    weather_json = json.dumps({"weather": weather})
    return weather_json


# Statically defined user functions for fast reference
user_functions: Set[Callable[..., Any]] = {
    fetch_weather,
}

# Initialize function tool with user function
functions = FunctionTool(functions=user_functions)

with tracer.start_as_current_span(scenario):
    with assistants_client:
        # Create an assistant and run user's request with function calls
        assistant = assistants_client.create_assistant(
            model=os.environ["MODEL_DEPLOYMENT_NAME"],
            name="my-assistant",
            instructions="You are a helpful assistant",
            tools=functions.definitions,
        )
        print(f"Created assistant, ID: {assistant.id}")

        thread = assistants_client.create_thread()
        print(f"Created thread, ID: {thread.id}")

        message = assistants_client.create_message(
            thread_id=thread.id,
            role="user",
            content="Hello, what is the weather in New York?",
        )
        print(f"Created message, ID: {message.id}")

        run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
        print(f"Created run, ID: {run.id}")

        # Poll until the run settles; service pauses in "requires_action"
        # whenever it needs the local function results.
        while run.status in ["queued", "in_progress", "requires_action"]:
            time.sleep(1)
            run = assistants_client.get_run(thread_id=thread.id, run_id=run.id)

            if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
                tool_calls = run.required_action.submit_tool_outputs.tool_calls
                if not tool_calls:
                    print("No tool calls provided - cancelling run")
                    assistants_client.cancel_run(thread_id=thread.id, run_id=run.id)
                    break

                tool_outputs = []
                for tool_call in tool_calls:
                    if isinstance(tool_call, RequiredFunctionToolCall):
                        try:
                            output = functions.execute(tool_call)
                            tool_outputs.append(
                                ToolOutput(
                                    tool_call_id=tool_call.id,
                                    output=output,
                                )
                            )
                        except Exception as e:
                            print(f"Error executing tool_call {tool_call.id}: {e}")

                print(f"Tool outputs: {tool_outputs}")
                if tool_outputs:
                    assistants_client.submit_tool_outputs_to_run(
                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
                    )

            print(f"Current run status: {run.status}")

        print(f"Run completed with status: {run.status}")

        # Delete the assistant when done
        assistants_client.delete_assistant(assistant.id)
        print("Deleted assistant")

        # Fetch and log all messages
        messages = assistants_client.list_messages(thread_id=thread.id)
        print(f"Messages: {messages}")
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
DESCRIPTION:
    This sample demonstrates how to use basic assistant operations with function tools from
    the Azure Assistants service using a synchronous client with tracing to console.

USAGE:
    python sample_assistants_functions_with_console_tracing.py

    Before running the sample:

    pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry

    If you want to export telemetry to OTLP endpoint (such as Aspire dashboard
    https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash)
    install:

    pip install opentelemetry-exporter-otlp-proto-grpc

    Set these environment variables with your own values:
    1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your
       Azure AI Foundry project.
    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
       the "Models + endpoints" tab in your Azure AI Foundry project.
    3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
       messages, which may contain personal data. False by default.
"""
from typing import Any, Callable, Set

import os, sys, time, json
from azure.ai.assistants import AssistantsClient
from azure.identity import DefaultAzureCredential
from azure.ai.assistants.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput
from azure.ai.assistants.telemetry import trace_function, enable_telemetry
from opentelemetry import trace

assistants_client = AssistantsClient.from_connection_string(
    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
)

# Enable console tracing
# or, if you have local OTLP endpoint running, change it to
# enable_telemetry(destination="http://localhost:4317")
enable_telemetry(destination=sys.stdout)

scenario = os.path.basename(__file__)
tracer = trace.get_tracer(__name__)


# The trace_function decorator will trace the function call and enable adding additional attributes
# to the span in the function implementation. Note that this will trace the function parameters and their values.
@trace_function()
def fetch_weather(location: str) -> str:
    """
    Fetches the weather information for the specified location.

    :param location: The location to fetch weather for.
    :type location: str
    :return: Weather information as a JSON string.
    :rtype: str
    """
    # In a real-world scenario, you'd integrate with a weather API.
    # Here, we'll mock the response.
    mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"}

    # Adding attributes to the current span
    span = trace.get_current_span()
    span.set_attribute("requested_location", location)

    weather = mock_weather_data.get(location, "Weather data not available for this location.")
    weather_json = json.dumps({"weather": weather})
    return weather_json


# Statically defined user functions for fast reference
user_functions: Set[Callable[..., Any]] = {
    fetch_weather,
}

# Initialize function tool with user function
functions = FunctionTool(functions=user_functions)

with tracer.start_as_current_span(scenario):
    with assistants_client:
        # Create an assistant and run user's request with function calls
        assistant = assistants_client.create_assistant(
            model=os.environ["MODEL_DEPLOYMENT_NAME"],
            name="my-assistant",
            instructions="You are a helpful assistant",
            tools=functions.definitions,
        )
        print(f"Created assistant, ID: {assistant.id}")

        thread = assistants_client.create_thread()
        print(f"Created thread, ID: {thread.id}")

        message = assistants_client.create_message(
            thread_id=thread.id,
            role="user",
            content="Hello, what is the weather in New York?",
        )
        print(f"Created message, ID: {message.id}")

        run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id)
        print(f"Created run, ID: {run.id}")

        # Poll until the run settles; execute requested function calls locally
        # whenever the service pauses in "requires_action".
        while run.status in ["queued", "in_progress", "requires_action"]:
            time.sleep(1)
            run = assistants_client.get_run(thread_id=thread.id, run_id=run.id)

            if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
                tool_calls = run.required_action.submit_tool_outputs.tool_calls
                if not tool_calls:
                    print("No tool calls provided - cancelling run")
                    assistants_client.cancel_run(thread_id=thread.id, run_id=run.id)
                    break

                tool_outputs = []
                for tool_call in tool_calls:
                    if isinstance(tool_call, RequiredFunctionToolCall):
                        try:
                            output = functions.execute(tool_call)
                            tool_outputs.append(
                                ToolOutput(
                                    tool_call_id=tool_call.id,
                                    output=output,
                                )
                            )
                        except Exception as e:
                            print(f"Error executing tool_call {tool_call.id}: {e}")

                print(f"Tool outputs: {tool_outputs}")
                if tool_outputs:
                    assistants_client.submit_tool_outputs_to_run(
                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
                    )

            print(f"Current run status: {run.status}")

        print(f"Run completed with status: {run.status}")

        # Delete the assistant when done
        assistants_client.delete_assistant(assistant.id)
        print("Deleted assistant")

        # Fetch and log all messages
        messages = assistants_client.list_messages(thread_id=thread.id)
        print(f"Messages: {messages}")
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
DESCRIPTION:
    This sample demonstrates how to use assistants with JSON schema output format.

USAGE:
    python sample_assistants_json_schema.py

    Before running the sample:

    pip install azure-ai-assistants azure-identity pydantic

    Set these environment variables with your own values:
    1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your
       Azure AI Foundry project.
    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
       the "Models + endpoints" tab in your Azure AI Foundry project.
"""

import os

from enum import Enum
from pydantic import BaseModel, TypeAdapter
from azure.ai.assistants import AssistantsClient
from azure.identity import DefaultAzureCredential
from azure.ai.assistants.models import (
    MessageTextContent,
    MessageRole,
    ResponseFormatJsonSchema,
    ResponseFormatJsonSchemaType,
    RunStatus,
)

# [START create_assistants_client]
assistants_client = AssistantsClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
)
# [END create_assistants_client]


# Create the pydantic model to represent the planet names and their masses.
class Planets(str, Enum):
    Earth = "Earth"
    Mars = "Mars"
    Jupiter = "Jupiter"


class Planet(BaseModel):
    planet: Planets
    mass: float


with assistants_client:

    # [START create_assistant]
    assistant = assistants_client.create_assistant(
        # Note only gpt-4o-mini-2024-07-18 and
        # gpt-4o-2024-08-06 and later support structured output.
        model=os.environ["MODEL_DEPLOYMENT_NAME"],
        name="my-assistant",
        instructions="Extract the information about planets.",
        headers={"x-ms-enable-preview": "true"},
        response_format=ResponseFormatJsonSchemaType(
            json_schema=ResponseFormatJsonSchema(
                name="planet_mass",
                description="Extract planet mass.",
                schema=Planet.model_json_schema(),
            )
        ),
    )
    # [END create_assistant]
    print(f"Created assistant, assistant ID: {assistant.id}")

    # [START create_thread]
    thread = assistants_client.create_thread()
    # [END create_thread]
    print(f"Created thread, thread ID: {thread.id}")

    # [START create_message]
    message = assistants_client.create_message(
        thread_id=thread.id,
        role="user",
        content=("The mass of the Mars is 6.4171E23 kg; the mass of the Earth is 5.972168E24 kg;"),
    )
    # [END create_message]
    print(f"Created message, message ID: {message.id}")

    # [START create_run]
    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
    # [END create_run]

    if run.status != RunStatus.COMPLETED:
        print(f"The run did not succeed: {run.status=}.")

    assistants_client.delete_assistant(assistant.id)
    print("Deleted assistant")

    # [START list_messages]
    messages = assistants_client.list_messages(thread_id=thread.id)

    # The messages follow in reverse order;
    # we will iterate them and output only text contents.
    for data_point in reversed(messages.data):
        last_message_content = data_point.content[-1]
        # We will only list assistant responses here.
        if isinstance(last_message_content, MessageTextContent) and data_point.role == MessageRole.ASSISTANT:
            planet = TypeAdapter(Planet).validate_json(last_message_content.text.value)
            print(f"The mass of {planet.planet} is {planet.mass} kg.")

    # [END list_messages]
# pylint: disable=line-too-long,useless-suppression
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
DESCRIPTION:
    This sample demonstrates how to use assistants with Logic Apps to execute the task of sending an email.

PREREQUISITES:
    1) Create a Logic App within the same resource group as your Azure AI Project in Azure Portal
    2) To configure your Logic App to send emails, you must include an HTTP request trigger that is
       configured to accept JSON with 'to', 'subject', and 'body'. The guide to creating a Logic App Workflow
       can be found here:
       https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling

USAGE:
    python sample_assistants_logic_apps.py

    Before running the sample:

    pip install azure-ai-assistants azure-identity

    Set these environment variables with your own values:
    1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your
       Azure AI Foundry project.
    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
       the "Models + endpoints" tab in your Azure AI Foundry project.

    Replace the following values in the sample with your own values:
    1) <LOGIC_APP_NAME> - The name of the Logic App you created.
    2) <TRIGGER_NAME> - The name of the trigger in the Logic App you created (the default name for HTTP
       triggers in the Azure Portal is "When_a_HTTP_request_is_received").
    3) <RECIPIENT_EMAIL> - The email address of the recipient.
"""


import os
from typing import Set

from azure.ai.assistants import AssistantsClient
from azure.ai.assistants.models import ToolSet, FunctionTool
from azure.identity import DefaultAzureCredential

# Example user function
from user_functions import fetch_current_datetime

# Import AzureLogicAppTool and the function factory from user_logic_apps
from user_logic_apps import AzureLogicAppTool, create_send_email_function

# [START register_logic_app]

# Create the project client
assistants_client = AssistantsClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
)

# Extract subscription and resource group from the project scope
subscription_id = assistants_client.scope["subscription_id"]
resource_group = assistants_client.scope["resource_group_name"]

# Logic App details - replace the placeholders with your own values.
# NOTE(review): extraction appears to have stripped angle-bracket placeholders here; confirm intended defaults.
logic_app_name = "<LOGIC_APP_NAME>"
trigger_name = "<TRIGGER_NAME>"

# Create and initialize AzureLogicAppTool utility
logic_app_tool = AzureLogicAppTool(subscription_id, resource_group)
logic_app_tool.register_logic_app(logic_app_name, trigger_name)
print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.")

# Create the specialized "send_email_via_logic_app" function for your assistant tools
send_email_func = create_send_email_function(logic_app_tool, logic_app_name)

# Prepare the function tools for the assistant
functions_to_use: Set = {
    fetch_current_datetime,
    send_email_func,  # This references the AzureLogicAppTool instance via closure
}
# [END register_logic_app]

with assistants_client:
    # Create an assistant
    functions = FunctionTool(functions=functions_to_use)
    toolset = ToolSet()
    toolset.add(functions)

    assistant = assistants_client.create_assistant(
        model=os.environ["MODEL_DEPLOYMENT_NAME"],
        name="SendEmailAssistant",
        instructions="You are a specialized assistant for sending emails.",
        toolset=toolset,
    )
    print(f"Created assistant, ID: {assistant.id}")

    # Create a thread for communication
    thread = assistants_client.create_thread()
    print(f"Created thread, ID: {thread.id}")

    # Create a message in the thread
    message = assistants_client.create_message(
        thread_id=thread.id,
        role="user",
        content="Hello, please send an email to <RECIPIENT_EMAIL> with the date and time in '%Y-%m-%d %H:%M:%S' format.",
    )
    print(f"Created message, ID: {message.id}")

    # Create and process an assistant run in the thread
    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
    print(f"Run finished with status: {run.status}")

    if run.status == "failed":
        print(f"Run failed: {run.last_error}")

    # Delete the assistant when done
    assistants_client.delete_assistant(assistant.id)
    print("Deleted assistant")

    # Fetch and log all messages
    messages = assistants_client.list_messages(thread_id=thread.id)
    print(f"Messages: {messages}")
# pylint: disable=line-too-long,useless-suppression
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
DESCRIPTION:
    This sample demonstrates how to use assistant operations with the
    OpenAPI tool from the Azure Assistants service using a synchronous client.
    To learn more about OpenAPI specs, visit https://learn.microsoft.com/openapi

USAGE:
    python sample_assistants_openapi.py

    Before running the sample:

    pip install azure-ai-assistants azure-identity jsonref

    Set these environment variables with your own values:
    1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your
       Azure AI Foundry project.
    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
       the "Models + endpoints" tab in your Azure AI Foundry project.
"""

import os
import jsonref
from azure.ai.assistants import AssistantsClient
from azure.identity import DefaultAzureCredential
from azure.ai.assistants.models import OpenApiTool, OpenApiAnonymousAuthDetails


def _load_spec(path):
    # Resolve JSON references while loading so the tool sees a flattened spec.
    with open(path, "r") as spec_file:
        return jsonref.loads(spec_file.read())


assistants_client = AssistantsClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
)
# [START create_assistant_with_openapi]

openapi_weather = _load_spec("./weather_openapi.json")
openapi_countries = _load_spec("./countries.json")

# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure)
auth = OpenApiAnonymousAuthDetails()

# Initialize assistant OpenApi tool using the read in OpenAPI spec
openapi_tool = OpenApiTool(
    name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth
)
openapi_tool.add_definition(
    name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth
)

# Create assistant with OpenApi tool and process assistant run
with assistants_client:
    assistant = assistants_client.create_assistant(
        model=os.environ["MODEL_DEPLOYMENT_NAME"],
        name="my-assistant",
        instructions="You are a helpful assistant",
        tools=openapi_tool.definitions,
    )

    # [END create_assistant_with_openapi]

    print(f"Created assistant, ID: {assistant.id}")

    # Create thread for communication
    thread = assistants_client.create_thread()
    print(f"Created thread, ID: {thread.id}")

    # Create message to thread
    message = assistants_client.create_message(
        thread_id=thread.id,
        role="user",
        content="What's the weather in Seattle and What is the name and population of the country that uses currency with abbreviation THB?",
    )
    print(f"Created message, ID: {message.id}")

    # Create and process assistant run in thread with tools
    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
    print(f"Run finished with status: {run.status}")

    if run.status == "failed":
        print(f"Run failed: {run.last_error}")

    run_steps = assistants_client.list_run_steps(thread_id=thread.id, run_id=run.id)

    # Walk the run steps and surface any tool invocations they contain.
    for step in run_steps.data:
        print(f"Step {step['id']} status: {step['status']}")

        step_details = step.get("step_details", {})
        tool_calls = step_details.get("tool_calls", [])

        if tool_calls:
            print("  Tool calls:")
            for call in tool_calls:
                print(f"    Tool Call ID: {call.get('id')}")
                print(f"    Type: {call.get('type')}")

                function_details = call.get("function", {})
                if function_details:
                    print(f"    Function name: {function_details.get('name')}")
        print()  # add an extra newline between steps

    # Delete the assistant when done
    assistants_client.delete_assistant(assistant.id)
    print("Deleted assistant")

    # Fetch and log all messages
    messages = assistants_client.list_messages(thread_id=thread.id)
    print(f"Messages: {messages}")
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------

"""
FILE: sample_assistants_openapi_connection_auth.py

DESCRIPTION:
    This sample demonstrates how to use assistant operations with the
    OpenAPI tool from the Azure Assistants service using a synchronous client, using
    custom key authentication against the TripAdvisor API.
    To learn more about OpenAPI specs, visit https://learn.microsoft.com/openapi

USAGE:
    python sample_assistants_openapi_connection_auth.py

    Before running the sample:

    Set up an account at https://www.tripadvisor.com/developers and get an API key.

    Set up a custom key connection following the steps at
    https://aka.ms/azsdk/azure-ai-assistants/custom-key-setup

    pip install azure-ai-assistants azure-identity jsonref

    Set these environment variables with your own values:
    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your Foundry Project.
    OPENAPI_CONNECTION_ID - the id of the custom key connection used to authenticate the OpenAPI calls
    MODEL_DEPLOYMENT_NAME - name of the model deployment in the project to use Assistants against
"""

import os
import jsonref
from azure.ai.assistants import AssistantsClient
from azure.identity import DefaultAzureCredential
from azure.ai.assistants.models import OpenApiTool, OpenApiConnectionAuthDetails, OpenApiConnectionSecurityScheme


assistants_client = AssistantsClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
)

model_name = os.environ["MODEL_DEPLOYMENT_NAME"]
connection_id = os.environ["OPENAPI_CONNECTION_ID"]

with open("./tripadvisor_openapi.json", "r") as f:
    openapi_spec = jsonref.loads(f.read())

# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure)
auth = OpenApiConnectionAuthDetails(security_scheme=OpenApiConnectionSecurityScheme(connection_id=connection_id))

# Initialize an Assistant OpenApi tool using the read in OpenAPI spec
openapi = OpenApiTool(
    name="get_weather", spec=openapi_spec, description="Retrieve weather information for a location", auth=auth
)

# Create an Assistant with OpenApi tool and process Assistant run
with assistants_client:
    assistant = assistants_client.create_assistant(
        model=model_name,
        name="my-assistant",
        instructions="You are a helpful assistant",
        tools=openapi.definitions,
    )
    print(f"Created assistant, ID: {assistant.id}")

    # Create thread for communication
    thread = assistants_client.create_thread()
    print(f"Created thread, ID: {thread.id}")

    # Create message to thread
    message = assistants_client.create_message(
        thread_id=thread.id,
        role="user",
        content="Summarize the reviews for the top rated hotel in Paris",
    )
    print(f"Created message, ID: {message.id}")

    # Create and process an Assistant run in thread with tools
    run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id)
    print(f"Run finished with status: {run.status}")

    if run.status == "failed":
        print(f"Run failed: {run.last_error}")

    # Delete the Assistant when done
    assistants_client.delete_assistant(assistant.id)
    print("Deleted assistant")

    # Fetch and log all messages
    messages = assistants_client.list_messages(thread_id=thread.id)
    print(f"Messages: {messages}")
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import FunctionTool, ToolSet, CodeInterpreterTool +from user_functions import user_functions + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# Create assistant with toolset and process assistant run +with assistants_client: + # Initialize assistant toolset with user functions and code interpreter + # [START create_assistant_toolset] + functions = FunctionTool(user_functions) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # [END create_assistant_toolset] + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + # [START create_and_process_run] + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + # [END create_and_process_run] + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + 
print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py new file mode 100644 index 000000000000..495c15a770f1 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py @@ -0,0 +1,80 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_assistants_sharepoint.py + +DESCRIPTION: + This sample demonstrates how to use assistant operations with the + Sharepoint tool from the Azure Assistants service using a synchronous client. + The sharepoint tool is currently available only to whitelisted customers. + For access and onboarding instructions, please contact azureassistants-preview@microsoft.com. + +USAGE: + python sample_assistants_sharepoint.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import SharepointTool + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
+# At the moment, it should be in the format "&lt;HostName&gt;;&lt;AzureSubscriptionId&gt;;&lt;ResourceGroup&gt;;&lt;ProjectName&gt;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# Initialize Sharepoint tool with connection id +sharepoint = SharepointTool(connection_id="sharepoint_connection_name") + +# Create assistant with Sharepoint tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=sharepoint.definitions, + headers={"x-ms-enable-preview": "true"}, + ) + print(f"Created assistant, ID: {assistant.id}") + + # Create thread for communication + thread = assistants_client.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, summarize the key points of the ", + ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py new file mode 100644 index 000000000000..b69e2baba867 --- /dev/null +++ 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py @@ -0,0 +1,102 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler in streaming from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_eventhandler.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential + +from azure.ai.assistants.models import ( + AssistantEventHandler, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) + +from typing import Any, Optional + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + + +# [START stream_event_handler] +# With AssistantEventHandler[str], the return type for each event function is an optional string. +class MyEventHandler(AssistantEventHandler[str]): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: + return f"Text delta received: {delta.text}" + + def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: + return f"ThreadMessage created. 
ID: {message.id}, Status: {message.status}" + + def on_thread_run(self, run: "ThreadRun") -> Optional[str]: + return f"ThreadRun status: {run.status}" + + def on_run_step(self, step: "RunStep") -> Optional[str]: + return f"RunStep type: {step.type}, Status: {step.status}" + + def on_error(self, data: str) -> Optional[str]: + return f"An error occurred. Data: {data}" + + def on_done(self) -> Optional[str]: + return "Stream completed." + + def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: + return f"Unhandled Event Type: {event_type}, Data: {event_data}" + + +# [END stream_event_handler] + + +with assistants_client: + # Create an assistant and run stream with event handler + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created assistant, assistant ID {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + # [START create_stream] + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + for event_type, event_data, func_return in stream: + print(f"Received data.") + print(f"Streaming receive Event Type: {event_type}") + print(f"Event Data: {str(event_data)[:100]}...") + print(f"Event Function return: {func_return}\n") + # [END create_stream] + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..22a76c64f3d0 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py @@ -0,0 +1,114 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler in streaming from + the Azure Assistants service using a synchronous client with Azure Monitor tracing. + View the results in the "Tracing" tab in your Azure AI Foundry project page. + +USAGE: + python sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+ +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ( + AssistantEventHandler, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) +from azure.ai.assistants.telemetry import enable_telemetry +from typing import Any +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + + +class MyEventHandler(AssistantEventHandler): + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + if len(message.content): + print( + f"ThreadMessage created. ID: {message.id}, " + f"Status: {message.status}, Content: {message.content[0].as_dict()}" + ) + else: + print(f"ThreadMessage created. ID: {message.id}, " f"Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +# Enable Azure Monitor tracing +application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] +configure_azure_monitor(connection_string=application_insights_connection_string) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +# enable additional instrumentations +enable_telemetry() + +with tracer.start_as_current_span(scenario): + with assistants_client: + # Create an assistant and run stream with event handler + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created assistant, assistant ID {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py new file mode 100644 index 000000000000..c1fc673681ba --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py @@ -0,0 +1,126 @@ +# ------------------------------------ +# Copyright (c) Microsoft 
Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use Assistant operations with an event handler and + the Bing grounding tool. It uses a synchronous client. + +USAGE: + python sample_assistants_stream_eventhandler_with_bing_grounding.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_BING_CONECTION_ID - The connection ID of the Bing connection, as found in the "Connected resources" tab + in your Azure AI Foundry project. +""" + +import os +from typing import Any +from azure.identity import DefaultAzureCredential +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, + AssistantEventHandler, + BingGroundingTool, + MessageRole, + MessageDeltaTextUrlCitationAnnotation, + MessageDeltaTextContent, +) + + +# When using FunctionTool with ToolSet in assistant creation, the tool call events are handled inside the create_stream +# method and functions get called automatically by default. 
+class MyEventHandler(AssistantEventHandler): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + if delta.delta.content and isinstance(delta.delta.content[0], MessageDeltaTextContent): + delta_text_content = delta.delta.content[0] + if delta_text_content.text and delta_text_content.text.annotations: + for delta_annotation in delta_text_content.text.annotations: + if isinstance(delta_annotation, MessageDeltaTextUrlCitationAnnotation): + print( + f"URL citation delta received: [{delta_annotation.url_citation.title}]({delta_annotation.url_citation.url})" + ) + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + bing_connection_id = os.environ["AZURE_BING_CONECTION_ID"] + print(f"Bing Connection ID: {bing_connection_id}") + + # Initialize assistant bing tool and add the connection id + bing = BingGroundingTool(connection_id=bing_connection_id) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=bing.definitions, + ) + print(f"Created assistant, ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, + role=MessageRole.USER, + content="How does wikipedia explain Euler's Identity?", + ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + response_message = assistants_client.list_messages(thread_id=thread.id).get_last_message_by_role( + MessageRole.ASSISTANT + ) + if response_message: + for text_message in response_message.text_messages: + print(f"Assistant response: {text_message.text.value}") + for annotation in response_message.url_citation_annotations: + print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py new file mode 100644 index 000000000000..8b5db1b0b91d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py @@ -0,0 +1,115 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler in streaming from + the Azure Assistants service using a synchronous client with tracing to console. + +USAGE: + python sample_assistants_stream_eventhandler_with_console_tracing.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" + +import os, sys +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ( + AssistantEventHandler, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) +from azure.ai.assistants.telemetry import enable_telemetry +from typing import Any +from opentelemetry import trace + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + + +class MyEventHandler(AssistantEventHandler): + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + if len(message.content): + print( + f"ThreadMessage created. ID: {message.id}, " + f"Status: {message.status}, Content: {message.content[0].as_dict()}" + ) + else: + print(f"ThreadMessage created. ID: {message.id}, " f"Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +# Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# enable_telemetry(destination="http://localhost:4317") +enable_telemetry(destination=sys.stdout) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: + # Create an assistant and run stream with event handler + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created assistant, assistant ID {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py new file mode 100644 index 000000000000..c76f9d316cf0 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py @@ -0,0 +1,137 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler and toolset from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_eventhandler_with_functions.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +from typing import Any + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + AssistantEventHandler, + FunctionTool, + MessageDeltaChunk, + RequiredFunctionToolCall, + RunStep, + SubmitToolOutputsAction, + ThreadMessage, + ThreadRun, + ToolOutput, +) +from azure.identity import DefaultAzureCredential +from user_functions import user_functions + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + + +class MyEventHandler(AssistantEventHandler): + + def __init__(self, functions: FunctionTool) -> None: + super().__init__() + self.functions = functions + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. 
Error: {run.last_error}") + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append( + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + # Once we receive 'requires_action' status, the next event will be DONE. + # Here we associate our existing event handler to the next stream. + assistants_client.submit_tool_outputs_to_stream( + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self + ) + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +with assistants_client: + + # [START create_assistant_with_function_tool] + functions = FunctionTool(user_functions) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, + ) + # [END create_assistant_with_function_tool] + print(f"Created assistant, ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details.", + ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler(functions) + ) as stream: + stream.until_done() + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py new file mode 100644 index 000000000000..756ba2bd2e17 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py @@ -0,0 +1,109 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with an event handler and toolset from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_eventhandler_with_toolset.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, +) +from azure.ai.assistants.models import AssistantEventHandler +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import FunctionTool, ToolSet + +import os +from typing import Any +from user_functions import user_functions + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + + +# When using FunctionTool with ToolSet in assistant creation, the tool call events are handled inside the create_stream +# method and functions gets automatically called by default. +class MyEventHandler(AssistantEventHandler): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +with assistants_client: + # [START create_assistant_with_function_tool] + functions = FunctionTool(user_functions) + toolset = ToolSet() + toolset.add(functions) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # [END create_assistant_with_function_tool] + print(f"Created assistant, ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? Also let me know the details", + ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py new file mode 100644 index 000000000000..39e213b65649 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py @@ -0,0 +1,86 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations in streaming from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_iteration.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ( + AssistantStreamEvent, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +with assistants_client: + # Create an assistant and run stream with iteration + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created assistant, ID {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + # [START iterate_stream] + with assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, ThreadMessage): + 
print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + # [END iterate_stream] + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py new file mode 100644 index 000000000000..1474ee93aeff --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py @@ -0,0 +1,116 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use Assistant operations with the Bing grounding + tool, and iteration in streaming. It uses a synchronous client. + +USAGE: + python sample_assistants_stream_iteration_with_bing_grounding.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+ 3) AZURE_BING_CONNECTION_ID - The connection ID of the Bing connection, as found in the "Connected resources" tab + in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import AssistantStreamEvent, RunStepDeltaChunk +from azure.ai.assistants.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, + BingGroundingTool, + MessageRole, + MessageDeltaTextContent, + MessageDeltaTextUrlCitationAnnotation, +) +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + bing_connection_id = os.environ['AZURE_BING_CONNECTION_ID'] + bing = BingGroundingTool(connection_id=bing_connection_id) + print(f"Bing Connection ID: {bing_connection_id}") + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=bing.definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role=MessageRole.USER, content="How does wikipedia explain Euler's Identity?" 
+ ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + if event_data.delta.content and isinstance(event_data.delta.content[0], MessageDeltaTextContent): + delta_text_content = event_data.delta.content[0] + if delta_text_content.text and delta_text_content.text.annotations: + for delta_annotation in delta_text_content.text.annotations: + if isinstance(delta_annotation, MessageDeltaTextUrlCitationAnnotation): + print( + f"URL citation delta received: [{delta_annotation.url_citation.title}]({delta_annotation.url_citation.url})" + ) + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + response_message = assistants_client.list_messages(thread_id=thread.id).get_last_message_by_role( + MessageRole.ASSISTANT + ) + if response_message: + for text_message in response_message.text_messages: + print(f"Assistant response: {text_message.text.value}") + for annotation in response_message.url_citation_annotations: + print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py new file mode 100644 index 000000000000..948b6496b489 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py @@ -0,0 +1,105 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with file search tools and iteration in streaming from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_iteration_with_file_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import AssistantStreamEvent, FileSearchTool, RunStepDeltaChunk +from azure.ai.assistants.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # Upload file and create vector store + # [START upload_file_create_vector_store_and_assistant_with_file_search_tool] + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") + + vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating assistant + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + for annotation in event_data.file_citation_annotations: + print( + f"Citation {annotation.text} from file ID: {annotation.file_citation.file_id}, start index: {annotation.start_index}, end index: {annotation.end_index}" + ) + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py new file mode 100644 index 000000000000..d689e165a3b2 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py @@ -0,0 +1,96 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with toolset and iteration in streaming from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_stream_iteration_with_toolset.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import AssistantStreamEvent, RunStepDeltaChunk +from azure.ai.assistants.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, +) +from azure.ai.assistants.models import FunctionTool, ToolSet +from azure.identity import DefaultAzureCredential +from user_functions import user_functions + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +functions = FunctionTool(user_functions) +toolset = ToolSet() +toolset.add(functions) + +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, what's the time?") + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream(thread_id=thread.id, 
assistant_id=assistant.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py new file mode 100644 index 000000000000..4b4325ccdcb1 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py @@ -0,0 +1,102 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to override the base event handler and parse the events and iterate through them + In your use case, you might not want to write the iteration code similar to sample_assistants_stream_iteration_async.py. + If you have multiple places to call create_stream, you might find the iteration code cumbersome. + This example shows how to override the base event handler, parse the events, and iterate through them, which can be reused in multiple create_stream calls to help keep the code clean. + +USAGE: + python sample_assistants_stream_with_base_override_eventhandler.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import json +from typing import Generator, Optional + +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + MessageDeltaChunk, + MessageDeltaTextContent, +) +from azure.ai.assistants.models import AssistantStreamEvent, BaseAssistantEventHandler +from azure.identity import DefaultAzureCredential + +import os + + +# Our goal is to parse the event data in a string and return the chunk in text for each iteration. +# Because we want the iteration to be a string, we define str as the generic type for BaseAsyncAssistantEventHandler +# and override the _process_event method to return a string. +# The get_stream_chunks method is defined to return the chunks as strings because the iteration is a string. 
+class MyEventHandler(BaseAssistantEventHandler[Optional[str]]): + + def _process_event(self, event_data_str: str) -> Optional[str]: # type: ignore[return] + event_lines = event_data_str.strip().split("\n") + event_type: Optional[str] = None + event_data = "" + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + if event_type == AssistantStreamEvent.THREAD_MESSAGE_DELTA.value: + + event_obj: MessageDeltaChunk = MessageDeltaChunk(**json.loads(event_data)) + + for content_part in event_obj.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + if content_part.text is not None: + return content_part.text.value + return None + + def get_stream_chunks(self) -> Generator[str, None, None]: + for chunk in self: + if chunk: + yield chunk + + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() + ) as stream: + for chunk in stream.get_stream_chunks(): + print(chunk) + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + 
print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py new file mode 100644 index 000000000000..2d9ac0fdee12 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py @@ -0,0 +1,100 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to create the vector store with the list of files. + +USAGE: + python sample_assistants_vector_store_batch_enterprise_file_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity azure-ai-ml + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # We will upload the local file to Azure and will use it for vector store creation. 
+ _, asset_uri = assistants_client.upload_file_to_azure_blob("./product_info_1.md") + + # [START attach_files_to_store] + # Create a vector store with no file and wait for it to be processed + vector_store = assistants_client.create_vector_store_and_poll(data_sources=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + # Add the file to the vector store or you can supply data sources in the vector store creation + vector_store_file_batch = assistants_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + # [END attach_files_to_store] + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + assistants_client.update_assistant( + assistant_id=assistant.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) + print(f"Updated assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py new file mode 100644 index 000000000000..4ca83aaa8384 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py @@ -0,0 +1,102 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations to add files to an existing vector store and perform search from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_vector_store_batch_file_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FileSearchTool, FilePurpose +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # Upload a file and wait for it to be processed + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.ASSISTANTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create a vector store with no file and wait for it to be processed + vector_store = assistants_client.create_vector_store_and_poll(data_sources=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Add the file to the vector store or you can supply file ids in the vector store creation + vector_store_file_batch = assistants_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, file_ids=[file.id] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # Create a file search tool + # [START 
create_assistant_with_tools_and_tool_resources] + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + # [END create_assistant_with_tools_and_tool_resources] + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + assistants_client.update_assistant( + assistant_id=assistant.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) + print(f"Updated assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py new file mode 100644 index 000000000000..b96797c97d5f --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py @@ -0,0 +1,80 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to add files to assistant during the vector store creation. + +USAGE: + python sample_assistants_vector_store_file_search.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FileSearchTool, FilePurpose +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # Upload a file and wait for it to be processed + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.ASSISTANTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create a vector store with no file and wait for it to be processed + vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + + for message in reversed(messages.data): + # To remove characters, which are not correctly handled by print, we will encode the message + # and then decode it again. + clean_message = "\n".join( + text_msg.text.value.encode("ascii", "ignore").decode("utf-8") for text_msg in message.text_messages + ) + print(f"Role: {message.role} Message: {clean_message}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py new file mode 100644 index 000000000000..93e85c3abe3e --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py @@ -0,0 +1,105 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with code interpreter through file attachment from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_with_code_interpreter_file_attachment.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. 
+ 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import CodeInterpreterTool, MessageAttachment +from azure.ai.assistants.models import FilePurpose, MessageRole +from azure.identity import DefaultAzureCredential +from pathlib import Path + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # Upload a file and wait for it to be processed + file = assistants_client.upload_file_and_poll( + file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS + ) + print(f"Uploaded file, file ID: {file.id}") + + # [START create_assistant_and_message_with_code_interpreter_file_attachment] + # Notice that CodeInterpreter must be enabled in the assistant creation, + # otherwise the assistant will not be able to see the file attachment for code interpretation + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=CodeInterpreterTool().definitions, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create an attachment + attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) + + # Create a message + message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + attachments=[attachment], + ) + # [END create_assistant_and_message_with_code_interpreter_file_attachment] + print(f"Created message, 
message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + assistants_client.delete_file(file.id) + print("Deleted file") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for image_content in messages.image_contents: + print(f"Image File ID: {image_content.image_file.file_id}") + file_name = f"{image_content.image_file.file_id}_image_file.png" + assistants_client.save_file(file_id=image_content.image_file.file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + for file_path_annotation in messages.file_path_annotations: + print(f"File Paths:") + print(f"Type: {file_path_annotation.type}") + print(f"Text: {file_path_annotation.text}") + print(f"File ID: {file_path_annotation.file_path.file_id}") + print(f"Start Index: {file_path_annotation.start_index}") + print(f"End Index: {file_path_annotation.end_index}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py new file mode 100644 index 000000000000..765d696e9fd2 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py @@ -0,0 +1,71 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use assistant operations to create messages with file search attachments from
+    the Azure Assistants service using a synchronous client.
+
+USAGE:
+    python sample_assistants_with_enterprise_search_attachment.py
+
+    Before running the sample:
+
+    pip install azure-ai-assistants azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your
+       Azure AI Foundry project.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import os
+from azure.ai.assistants import AssistantsClient
+from azure.ai.assistants.models import FilePurpose, FileSearchTool, MessageAttachment
+from azure.identity import DefaultAzureCredential
+
+assistants_client = AssistantsClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+with assistants_client:
+
+    # Upload a file and wait for it to be processed
+    file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.ASSISTANTS)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    # Create assistant
+    assistant = assistants_client.create_assistant(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-assistant",
+        instructions="You are helpful assistant",
+    )
+    print(f"Created assistant, assistant ID: {assistant.id}")
+
+    thread = assistants_client.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    # Create a message with the file search attachment
+    # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days.
+ # [START create_message_with_attachment] + attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] + ) + # [END create_message_with_attachment] + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + assistants_client.delete_file(file.id) + print("Deleted file") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py new file mode 100644 index 000000000000..765d696e9fd2 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py @@ -0,0 +1,71 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations to create messages with file search attachments from + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_with_file_search_attachment.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. 
+ 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FilePurpose, FileSearchTool, MessageAttachment +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # Upload a file and wait for it to be processed + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.ASSISTANTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create assistant + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
+ # [START create_message_with_attachment] + attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] + ) + # [END create_message_with_attachment] + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Created run, run ID: {run.id}") + + assistants_client.delete_file(file.id) + print("Deleted file") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py new file mode 100644 index 000000000000..ae67c9fba346 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py @@ -0,0 +1,91 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use assistant operations with file searching from + the Azure Assistants service using a synchronous client. The file is attached to thread. + +USAGE: + python sample_assistants_with_resources_in_thread.py + + Before running the sample: + + pip install azure-ai-assistants azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import FileSearchTool +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with assistants_client: + + # Upload file and create vector store + # [START create_assistant_and_thread_for_file_search] + file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") + + vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating assistant + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + ) + + print(f"Created assistant, ID: {assistant.id}") + + # Create thread with file resources. + # If the assistant has multiple threads, only this thread can search this file. + thread = assistants_client.create_thread(tool_resources=file_search.resources) + # [END create_assistant_and_thread_for_file_search] + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = assistants_client.create_message( + thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" 
+ ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + # [START teardown] + # Delete the file when done + assistants_client.delete_vector_store(vector_store.id) + print("Deleted vector store") + + assistants_client.delete_file(file_id=file.id) + print("Deleted file") + + # Delete the assistant when done + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + # [END teardown] + + # Fetch and log all messages + messages = assistants_client.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json b/sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json new file mode 100644 index 000000000000..d7e495f7a061 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json @@ -0,0 +1,1606 @@ +{ + "openapi": "3.0.1", + "servers": [ + { + "url": "https://api.content.tripadvisor.com/api" + } + ], + "info": { + "version": "1.0.0", + "title": "Content API - TripAdvisor(Knowledge)", + "description": "SSP includes Locations Details, Locations Photos, Locations Reviews, Location Search" + }, + "paths": { + "/v1/location/{locationId}/details": { + "get": { + "summary": "Location Details", + "description": "A Location Details request returns comprehensive information about a location (hotel, restaurant, or an attraction) such as name, address, rating, and URLs for the listing on Tripadvisor.", + "operationId": "getLocationDetails", + "tags": [ + "Location Details" + ], + "parameters": [ + { + "name": "locationId", + "in": "path", + "description": "A unique identifier for a location on Tripadvisor. 
The location ID can be obtained using the Location Search.", + "required": true, + "schema": { + "type": "integer", + "format": "int32" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. \"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + }, + { + "name": "currency", + "in": "query", + "description": "The currency code to use for request and response (should follow ISO 4217).", + "required": false, + "schema": { + "type": "string", + "default": "USD" + } + } + ], + "responses": { + "200": { + "description": "Details for the location", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "location_id": { + "description": "Unique Tripadvisor location ID of the destination or POI", + "type": "integer", + "format": "int32" + }, + "name": { + "description": "Name of the POI as listed on Tripadvisor", + "type": "string" + }, + "description": { + "description": "Description of the POI as listed on Tripadvisor", + "type": "string" + }, + "web_url": { + "description": "Link to the POI detail page on Tripadvisor. 
Link is localized to the correct domain if a language other than English is requested.", + "type": "string" + }, + "address_obj": { + "description": "Object containing address data for this location", + "type": "object", + "properties": { + "street1": { + "type": "string", + "description": "The street name" + }, + "street2": { + "type": "string", + "description": "The street name continuation" + }, + "city": { + "type": "string", + "description": "The city name" + }, + "state": { + "type": "string", + "description": "The state" + }, + "country": { + "type": "string", + "description": "The country" + }, + "postalcode": { + "type": "string", + "description": "The address postal code" + }, + "address_string": { + "type": "string", + "description": "The address in one single sentence" + } + } + }, + "ancestors": { + "description": "Ancestors describe where the POI or destination lives within the Tripadvisor destination or geo hierarchy.From this, you can derive the city where a POI is located, as well as state/province/region and country.", + "type": "array", + "items": { + "type": "object", + "properties": { + "abbrv": { + "description": "The ancestor location abbreviation", + "type": "string" + }, + "level": { + "description": "The ancestor location level in relation to the location", + "type": "string" + }, + "name": { + "description": "The ancestor location name", + "type": "string" + }, + "location_id": { + "description": "The ancestor location location identifier", + "type": "integer", + "format": "int32" + } + } + } + }, + "latitude": { + "description": "The latitude of this location in degrees, if available", + "type": "number" + }, + "longitude": { + "description": "The longitude of this location in degrees, if available", + "type": "number" + }, + "timezone": { + "description": "The timezone of the location", + "type": "string" + }, + "email": { + "description": "The email of the location, if available", + "type": "string" + }, + "phone": { + "description": 
"The phone number of the location, if available", + "type": "string" + }, + "website": { + "description": "The website of the location, if available", + "type": "string" + }, + "write_review": { + "description": "Link to the review form for this specific POI on Tripadvisor. Link is localized to the correct domain if a language other than English is requested.", + "type": "string" + }, + "ranking_data": { + "description": "Describes a POI's Popularity Index ranking on Tripadvisor, which compares places of interest (accomodations, restaurants, and attractions) within the same destination based on their popularity.This is measured by the quality, quantity, and recency of their review content on Tripadvisor.", + "type": "object", + "properties": { + "geo_location_id": { + "description": "The destination id", + "type": "integer", + "format": "int32" + }, + "ranking_string": { + "description": "The description of the ranking", + "type": "string" + }, + "geo_location_name": { + "description": "The destination name", + "type": "string" + }, + "ranking_out_of": { + "description": "The total number of locations on the ranking score", + "type": "integer", + "format": "int32" + }, + "ranking": { + "description": "The location ranking", + "type": "integer", + "format": "int32" + } + } + }, + "rating": { + "description": "Overall rating for this POI. Not applicable to geographic locations. Rating levels are defined as follows:5: Excellent4: Very good3: Average2: Poor1: Terrible", + "type": "number" + }, + "rating_image_url": { + "description": "URL to the bubble rating image for this location. 
Overall Bubble Ratings must be displayed using the Tripadvisor bubble rating image with the owl icon.", + "type": "string" + }, + "num_reviews": { + "description": "Count of total reviews published for this location", + "type": "string" + }, + "review_rating_count": { + "description": "Count of reviews for this location at each traveler rating level (1,2,3,4,5)", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "subratings": { + "type": "object", + "additionalProperties": { + "allOf": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + { + "type": "object", + "properties": { + "rating_image_url": { + "type": "string" + }, + "value": { + "type": "number", + "format": "float" + } + } + } + ] + } + }, + "photo_count": { + "description": "The count of photos for this POI published on Tripadvisor", + "type": "integer", + "format": "int32" + }, + "see_all_photos": { + "description": "Link to open all photos posted for this POI in a photo viewer on Tripadvisor. Link is localized to the correct domain if a language other than English is requested.", + "type": "string" + }, + "price_level": { + "description": "The relative price level for the POI. Not available for all POIs. This string is localized to other currency symbols (e.g. 
££££ or €€€€) if a language other than English (en_US) is requested or if a specific currency is selected.", + "type": "string" + }, + "hours": { + "description": "Provides localized opening hours for Restaurants and Attractions, using ISO 8601 format", + "type": "object", + "properties": { + "periods": { + "type": "array", + "items": { + "type": "object", + "properties": { + "open": { + "description": "The day and times intervals in which the location is open", + "type": "object", + "properties": { + "day": { + "type": "integer", + "format": "int32" + }, + "time": { + "type": "string" + } + } + }, + "close": { + "description": "The day and times intervals in which the location is closed", + "type": "object", + "properties": { + "day": { + "type": "integer", + "format": "int32" + }, + "time": { + "type": "string" + } + } + } + } + } + }, + "weekday_text": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "amenities": { + "description": "The amenities provided by this hotel", + "type": "array", + "items": { + "type": "string" + } + }, + "features": { + "description": "The features provided by this restaurant", + "type": "array", + "items": { + "type": "string" + } + }, + "cuisine": { + "description": "The cuisines of this restaurant", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + } + }, + "parent_brand": { + "description": "The parent brand of this hotel", + "type": "string" + }, + "brand": { + "description": "The brand of this hotel", + "type": "string" + }, + "category": { + "description": "Each POI on Tripadvisor is classified under a \"category\" and \"subcategory\", which is included in the API response.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + "subcategory": { + "description": "Listings that are accommodations/hotels or restaurants are 
assigned a single subcategory.Deprecated as of February 2017 for Attractions. Refer to the \"groups\" object for the most up to date classifications.", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + } + }, + "groups": { + "description": "Hierarchical display of Attraction Groups and Categories. These fields are only applicable for location type \"attraction\".", + "type": "array", + "items": { + "allOf": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + { + "type": "object", + "properties": { + "categories": { + "description": "Attraction Categories", + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + } + } + } + } + ] + } + }, + "styles": { + "description": "The styles of the hotel", + "type": "array", + "items": { + "type": "string" + } + }, + "neighborhood_info": { + "description": "List of neighborhoods close to the location", + "type": "array", + "items": { + "type": "object", + "properties": { + "location_id": { + "type": "string" + }, + "name": { + "type": "string" + } + } + } + }, + "trip_types": { + "description": "Each review submitted on Tripadvisor is tagged with a trip type, as designated by the reviewer.For each POI location, a breakdown of the total review count by trip type is included in the \"trip_types\" object.", + "type": "array", + "items": { + "allOf": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + { + "type": "object", + "properties": { + "value": { + "type": "string" + } + } + } + ] + } + }, + "awards": { + "description": "Returns a list of all of the awards for this location, which could include Certificate of Excellence, Travelers' Choice, and Green 
Leader.For each award, a small and large image will be returned as well.", + "type": "array", + "items": { + "type": "object", + "properties": { + "award_type": { + "description": "Award type name", + "type": "string" + }, + "year": { + "description": "The year in which the award was awarded", + "type": "integer", + "format": "int32" + }, + "images": { + "description": "The award image in its different sizes", + "type": "object", + "properties": { + "tiny": { + "type": "string" + }, + "small": { + "type": "string" + }, + "large": { + "type": "string" + } + } + }, + "categories": { + "description": "The categories in which the award was awarded", + "type": "array", + "items": { + "type": "string" + } + }, + "display_name": { + "type": "string" + } + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + }, + "/v1/location/{locationId}/photos": { + "get": { + "summary": "Location Photos", + "description": "The Location Photos request returns up to 5 high-quality photos for a specific location. Please note that the limits are different for the beta subscribers. 
You need to upgrade to get the higher limits mentioned here.The photos are ordered by recency.Sizes (height x width) for each photo type are as follows:Thumbnail: Fixed 50x50px, cropped, resized, and optimized by TripadvisorSmall: Fixed 150x150px, cropped, resized, and optimized by TripadvisorMedium: Max dimension 250px (can be height or width, depending on photo orientation), the other dimension is resized to maintain the aspect ratioLarge: Max dimension 550px (same rules as Medium, resized to maintain aspect ratio)Original: This is the photo in its original resolution and aspect ratio as provided by the user who submitted it.", + "operationId": "getLocationPhotos", + "tags": [ + "Location Photos" + ], + "parameters": [ + { + "name": "locationId", + "in": "path", + "description": "A unique identifier for a location on Tripadvisor. The location ID can be obtained using the Location Search.", + "required": true, + "schema": { + "type": "integer", + "format": "int32" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. 
\"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + }, + { + "name": "limit", + "in": "query", + "description": "The number of results to return", + "required": false, + "schema": { + "type": "number", + "format": "int32" + } + }, + { + "name": "offset", + "in": "query", + "description": "The index of the first result", + "required": false, + "schema": { + "type": "number", + "format": "int32" + } + }, + { + "name": "source", + "in": "query", + "description": "A comma-separated list of allowed photo sources. Allowed values are 'Expert', 'Management', 'Traveler'. If not specified, allow photos from all sources.", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Details for the location", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "description": "A unique ID for this photo", + "type": "integer", + "format": "int32" + }, + "is_blessed": { + "description": "Boolean whether or not this photo is blessed, i.e. 
reviewed at Tripadvisor as being a photo of exceptional quality", + "type": "boolean" + }, + "album": { + "description": "Name of the album the photo is featured in", + "type": "string" + }, + "caption": { + "description": "Caption of the photo", + "type": "string" + }, + "published_date": { + "description": "Date when this photo was published to Tripadvisor", + "type": "string" + }, + "images": { + "description": "Links to the photo in various sizes, along with the dimensions in pixels of each size", + "type": "object", + "additionalProperties": { + "type": "object", + "properties": { + "width": { + "type": "number" + }, + "url": { + "type": "string" + }, + "height": { + "type": "number" + } + } + } + }, + "source": { + "description": "Origin of the photo (Traveler, Expert, Management)", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + "user": { + "type": "object", + "properties": { + "username": { + "description": "The username that appears on the Tripadvisor website for the user", + "type": "string" + }, + "user_location": { + "type": "object", + "properties": { + "name": { + "description": "The name of the user's location", + "type": "string" + }, + "id": { + "description": "The location ID of the user's location", + "type": "string" + } + } + }, + "review_count": { + "description": "The Review Count that appears on the Tripadvisor website for the user", + "type": "integer", + "format": "int32" + }, + "reviewer_badge": { + "description": "The Reviewer Badge that appears on the Tripadvisor website for the user", + "type": "string" + }, + "avatar": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + } + }, + "paging": { + "type": "object", + "properties": { + "next": { + "type": "string" + }, + "previous": { + "type": "string" + }, + "results": { + "type": "integer", + "format": "int32" + }, + "total_results": { + "type": "integer", + "format": 
"int32" + }, + "skipped": { + "type": "integer", + "format": "int32" + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + }, + "/v1/location/{locationId}/reviews": { + "get": { + "summary": "Location Reviews", + "description": "The Location Reviews request returns up to 5 of the most recent reviews for a specific location. Please note that the limits are different for the beta subscribers. You need to upgrade to get the higher limits mentioned here.", + "operationId": "getLocationReviews", + "tags": [ + "Location Reviews" + ], + "parameters": [ + { + "name": "locationId", + "in": "path", + "description": "A unique identifier for a location on Tripadvisor. The location ID can be obtained using the Location Search.", + "required": true, + "schema": { + "type": "integer", + "format": "int32" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. 
\"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + }, + { + "name": "limit", + "in": "query", + "description": "The number of results to return", + "required": false, + "schema": { + "type": "number", + "format": "int32" + } + }, + { + "name": "offset", + "in": "query", + "description": "The index of the first result", + "required": false, + "schema": { + "type": "number", + "format": "int32" + } + } + ], + "responses": { + "200": { + "description": "Details for the location", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "description": "The Tripadvisor ID for the review.", + "type": "integer", + "format": "int32" + }, + "lang": { + "description": "The language of the review.", + "type": "string" + }, + "location_id": { + "description": "Unique Tripadvisor location ID of the destination or POI.", + "type": "integer", + "format": "int32" + }, + "published_date": { + "description": "The date the review was published to Tripadvisor.", + "type": "string" + }, + "rating": { + "description": "Overall rating for this POI. Not applicable to geographic locations. 
Rating levels are defined as follows:5: Excellent4: Very good3: Average2: Poor1: Terrible", + "type": "integer", + "format": "int32" + }, + "helpful_votes": { + "description": "The number of helpful votes", + "type": "integer", + "format": "int32" + }, + "rating_image_url": { + "description": "The URL to the bubble rating image for this location.", + "type": "string" + }, + "url": { + "description": "The URL to the review", + "type": "string" + }, + "trip_type": { + "description": "The Trip type of the review (Business, Couples, Family, Friends, Solo).", + "type": "string" + }, + "travel_date": { + "description": "The travel date of the review", + "type": "string" + }, + "text": { + "description": "The full text of the review.", + "type": "string" + }, + "title": { + "description": "The title of this review.", + "type": "string" + }, + "owner_response": { + "description": "The Management Response to this review, if one exists.", + "type": "object", + "properties": { + "id": { + "description": "The Tripadvisor ID for the owner respose.", + "type": "integer", + "format": "int32" + }, + "lang": { + "description": "The language of the review.", + "type": "string" + }, + "text": { + "description": "The full text of the review.", + "type": "string" + }, + "title": { + "description": "The title of this review.", + "type": "string" + }, + "author": { + "description": "The owners name.", + "type": "string" + }, + "published_date": { + "description": "The date the review response was published to Tripadvisor.", + "type": "string" + } + } + }, + "is_machine_translated": { + "description": "True or false depending on whether this is a machine-translated review. 
(Outputs only if partner configured for inclusion of machine-translated reviews)", + "type": "boolean" + }, + "user": { + "type": "object", + "properties": { + "username": { + "description": "The username that appears on the Tripadvisor website for the user", + "type": "string" + }, + "user_location": { + "type": "object", + "properties": { + "name": { + "description": "The name of the user's location", + "type": "string" + }, + "id": { + "description": "The location ID of the user's location", + "type": "string" + } + } + }, + "review_count": { + "description": "The Review Count that appears on the Tripadvisor website for the user", + "type": "integer", + "format": "int32" + }, + "reviewer_badge": { + "description": "The Reviewer Badge that appears on the Tripadvisor website for the user", + "type": "string" + }, + "avatar": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "subratings": { + "type": "object", + "additionalProperties": { + "allOf": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "localized_name": { + "type": "string" + } + } + }, + { + "type": "object", + "properties": { + "rating_image_url": { + "type": "string" + }, + "value": { + "type": "number", + "format": "float" + } + } + } + ] + } + } + } + } + }, + "paging": { + "type": "object", + "properties": { + "next": { + "type": "string" + }, + "previous": { + "type": "string" + }, + "results": { + "type": "integer", + "format": "int32" + }, + "total_results": { + "type": "integer", + "format": "int32" + }, + "skipped": { + "type": "integer", + "format": "int32" + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + }, + "/v1/location/search": { + "get": { + "summary": "Find Search", 
+ "description": "The Location Search request returns up to 10 locations found by the given search query.You can use category (\"hotels\", \"attractions\", \"restaurants\", \"geos\"), phone number, address, and latitude/longtitude to search with more accuracy.", + "operationId": "searchForLocations", + "tags": [ + "Location Search" + ], + "parameters": [ + { + "name": "searchQuery", + "in": "query", + "description": "Text to use for searching based on the name of the location", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "category", + "in": "query", + "description": "Filters result set based on property type. Valid options are \"hotels\", \"attractions\", \"restaurants\", and \"geos\"", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "phone", + "in": "query", + "description": "Phone number to filter the search results by (this can be in any format with spaces and dashes but without the \"+\" sign at the beginning)", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "address", + "in": "query", + "description": "Address to filter the search results by", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "latLong", + "in": "query", + "description": "Latitude/Longitude pair to scope down the search around a specifc point - eg. \"42.3455,-71.10767\"", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "radius", + "in": "query", + "description": "Length of the radius from the provided latitude/longitude pair to filter results.", + "required": false, + "schema": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + } + }, + { + "name": "radiusUnit", + "in": "query", + "description": "Unit for length of the radius. 
Valid options are \"km\", \"mi\", \"m\" (km=kilometers, mi=miles, m=meters)", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. \"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + } + ], + "responses": { + "200": { + "description": "Location Search Results", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "location_id": { + "description": "A unique identifier for a location on Tripadvisor. 
This is to be used in the other endpoints that require a location ID.", + "type": "integer", + "format": "int32" + }, + "name": { + "description": "Name of the location", + "type": "string" + }, + "distance": { + "description": "Distance, in miles, this location is from the passed in LatLong parameters", + "type": "string" + }, + "bearing": { + "description": "Direction this location is from the passed in LatLong parameters", + "type": "string" + }, + "address_obj": { + "description": "Object consisting of various address data", + "type": "object", + "properties": { + "street1": { + "type": "string", + "description": "The street name" + }, + "street2": { + "type": "string", + "description": "The street name continuation" + }, + "city": { + "type": "string", + "description": "The city name" + }, + "state": { + "type": "string", + "description": "The state" + }, + "country": { + "type": "string", + "description": "The country" + }, + "postalcode": { + "type": "string", + "description": "The address postal code" + }, + "address_string": { + "type": "string", + "description": "The address in one single sentence" + } + } + } + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + }, + "/v1/location/nearby_search": { + "get": { + "summary": "Nearby Search", + "description": "The Nearby Location Search request returns up to 10 locations found near the given latitude/longtitude.You can use category (\"hotels\", \"attractions\", \"restaurants\", \"geos\"), phone number, address to search with more accuracy.", + "operationId": "searchForNearbyLocations", + "tags": [ + "Nearby Location Search" + ], + "parameters": [ + { + "name": "latLong", + "in": "query", + "description": "Latitude/Longitude pair to scope down the search around a 
specifc point - eg. \"42.3455,-71.10767\"", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "category", + "in": "query", + "description": "Filters result set based on property type. Valid options are \"hotels\", \"attractions\", \"restaurants\", and \"geos\"", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "phone", + "in": "query", + "description": "Phone number to filter the search results by (this can be in any format with spaces and dashes but without the \"+\" sign at the beginning)", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "address", + "in": "query", + "description": "Address to filter the search results by", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "radius", + "in": "query", + "description": "Length of the radius from the provided latitude/longitude pair to filter results.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "radiusUnit", + "in": "query", + "description": "Unit for length of the radius. Valid options are \"km\", \"mi\", \"m\" (km=kilometers, mi=miles, m=meters)", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "language", + "in": "query", + "description": "The language in which to return results (e.g. 
\"en\" for English or \"es\" for Spanish) from the list of our Supported Languages.", + "required": false, + "schema": { + "default": "en", + "type": "string", + "enum": [ + "ar", + "zh", + "zh_TW", + "da", + "nl", + "en_AU", + "en_CA", + "en_HK", + "en_IN", + "en_IE", + "en_MY", + "en_NZ", + "en_PH", + "en_SG", + "en_ZA", + "en_UK", + "en", + "fr", + "fr_BE", + "fr_CA", + "fr_CH", + "de_AT", + "de", + "el", + "iw", + "in", + "it", + "it_CH", + "ja", + "ko", + "no", + "pt_PT", + "pt", + "ru", + "es_AR", + "es_CO", + "es_MX", + "es_PE", + "es", + "es_VE", + "es_CL", + "sv", + "th", + "tr", + "vi" + ] + } + } + ], + "responses": { + "200": { + "description": "Nearby Location Search Results", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "location_id": { + "description": "A unique identifier for a location on Tripadvisor. This is to be used in the other endpoints that require a location ID.", + "type": "integer", + "format": "int32" + }, + "name": { + "description": "Name of the location", + "type": "string" + }, + "distance": { + "description": "Distance, in miles, this location is from the passed in LatLong parameters", + "type": "string" + }, + "bearing": { + "description": "Direction this location is from the passed in LatLong parameters", + "type": "string" + }, + "address_obj": { + "description": "Object consisting of various address data", + "type": "object", + "properties": { + "street1": { + "type": "string", + "description": "The street name" + }, + "street2": { + "type": "string", + "description": "The street name continuation" + }, + "city": { + "type": "string", + "description": "The city name" + }, + "state": { + "type": "string", + "description": "The state" + }, + "country": { + "type": "string", + "description": "The country" + }, + "postalcode": { + "type": "string", + "description": "The address postal code" + }, + 
"address_string": { + "type": "string", + "description": "The address in one single sentence" + } + } + } + } + } + }, + "error": { + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "type": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + } + } + } + } + } + } + } + } + }, + "security": [ + { + "cosoLocationApiLambdaAuthorizer": [] + } + ] + } + } + }, + "components": { + "securitySchemes": { + "cosoLocationApiLambdaAuthorizer": { + "type": "apiKey", + "name": "key", + "in": "query" + } + } + } + } \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/user_functions.py b/sdk/ai/azure-ai-assistants/samples/user_functions.py new file mode 100644 index 000000000000..cb1e3d9cf43d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/user_functions.py @@ -0,0 +1,248 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime +from typing import Any, Callable, Set, Dict, List, Optional + +# These are the user-defined functions that can be called by the agent. + + +def fetch_current_datetime(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. + :rtype: str + """ + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. 
+ + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. + mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +def send_email(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Email address of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. + print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +def send_email_using_recipient_name(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Name of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. + print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +def calculate_sum(a: int, b: int) -> str: + """Calculates the sum of two integers. + + :param a (int): First integer. 
+ :rtype: int + :param b (int): Second integer. + :rtype: int + + :return: The sum of the two integers. + :rtype: str + """ + result = a + b + return json.dumps({"result": result}) + + +def convert_temperature(celsius: float) -> str: + """Converts temperature from Celsius to Fahrenheit. + + :param celsius (float): Temperature in Celsius. + :rtype: float + + :return: Temperature in Fahrenheit. + :rtype: str + """ + fahrenheit = (celsius * 9 / 5) + 32 + return json.dumps({"fahrenheit": fahrenheit}) + + +def toggle_flag(flag: bool) -> str: + """Toggles a boolean flag. + + :param flag (bool): The flag to toggle. + :rtype: bool + + :return: The toggled flag. + :rtype: str + """ + toggled = not flag + return json.dumps({"toggled_flag": toggled}) + + +def merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> str: + """Merges two dictionaries. + + :param dict1 (Dict[str, Any]): First dictionary. + :rtype: dict + :param dict2 (Dict[str, Any]): Second dictionary. + :rtype: dict + + :return: The merged dictionary. + :rtype: str + """ + merged = dict1.copy() + merged.update(dict2) + return json.dumps({"merged_dict": merged}) + + +def get_user_info(user_id: int) -> str: + """Retrieves user information based on user ID. + + :param user_id (int): ID of the user. + :rtype: int + + :return: User information as a JSON string. + :rtype: str + """ + mock_users = { + 1: {"name": "Alice", "email": "alice@example.com"}, + 2: {"name": "Bob", "email": "bob@example.com"}, + 3: {"name": "Charlie", "email": "charlie@example.com"}, + } + user_info = mock_users.get(user_id, {"error": "User not found."}) + return json.dumps({"user_info": user_info}) + + +def longest_word_in_sentences(sentences: List[str]) -> str: + """Finds the longest word in each sentence. + + :param sentences (List[str]): A list of sentences. + :return: A JSON string mapping each sentence to its longest word. 
+ :rtype: str + """ + if not sentences: + return json.dumps({"error": "The list of sentences is empty"}) + + longest_words = {} + for sentence in sentences: + # Split sentence into words + words = sentence.split() + if words: + # Find the longest word + longest_word = max(words, key=len) + longest_words[sentence] = longest_word + else: + longest_words[sentence] = "" + + return json.dumps({"longest_words": longest_words}) + + +def process_records(records: List[Dict[str, int]]) -> str: + """ + Process a list of records, where each record is a dictionary with string keys and integer values. + + :param records: A list containing dictionaries that map strings to integers. + :return: A list of sums of the integer values in each record. + """ + sums = [] + for record in records: + # Sum up all the values in each dictionary and append the result to the sums list + total = sum(record.values()) + sums.append(total) + return json.dumps({"sums": sums}) + + +# Example User Input for Each Function +# 1. Fetch Current DateTime +# User Input: "What is the current date and time?" +# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" + +# 2. Fetch Weather +# User Input: "Can you provide the weather information for New York?" + +# 3. Send Email +# User Input: "Send an email to john.doe@example.com with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" + +# 4. Calculate Sum +# User Input: "What is the sum of 45 and 55?" + +# 5. Convert Temperature +# User Input: "Convert 25 degrees Celsius to Fahrenheit." + +# 6. Toggle Flag +# User Input: "Toggle the flag True." + +# 7. Merge Dictionaries +# User Input: "Merge these two dictionaries: {'name': 'Alice'} and {'age': 30}." + +# 8. Get User Info +# User Input: "Retrieve user information for user ID 1." + +# 9. 
Longest Word in Sentences +# User Input: "Find the longest word in each of these sentences: ['The quick brown fox jumps over the lazy dog', 'Python is an amazing programming language', 'Azure AI capabilities are impressive']." + +# 10. Process Records +# User Input: "Process the following records: [{'a': 10, 'b': 20}, {'x': 5, 'y': 15, 'z': 25}, {'m': 30}]." + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_current_datetime, + fetch_weather, + send_email, + calculate_sum, + convert_temperature, + toggle_flag, + merge_dicts, + get_user_info, + longest_word_in_sentences, + process_records, +} diff --git a/sdk/ai/azure-ai-assistants/samples/user_logic_apps.py b/sdk/ai/azure-ai-assistants/samples/user_logic_apps.py new file mode 100644 index 000000000000..979fd5eca143 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/user_logic_apps.py @@ -0,0 +1,80 @@ +import json +import requests +from typing import Dict, Any, Callable + +from azure.identity import DefaultAzureCredential +from azure.mgmt.logic import LogicManagementClient + + +class AzureLogicAppTool: + """ + A service that manages multiple Logic Apps by retrieving and storing their callback URLs, + and then invoking them with an appropriate payload. + """ + + def __init__(self, subscription_id: str, resource_group: str, credential=None): + if credential is None: + credential = DefaultAzureCredential() + self.subscription_id = subscription_id + self.resource_group = resource_group + self.logic_client = LogicManagementClient(credential, subscription_id) + + self.callback_urls: Dict[str, str] = {} + + def register_logic_app(self, logic_app_name: str, trigger_name: str) -> None: + """ + Retrieves and stores a callback URL for a specific Logic App + trigger. + Raises a ValueError if the callback URL is missing. 
+ """ + callback = self.logic_client.workflow_triggers.list_callback_url( + resource_group_name=self.resource_group, + workflow_name=logic_app_name, + trigger_name=trigger_name, + ) + + if callback.value is None: + raise ValueError(f"No callback URL returned for Logic App '{logic_app_name}'.") + + self.callback_urls[logic_app_name] = callback.value + + def invoke_logic_app(self, logic_app_name: str, payload: Dict[str, Any]) -> Dict[str, Any]: + """ + Invokes the registered Logic App (by name) with the given JSON payload. + Returns a dictionary summarizing success/failure. + """ + if logic_app_name not in self.callback_urls: + raise ValueError(f"Logic App '{logic_app_name}' has not been registered.") + + url = self.callback_urls[logic_app_name] + response = requests.post(url=url, json=payload) + + if response.ok: + return {"result": f"Successfully invoked {logic_app_name}."} + else: + return {"error": (f"Error invoking {logic_app_name} " f"({response.status_code}): {response.text}")} + + +def create_send_email_function(service: AzureLogicAppTool, logic_app_name: str) -> Callable[[str, str, str], str]: + """ + Returns a function that sends an email by invoking the specified Logic App in LogicAppService. + This keeps the LogicAppService instance out of global scope by capturing it in a closure. + """ + + def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: + """ + Sends an email by invoking the specified Logic App with the given recipient, subject, and body. + + :param recipient: The email address of the recipient. + :param subject: The subject of the email. + :param body: The body of the email. + :return: A JSON string summarizing the result of the operation. 
+ """ + payload = { + "to": recipient, + "subject": subject, + "body": body, + } + result = service.invoke_logic_app(logic_app_name, payload) + return json.dumps(result) + + return send_email_via_logic_app diff --git a/sdk/ai/azure-ai-assistants/samples/weather_openapi.json b/sdk/ai/azure-ai-assistants/samples/weather_openapi.json new file mode 100644 index 000000000000..df0192590adb --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/weather_openapi.json @@ -0,0 +1,62 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "get weather data", + "description": "Retrieves current weather data for a location based on wttr.in.", + "version": "v1.0.0" + }, + "servers": [ + { + "url": "https://wttr.in" + } + ], + "auth": [], + "paths": { + "/{location}": { + "get": { + "description": "Get weather information for a specific location", + "operationId": "GetCurrentWeather", + "parameters": [ + { + "name": "location", + "in": "path", + "description": "City or location to retrieve the weather for", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "format", + "in": "query", + "description": "Always use j1 value for this parameter", + "required": true, + "schema": { + "type": "string", + "default": "j1" + } + } + ], + "responses": { + "200": { + "description": "Successful response", + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + }, + "404": { + "description": "Location not found" + } + }, + "deprecated": false + } + } + }, + "components": { + "schemes": {} + } +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/tests/README.md b/sdk/ai/azure-ai-assistants/tests/README.md new file mode 100644 index 000000000000..a69b9c40bdeb --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/README.md @@ -0,0 +1,60 @@ +# Azure AI Project client library tests for Python + +The instructions below are for running tests locally, on a Windows machine, against the live service using a local build of the client library. 
+ +## Build and install the client library + +- Clone or download this sample repository. +- Open a command prompt window in the folder `sdk\ai\azure-ai-assistants` +- Install development dependencies: + ```bash + pip install -r dev_requirements.txt + ``` +- Build the package: + ```bash + pip install wheel + python setup.py bdist_wheel + ``` +- Install the resulting wheel (update version `1.0.0b5` to the current one): + ```bash + pip install dist\azure_ai_assistants-1.0.0b5-py3-none-any.whl --user --force-reinstall + ``` + +## Log in to Azure + +```bash +az login +``` + +## Set up environment variables + +Edit the file `azure_ai_projects_tests.env` located in the folder above. Follow the instructions there on how to set up Azure AI Foundry projects to be used for testing, and enter appropriate values for the environment variables used for the tests you want to run. + +## Configure test proxy + +Configure the test proxy to run live service tests without recordings: + +```bash +set AZURE_TEST_RUN_LIVE=true +set AZURE_SKIP_LIVE_RECORDING=true +set PROXY_URL=http://localhost:5000 +set AZURE_TEST_USE_CLI_AUTH=true +``` + +## Run tests + +To run all tests, type: + +```bash +pytest +``` + +To run tests in a particular folder (`tests\connections` for example): + +```bash +pytest tests\connections +``` + +## Additional information + +See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. 
diff --git a/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt b/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt new file mode 100644 index 000000000000..138c8eda465e --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt @@ -0,0 +1,255 @@ +event: thread.run.step.completed +data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1735945046,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"{}","output":"{\"current_time\": \"2025-01-03 14:57:24\"}"}},{"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"{\"location\": \"New York\"}","output":"{\"weather\": \"Sunny, 25\\u00b0C\"}"}}]},"usage":{"prompt_tokens":648,"completion_tokens":71,"total_tokens":719}} + +event: thread.run.queued +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to 
Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two 
integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.in_progress +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to 
Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two 
integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.step.created +data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + +event: thread.run.step.in_progress +data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_03","type":"function","function":{"name":"send_email","arguments":"","output":null}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"recipient"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"user"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"@example"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":".com"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\",\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"subject"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"Current"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" New"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" York"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Weather"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" and"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" DateTime"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Information"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\",\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"body"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" \""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"Hello"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":",\\"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"Here"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" are"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" the"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" details"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" you"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" requested"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":\\"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"-"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Date"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" and"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Time"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"202"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"5"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"-"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"01"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"-"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"03"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"14"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"57"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"24"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"-"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Weather"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" in"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" New"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" York"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":":"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" Sunny"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":","}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" "}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"25"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"°C"}}]}}} + +event: thread.run.step.delta +data: 
{"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"Best"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":" regards"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":".\"\n"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"}"}}]}}} + +event: thread.run.requires_action +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"requires_action","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_03","type":"function","function":{"name":"send_email","arguments":"{\n \"recipient\": \"user@example.com\",\n \"subject\": \"Current New York Weather and DateTime Information\",\n \"body\": \"Hello,\\n\\nHere are the details you requested:\\n\\n- Date and Time: 2025-01-03 14:57:24\\n- Weather in New York: Sunny, 25°C\\n\\nBest 
regards.\"\n}"}}]}},"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to 
integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: done +data: [DONE] + diff --git a/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt b/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt new file mode 100644 index 000000000000..14d56a0f74a1 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt @@ -0,0 +1,45 @@ +event: thread.run.created +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":null,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.queued +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":null,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.in_progress +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.step.created +data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + +event: thread.run.step.in_progress +data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"","output":null}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{}"}}]}}} + +event: 
thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"","output":null}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":"{\"location"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":""}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":"\": \"N"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":"ew Y"}}]}}} + +event: thread.run.step.delta +data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":"ork\"}"}}]}}} + +event: thread.run.requires_action +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"requires_action","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"{}"}},{"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"{\"location\": \"New York\"}"}}]}},"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful 
assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to 
integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: done +data: [DONE] + diff --git a/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt b/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt new file mode 100644 index 000000000000..fe3afe7a9ba2 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt @@ -0,0 +1,213 @@ +event: thread.run.step.completed +data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1735945059,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_03","type":"function","function":{"name":"send_email","arguments":"{\n \"recipient\": \"user@example.com\",\n \"subject\": \"Current New York Weather and DateTime Information\",\n \"body\": \"Hello,\\n\\nHere are the details you requested:\\n\\n- Date and Time: 2025-01-03 14:57:24\\n- Weather in New York: Sunny, 25�C\\n\\nBest regards.\"\n}","output":"{\"message\": \"Email successfully sent to user@example.com.\"}"}}]},"usage":{"prompt_tokens":735,"completion_tokens":87,"total_tokens":822}} + +event: thread.run.queued +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a 
helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to 
integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.in_progress +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945059,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and 
body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: thread.run.step.created +data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":null} + +event: thread.run.step.in_progress +data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":null} + +event: thread.message.created +data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"agent_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"attachments":[],"metadata":{}} + +event: thread.message.in_progress +data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"agent_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"attachments":[],"metadata":{}} + +event: thread.message.delta +data: 
{"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" email"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" has"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" been"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" successfully"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" sent"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" to"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" the"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" recipient"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" with"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" the"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" following"}}]}} + +event: thread.message.delta +data: 
{"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" details"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":\n\n"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"-"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" Date"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" and"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" Time"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" "}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"202"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"5"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"-"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"01"}}]}} + +event: thread.message.delta +data: 
{"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"-"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"03"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" "}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"14"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"57"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"24"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"\n"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"-"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" Weather"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" in"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" New"}}]}} + 
+event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" York"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":":"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" Sunny"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":","}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" "}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"25"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"�C"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"\n\n"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"If"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" you"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" need"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" any"}}]}} + +event: thread.message.delta +data: 
{"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" further"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" assistance"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" or"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" information"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":","}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" please"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" feel"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" free"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" to"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" ask"}}]}} + +event: thread.message.delta +data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} + +event: thread.message.completed +data: 
{"id":"msg_01","object":"thread.message","created_at":1735945060,"agent_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1735945061,"role":"assistant","content":[{"type":"text","text":{"value":"The email has been successfully sent to the recipient with the following details:\n\n- Date and Time: 2025-01-03 14:57:24\n- Weather in New York: Sunny, 25�C\n\nIf you need any further assistance or information, please feel free to ask.","annotations":[]}}],"attachments":[],"metadata":{}} + +event: thread.run.step.completed +data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1735945061,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":{"prompt_tokens":834,"completion_tokens":62,"total_tokens":896}} + +event: thread.run.completed +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"completed","started_at":1735945059,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1735945061,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in 
Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second 
integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":2217,"completion_tokens":220,"total_tokens":2437},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + +event: done +data: [DONE] + diff --git a/sdk/ai/azure-ai-assistants/tests/check_sample_name.sh b/sdk/ai/azure-ai-assistants/tests/check_sample_name.sh new file mode 100644 index 000000000000..5c7cbfa53363 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/check_sample_name.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# This is simple helper script to chreck the name of a file +# the name should appear at least once as: +# +# python $fname +# +# If the file contain its name less times, we print its name. + +SAMPLES_SYNC="`dirname ${0}`/../samples/agents" +SAMPLES_ASYNC="`dirname ${0}`/../samples/agents/async_samples" + +for sample_dir in "$SAMPLES_SYNC" "$SAMPLES_ASYNC"; do + for fname in `ls "$sample_dir" | grep \^sample_ | grep \[.\]py\$`; do + cnt=`grep -c "${fname}" "${sample_dir}/${fname}"` + if [ $cnt -lt 1 ]; then + echo "${sample_dir}/${fname} name encountered ${cnt} times." 
# pylint: disable=line-too-long,useless-suppression
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Shared pytest configuration for the azure-ai-assistants test suite.

Loads test environment variables, skips evaluation tests outside live runs,
and registers test-proxy sanitizers used during recorded-session playback.
"""
import os

import pytest
from devtools_testutils import (
    remove_batch_sanitizers,
    get_credential,
    test_proxy,
    add_general_regex_sanitizer,
    add_body_key_sanitizer,
)
from dotenv import load_dotenv, find_dotenv

if not load_dotenv(find_dotenv(filename="azure_ai_assistants_tests.env"), override=True):
    # Fixed: this message previously referenced "azure-ai-projects", but this
    # conftest belongs to the azure-ai-assistants package.
    print("Failed to apply environment variables for azure-ai-assistants tests.")


def pytest_collection_modifyitems(items):
    """Skip evaluation tests unless the run is live (AZURE_TEST_RUN_LIVE=true)."""
    if os.environ.get("AZURE_TEST_RUN_LIVE") == "true":
        return
    for item in items:
        # Match both Windows and POSIX path separators.
        if "tests\\evaluation" in item.fspath.strpath or "tests/evaluation" in item.fspath.strpath:
            item.add_marker(
                pytest.mark.skip(
                    reason="Skip running Evaluations tests in PR pipeline until we can sort out the failures related to AI Foundry project settings"
                )
            )


class SanitizedValues:
    """Placeholder values substituted into recordings by the sanitizers below."""

    SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000"
    RESOURCE_GROUP_NAME = "00000"
    WORKSPACE_NAME = "00000"
    DATASET_NAME = "00000"
    TENANT_ID = "00000000-0000-0000-0000-000000000000"
    USER_OBJECT_ID = "00000000-0000-0000-0000-000000000000"
    API_KEY = "00000000000000000000000000000000000000000000000000000000000000000000"
    VECTOR_STORE_NAME = "vs_000000000000000000000000"
    # cSpell:disable-next-line
    FILE_BATCH = "vsfb_00000000000000000000000000000000"


@pytest.fixture(scope="session")
def mock_project_scope():
    """Sanitized subscription / resource group / project triad."""
    return {
        "subscription_id": f"{SanitizedValues.SUBSCRIPTION_ID}",
        "resource_group_name": f"{SanitizedValues.RESOURCE_GROUP_NAME}",
        "project_name": f"{SanitizedValues.WORKSPACE_NAME}",
    }


@pytest.fixture(scope="session")
def mock_dataset_name():
    """Sanitized dataset name."""
    return {
        "dataset_name": f"{SanitizedValues.DATASET_NAME}",
    }


@pytest.fixture(scope="session")
def mock_vector_store_name():
    """Sanitized vector store and file-batch identifiers."""
    return {
        "vector_store_name": f"{SanitizedValues.VECTOR_STORE_NAME}",
        "file_batches": f"{SanitizedValues.FILE_BATCH}",
    }


# autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method
@pytest.fixture(scope="session", autouse=True)
def start_proxy(test_proxy):
    return


@pytest.fixture(scope="session", autouse=True)
def add_sanitizers(test_proxy, mock_project_scope, mock_dataset_name, mock_vector_store_name):
    """Register all regex/body sanitizers with the test proxy for this session."""

    def azure_workspace_triad_sanitizer():
        """Sanitize subscription, resource group, and workspace."""

        add_general_regex_sanitizer(
            regex=r"/subscriptions/([-\w\._\(\)]+)",
            value=mock_project_scope["subscription_id"],
            group_for_replace="1",
        )

        add_general_regex_sanitizer(
            regex=r"/resource[gG]roups/([-\w\._\(\)]+)",
            value=mock_project_scope["resource_group_name"],
            group_for_replace="1",
        )

        add_general_regex_sanitizer(
            regex=r"/workspaces/([-\w\._\(\)]+)", value=mock_project_scope["project_name"], group_for_replace="1"
        )

        # TODO (Darren): Check why this is needed in addition to the above
        add_general_regex_sanitizer(
            regex=r"%2Fsubscriptions%2F([-\w\._\(\)]+)",
            value=mock_project_scope["subscription_id"],
            group_for_replace="1",
        )

        # TODO (Darren): Check why this is needed in addition to the above
        add_general_regex_sanitizer(
            regex=r"%2Fresource[gG]roups%2F([-\w\._\(\)]+)",
            value=mock_project_scope["resource_group_name"],
            group_for_replace="1",
        )

    azure_workspace_triad_sanitizer()

    add_general_regex_sanitizer(regex=r"/runs/([-\w\._\(\)]+)", value="Sanitized", group_for_replace="1")

    add_general_regex_sanitizer(
        regex=r"/data/([-\w\._\(\)]+)", value=mock_dataset_name["dataset_name"], group_for_replace="1"
    )

    add_general_regex_sanitizer(
        regex=r"/vector_stores/([-\w\._\(\)]+)",
        value=mock_vector_store_name["vector_store_name"],
        group_for_replace="1",
    )

    add_general_regex_sanitizer(
        regex=r"/file_batches/([-\w\._\(\)]+)/", value=mock_vector_store_name["file_batches"], group_for_replace="1"
    )

    # Sanitize Application Insights connection string from service response (/tests/telemetry)
    add_body_key_sanitizer(
        json_path="properties.ConnectionString",
        value="InstrumentationKey=00000000-0000-0000-0000-000000000000;IngestionEndpoint=https://region.applicationinsights.azure.com/;LiveEndpoint=https://region.livediagnostics.monitor.azure.com/;ApplicationId=00000000-0000-0000-0000-000000000000",
    )

    add_body_key_sanitizer(
        json_path="data_sources[*].uri",
        value="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/00000000000/product_info_1.md",
    )

    add_body_key_sanitizer(
        json_path="configuration.data_sources[*].uri",
        value="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/00000000000/product_info_1.md",
    )

    add_body_key_sanitizer(
        json_path="data_source.uri",
        value="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/00000000000/product_info_1.md",
    )

    # Sanitize API key from service response (/tests/connections)
    add_body_key_sanitizer(json_path="properties.credentials.key", value="Sanitized")

    # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive:
    #  - AZSDK3493: $..name
    #  - AZSDK3430: $..id
    # (one call — remove_batch_sanitizers accepts a list of sanitizer IDs)
    remove_batch_sanitizers(["AZSDK3493", "AZSDK3430"])
b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py new file mode 100644 index 000000000000..6b632064c12d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py @@ -0,0 +1,186 @@ +import io +import json +import unittest +from typing import Any, Dict, IO, Union +from unittest.mock import Mock, MagicMock, AsyncMock +from requests.structures import CaseInsensitiveDict +import inspect +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.aio import AssistantsClient as AsyncAssistantsClient +from azure.ai.assistants._model_base import SdkJSONEncoder + + +def dict_to_io_bytes(input: Dict[str, Any]) -> io.BytesIO: + input_string = json.dumps(input, cls=SdkJSONEncoder, exclude_readonly=True) + return io.BytesIO(input_string.encode("utf-8")) + + +class OverloadAssertion: + def __init__(self, mock: Mock, async_mock: AsyncMock, **args): + self.mock = mock + self.async_mock = async_mock + + def _to_dict(self, input: Union[None, str, IO[bytes]]) -> Dict[str, Any]: + json_string = "" + if isinstance(input, io.BytesIO): + json_string = input.getvalue().decode("utf-8") + elif isinstance(input, str): + json_string = input + else: + json_string = "{}" + return json.loads(json_string) + + def assert_deep_equal_header_except_content_length( + self, header1: CaseInsensitiveDict, header2: CaseInsensitiveDict, msg: str + ): + """ + Compare two HTTP headers for deep equality, except for the Content-Length header. + Because it seems only created by HttpRequest class automatically when the type is bytes + """ + header1 = header1.copy() + header2 = header2.copy() + header1.pop("Content-Length", None) + header2.pop("Content-Length", None) + unittest.TestCase().assertDictEqual(dict(header1), dict(header2), msg) + + def _assert_same_http_request(self, call1: Any, call2: Any, index1: int, index2: int): + """ + Compare two HTTP request objects for deep equality. 
+ """ + + # Compare method, URL, headers, body, and other relevant attributes + req1 = call1.args[0] + req2 = call2.args[0] + req1_body = self._to_dict(req1.body) + req2_body = self._to_dict(req2.body) + unittest.TestCase().assertEqual( + req1.method, + req2.method, + f"call[{index1}] method is {req1.method}, but call[{index2}] method is {req2.method}", + ) + unittest.TestCase().assertEqual( + req1.url, req2.url, f"call[{index1}] url is {req1.url}, but call[{index2}] url is {req2.url}" + ) + unittest.TestCase().assertDictEqual( + req1_body, + req2_body, + f"call[{index1}] body is {json.dumps(req1_body, sort_keys=True)}, but call[{index2}] body is {json.dumps(req2_body, sort_keys=True)}", + ) + self.assert_deep_equal_header_except_content_length( + req1.headers, + req2.headers, + f"call[{index1}] headers are {req1.headers}, but call[{index2}] headers are {req2.headers}", + ) + unittest.TestCase().assertDictEqual( + call1.kwargs, + call2.kwargs, + f"call[{index1}] kwargs are {call1.kwargs}, but call[{index2}] kwargs are {call2.kwargs}", + ) + + def same_http_requests_from(self, *, operation_count: int, api_per_operation_count: int): + all_calls = self.mock.call_args_list + self.async_mock.call_args_list + assert len(all_calls) == operation_count * api_per_operation_count + + # Compare first followed by second followed by third call etc of each operations, + # Assert they have the same http request + template = all_calls[:api_per_operation_count] + for j in range(api_per_operation_count, len(all_calls), api_per_operation_count): + for i, (api_one, api_other) in enumerate(zip(template, all_calls[j : j + api_per_operation_count])): + self._assert_same_http_request(api_one, api_other, i, i + j) + + +def assert_same_http_requests(test_func): + """ + Decorator to mock pipeline responses and call the test function with the mock clients and assertion. + + :param test_func: The test function to be decorated. + :return: The wrapper function. 
+ """ + + def _get_mock_client() -> AssistantsClient: + """Return the fake project client""" + client = AssistantsClient( + endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", + subscription_id="00000000-0000-0000-0000-000000000000", + resource_group_name="non-existing-rg", + project_name="non-existing-project", + credential=MagicMock(), + ) + client.submit_tool_outputs_to_run = MagicMock() + client.submit_tool_outputs_to_stream = MagicMock() + return client + + def _get_async_mock_client() -> AsyncAssistantsClient: + """Return the fake project client""" + client = AsyncAssistantsClient( + endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", + subscription_id="00000000-0000-0000-0000-000000000000", + resource_group_name="non-existing-rg", + project_name="non-existing-project", + credential=AsyncMock(), + ) + client.submit_tool_outputs_to_run = AsyncMock() + client.submit_tool_outputs_to_stream = AsyncMock() + return client + + async def wrapper(self, *args, **kwargs): + """ + Wrapper function to set up mocks and call the test function. + + :param self: The test class instance. + :param args: Positional arguments to pass to the test function. + :param kwargs: Keyword arguments to pass to the test function. 
+ """ + if not test_func: + return + + # Mock the pipeline response + pipeline_response_mock_return = Mock() + http_response = Mock() + http_response_json = Mock() + iter_bytes = Mock() + + # Set up the mock HTTP response + http_response_json.return_value = {} + http_response.status_code = 200 + http_response.json = http_response_json + http_response.iter_bytes = iter_bytes + + # Set up the pipeline response mock + pipeline_response_mock = Mock() + pipeline_response_mock_async = AsyncMock() + pipeline_response_mock.return_value = pipeline_response_mock_return + pipeline_response_mock_async.return_value = pipeline_response_mock_return + pipeline_response_mock_return.http_response = http_response + + # Get the mock clients + client = _get_mock_client() + async_client = _get_async_mock_client() + + async with async_client: + with client: + # Assign the pipeline mock to the client + client._client._pipeline.run = pipeline_response_mock + async_client._client._pipeline.run = pipeline_response_mock_async + + # Create an assertion object with the call arguments list + assertion = OverloadAssertion(pipeline_response_mock, pipeline_response_mock_async) + + # Call the test function with the mock clients and assertion + await test_func(self, client, async_client, assertion, *args, **kwargs) + + return wrapper + + +def get_mock_fn(fn, return_val): + def mock_func(*args, **kwargs): + fn(*args, **kwargs) + return return_val + + async def mock_func_async(*args, **kwargs): + await fn(*args, **kwargs) + return return_val + + if inspect.iscoroutinefunction(fn): + return mock_func_async + return mock_func diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py new file mode 100644 index 000000000000..a6b41004ba18 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py @@ -0,0 +1,139 @@ +from unittest.mock import patch + +import pytest +from azure.ai.assistants 
"""Overload-equivalence tests: each client operation must issue the identical
HTTP request whether its payload is supplied as keyword arguments, as a JSON
dict ``body``, or as an ``IO[bytes]`` ``body`` — for both sync and async clients."""

from typing import Any, Dict, List, MutableMapping
from unittest.mock import patch

import pytest

from azure.ai.assistants import AssistantsClient
from azure.ai.assistants.aio import AssistantsClient as AsyncAssistantsClient
from azure.ai.assistants.models import ThreadMessageOptions, ToolResources, VectorStore

from overload_assert_utils import (
    OverloadAssertion,
    assert_same_http_requests,
    dict_to_io_bytes,
    get_mock_fn,
)

JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object


class TestSignatures:

    @pytest.mark.asyncio
    @assert_same_http_requests
    async def test_create_assistant(
        self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion
    ):
        """Assistant creation overloads must serialize to the same request."""
        model = "gpt-4-1106-preview"
        name = "first"
        instructions = "You are a helpful assistant"
        payload = {"model": model, "name": name, "instructions": instructions}

        # Three sync overloads: kwargs, dict body, IO[bytes] body.
        assistant.create_assistant(model=model, name=name, instructions=instructions)
        assistant.create_assistant(body=payload)
        assistant.create_assistant(body=dict_to_io_bytes(payload))

        # Same three overloads through the async client.
        await async_assistant.create_assistant(model=model, name=name, instructions=instructions)
        await async_assistant.create_assistant(body=payload)
        await async_assistant.create_assistant(body=dict_to_io_bytes(payload))

        assertion.same_http_requests_from(operation_count=6, api_per_operation_count=1)

    @pytest.mark.asyncio
    @assert_same_http_requests
    async def test_create_vector_store_and_poll(
        self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion
    ):
        """Vector-store create-and-poll overloads must serialize identically.

        The create/get operations are wrapped so polling terminates after one
        in_progress -> completed transition.
        """
        file_ids = ["file_id"]
        payload = {"file_ids": file_ids}

        with patch(
            "azure.ai.assistants._operations.AssistantsClientOperationsMixin.create_vector_store",
            wraps=get_mock_fn(
                assistant.create_vector_store, return_val=VectorStore({"id": "store_1", "status": "in_progress"})
            ),
        ), patch(
            "azure.ai.assistants._operations.AssistantsClientOperationsMixin.get_vector_store",
            wraps=get_mock_fn(assistant.get_vector_store, return_val=VectorStore({"id": "store_1", "status": "completed"})),
        ):

            assistant.create_vector_store_and_poll(file_ids=file_ids, sleep_interval=0)
            assistant.create_vector_store_and_poll(body=payload, sleep_interval=0)
            assistant.create_vector_store_and_poll(body=dict_to_io_bytes(payload), sleep_interval=0)

        with patch(
            "azure.ai.assistants.aio._operations.AssistantsClientOperationsMixin.create_vector_store",
            wraps=get_mock_fn(
                async_assistant.create_vector_store, return_val=VectorStore({"id": "store_1", "status": "in_progress"})
            ),
        ), patch(
            "azure.ai.assistants.aio._operations.AssistantsClientOperationsMixin.get_vector_store",
            wraps=get_mock_fn(
                async_assistant.get_vector_store, return_val=VectorStore({"id": "store_1", "status": "completed"})
            ),
        ):
            await async_assistant.create_vector_store_and_poll(file_ids=file_ids, sleep_interval=0)
            await async_assistant.create_vector_store_and_poll(body=payload, sleep_interval=0)
            await async_assistant.create_vector_store_and_poll(body=dict_to_io_bytes(payload), sleep_interval=0)
        assertion.same_http_requests_from(operation_count=6, api_per_operation_count=2)

    @pytest.mark.asyncio
    @assert_same_http_requests
    async def test_create_thread(
        self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion
    ):
        """Thread creation overloads must serialize to the same request."""
        messages: List[ThreadMessageOptions] = []
        tool_resources: ToolResources = ToolResources()
        metadata: Dict[str, str] = {}
        payload = {"messages": messages, "tool_resources": tool_resources, "metadata": metadata}

        assistant.create_thread(messages=messages, tool_resources=tool_resources, metadata=metadata)
        assistant.create_thread(body=payload)
        assistant.create_thread(body=dict_to_io_bytes(payload))

        await async_assistant.create_thread(messages=messages, tool_resources=tool_resources, metadata=metadata)
        await async_assistant.create_thread(body=payload)
        await async_assistant.create_thread(body=dict_to_io_bytes(payload))

        assertion.same_http_requests_from(operation_count=6, api_per_operation_count=1)

    @pytest.mark.asyncio
    @pytest.mark.skip("Defect: during body as JSON and IO Bytes don't, backend not called with stream=False")
    @assert_same_http_requests
    async def test_create_run(
        self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion
    ):
        """Run creation overloads must serialize to the same request."""
        thread_id = "thread_id"
        assistant_id = "assistant_id"
        payload = {"assistant_id": assistant_id}

        assistant.create_run(thread_id, assistant_id=assistant_id)
        assistant.create_run(thread_id, body=payload)
        assistant.create_run(thread_id, body=dict_to_io_bytes(payload))

        await async_assistant.create_run(thread_id, assistant_id=assistant_id)
        await async_assistant.create_run(thread_id, body=payload)
        await async_assistant.create_run(thread_id, body=dict_to_io_bytes(payload))

        assertion.same_http_requests_from(operation_count=6, api_per_operation_count=1)

    @pytest.mark.asyncio
    @pytest.mark.skip("Defect: during body as JSON and IO Bytes don't, backend not called with stream=True")
    @assert_same_http_requests
    async def test_create_stream(
        self, assistant: AssistantsClient, async_assistant: AsyncAssistantsClient, assertion: OverloadAssertion
    ):
        """Streaming run creation overloads must serialize to the same request."""
        thread_id = "thread_id"
        assistant_id = "assistant_id"
        payload = {"assistant_id": assistant_id}

        assistant.create_stream(thread_id, assistant_id=assistant_id)
        assistant.create_stream(thread_id, body=payload)
        assistant.create_stream(thread_id, body=dict_to_io_bytes(payload))

        await async_assistant.create_stream(thread_id, assistant_id=assistant_id)
        await async_assistant.create_stream(thread_id, body=payload)
        await async_assistant.create_stream(thread_id, body=dict_to_io_bytes(payload))

        assertion.same_http_requests_from(operation_count=6, api_per_operation_count=1)
"""Unit tests for the assistants streaming event handlers and SSE event parsing."""
import os
from typing import Iterator, List
from unittest.mock import Mock, patch

import pytest

from azure.ai.assistants.models import (
    AssistantEventHandler,
    AssistantStreamEvent,
    BaseAssistantEventHandler,
    MessageDeltaChunk,
    RunStep,
    RunStepDeltaChunk,
    SubmitToolOutputsAction,
    ThreadMessage,
    ThreadRun,
)
from azure.ai.assistants.models._patch import _parse_event


def read_file(file_name: str) -> str:
    """Read a stream-response fixture from the tests/assets directory."""
    with open(os.path.join(os.path.dirname(__file__), "assets", f"{file_name}.txt"), "r") as file:
        return file.read()


main_stream_response = read_file("main_stream_response")
fetch_current_datetime_and_weather_stream_response = read_file("fetch_current_datetime_and_weather_stream_response")


def convert_to_byte_iterator(input: str) -> Iterator[bytes]:
    """Yield the whole input string as a single bytes chunk."""
    yield input.encode()


class TestBaseAssistantEventHandler:
    class MyAssistantEventhHandler(BaseAssistantEventHandler[str]):
        # Identity processor: expose each raw event string unchanged.
        def _process_event(self, event_data_str: str) -> str:
            return event_data_str

    def break_main_stream_response(self, indices: List[int], response: str):
        """Yield *response* as byte chunks split at the given indices."""
        previous_index = 0
        for index in indices:
            yield response[previous_index:index].encode()
            previous_index = index
        yield response[previous_index:].encode()

    def mock_callable(self, _: ThreadRun, __: BaseAssistantEventHandler[str]) -> None:
        """No-op stand-in for the submit-tool-outputs callback."""
        pass

    def test_event_handler_process_response_when_break_around_event_separators(self):
        # events are split into multiple chunks.
        # Each chunk might contain more than one or an incomplete response.
        # Test the chunks are broken around the event separators which are "\n\n"
        handler = self.MyAssistantEventhHandler()
        new_line_indices = [i for i in range(len(main_stream_response)) if main_stream_response.startswith("\n\n", i)]

        indices_around_new_lines = [i + offset for i, offset in zip(new_line_indices, [0, -1, 1, 2, 3, 4, 5])]
        handler.initialize(
            self.break_main_stream_response(indices_around_new_lines, main_stream_response), self.mock_callable
        )
        count = 0
        all_event_str: List[str] = []
        for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1
        assert count == main_stream_response.count("event:")
        assert all_event_str[-1].startswith("event: done")

    def test_event_handler_process_response_when_break_at_the_start(self):
        handler = self.MyAssistantEventhHandler()

        handler.initialize(
            # the numbers of the index around the new line characters, middle of the event, or at the end
            self.break_main_stream_response([2], main_stream_response),
            self.mock_callable,
        )
        count = 0
        all_event_str: List[str] = []
        for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1
        assert count == main_stream_response.count("event:")
        assert all_event_str[-1].startswith("event: done")

    def test_event_handler_process_response_when_break_at_the_end(self):
        handler = self.MyAssistantEventhHandler()

        response_len = len(main_stream_response)
        indices_around_new_lines = list(range(response_len - 5, response_len + 1))

        handler.initialize(
            # the numbers of the index around the new line characters, middle of the event, or at the end
            self.break_main_stream_response(indices_around_new_lines, main_stream_response),
            self.mock_callable,
        )
        count = 0
        all_event_str: List[str] = []
        for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1
        assert count == main_stream_response.count("event:")
        assert all_event_str[-1].startswith("event: done")

    def test_event_handler_chain_responses(self):
        # Test if the event handler can have the second stream followed by the first one.
        handler = self.MyAssistantEventhHandler()
        handler.initialize(convert_to_byte_iterator(main_stream_response), self.mock_callable)
        handler.initialize(
            convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response), self.mock_callable
        )
        count = 0
        all_event_str: List[str] = []
        for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1

        assert count == main_stream_response.count("event:") + fetch_current_datetime_and_weather_stream_response.count(
            "event:"
        )
        assert all_event_str[-1].startswith("event: done")

    def test_event_handler_reusable(self):
        # Test if the event handler can be reused after a stream is done.
        handler = self.MyAssistantEventhHandler()
        handler.initialize(convert_to_byte_iterator(main_stream_response), self.mock_callable)
        count = 0
        all_event_str: List[str] = []
        for event_str in handler:
            assert event_str.startswith("event:")

        handler.initialize(
            convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response), self.mock_callable
        )

        for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1

        # Fixed: this previously asserted only the truthiness of the fixture's
        # "event:" count (a no-op); it must compare against the consumed count.
        assert count == fetch_current_datetime_and_weather_stream_response.count("event:")
        assert all_event_str[-1].startswith("event: done")

    def test_event_handler_with_split_chinese_char(self):
        # A multi-byte UTF-8 character is split across chunk boundaries; the
        # handler must reassemble it instead of failing to decode.
        response_bytes_split_chinese_char: List[bytes] = [
            b'event: thread.message.delta\ndata: data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"\xe5',
            b"\xa4",
            b'\xa9"}}]}}\n\n',
            b'event: thread.message.delta\ndata: data: {"id":"msg_02","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}}}\n\nevent: done\ndata: [DONE]\n\n',
        ]

        handler = self.MyAssistantEventhHandler()

        handler.initialize(
            # the numbers of the index around the new line characters, middle of the event, or at the end
            iter(response_bytes_split_chinese_char),
            self.mock_callable,
        )
        count = 0
        all_event_str: List[str] = []
        for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1
        assert count == 3
        assert all_event_str[-1].startswith("event: done")


class TestAssistantEventHandler:

    # Event types whose payloads deserialize to plain strings rather than models.
    deserializable_events = [
        AssistantStreamEvent.THREAD_CREATED.value,
        AssistantStreamEvent.ERROR.value,
        AssistantStreamEvent.DONE.value,
    ]

    class MyAssistantEventHandler(AssistantEventHandler[None]):
        pass

    @patch("azure.ai.assistants.models._patch._parse_event")
    def test_tool_calls(self, mock_parse_event: Mock):
        # Test if the event type and status are met, submit function calls.
        submit_tool_outputs = Mock()
        handler = self.MyAssistantEventHandler()

        handler.initialize(convert_to_byte_iterator("event\n\n"), submit_tool_outputs)

        event_obj = ThreadRun({})
        event_obj.status = "requires_action"
        event_obj.required_action = SubmitToolOutputsAction({})
        mock_parse_event.return_value = ("", event_obj)

        for _ in handler:
            handler.until_done()

        assert mock_parse_event.call_count == 1
        assert mock_parse_event.call_args[0][0] == "event"
        assert submit_tool_outputs.call_count == 1
        assert submit_tool_outputs.call_args[0] == (event_obj, handler)

    @patch("azure.ai.assistants.models._patch.AssistantEventHandler.on_unhandled_event")
    @pytest.mark.parametrize("event_type", [e.value for e in AssistantStreamEvent])
    def test_parse_event(self, mock_on_unhandled_event: Mock, event_type: str):
        # Make sure all the event types defined in AssistantStreamEvent are deserializable except Created, Done, and Error
        # And ensure handle_event is never raised.

        handler = self.MyAssistantEventHandler()
        event_data_str = f"event: {event_type}\ndata: {{}}"
        _, event_obj, _ = handler._process_event(event_data_str)

        if event_type in self.deserializable_events:
            assert isinstance(event_obj, str)
        else:
            assert not isinstance(event_obj, str)

        # The only event we are not handling today is CREATED which is never sent by backend.
        if event_type == AssistantStreamEvent.THREAD_CREATED.value:
            assert mock_on_unhandled_event.call_count == 1
        else:
            assert mock_on_unhandled_event.call_count == 0


class TestParseEvent:

    def test_parse_event_thread_run_created(self):
        event_data_str = 'event: thread.run.created\ndata: {"id": "123"}'
        event_type, event_obj = _parse_event(event_data_str)
        assert event_type == AssistantStreamEvent.THREAD_RUN_CREATED.value
        assert isinstance(event_obj, ThreadRun)
        assert event_obj.id == "123"

    def test_parse_event_thread_run_step_created(self):
        event_data_str = 'event: thread.run.step.created\ndata: {"id": "456"}'
        event_type, event_obj = _parse_event(event_data_str)
        assert event_type == AssistantStreamEvent.THREAD_RUN_STEP_CREATED.value
        assert isinstance(event_obj, RunStep)
        assert event_obj.id == "456"

    def test_parse_event_thread_message_created(self):
        event_data_str = 'event: thread.message.created\ndata: {"id": "789"}'
        event_type, event_obj = _parse_event(event_data_str)
        assert event_type == AssistantStreamEvent.THREAD_MESSAGE_CREATED.value
        assert isinstance(event_obj, ThreadMessage)
        assert event_obj.id == "789"

    def test_parse_event_thread_message_delta(self):
        event_data_str = 'event: thread.message.delta\ndata: {"id": "101"}'
        event_type, event_obj = _parse_event(event_data_str)
        assert event_type == AssistantStreamEvent.THREAD_MESSAGE_DELTA.value
        assert isinstance(event_obj, MessageDeltaChunk)
        assert event_obj.id == "101"

    def test_parse_event_thread_run_step_delta(self):
        event_data_str = 'event: thread.run.step.delta\ndata: {"id": "202"}'
        event_type, event_obj = _parse_event(event_data_str)
        assert event_type == AssistantStreamEvent.THREAD_RUN_STEP_DELTA.value
        assert isinstance(event_obj, RunStepDeltaChunk)
        assert event_obj.id == "202"

    def test_parse_event_invalid_event_type(self):
        # Unknown event types fall through with the raw data string preserved.
        event_data_str = 'event: invalid.event\ndata: {"id": "303"}'
        event_type, event_obj = _parse_event(event_data_str)
        assert event_type == "invalid.event"
        assert event_obj == "{'id': '303'}"

    def test_parse_event_no_event_type(self):
        event_data_str = 'data: {"id": "404"}'
        with pytest.raises(ValueError):
            _parse_event(event_data_str)

    def test_parse_event_invalid_json(self):
        # Undecodable payloads are returned as the raw string, not raised.
        event_data_str = "event: thread.run.created\ndata: invalid_json"
        event_type, event_obj = _parse_event(event_data_str)
        assert event_type == AssistantStreamEvent.THREAD_RUN_CREATED.value
        assert event_obj == "invalid_json"
class TestBaseAsyncAssistantEventHandler:
    """Tests for BaseAsyncAssistantEventHandler's byte-chunk re-assembly."""

    class MyAssistantEventhHandler(BaseAsyncAssistantEventHandler[str]):
        """Minimal handler that returns each raw event string unchanged."""

        async def _process_event(self, event_data_str: str) -> str:
            return event_data_str

    async def break_main_stream_response(self, indices: List[int], response: str):
        """Yield *response* as byte chunks split at each position in *indices*."""
        previous_index = 0
        for index in indices:
            yield response[previous_index:index].encode()
            previous_index = index
        yield response[previous_index:].encode()

    async def mock_callable(self, _: ThreadRun, __: BaseAsyncAssistantEventHandler[str]) -> None:
        """No-op stand-in for the submit-tool-outputs callback."""
        pass

    @pytest.mark.asyncio
    async def test_event_handler_process_response_when_break_around_event_separators(self):
        # events are split into multiple chunks.
        # Each chunk might contain more than one or incomplete response.
        # Test the chunks are broken around the event separators which are "\n\n"
        handler = self.MyAssistantEventhHandler()
        new_line_indices = [i for i in range(len(main_stream_response)) if main_stream_response.startswith("\n\n", i)]

        indices_around_new_lines = [i + offset for i, offset in zip(new_line_indices, [0, -1, 1, 2, 3, 4, 5])]
        handler.initialize(
            self.break_main_stream_response(indices_around_new_lines, main_stream_response), self.mock_callable
        )
        count = 0
        all_event_str: List[str] = []
        async for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1
        assert count == main_stream_response.count("event:")
        assert all_event_str[-1].startswith("event: done")

    @pytest.mark.asyncio
    async def test_event_handler_process_response_when_break_at_the_start(self):
        handler = self.MyAssistantEventhHandler()

        handler.initialize(
            # the numbers of the index around the new line characters, middle of the event, or at the end
            self.break_main_stream_response([2], main_stream_response),
            self.mock_callable,
        )
        count = 0
        all_event_str: List[str] = []
        async for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1
        assert count == main_stream_response.count("event:")
        assert all_event_str[-1].startswith("event: done")

    @pytest.mark.asyncio
    async def test_event_handler_process_response_when_break_at_the_end(self):
        handler = self.MyAssistantEventhHandler()

        response_len = len(main_stream_response)
        indices_around_new_lines = list(range(response_len - 5, response_len + 1))

        handler.initialize(
            # the numbers of the index around the new line characters, middle of the event, or at the end
            self.break_main_stream_response(indices_around_new_lines, main_stream_response),
            self.mock_callable,
        )
        count = 0
        all_event_str: List[str] = []
        async for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1
        assert count == main_stream_response.count("event:")
        assert all_event_str[-1].startswith("event: done")

    @pytest.mark.asyncio
    async def test_event_handler_chain_responses(self):
        # Test if the event handler can have the second stream followed by the first one.
        handler = self.MyAssistantEventhHandler()
        handler.initialize(convert_to_byte_iterator(main_stream_response), self.mock_callable)
        handler.initialize(
            convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response), self.mock_callable
        )
        count = 0
        all_event_str: List[str] = []
        async for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1

        assert count == main_stream_response.count("event:") + fetch_current_datetime_and_weather_stream_response.count(
            "event:"
        )
        assert all_event_str[-1].startswith("event: done")

    @pytest.mark.asyncio
    async def test_event_handler_reusable(self):
        # Test if the event handler can be reused after a stream is done.
        handler = self.MyAssistantEventhHandler()
        handler.initialize(convert_to_byte_iterator(main_stream_response), self.mock_callable)
        count = 0
        all_event_str: List[str] = []
        async for event_str in handler:
            assert event_str.startswith("event:")

        handler.initialize(
            convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response), self.mock_callable
        )

        async for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1

        # BUG FIX: the original asserted the (always truthy) constant
        # `fetch_current_datetime_and_weather_stream_response.count("event:")`
        # instead of comparing it against the number of events consumed
        # from the second stream.
        assert count == fetch_current_datetime_and_weather_stream_response.count("event:")
        assert all_event_str[-1].startswith("event: done")

    @pytest.mark.asyncio
    async def test_event_handler_with_split_chinese_char(self):
        # A multi-byte UTF-8 character may be split across chunk boundaries;
        # the handler must buffer partial bytes instead of decoding them early.
        response_bytes_split_chinese_char: List[bytes] = [
            b'event: thread.message.delta\ndata: data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"\xe5',
            b"\xa4",
            b'\xa9"}}]}}\n\n',
            b'event: thread.message.delta\ndata: data: {"id":"msg_02","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}}}\n\nevent: done\ndata: [DONE]\n\n',
        ]

        handler = self.MyAssistantEventhHandler()

        handler.initialize(
            # the numbers of the index around the new line characters, middle of the event, or at the end
            async_bytes_iter(response_bytes_split_chinese_char),
            self.mock_callable,
        )
        count = 0
        all_event_str: List[str] = []
        async for event_str in handler:
            assert event_str.startswith("event:")
            all_event_str.append(event_str)
            count += 1
        assert count == 3
        assert all_event_str[-1].startswith("event: done")
@patch("azure.ai.assistants.models._patch._parse_event") + async def test_tool_calls(self, mock_parse_event: AsyncMock): + # Test if the event type and status are met, submit function calls. + submit_tool_outputs = AsyncMock() + handler = self.MyAssistantEventHandler() + + handler.initialize(convert_to_byte_iterator("event\n\n"), submit_tool_outputs) + + event_obj = ThreadRun({}) + event_obj.status = "requires_action" + event_obj.required_action = SubmitToolOutputsAction({}) + mock_parse_event.return_value = ("", event_obj) + + async for _ in handler: + await handler.until_done() + + assert mock_parse_event.call_count == 1 + assert mock_parse_event.call_args[0][0] == "event" + assert submit_tool_outputs.call_count == 1 + assert submit_tool_outputs.call_args[0] == (event_obj, handler) + + @pytest.mark.asyncio + @patch("azure.ai.assistants.models._patch.AsyncAssistantEventHandler.on_unhandled_event") + @pytest.mark.parametrize("event_type", [e.value for e in AssistantStreamEvent]) + async def test_parse_event(self, mock_on_unhandled_event: AsyncMock, event_type: str): + # Make sure all the event types defined in AssistantStreamEvent are deserializable except Created, Done, and Error + # And ensure handle_event is never raised. + + handler = self.MyAssistantEventHandler() + event_data_str = f"event: {event_type}\ndata: {{}}" + _, event_obj, _ = await handler._process_event(event_data_str) + + if event_type in self.deserializable_events: + assert isinstance(event_obj, str) + else: + assert not isinstance(event_obj, str) + + # The only event we are not handling today is CREATED which is never sent by backend. 
+ if event_type == AssistantStreamEvent.THREAD_CREATED.value: + assert mock_on_unhandled_event.call_count == 1 + else: + assert mock_on_unhandled_event.call_count == 0 diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py new file mode 100644 index 000000000000..55b8a02b7f2c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py @@ -0,0 +1,3227 @@ +# pylint: disable=too-many-lines,line-too-long,useless-suppression +# # ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +# cSpell:disable +from typing import Any, Optional + +import os +import datetime +import json +import logging +import tempfile +import sys +import time +import pytest +import functools +import io +import user_functions + +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + ThreadMessage, + RunStep, +) +from azure.core.exceptions import HttpResponseError +from devtools_testutils import ( + AzureRecordedTestCase, + EnvironmentVariableLoader, + recorded_by_proxy, +) +from azure.ai.assistants.models import ( + AssistantEventHandler, + AssistantStreamEvent, + AssistantThread, + AzureAISearchTool, + AzureFunctionStorageQueue, + AzureFunctionTool, + CodeInterpreterTool, + CodeInterpreterToolResource, + FilePurpose, + FileSearchTool, + FileSearchToolCallContent, + FileSearchToolResource, + FunctionTool, + MessageAttachment, + MessageDeltaChunk, + MessageTextContent, + MessageRole, + OpenAIFile, + ResponseFormatJsonSchema, + ResponseFormatJsonSchemaType, + RunAdditionalFieldList, + RunStepDeltaChunk, + RunStepDeltaToolCallObject, + RunStepFileSearchToolCall, + RunStepFileSearchToolCallResult, + RunStepFileSearchToolCallResults, + RunStatus, + ThreadMessageOptions, + ThreadRun, + ToolResources, + ToolSet, + VectorStore, + VectorStoreConfigurations, + 
# Set to True to enable SDK logging
LOGGING_ENABLED = True

if LOGGING_ENABLED:
    # Create a logger for the 'azure' SDK
    # See https://docs.python.org/3/library/logging.html
    logger = logging.getLogger("azure")
    logger.setLevel(logging.DEBUG)  # INFO or DEBUG

    # Configure a console output
    handler = logging.StreamHandler(stream=sys.stdout)
    logger.addHandler(handler)


# BUG FIX: the loader prefix must match the underscore-only kwarg names
# declared below; the original passed "azure_ai.assistants" (with a dot),
# which can never line up with e.g.
# azure_ai_assistants_assistants_tests_project_connection_string.
assistantClientPreparer = functools.partial(
    EnvironmentVariableLoader,
    "azure_ai_assistants",
    azure_ai_assistants_assistants_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm",
    azure_ai_assistants_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md",
    azure_ai_assistants_assistants_tests_storage_queue="https://foobar.queue.core.windows.net",
    azure_ai_assistants_assistants_tests_search_index_name="sample_index",
    azure_ai_assistants_assistants_tests_search_connection_name="search_connection_name",
)


# create tool for assistant use
def fetch_current_datetime_live():
    """
    Get the current time as a JSON string.

    :return: The current live time, JSON-encoded.
    :rtype: str
    """
    current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    time_json = json.dumps({"current_time": current_datetime})
    return time_json


# create tool for assistant use
def fetch_current_datetime_recordings():
    """
    Get the current time as a JSON string.

    :return: Static time string so that test recordings work.
    :rtype: str
    """
    time_json = json.dumps({"current_time": "2024-10-10 12:30:19"})
    return time_json


# Statically defined user functions for fast reference
user_functions_recording = {fetch_current_datetime_recordings}
user_functions_live = {fetch_current_datetime_live}


# The test class name needs to start with "Test" to get collected by pytest
class TestAssistantClient(AzureRecordedTestCase):

    # helper function: create client using environment variables
    def create_client(self, **kwargs):
        """Build an AssistantsClient from the preparer-injected kwargs."""
        # BUG FIX: the preparer injects underscore-only kwarg names; the
        # original popped a dotted key ("azure_ai.assistants_...") that can
        # never be present, so every test would fail with KeyError.
        connection_string = kwargs.pop("azure_ai_assistants_assistants_tests_project_connection_string")
        credential = self.get_credential(AssistantsClient, is_async=False)

        # create and return client
        client = AssistantsClient.from_connection_string(
            credential=credential,
            conn_str=connection_string,
        )

        return client

    def _get_data_file(self) -> str:
        """Return the test file name."""
        return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md")

    # **********************************************************************************
    #
    # HAPPY PATH SERVICE TESTS - assistant APIs
    #
    # **********************************************************************************

    @assistantClientPreparer()
    @recorded_by_proxy
    def test_create_client(self, **kwargs):
        """test client creation"""

        # create client
        client = self.create_client(**kwargs)
        assert isinstance(client, AssistantsClient)

        # close client
        client.close()

    @assistantClientPreparer()
    @recorded_by_proxy
    def test_create_delete_assistant(self, **kwargs):
        """test assistant creation and deletion"""
        # create client
        with self.create_client(**kwargs) as client:
            assert isinstance(client, AssistantsClient)
            print("Created client")
            self._do_test_create_assistant(client=client, body=None, functions=None)
def test_create_assistant_with_body(self, **kwargs): + """test assistant creation with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + print("Created client") + + # create body for assistant and call helper function + body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} + self._do_test_create_assistant(client=client, body=body, functions=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_iobytes(self, **kwargs): + """test assistant creation with body: IO[bytes]""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + print("Created client") + + # create body for assistant and call helper function + body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} + binary_body = json.dumps(body).encode("utf-8") + self._do_test_create_assistant(client=client, body=io.BytesIO(binary_body), functions=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_tools(self, **kwargs): + """test assistant creation with tools""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # initialize assistant functions + functions = FunctionTool(functions=user_functions_recording) + self._do_test_create_assistant(client=client, body=None, functions=functions) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_tools_and_resources(self, **kwargs): + """test assistant creation with tools and resources""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # initialize assistant functions + functions = FunctionTool(functions=user_functions_recording) + self._do_test_create_assistant(client=client, body=None, functions=functions) + + def 
_do_test_create_assistant(self, client, body, functions): + """helper function for creating assistant with different body inputs""" + + # create assistant + if body: + assistant = client.assistants.create_assistant(body=body) + elif functions: + assistant = client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=functions.definitions, + ) + assert assistant.tools + assert assistant.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + else: + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + assert assistant.name == "my-assistant" + assert assistant.model == "gpt-4o" + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_assistant(self, **kwargs): + """test assistant update without body""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_assistant(client=client, use_body=False, use_io=False) + + @assistantClientPreparer() + @pytest.mark.skip("Update assistant with body is failing") + @recorded_by_proxy + def test_update_assistant_with_body(self, **kwargs): + """test assistant update with body: JSON""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_assistant(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @pytest.mark.skip("Update assistant with body is failing") + @recorded_by_proxy + def test_update_assistant_with_iobytes(self, **kwargs): + """test assistant update with body: IO[bytes]""" + + # 
create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_assistant(client=client, use_body=True, use_io=True) + + def _do_test_update_assistant(self, client, use_body, use_io): + """helper function for updating assistant with different body inputs""" + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + + # update assistant + if use_body: + body = {"assistant_id": assistant.id, "name": "my-assistant2"} + if use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + assistant = client.assistants.update_assistant(assistant_id=assistant.id, body=body) + else: + assistant = client.assistants.update_assistant(assistant_id=assistant.id, name="my-assistant2") + assert assistant.name + assert assistant.name == "my-assistant2" + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @pytest.mark.skip("Does not perform consistently on a shared resource") + @recorded_by_proxy + def test_assistant_list(self, **kwargs): + """test list assistants""" + # create client and ensure there are no previous assistants + with self.create_client(**kwargs) as client: + list_length = client.assistants.list_assistants().data.__len__() + + # create assistant and check that it appears in the list + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert client.assistants.list_assistants().data.__len__() == list_length + 1 + assert client.assistants.list_assistants().data[0].id == assistant.id + + # create second assistant and check that it appears in the list + assistant2 = client.assistants.create_assistant(model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant") + assert 
client.assistants.list_assistants().data.__len__() == list_length + 2 + assert ( + client.assistants.list_assistants().data[0].id == assistant.id or client.assistants.list_assistants().data[1].id == assistant.id + ) + + # delete assistants and check list + client.assistants.delete_assistant(assistant.id) + assert client.assistants.list_assistants().data.__len__() == list_length + 1 + assert client.assistants.list_assistants().data[0].id == assistant2.id + + client.assistants.delete_assistant(assistant2.id) + assert client.assistants.list_assistants().data.__len__() == list_length + print("Deleted assistants") + + # ********************************************************************************** + # + # HAPPY PATH SERVICE TESTS - Thread APIs + # + # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread(self, **kwargs): + """test creating thread""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_with_metadata(self, **kwargs): + """test creating thread with no body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + self._do_test_create_thread(client=client, body=None) + + @assistantClientPreparer() + @recorded_by_proxy + def 
test_create_thread_with_body(self, **kwargs): + """test creating thread with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for thread and call helper function + body = { + "metadata": {"key1": "value1", "key2": "value2"}, + } + self._do_test_create_thread(client=client, body=body) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_with_iobytes(self, **kwargs): + """test creating thread with body: IO[bytes]""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for thread and call helper function + body = { + "metadata": {"key1": "value1", "key2": "value2"}, + } + binary_body = json.dumps(body).encode("utf-8") + self._do_test_create_thread(client=client, body=io.BytesIO(binary_body)) + + def _do_test_create_thread(self, client, body): + """helper function for creating thread with different body inputs""" + # create thread + if body: + thread = client.assistants.create_thread(body=body) + else: + thread = client.assistants.create_thread(metadata={"key1": "value1", "key2": "value2"}) + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + @assistantClientPreparer() + @recorded_by_proxy + def test_get_thread(self, **kwargs): + """test getting thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # get thread + thread2 = 
client.assistants.get_thread(thread.id) + assert thread2.id + assert thread.id == thread2.id + print("Got thread, thread ID", thread2.id) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_thread(self, **kwargs): + """test updating thread without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # update thread + thread = client.assistants.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_thread_with_metadata(self, **kwargs): + """test updating thread without body""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # set metadata + metadata = {"key1": "value1", "key2": "value2"} + + # create thread + thread = client.assistants.create_thread(metadata=metadata) + assert thread.id + print("Created thread, thread ID", thread.id) + + # set metadata + metadata2 = {"key1": "value1", "key2": "newvalue2"} + + # update thread + thread = client.assistants.update_thread(thread.id, metadata=metadata2) + assert thread.metadata == {"key1": "value1", "key2": "newvalue2"} + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_thread_with_body(self, **kwargs): + 
"""test updating thread with body: JSON""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # set body and run test + body = {"metadata": {"key1": "value1", "key2": "value2"}} + self._do_test_update_thread(client=client, body=body) + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_thread_with_iobytes(self, **kwargs): + """test updating thread with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # set body and run test + body = {"metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + io_body = io.BytesIO(binary_body) + self._do_test_update_thread(client=client, body=io_body) + + def _do_test_update_thread(self, client, body): + """helper function for updating thread with different body inputs""" + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # update thread + if body: + thread = client.assistants.update_thread(thread.id, body=body) + else: + metadata = {"key1": "value1", "key2": "value2"} + thread = client.assistants.update_thread(thread.id, metadata=metadata) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + @assistantClientPreparer() + @recorded_by_proxy + def test_delete_thread(self, **kwargs): + """test deleting thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete 
thread + deletion_status = client.assistants.delete_thread(thread.id) + assert deletion_status.id == thread.id + assert deletion_status.deleted == True + print("Deleted thread, thread ID", deletion_status.id) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Message APIs + # # + # # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_message(self, **kwargs): + """test creating message in a thread without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_message(client=client, body=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_message_with_body(self, **kwargs): + """test creating message in a thread with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for message and call helper function + body = {"role": "user", "content": "Hello, tell me a joke"} + self._do_test_create_message(client=client, body=body) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_message_with_iobytes(self, **kwargs): + """test creating message in a thread with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for message and call helper function + body = {"role": "user", "content": "Hello, tell me a joke"} + binary_body = json.dumps(body).encode("utf-8") + self._do_test_create_message(client=client, body=io.BytesIO(binary_body)) + + def _do_test_create_message(self, client, body): + """helper function for creating message with different body inputs""" + + # create 
thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + if body: + message = client.assistants.create_message(thread_id=thread.id, body=body) + else: + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_multiple_messages(self, **kwargs): + """test creating multiple messages in a thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create messages + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + message2 = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) + assert message2.id + print("Created message, message ID", message2.id) + message3 = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) + assert message3.id + print("Created message, message ID", message3.id) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_list_messages(self, **kwargs): + """test listing messages in a thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, 
AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check that initial message list is empty + messages0 = client.assistants.list_messages(thread_id=thread.id) + print(messages0.data) + assert messages0.data.__len__() == 0 + + # create messages and check message list for each one + message1 = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message1.id + print("Created message, message ID", message1.id) + messages1 = client.assistants.list_messages(thread_id=thread.id) + assert messages1.data.__len__() == 1 + assert messages1.data[0].id == message1.id + + message2 = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) + assert message2.id + print("Created message, message ID", message2.id) + messages2 = client.assistants.list_messages(thread_id=thread.id) + assert messages2.data.__len__() == 2 + assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + + message3 = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) + assert message3.id + print("Created message, message ID", message3.id) + messages3 = client.assistants.list_messages(thread_id=thread.id) + assert messages3.data.__len__() == 3 + assert ( + messages3.data[0].id == message3.id + or messages3.data[1].id == message2.id + or messages3.data[2].id == message2.id + ) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_get_message(self, **kwargs): + 
"""test getting message in a thread""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # get message + message2 = client.assistants.get_message(thread_id=thread.id, message_id=message.id) + assert message2.id + assert message.id == message2.id + print("Got message, message ID", message.id) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_message(self, **kwargs): + """test updating message in a thread without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_message(client=client, body=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_message_with_body(self, **kwargs): + """test updating message in a thread with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for message and call helper function + body = {"metadata": {"key1": "value1", "key2": "value2"}} + self._do_test_update_message(client=client, body=body) + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_message_with_iobytes(self, **kwargs): + """test updating message in a thread with body: IO[bytes]""" + # create client + 
with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for message and call helper function + body = {"metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + self._do_test_update_message(client=client, body=io.BytesIO(binary_body)) + + def _do_test_update_message(self, client, body): + """helper function for updating message with different body inputs""" + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # update message + if body: + message = client.assistants.update_message(thread_id=thread.id, message_id=message.id, body=body) + else: + message = client.assistants.update_message( + thread_id=thread.id, message_id=message.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert message.metadata == {"key1": "value1", "key2": "value2"} + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Run APIs + # # + # # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_run(self, **kwargs): + """test creating run""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = 
client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_run_with_metadata(self, **kwargs): + """test creating run without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_run(client=client, use_body=False, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_run_with_body(self, **kwargs): + """test creating run with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_run(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_run_with_iobytes(self, **kwargs): + """test creating run with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_run(client=client, use_body=True, use_io=True) + + def _do_test_create_run(self, client, use_body, use_io=False): + """helper function for creating run with different body inputs""" + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + if use_body: + body = {"assistant_id": assistant.id, "metadata": {"key1": "value1", "key2": "value2"}} + if use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + run = 
client.assistants.create_run(thread_id=thread.id, body=body) + else: + run = client.assistants.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_get_run(self, **kwargs): + """test getting run""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # get run + run2 = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + assert run2.id + assert run.id == run2.id + print("Got run, run ID", run2.id) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_run_status(self, **kwargs): + """test run status""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + 
assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + print("Run status:", run.status) + + assert run.status in ["cancelled", "failed", "completed", "expired"] + print("Run completed with status:", run.status) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_run(self, **kwargs): + """test updating run without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # update run + while run.status in ["queued", "in_progress"]: + # wait for a second + time.sleep(1) + run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = 
client.assistants.update_run( + thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.metadata == {"key1": "value1", "key2": "value2"} + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_run_with_metadata(self, **kwargs): + """test updating run without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_update_run(client=client, body=None) + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_run_with_body(self, **kwargs): + """test updating run with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for run and call helper function + body = {"metadata": {"key1": "value1", "key2": "newvalue2"}} + self._do_test_update_run(client=client, body=body) + + @assistantClientPreparer() + @recorded_by_proxy + def test_update_run_with_iobytes(self, **kwargs): + """test updating run with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create body for run and call helper function + body = {"metadata": {"key1": "value1", "key2": "newvalue2"}} + binary_body = json.dumps(body).encode("utf-8") + self._do_test_update_run(client=client, body=io.BytesIO(binary_body)) + + def _do_test_update_run(self, client, body): + """helper function for updating run with different body inputs""" + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", 
thread.id) + + # create run + run = client.assistants.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # update run + while run.status in ["queued", "in_progress"]: + time.sleep(5) + run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + if body: + run = client.assistants.update_run(thread_id=thread.id, run_id=run.id, body=body) + else: + run = client.assistants.update_run( + thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "newvalue2"} + ) + assert run.metadata == {"key1": "value1", "key2": "newvalue2"} + + # delete assistant + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_run(self, **kwargs): + """test submitting tool outputs to run without body""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_run(client=client, use_body=False, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_run_with_body(self, **kwargs): + """test submitting tool outputs to run with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_run(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): + """test submitting tool outputs to run with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_run(client=client, use_body=True, use_io=True) + + def 
_do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): + """helper function for submitting tool outputs to run with different body inputs""" + + # Initialize assistant tools + functions = FunctionTool(user_functions_recording) + # code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + # toolset.add(code_interpreter) + + # create assistant + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + 
client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + if use_body: + body = {"tool_outputs": tool_outputs} + if use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + client.assistants.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, body=body) + else: + client.assistants.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + print("Messages: ") + messages = client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + # if user_functions_live is used, the time will be the current time + # since user_functions_recording is used, the time will be 12:30 + assert "12:30" in tool_message + print("Used tool_outputs") + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_create_parallel_tool_thread_true(self, **kwargs): + """Test creation of parallel runs.""" + self._do_test_create_parallel_thread_runs(True, True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_create_parallel_tool_thread_false(self, **kwargs): + """Test creation of parallel runs.""" + self._do_test_create_parallel_thread_runs(False, True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_create_parallel_tool_run_true(self, **kwargs): + """Test creation of parallel runs.""" + 
self._do_test_create_parallel_thread_runs(True, False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_create_parallel_tool_run_false(self, **kwargs): + """Test creation of parallel runs.""" + self._do_test_create_parallel_thread_runs(False, False, **kwargs) + + def _wait_for_run(self, client, run, timeout=1): + """Wait while run will get to terminal state.""" + while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS, RunStatus.REQUIRES_ACTION]: + time.sleep(timeout) + run = client.assistants.get_run(thread_id=run.thread_id, run_id=run.id) + return run + + def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_run, **kwargs): + """Test creation of parallel runs.""" + + # create client + client = self.create_client( + **kwargs, + ) + assert isinstance(client, AssistantsClient) + + # Initialize assistant tools + functions = FunctionTool(functions=user_functions_recording) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + assistant = client.assistants.create_assistant( + model="gpt-4", + name="my-assistant", + instructions="You are helpful assistant", + toolset=toolset, + ) + assert assistant.id + + message = ThreadMessageOptions( + role="user", + content="Hello, what time is it?", + ) + + if create_thread_run: + run = client.assistants.create_thread_and_run( + assistant_id=assistant.id, + parallel_tool_calls=use_parallel_runs, + ) + run = self._wait_for_run(client, run) + else: + thread = client.assistants.create_thread(messages=[message]) + assert thread.id + + run = client.assistants.create_and_process_run( + thread_id=thread.id, + assistant_id=assistant.id, + parallel_tool_calls=use_parallel_runs, + ) + assert run.id + assert run.status == RunStatus.COMPLETED, run.last_error.message + assert run.parallel_tool_calls == use_parallel_runs + + assert 
client.assistants.delete_assistant(assistant.id).deleted, "The assistant was not deleted" + messages = client.assistants.list_messages(thread_id=run.thread_id) + assert len(messages.data), "The data from the assistant was not received." + + """ + # DISABLED: rewrite to ensure run is not complete when cancel_run is called + @assistantClientPreparer() + @recorded_by_proxy + def test_cancel_run(self, **kwargs): + '''test cancelling run''' + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check status and cancel + assert run.status in ["queued", "in_progress", "requires_action"] + client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + + while run.status in ["queued", "cancelling"]: + time.sleep(1) + run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + print("Current run status:", run.status) + assert run.status == "cancelled" + print("Run cancelled") + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + client.close() + """ + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_and_run(self, **kwargs): + """Test creating thread and run""" + # create client + with 
self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_thread_and_run(client=client, use_body=False, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_and_run_with_body(self, **kwargs): + """Test creating thread and run with body: JSON""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_thread_and_run(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_thread_and_run_with_iobytes(self, **kwargs): + """Test creating thread and run with body: IO[bytes]""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_create_thread_and_run(client=client, use_body=True, use_io=True) + + def _do_test_create_thread_and_run(self, client, use_body, use_io): + """helper function for creating thread and run with different body inputs""" + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create run + if use_body: + body = { + "assistant_id": assistant.id, + "metadata": {"key1": "value1", "key2": "value2"}, + } + if use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + run = client.assistants.create_thread_and_run(body=body) + assert run.metadata == {"key1": "value1", "key2": "value2"} + else: + run = client.assistants.create_thread_and_run(assistant_id=assistant.id) + + # create thread and run + assert run.id + assert run.thread_id + print("Created run, run ID", run.id) + + # get thread + thread = client.assistants.get_thread(run.thread_id) + assert thread.id + print("Created thread, thread ID", thread.id) + + # check status + assert run.status in [ + 
"queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + # assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @pytest.mark.skip("Working on recordings") + @recorded_by_proxy + def test_list_run_step(self, **kwargs): + """Test listing run steps.""" + + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + steps = client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + # commenting assertion out below, do we know exactly when run starts? 
+ # assert steps['data'].__len__() == 0 + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] + print("Run status:", run.status) + if run.status != "queued": + steps = client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + print("Steps:", steps) + assert steps["data"].__len__() > 0 + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_get_run_step(self, **kwargs): + """Test getting run step.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + if run.status == "failed": + assert run.last_error + print(run.last_error) + print("FAILED HERE") + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + if run.status == "failed": + assert run.last_error + print(run.last_error) + print("FAILED HERE") + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] + print("Run status:", run.status) + + # list steps, check that get_run_step works with first step_id + steps = client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + assert steps["data"].__len__() > 0 + step = steps["data"][0] + get_step = client.assistants.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + assert step == get_step + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Streaming APIs + # # + # # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_stream(self, **kwargs): + """Test creating stream.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + 
+ # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" + ) + assert message.id + print("Created message, message ID", message.id) + + # create stream + with client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + for event_type, event_data, _ in stream: + assert ( + isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) + or event_type == AssistantStreamEvent.DONE + ) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + # TODO create_stream doesn't work with body -- fails on for event_type, event_data : TypeError: 'ThreadRun' object is not an iterator + @assistantClientPreparer() + @recorded_by_proxy + def test_create_stream_with_body(self, **kwargs): + """Test creating stream with body.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create body for stream + body = {"assistant_id": assistant.id, "stream": True} + + # create stream + with client.assistants.create_stream(thread_id=thread.id, body=body, stream=True) as stream: + + for event_type, event_data, _ in stream: + print("event type: event data") + print(event_type, event_data) + assert ( + isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) + or event_type == AssistantStreamEvent.DONE + ) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_stream_with_iobytes(self, **kwargs): + """Test creating stream with body: IO[bytes].""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create body for stream + body = {"assistant_id": assistant.id, "stream": True} + binary_body = json.dumps(body).encode("utf-8") + + # create stream + with client.assistants.create_stream(thread_id=thread.id, body=io.BytesIO(binary_body), stream=True) as stream: + for event_type, event_data, _ in stream: + assert ( + isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) + or event_type == AssistantStreamEvent.DONE + ) + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_stream(self, **kwargs): + """Test submitting tool outputs to stream.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_stream(client=client, use_body=False, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_stream_with_body(self, **kwargs): + """Test submitting tool outputs to stream with body: JSON.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_stream(client=client, use_body=True, use_io=False) + + @assistantClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_stream_with_iobytes(self, **kwargs): + """Test submitting tool outputs to stream with body: IO[bytes].""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + self._do_test_submit_tool_outputs_to_stream(client=client, use_body=True, use_io=True) + + def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): + """helper function for submitting tool outputs to stream with different body inputs""" + + # Initialize assistant tools + 
functions = FunctionTool(functions=user_functions_recording) + + toolset = ToolSet() + toolset.add(functions) + # toolset.add(code_interpreter) + + # create assistant + assistant = client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=functions.definitions, + tool_resources=functions.resources, + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create stream + with client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + for event_type, event_data, _ in stream: + + # Check if tools are needed + if ( + event_type == AssistantStreamEvent.THREAD_RUN_REQUIRES_ACTION + and event_data.required_action.submit_tool_outputs + ): + print("Requires action: submit tool outputs") + tool_calls = event_data.required_action.submit_tool_outputs.tool_calls + + if not tool_calls: + print("No tool calls provided - cancelling run") + client.assistants.cancel_run(thread_id=thread.id, run_id=event_data.id) + break + + # submit tool outputs to stream + tool_outputs = toolset.execute_tool_calls(tool_calls) + + tool_event_handler = AssistantEventHandler() + if tool_outputs: + if use_body: + body = {"tool_outputs": tool_outputs, "stream": True} + if use_io: + binary_body = json.dumps(body).encode("utf-8") + body = io.BytesIO(binary_body) + client.assistants.submit_tool_outputs_to_stream( + thread_id=thread.id, + run_id=event_data.id, + body=body, + event_handler=tool_event_handler, + stream=True, + ) + else: + client.assistants.submit_tool_outputs_to_stream( + thread_id=thread.id, + run_id=event_data.id, + 
tool_outputs=tool_outputs, + event_handler=tool_event_handler, + ) + for tool_event_type, tool_event_data, _ in tool_event_handler: + assert ( + isinstance(tool_event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) + or tool_event_type == AssistantStreamEvent.DONE + ) + + print("Submitted tool outputs to stream") + + print("Stream processing completed") + + # check that messages used the tool + messages = client.assistants.list_messages(thread_id=thread.id) + print("Messages: ", messages) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + # TODO if testing live, uncomment these + # hour12 = time.strftime("%H") + # hour24 = time.strftime("%I") + # minute = time.strftime("%M") + # hour12string = str(hour12)+":"+str(minute) + # hour24string = str(hour24)+":"+str(minute) + # assert hour12string in tool_message or hour24string in tool_message + recorded_time = "12:30" + assert recorded_time in tool_message + print("Used tool_outputs") + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + # client.close() + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - User function APIs + # # + # # ********************************************************************************** + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_string_input(self, **kwargs): + """Test submitting tool outputs to run with function input being a single string.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.fetch_weather}, + content="Hello, what is the weather in New York?", + expected_values=["sunny", "25"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def 
test_tools_with_multiple_strings(self, **kwargs): + """Test submitting tool outputs to run with function input being multiple strings.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.send_email}, + content="Hello, can you send an email to my manager (manager@microsoft.com) with the subject 'thanksgiving' asking when he is OOF?", + possible_values=["email has been sent", "email has been successfully sent"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_integers(self, **kwargs): + """Test submitting tool outputs to run with function input being multiple integers.""" + + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.calculate_sum}, + content="Hello, what is 293 + 243?", + expected_values=["536"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_integer(self, **kwargs): + """Test submitting tool outputs to run with function input being a single integer.""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.convert_temperature}, + content="Hello, what is 32 degrees Celsius in Fahrenheit?", + expected_values=["89.6"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_multiple_dicts(self, **kwargs): + """Test submitting tool outputs to run with function input being multiple dictionaries.""" + # create client + with
self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.merge_dicts}, + content="If I have a dictionary with the key 'name' and value 'John' and another dictionary with the key 'age' and value '25', what is the merged dictionary?", + possible_values=[ + "{'name': 'john', 'age': '25'}", + "{'age': '25', 'name': 'john'}", + '{"name": "john", "age": "25"}', + '{"age": "25", "name": "john"}', + "{'name': 'john', 'age': 25}", + "{'age': 25, 'name': 'john'}", + '"name": "john",\n "age": 25', + '"name": "john",\n "age": "25"', + '"name": "john",\n "age": 25', + ], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_input_string_output_dict(self, **kwargs): + """Test submitting tool outputs to run with function input being one string and output being a dictionary.""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.get_user_info}, + content="What is the name and email of the first user in our database?", + expected_values=["alice", "alice@example.com"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_list(self, **kwargs): + """Test submitting tool outputs to run with function input being a list.""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.longest_word_in_sentences}, + content="Hello, please give me the longest word in the following sentences: 'Hello, how are you?' 
and 'I am good.'", + expected_values=["hello", "good"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def test_tools_with_multiple_dicts2(self, **kwargs): + """Test submitting tool outputs to run with function input being multiple dictionaries.""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # run test with function input, content, and expected/possible values + self._test_tools_with_different_functions( + client=client, + function={user_functions.process_records}, + content="Hello, please process the following records: [{'a': 10, 'b': 20}, {'x': 5, 'y': 15, 'z': 25}, {'m': 35}]", + expected_values=["30", "45", "35"], + ) + + @assistantClientPreparer() + @recorded_by_proxy + def _test_tools_with_different_functions( + self, client, function, content, expected_values=None, possible_values=None + ): + """Helper function to test submitting tool outputs to run with different function inputs.""" + # Initialize assistant tools + functions = FunctionTool(functions=function) + toolset = ToolSet() + toolset.add(functions) + + # create assistant + assistant = client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + toolset=toolset, + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message(thread_id=thread.id, role="user", content=content) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + 
print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + client.assistants.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + print("Messages: ", messages) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + if expected_values: + for value in expected_values: + assert value in tool_message.lower() + if possible_values: + value_used = False + for value in possible_values: + if value in tool_message.lower(): + value_used = True + assert value_used + # assert expected_value in tool_message + print("Used tool_outputs") + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + # # ********************************************************************************** + # # + # # NEGATIVE TESTS + # # + # # 
********************************************************************************** + + ''' + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_invalid_code_interpreter_tool_resource(self, **kwargs): + """test assistant creation with invalid code interpreter tool resource.""" + # create client + with self.create_client(**kwargs) as client: + + # initialize resources + tool_resources = ToolResources() + tool_resources.code_interpreter = CodeInterpreterToolResource() + + exception_message = "" + try: + client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=[], + tool_resources=tool_resources, + ) + except: + print("exception here") + # except ValueError as e: + # exception_message = e.args[0] + else: + print("no exception") + + assert ( + exception_message + == "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" + ) + + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_assistant_with_invalid_file_search_tool_resource(self, **kwargs): + """test assistant creation with invalid file search tool resource.""" + # create client + with self.create_client(**kwargs) as client: + + # initialize resources + tool_resources = ToolResources() + tool_resources.file_search = FileSearchToolResource() + + exception_message = "" + try: + client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", tools=[], tool_resources=tool_resources + ) + except: + print("exception here") + # except ValueError as e: + # exception_message = e.args[0] + else: + print("no exception") + + assert exception_message == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" + ''' + + @assistantClientPreparer() + @pytest.mark.skip("PASSES LIVE ONLY: recordings don't capture DNS lookup errors") + @recorded_by_proxy + def 
test_create_assistant_with_invalid_file_search_tool_resource(self, **kwargs): + """test assistant creation with invalid file search tool resource.""" + # create client + with self.create_client(**kwargs) as client: + + # initialize resources + tool_resources = ToolResources() + tool_resources.file_search = FileSearchToolResource() + + exception_message = "" + try: + client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=[], + tool_resources=tool_resources, + ) + except ValueError as e: + exception_message = e.args[0] + + assert ( + exception_message + == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" + ) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_file_search_add_vector_store(self, **kwargs): + """Test the assistant with file search and vector store creation.""" + + # Create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + print("Created client") + + # Create file search tool + file_search = FileSearchTool() + + # Adjust the file path to be relative to the test file location + file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") + openai_file = client.assistants.upload_file_and_poll(file_path=file_path, purpose="assistants") + print(f"Uploaded file, file ID: {openai_file.id}") + + openai_vectorstore = client.assistants.create_vector_store_and_poll( + file_ids=[openai_file.id], name="my_vectorstore" + ) + print(f"Created vector store, vector store ID: {openai_vectorstore.id}") + + file_search.add_vector_store(openai_vectorstore.id) + + toolset = ToolSet() + toolset.add(file_search) + print("Created toolset and added file search") + + # create assistant + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", 
toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # check assistant tools and vector store resources + assert assistant.tools + assert assistant.tools[0]["type"] == "file_search" + assert assistant.tool_resources + assert assistant.tool_resources["file_search"]["vector_store_ids"][0] == openai_vectorstore.id + + # delete assistant and close client + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store_and_poll(self, **kwargs): + """test create vector store and poll""" + # Create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + print("Created client") + + # Create vector store + body = {"name": "test_vector_store", "metadata": {"key1": "value1", "key2": "value2"}} + try: + vector_store = client.assistants.create_vector_store_and_poll(body=body, sleep_interval=2) + # check correct creation + assert isinstance(vector_store, VectorStore) + assert vector_store.name == "test_vector_store" + assert vector_store.id + assert vector_store.metadata == {"key1": "value1", "key2": "value2"} + assert vector_store.status == "completed" + print(f"Vector store created and polled successfully: {vector_store.id}") + + # throw error if failed to create and poll vector store + except HttpResponseError as e: + print(f"Failed to create and poll vector store: {e}") + raise + + # close client + client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store(self, **kwargs): + """Test the assistant with vector store creation.""" + # Create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + print("Created client") + + # Create vector store + body = {"name": "test_vector_store", "metadata": {"key1": "value1", "key2": "value2"}} + try: + vector_store = 
client.assistants.create_vector_store(body=body) + print("here") + print(vector_store) + # check correct creation + assert isinstance(vector_store, VectorStore) + assert vector_store.name == "test_vector_store" + assert vector_store.id + assert vector_store.metadata == {"key1": "value1", "key2": "value2"} + assert vector_store.status == "completed" + print(f"Vector store created and polled successfully: {vector_store.id}") + + # throw error if failed to create and poll vector store + except HttpResponseError as e: + print(f"Failed to create and poll vector store: {e}") + raise + + # close client + client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store_azure(self, **kwargs): + """Test the assistant with vector store creation.""" + self._do_test_create_vector_store(streaming=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_vector_store_file_id(self, **kwargs): + """Test the assistant with vector store creation.""" + self._do_test_create_vector_store(file_path=self._get_data_file(), streaming=False, **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store_azure_streaming(self, **kwargs): + """Test the assistant with vector store creation.""" + self._do_test_create_vector_store(streaming=True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_vector_store_file_id_streaming(self, **kwargs): + """Test the assistant with vector store creation.""" + self._do_test_create_vector_store(file_path=self._get_data_file(), streaming=True, **kwargs) + + def _do_test_create_vector_store(self, streaming, **kwargs): + """Test the assistant with vector store creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = self._get_file_id_maybe(ai_client, 
**kwargs) + file_ids = [file_id] if file_id else None + if file_ids: + ds = None + else: + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = ai_client.assistants.create_vector_store_and_poll( + file_ids=file_ids, data_sources=ds, name="my_vectorstore" + ) + assert vector_store.id + self._test_file_search(ai_client, vector_store, file_id, streaming) + + @assistantClientPreparer() + @pytest.mark.skip("Not deployed in all regions.") + @recorded_by_proxy + def test_vector_store_threads_file_search_azure(self, **kwargs): + """Test file search when azure asset ids are supplied during thread creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + fs = FileSearchToolResource( + vector_stores=[ + VectorStoreConfigurations( + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), + ) + ] + ) + file_search = FileSearchTool() + assistant = ai_client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + + thread = ai_client.assistants.create_thread(tool_resources=ToolResources(file_search=fs)) + assert thread.id + # create message + message = ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created."
+ + run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = ai_client.assistants.list_messages(thread.id) + assert len(messages) + ai_client.assistants.delete_assistant(assistant.id) + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_vector_store_add_file_file_id(self, **kwargs): + """Test adding single file to vector store with file ID.""" + self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), streaming=False, **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store_add_file_azure(self, **kwargs): + """Test adding single file to vector store with azure asset ID.""" + self._do_test_create_vector_store_add_file(streaming=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_vector_store_add_file_file_id_streaming(self, **kwargs): + """Test adding single file to vector store with file ID.""" + self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), streaming=True, **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store_add_file_azure_streaming(self, **kwargs): + """Test adding single file to vector store with azure asset ID.""" + self._do_test_create_vector_store_add_file(streaming=True, **kwargs) + + def _do_test_create_vector_store_add_file(self, streaming, **kwargs): + """Test adding single file to vector store.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = self._get_file_id_maybe(ai_client, **kwargs) + if file_id: + ds = None + else: + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], +
asset_type="uri_asset", + ) + vector_store = ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + assert vector_store.id + vector_store_file = ai_client.assistants.create_vector_store_file( + vector_store_id=vector_store.id, data_source=ds, file_id=file_id + ) + assert vector_store_file.id + self._test_file_search(ai_client, vector_store, file_id, streaming) + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_vector_store_batch_file_ids(self, **kwargs): + """Test adding multiple files to vector store with file IDs.""" + self._do_test_create_vector_store_batch(streaming=False, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store_batch_azure(self, **kwargs): + """Test adding multiple files to vector store with azure asset IDs.""" + self._do_test_create_vector_store_batch(streaming=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_vector_store_batch_file_ids_streaming(self, **kwargs): + """Test adding multiple files to vector store with file IDs.""" + self._do_test_create_vector_store_batch(streaming=True, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy + def test_create_vector_store_batch_azure_streaming(self, **kwargs): + """Test adding multiple files to vector store with azure asset IDs.""" + self._do_test_create_vector_store_batch(streaming=True, **kwargs) + + def _do_test_create_vector_store_batch(self, streaming, **kwargs): + """Test the assistant with vector store creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = self._get_file_id_maybe(ai_client, **kwargs) + if file_id: + file_ids = [file_id] + ds = None + else: + file_ids = None + 
ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + assert vector_store.id + vector_store_file_batch = ai_client.assistants.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids + ) + assert vector_store_file_batch.id + self._test_file_search(ai_client, vector_store, file_id, streaming) + ai_client.close() + + def _test_file_search( + self, ai_client: AssistantsClient, vector_store: VectorStore, file_id: Optional[str], streaming: bool + ) -> None: + """Test the file search""" + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + assistant = ai_client.assistants.create_assistant( + model="gpt-4", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + + thread = ai_client.assistants.create_thread() + assert thread.id + + # create message + message = ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." 
+ + if streaming: + thread_run = None + with ai_client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + for _, event_data, _ in stream: + if isinstance(event_data, ThreadRun): + thread_run = event_data + elif ( + isinstance(event_data, RunStepDeltaChunk) + and isinstance(event_data.delta.step_details, RunStepDeltaToolCallObject) + and event_data.delta.step_details.tool_calls + ): + assert isinstance( + event_data.delta.step_details.tool_calls[0].file_search, RunStepFileSearchToolCallResults + ) + assert thread_run is not None + run = ai_client.assistants.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) + assert run is not None + else: + run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + + ai_client.assistants.delete_vector_store(vector_store.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = ai_client.assistants.list_messages(thread.id) + assert len(messages) + ai_client.assistants.delete_assistant(assistant.id) + self._remove_file_maybe(file_id, ai_client) + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_message_attachement_azure(self, **kwargs): + """Test message attachment with azure ID.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + self._do_test_message_attachment(data_source=ds, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_message_attachment_file_ids(self, **kwargs): + """Test message attachment with file ID.""" + self._do_test_message_attachment(file_path=self._get_data_file(), **kwargs) + + def _do_test_message_attachment(self, **kwargs): + """Test assistant with the message attachment.""" + ai_client = 
self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = self._get_file_id_maybe(ai_client, **kwargs) + + # Create assistant with file search tool + assistant = ai_client.assistants.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + ) + assert assistant.id, "Assistant was not created" + + thread = ai_client.assistants.create_thread() + assert thread.id, "The thread was not created." + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. + attachment = MessageAttachment( + file_id=file_id, + data_source=kwargs.get("data_source"), + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], + ) + message = ai_client.assistants.create_message( + thread_id=thread.id, + role="user", + content="What does the attachment say?", + attachments=[attachment], + ) + assert message.id, "The message was not created." + + run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." 
+ self._remove_file_maybe(file_id, ai_client) + ai_client.assistants.delete_assistant(assistant.id) + + messages = ai_client.assistants.list_messages(thread_id=thread.id) + assert len(messages), "No messages were created" + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy + def test_create_assistant_with_interpreter_azure(self, **kwargs): + """Test Create assistant with code interpreter with azure asset ids.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_assistant_with_interpreter_file_ids(self, **kwargs): + """Test Create assistant with code interpreter with file IDs.""" + self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs) + + def _do_test_create_assistant_with_interpreter(self, **kwargs): + """Test create assistant with code interpreter and project asset id""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = ai_client.assistants.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS) + assert file.id, "The file was not uploaded." 
+ file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), + ) + tr = ToolResources(code_interpreter=cdr) + # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = ai_client.assistants.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=tr, + ) + assert assistant.id, "Assistant was not created" + + thread = ai_client.assistants.create_thread() + assert thread.id, "The thread was not created." + + message = ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." 
+ self._remove_file_maybe(file_id, ai_client) + assert run.status == "completed", f"Error in run: {run.last_error}" + ai_client.assistants.delete_assistant(assistant.id) + assert len(ai_client.assistants.list_messages(thread_id=thread.id)), "No messages were created" + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy + def test_create_thread_with_interpreter_azure(self, **kwargs): + """Test Create assistant with code interpreter with azure asset ids.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_thread_with_interpreter_file_ids(self, **kwargs): + """Test Create assistant with code interpreter with file IDs.""" + self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs) + + def _do_test_create_thread_with_interpreter(self, **kwargs): + """Test create assistant with code interpreter and project asset id""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = ai_client.assistants.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS) + assert file.id, "The file was not uploaded." 
+ file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), + ) + tr = ToolResources(code_interpreter=cdr) + # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = ai_client.assistants.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + assert assistant.id, "Assistant was not created" + + thread = ai_client.assistants.create_thread(tool_resources=tr) + assert thread.id, "The thread was not created." + + message = ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." 
+ self._remove_file_maybe(file_id, ai_client) + assert run.status == "completed", f"Error in run: {run.last_error}" + ai_client.assistants.delete_assistant(assistant.id) + messages = ai_client.assistants.list_messages(thread.id) + assert len(messages) + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("Not deployed in all regions.") + @recorded_by_proxy + def test_create_assistant_with_inline_vs_azure(self, **kwargs): + """Test creation of asistant with vector store inline.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + fs = FileSearchToolResource( + vector_stores=[ + VectorStoreConfigurations( + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), + ) + ] + ) + file_search = FileSearchTool() + assistant = ai_client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=ToolResources(file_search=fs), + ) + assert assistant.id + + thread = ai_client.assistants.create_thread() + assert thread.id + # create message + message = ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." 
+ + run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = ai_client.assistants.list_messages(thread.id) + assert len(messages) + ai_client.assistants.delete_assistant(assistant.id) + ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy + def test_create_attachment_in_thread_azure(self, **kwargs): + """Create thread with message attachment inline with azure asset IDs.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + self._do_test_create_attachment_in_thread_azure(data_source=ds, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_create_attachment_in_thread_file_ids(self, **kwargs): + """Create thread with message attachment inline with azure asset IDs.""" + self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs) + + def _do_test_create_attachment_in_thread_azure(self, **kwargs): + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = self._get_file_id_maybe(ai_client, **kwargs) + + file_search = FileSearchTool() + assistant = ai_client.assistants.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + ) + assert assistant.id + + # create message + attachment = MessageAttachment( + file_id=file_id, + data_source=kwargs.get("data_source"), + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], + ) + message = ThreadMessageOptions( + role="user", + content="What does the attachment say?", + 
attachments=[attachment], + ) + thread = ai_client.assistants.create_thread(messages=[message]) + assert thread.id + + run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = ai_client.assistants.list_messages(thread.id) + assert len(messages) + ai_client.assistants.delete_assistant(assistant.id) + ai_client.close() + + @assistantClientPreparer() + @recorded_by_proxy + def test_azure_ai_search_tool(self, **kwargs): + """Test using the AzureAISearchTool with an assistant.""" + # create client + with self.create_client(**kwargs) as client: + assert isinstance(client, AssistantsClient) + + # Create AzureAISearchTool + connection_name = kwargs.pop("azure_ai.assistants_assistants_tests_search_connection_name", "my-search-connection-name") + connection = client.connections.get(connection_name=connection_name) + conn_id = connection.id + index_name = kwargs.pop("azure_ai.assistants_assistants_tests_search_index_name", "my-search-index") + + azure_search_tool = AzureAISearchTool( + index_connection_id=conn_id, + index_name=index_name, + ) + + # Create assistant with the search tool + assistant = client.assistants.create_assistant( + model="gpt-4o", + name="search-assistant", + instructions="You are a helpful assistant that can search for information using Azure AI Search.", + tools=azure_search_tool.definitions, + tool_resources=azure_search_tool.resources + ) + assert assistant.id + print(f"Created assistant with ID: {assistant.id}") + + # Create thread + thread = client.assistants.create_thread() + assert thread.id + print(f"Created thread with ID: {thread.id}") + + # Create message + message = client.assistants.create_message( + thread_id=thread.id, + role="user", + content="Search for information about iPhone prices." 
+ ) + assert message.id + print(f"Created message with ID: {message.id}") + + # Create and process run + run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == RunStatus.COMPLETED, run.last_error.message + + # List messages to verify tool was used + messages = client.assistants.list_messages(thread_id=thread.id) + assert len(messages.data) > 0 + + # Clean up + client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_include_file_search_results_no_stream(self, **kwargs): + """Test using include_file_search.""" + self._do_test_include_file_search_results(use_stream=False, include_content=True, **kwargs) + self._do_test_include_file_search_results(use_stream=False, include_content=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_include_file_search_results_stream(self, **kwargs): + """Test using include_file_search with streaming.""" + self._do_test_include_file_search_results(use_stream=True, include_content=True, **kwargs) + self._do_test_include_file_search_results(use_stream=True, include_content=False, **kwargs) + + def _do_test_include_file_search_results(self, use_stream, include_content, **kwargs): + """Run the test with file search results.""" + with self.create_client(**kwargs) as ai_client: + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = ai_client.assistants.create_vector_store_and_poll( + file_ids=[], data_sources=ds, name="my_vectorstore" + ) + # vector_store = await ai_client.assistants.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') + assert vector_store.id + + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + 
assistant = ai_client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + thread = ai_client.assistants.create_thread() + assert thread.id + # create message + message = ai_client.assistants.create_message( + thread_id=thread.id, + role="user", + # content="What does the attachment say?" + content="What Contoso Galaxy Innovations produces?", + ) + assert message.id, "The message was not created." + include = [RunAdditionalFieldList.FILE_SEARCH_CONTENTS] if include_content else None + + if use_stream: + run = None + with ai_client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id, include=include) as stream: + for event_type, event_data, _ in stream: + if isinstance(event_data, ThreadRun): + run = event_data + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + break + else: + run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id, include=include) + assert run.status == RunStatus.COMPLETED + assert run is not None + steps = ai_client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) + # The 1st (not 0th) step is a tool call. 
+ step_id = steps.data[1].id + one_step = ai_client.assistants.get_run_step( + thread_id=thread.id, run_id=run.id, step_id=step_id, include=include + ) + self._assert_file_search_valid(one_step.step_details.tool_calls[0], include_content) + self._assert_file_search_valid(steps.data[1].step_details.tool_calls[0], include_content) + + messages = ai_client.assistants.list_messages(thread_id=thread.id) + assert len(messages) + + ai_client.assistants.delete_vector_store(vector_store.id) + # delete assistant and close client + ai_client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + ai_client.close() + + def _assert_file_search_valid(self, tool_call: Any, include_content: bool) -> None: + """Test that file search result is properly populated.""" + assert isinstance(tool_call, RunStepFileSearchToolCall), f"Wrong type of tool call: {type(tool_call)}." + assert isinstance( + tool_call.file_search, RunStepFileSearchToolCallResults + ), f"Wrong type of search results: {type(tool_call.file_search)}." + assert isinstance( + tool_call.file_search.results[0], RunStepFileSearchToolCallResult + ), f"Wrong type of search result: {type(tool_call.file_search.results[0])}." 
+ assert tool_call.file_search.results + if include_content: + assert tool_call.file_search.results[0].content + assert isinstance(tool_call.file_search.results[0].content[0], FileSearchToolCallContent) + assert tool_call.file_search.results[0].content[0].type == "text" + assert tool_call.file_search.results[0].content[0].text + else: + assert tool_call.file_search.results[0].content is None + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy + def test_assistants_with_json_schema(self, **kwargs): + """Test structured output from the assistant.""" + with self.create_client(**kwargs) as ai_client: + assistant = ai_client.assistants.create_assistant( + # Note only gpt-4o-mini-2024-07-18 and + # gpt-4o-2024-08-06 and later support structured output. + model="gpt-4o-mini", + name="my-assistant", + instructions="Extract the information about planets.", + headers={"x-ms-enable-preview": "true"}, + response_format=ResponseFormatJsonSchemaType( + json_schema=ResponseFormatJsonSchema( + name="planet_mass", + description="Extract planet mass.", + schema={ + "$defs": { + "Planets": {"enum": ["Earth", "Mars", "Jupyter"], "title": "Planets", "type": "string"} + }, + "properties": { + "planet": {"$ref": "#/$defs/Planets"}, + "mass": {"title": "Mass", "type": "number"}, + }, + "required": ["planet", "mass"], + "title": "Planet", + "type": "object", + }, + ) + ), + ) + assert assistant.id + + thread = ai_client.assistants.create_thread() + assert thread.id + + message = ai_client.assistants.create_message( + thread_id=thread.id, + role="user", + content=("The mass of the Mars is 6.4171E23 kg"), + ) + assert message.id + + run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + + assert run.status == RunStatus.COMPLETED, run.last_error.message + + del_assistant = ai_client.assistants.delete_assistant(assistant.id) + assert del_assistant.deleted + + messages = 
ai_client.assistants.list_messages(thread_id=thread.id) + + planet_info = [] + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + # We will only list assistant responses here. + if isinstance(last_message_content, MessageTextContent) and data_point.role == MessageRole.ASSISTANT: + planet_info.append(json.loads(last_message_content.text.value)) + assert len(planet_info) == 1 + assert len(planet_info[0]) == 2 + assert planet_info[0].get("mass") == pytest.approx(6.4171e23, 1e22) + assert planet_info[0].get("planet") == "Mars" + + def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str: + """Return file id if kwargs has file path.""" + if "file_path" in kwargs: + file = ai_client.assistants.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS) + assert file.id, "The file was not uploaded." + return file.id + return None + + def _remove_file_maybe(self, file_id: str, ai_client: AssistantsClient) -> None: + """Remove file if we have file ID.""" + if file_id: + ai_client.assistants.delete_file(file_id) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy + def test_code_interpreter_and_save_file(self, **kwargs): + output_file_exist = False + + # create client + with self.create_client(**kwargs) as client: + + with tempfile.TemporaryDirectory() as temp_dir: + + # create a temporary input file for upload + test_file_path = os.path.join(temp_dir, "input.txt") + + with open(test_file_path, "w") as f: + f.write("This is a test file") + + file: OpenAIFile = client.assistants.upload_file_and_poll( + file_path=test_file_path, purpose=FilePurpose.ASSISTANTS + ) + + # create assistant + code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + assistant = client.assistants.create_assistant( + model="gpt-4-1106-preview", + 
name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = client.assistants.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # create a message + message = client.assistants.create_message( + thread_id=thread.id, + role="user", + content="Create an image file same as the text file and give me file id?", + ) + print(f"Created message, message ID: {message.id}") + + # create run + run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + print(f"Run finished with status: {run.status}") + + # delete file + client.assistants.delete_file(file.id) + print("Deleted file") + + # get messages + messages = client.assistants.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for file_path_annotation in messages.file_path_annotations: + file_id = file_path_annotation.file_path.file_id + print(f"Image File ID: {file_path_annotation.file_path.file_id}") + temp_file_path = os.path.join(temp_dir, "output.png") + client.assistants.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) + output_file_exist = os.path.exists(temp_file_path) + + assert output_file_exist + + @assistantClientPreparer() + @recorded_by_proxy + def test_azure_function_call(self, **kwargs): + """Test calling Azure functions.""" + # Note: This test was recorded in westus region as for now + # 2025-02-05 it is not supported in test region (East US 2) + # create client + storage_queue = kwargs["azure_ai.assistants_assistants_tests_storage_queue"] + with self.create_client(**kwargs) as client: + azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + 
"type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_queue, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_queue, + ), + ) + assistant = client.assistants.create_assistant( + model="gpt-4", + name="azure-function-assistant-foo", + instructions=( + "You are a helpful support assistant. Use the provided function any " + "time the prompt contains the string 'What would foo say?'. When " + "you invoke the function, ALWAYS specify the output queue uri parameter as " + f"'{storage_queue}/azure-function-tool-output'" + '. Always responds with "Foo says" and then the response from the tool.' + ), + headers={"x-ms-enable-preview": "true"}, + tools=azure_function_tool.definitions, + ) + assert assistant.id, "The assistant was not created" + + # Create a thread + thread = client.assistants.create_thread() + assert thread.id, "The thread was not created." + + # Create a message + message = client.assistants.create_message( + thread_id=thread.id, + role="user", + content="What is the most prevalent element in the universe? What would foo say?", + ) + assert message.id, "The message was not created." + + run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state." + + # Get messages from the thread + messages = client.assistants.list_messages(thread_id=thread.id) + assert len(messages.text_messages) > 1, "No messages were received from assistant." + + # Check that we have function response in at least one message. 
+ assert any("bar" in msg.text.value.lower() for msg in messages.text_messages) + + # Delete the assistant once done + result = client.assistants.delete_assistant(assistant.id) + assert result.deleted, "The assistant was not deleted." + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented.") + @recorded_by_proxy + def test_client_with_thread_messages(self, **kwargs): + """Test assistant with thread messages.""" + with self.create_client(**kwargs) as client: + + # [START create_assistant] + assistant = client.assistants.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a personal electronics tutor. Write and run code to answer questions.", + ) + assert assistant.id, "The assistant was not created." + thread = client.assistants.create_thread() + assert thread.id, "Thread was not created" + + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="What is the equation of light energy?" + ) + assert message.id, "The message was not created." + + additional_messages = [ + ThreadMessageOptions(role=MessageRole.ASSISTANT, content="E=mc^2"), + ThreadMessageOptions(role=MessageRole.USER, content="What is the impedance formula?"), + ] + run = client.assistants.create_run( + thread_id=thread.id, assistant_id=assistant.id, additional_messages=additional_messages + ) + + # poll the run as long as run status is queued or in progress + while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS]: + # wait for a second + time.sleep(1) + run = client.assistants.get_run( + thread_id=thread.id, + run_id=run.id, + ) + assert run.status in RunStatus.COMPLETED + + assert client.assistants.delete_assistant(assistant.id).deleted, "The assistant was not deleted" + messages = client.assistants.list_messages(thread_id=thread.id) + assert len(messages.data), "The data from the assistant was not received." 
diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py new file mode 100644 index 000000000000..3c125f7a078c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py @@ -0,0 +1,3092 @@ +# pylint: disable=too-many-lines,line-too-long,useless-suppression +# # ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +# cSpell:disable +from typing import Any + +import datetime +import functools +import json +import logging +import os +import pytest +import sys +import io +import time + +from azure.ai.assistants.aio import AssistantsClient +from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader +from devtools_testutils.aio import recorded_by_proxy_async +from azure.ai.assistants.models import ( + AzureFunctionTool, + AzureFunctionStorageQueue, + AssistantStreamEvent, + AssistantThread, + CodeInterpreterTool, + CodeInterpreterToolResource, + FilePurpose, + FileSearchTool, + FileSearchToolCallContent, + FileSearchToolResource, + FunctionTool, + MessageAttachment, + MessageRole, + MessageTextContent, + ResponseFormatJsonSchema, + ResponseFormatJsonSchemaType, + RunAdditionalFieldList, + RunStepDeltaChunk, + RunStepDeltaToolCallObject, + RunStepFileSearchToolCall, + RunStepFileSearchToolCallResult, + RunStepFileSearchToolCallResults, + RunStatus, + ThreadMessageOptions, + ThreadRun, + ToolResources, + ToolSet, + VectorStore, + VectorStoreConfigurations, + VectorStoreConfiguration, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) + +# TODO clean this up / get rid of anything not in use + +""" +issues I've noticed with the code: + delete_thread(thread.id) fails + cancel_thread(thread.id) expires/times out occasionally + added time.sleep() to the beginning of my last few tests to avoid limits + when using the endpoint from Howie, 
delete_assistant(assistant.id) did not work but would not cause an error +""" + +# Set to True to enable SDK logging +LOGGING_ENABLED = True + +if LOGGING_ENABLED: + # Create a logger for the 'azure' SDK + # See https://docs.python.org/3/library/logging.html + logger = logging.getLogger("azure") + logger.setLevel(logging.DEBUG) # INFO or DEBUG + + # Configure a console output + handler = logging.StreamHandler(stream=sys.stdout) + logger.addHandler(handler) + + +assistantClientPreparer = functools.partial( + EnvironmentVariableLoader, + "azure_ai.assistants", + azure_ai_assistants_assistants_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", + azure_ai_assistants_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", + azure_ai_assistants_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", +) + + +# create tool for assistant use +def fetch_current_datetime_live(): + """ + Get the current time as a JSON string. + + :return: Static time string so that test recordings work. + :rtype: str + """ + current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + time_json = json.dumps({"current_time": current_datetime}) + return time_json + + +# create tool for assistant use +def fetch_current_datetime_recordings(): + """ + Get the current time as a JSON string. + + :return: Static time string so that test recordings work. 
+ :rtype: str + """ + time_json = json.dumps({"current_time": "2024-10-10 12:30:19"}) + return time_json + + +# Statically defined user functions for fast reference +user_functions_recording = {fetch_current_datetime_recordings} +user_functions_live = {fetch_current_datetime_live} + + +# The test class name needs to start with "Test" to get collected by pytest +class TestAssistantClientAsync(AzureRecordedTestCase): + + # helper function: create client using environment variables + def create_client(self, **kwargs): + # fetch environment variables + connection_string = kwargs.pop("azure_ai.assistants_assistants_tests_project_connection_string") + credential = self.get_credential(AssistantsClient, is_async=True) + + # create and return client + client = AssistantsClient.from_connection_string( + credential=credential, + conn_str=connection_string, + ) + + return client + + def _get_data_file(self) -> str: + """Return the test file name.""" + return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") + + # for debugging purposes: if a test fails and its assistant has not been deleted, it will continue to show up in the assistants list + """ + # NOTE: this test should not be run against a shared resource, as it will delete all assistants + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_clear_client(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # clear assistant list + assistants = await client.assistants.list_assistants().data + for assistant in assistants: + await client.assistants.delete_assistant(assistant.id) + assert client.assistants.list_assistants().data.__len__() == 0 + + # close client + await client.close() + """ + + # ********************************************************************************** + # + # UNIT TESTS + # + # ********************************************************************************** + + # 
    # **********************************************************************************
    #
    # HAPPY PATH SERVICE TESTS - assistant APIs
    #
    # **********************************************************************************

    # test client creation
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_create_client(self, **kwargs):
        """Smoke test: the preparer can build an AssistantsClient and it closes cleanly."""
        # create client
        client = self.create_client(**kwargs)
        assert isinstance(client, AssistantsClient)
        print("Created client")

        # close client
        await client.close()

    # test assistant creation and deletion
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_create_delete_assistant(self, **kwargs):
        """Create an assistant with keyword arguments, then delete it."""
        # create client
        async with self.create_client(**kwargs) as client:
            assert isinstance(client, AssistantsClient)
            print("Created client")

            # create assistant
            assistant = await client.assistants.create_assistant(
                model="gpt-4o", name="my-assistant", instructions="You are helpful assistant"
            )
            assert assistant.id
            print("Created assistant, assistant ID", assistant.id)

            # delete assistant and close client
            await client.assistants.delete_assistant(assistant.id)
            print("Deleted assistant")

    # test assistant creation with tools
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_create_assistant_with_tools(self, **kwargs):
        """Create an assistant with function tools and verify the tool round-trips."""
        # create client
        async with self.create_client(**kwargs) as client:
            assert isinstance(client, AssistantsClient)
            print("Created client")

            # initialize assistant functions
            functions = FunctionTool(functions=user_functions_recording)

            # create assistant with tools
            assistant = await client.assistants.create_assistant(
                model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", tools=functions.definitions
            )
            assert assistant.id
            print("Created assistant, assistant ID", assistant.id)
            # the service should echo back the tool definition we submitted
            assert assistant.tools
            assert assistant.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
            print("Tool successfully submitted:", functions.definitions[0]["function"]["name"])

            # delete assistant and close client
            await client.assistants.delete_assistant(assistant.id)
            print("Deleted assistant")

    # test update assistant without body: JSON
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_update_assistant(self, **kwargs):
        """Create an assistant from a dict body, then update it via keyword arguments."""
        # create client
        async with self.create_client(**kwargs) as client:
            assert isinstance(client, AssistantsClient)
            print("Created client")

            # create body for assistant
            body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"}

            # create assistant
            assistant = await client.assistants.create_assistant(body=body)
            assert assistant.id
            print("Created assistant, assistant ID", assistant.id)

            # update assistant and confirm changes went through
            assistant = await client.assistants.update_assistant(assistant.id, name="my-assistant2")
            assert assistant.name
            assert assistant.name == "my-assistant2"

            # delete assistant and close client
            await client.assistants.delete_assistant(assistant.id)
            print("Deleted assistant")
            await client.close()

    # test update assistant with body: JSON
    @assistantClientPreparer()
    @pytest.mark.skip("Overload performs inconsistently.")
    @recorded_by_proxy_async
    async def test_update_assistant_with_body(self, **kwargs):
        """Update an assistant using the JSON-body overload (currently skipped; overload flaky)."""
        # create client
        async with self.create_client(**kwargs) as client:
            print("Created client")

            # create body for assistant
            body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"}

            # create assistant
            assistant = await client.assistants.create_assistant(body=body)
            assert assistant.id
            print("Created assistant, assistant ID", assistant.id)

            # create body for assistant
            body2 = {"name": "my-assistant2", "instructions": "You are helpful assistant"}

            # update assistant and confirm changes went through
            assistant = await client.assistants.update_assistant(assistant.id, body=body2)
            assert assistant.name
            assert assistant.name == "my-assistant2"

            # delete assistant and close client
            await client.assistants.delete_assistant(assistant.id)
            print("Deleted assistant")
            await client.close()

    # NOTE update_assistant with overloads isn't working
    # test update assistant with body: IO[bytes]
    @assistantClientPreparer()
    @pytest.mark.skip("Overload performs inconsistently.")
    @recorded_by_proxy_async
    async def test_update_assistant_with_iobytes(self, **kwargs):
        """Update an assistant using the IO[bytes] overload (currently skipped; overload flaky)."""
        # create client
        async with self.create_client(**kwargs) as client:
            print("Created client")

            # create assistant
            assistant = await client.assistants.create_assistant(
                model="gpt-4o", name="my-assistant", instructions="You are helpful assistant"
            )
            assert assistant.id

            # create body for assistant
            body = {"name": "my-assistant2", "instructions": "You are helpful assistant"}
            binary_body = json.dumps(body).encode("utf-8")

            # update assistant and confirm changes went through
            assistant = await client.assistants.update_assistant(assistant.id, body=io.BytesIO(binary_body))
            assert assistant.name
            assert assistant.name == "my-assistant2"

            # delete assistant and close client
            await client.assistants.delete_assistant(assistant.id)
            print("Deleted assistant")
            await client.close()

    """
    DISABLED: can't perform consistently on shared resource
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_assistant_list(self, **kwargs):
        # create client and ensure there are no previous assistants
        client = self.create_client(**kwargs)
        list_length = await client.assistants.list_assistants().data.__len__()

        # create assistant and check that it appears in the list
        assistant = await client.assistants.create_assistant(
            model="gpt-4o", name="my-assistant", instructions="You are helpful assistant"
        )
        assert client.assistants.list_assistants().data.__len__() == list_length + 1
        assert client.assistants.list_assistants().data[0].id == assistant.id

        # create second assistant and check that it appears in the list
        assistant2 = await client.assistants.create_assistant(model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant")
        assert client.assistants.list_assistants().data.__len__() == list_length + 2
        assert client.assistants.list_assistants().data[0].id == assistant.id or client.assistants.list_assistants().data[1].id == assistant.id

        # delete assistants and check list
        await client.assistants.delete_assistant(assistant.id)
        assert client.assistants.list_assistants().data.__len__() == list_length + 1
        assert client.assistants.list_assistants().data[0].id == assistant2.id

        client.assistants.delete_assistant(assistant2.id)
        assert client.assistants.list_assistants().data.__len__() == list_length
        print("Deleted assistants")

        # close client
        await client.close()
    """

    # **********************************************************************************
    #
    # HAPPY PATH SERVICE TESTS - Thread APIs
    #
    # **********************************************************************************

    # test creating thread
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_create_thread(self, **kwargs):
        """Create a thread and verify it has an id and the expected type."""
        # create client
        async with self.create_client(**kwargs) as client:
            print("Created client")

            # create assistant
            assistant = await client.assistants.create_assistant(
                model="gpt-4o", name="my-assistant", instructions="You are helpful assistant"
            )
            assert assistant.id
            print("Created assistant, assistant ID", assistant.id)

            # create thread
            thread = await client.assistants.create_thread()
            assert isinstance(thread, AssistantThread)
            assert thread.id
            print("Created thread, thread ID", thread.id)

            # delete assistant and close client
            await client.assistants.delete_assistant(assistant.id)
            print("Deleted assistant")

    # test creating thread with no body
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_create_thread_with_metadata(self, **kwargs):
        """Create a thread with metadata passed as a keyword argument and verify it round-trips."""
        # create client
        async with self.create_client(**kwargs) as client:
            print("Created client")

            # create metadata for thread
            metadata = {"key1": "value1", "key2": "value2"}

            # create thread
            thread = await client.assistants.create_thread(metadata=metadata)
            assert isinstance(thread, AssistantThread)
            assert thread.id
            print("Created thread, thread ID", thread.id)
            assert thread.metadata == {"key1": "value1", "key2": "value2"}

            # close client
            # NOTE(review): no assistant is created in this test; the message below is misleading.
            print("Deleted assistant")
            await client.close()

    # test creating thread with body: JSON
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_create_thread_with_body(self, **kwargs):
        """Create a thread using the JSON-body overload and verify metadata round-trips."""
        # create client
        async with self.create_client(**kwargs) as client:
            print("Created client")

            # create body for thread
            body = {
                "metadata": {"key1": "value1", "key2": "value2"},
            }

            # create thread
            thread = await client.assistants.create_thread(body=body)
            assert isinstance(thread, AssistantThread)
            assert thread.id
            print("Created thread, thread ID", thread.id)
            assert thread.metadata == {"key1": "value1", "key2": "value2"}

            # close client
            # NOTE(review): no assistant is created in this test; the message below is misleading.
            print("Deleted assistant")
            await client.close()

    # test creating thread with body: IO[bytes]
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_create_thread_with_iobytes(self, **kwargs):
        """Create a thread using the IO[bytes] overload and verify metadata round-trips."""
        # create client
        async with self.create_client(**kwargs) as client:
            print("Created client")

            # create body for thread
            body = {
                "metadata": {"key1": "value1", "key2": "value2"},
            }
            binary_body = json.dumps(body).encode("utf-8")

            # create thread
            thread = await client.assistants.create_thread(body=io.BytesIO(binary_body))
            assert isinstance(thread, AssistantThread)
            assert thread.id
            print("Created thread, thread ID", thread.id)
            assert thread.metadata == {"key1": "value1", "key2": "value2"}

            # close client
            # NOTE(review): no assistant is created in this test; the message below is misleading.
            print("Deleted assistant")
            await client.close()

    # test getting thread
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_get_thread(self, **kwargs):
        """Create a thread, fetch it by id, and verify the ids match."""
        # create client
        async with self.create_client(**kwargs) as client:
            print("Created client")

            # create assistant
            assistant = await client.assistants.create_assistant(
                model="gpt-4o", name="my-assistant", instructions="You are helpful assistant"
            )
            assert assistant.id
            print("Created assistant, assistant ID", assistant.id)

            # create thread
            thread = await client.assistants.create_thread()
            assert thread.id
            print("Created thread, thread ID", thread.id)

            # get thread
            thread2 = await client.assistants.get_thread(thread.id)
            assert thread2.id
            assert thread.id == thread2.id
            print("Got thread, thread ID", thread2.id)

            # delete assistant and close client
            await client.assistants.delete_assistant(assistant.id)
            print("Deleted assistant")

    # test updating thread
    @assistantClientPreparer()
    @recorded_by_proxy_async
    async def test_update_thread(self, **kwargs):
        """Update a thread's metadata via keyword arguments and verify the change."""
        # create client
        async with self.create_client(**kwargs) as client:
            print("Created client")

            # create assistant
            assistant = await client.assistants.create_assistant(
                model="gpt-4o", name="my-assistant", instructions="You are helpful assistant"
            )
            assert assistant.id
            print("Created assistant, assistant ID", assistant.id)

            # create thread
            thread = await client.assistants.create_thread()
            assert thread.id
            print("Created thread, thread ID", thread.id)

            # update thread
            thread = await client.assistants.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"})
            assert thread.metadata == {"key1": "value1", "key2": "value2"}

            # delete assistant and close client
            await client.assistants.delete_assistant(assistant.id)
            print("Deleted assistant")
            await client.close()

    # test updating thread without body
    @assistantClientPreparer()
@recorded_by_proxy_async + async def test_update_thread_with_metadata(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # set metadata + metadata = {"key1": "value1", "key2": "value2"} + + # create thread + thread = await client.assistants.create_thread(metadata=metadata) + assert thread.id + print("Created thread, thread ID", thread.id) + + # set metadata + metadata2 = {"key1": "value1", "key2": "newvalue2"} + + # update thread + thread = await client.assistants.update_thread(thread.id, metadata=metadata2) + assert thread.metadata == {"key1": "value1", "key2": "newvalue2"} + + # close client + await client.close() + + # test updating thread with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_thread_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # set metadata + body = {"metadata": {"key1": "value1", "key2": "value2"}} + + # update thread + thread = await client.assistants.update_thread(thread.id, body=body) + assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # close client + await client.close() + + # test updating thread with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_thread_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # set metadata + body = {"metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + + # update thread + thread = await client.assistants.update_thread(thread.id, body=io.BytesIO(binary_body)) + 
assert thread.metadata == {"key1": "value1", "key2": "value2"} + + # close client + await client.close() + + # test deleting thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_delete_thread(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + # assert isinstance(thread, AssistantThread) + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete thread + deletion_status = await client.assistants.delete_thread(thread.id) + assert deletion_status.id == thread.id + assert deletion_status.deleted == True + print("Deleted thread, thread ID", deletion_status.id) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Message APIs + # # + # # ********************************************************************************** + + # test creating message in a thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_message(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + 
+ # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test creating message in a thread with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_message_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create body for message + body = {"role": "user", "content": "Hello, tell me a joke"} + + # create message + message = await client.assistants.create_message(thread_id=thread.id, body=body) + assert message.id + print("Created message, message ID", message.id) + + # close client + await client.close() + + # test creating message in a thread with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_message_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create body for message + body = {"role": "user", "content": "Hello, tell me a joke"} + binary_body = json.dumps(body).encode("utf-8") + + # create message + message = await client.assistants.create_message(thread_id=thread.id, body=io.BytesIO(binary_body)) + assert message.id + print("Created message, message ID", message.id) + + # close client + await client.close() + + # test creating multiple messages in a thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def 
test_create_multiple_messages(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create messages + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + message2 = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) + assert message2.id + print("Created message, message ID", message2.id) + message3 = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) + assert message3.id + print("Created message, message ID", message3.id) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test listing messages in a thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_list_messages(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check that initial message list is empty + messages0 = await 
client.assistants.list_messages(thread_id=thread.id) + print(messages0.data) + assert messages0.data.__len__() == 0 + + # create messages and check message list for each one + message1 = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message1.id + print("Created message, message ID", message1.id) + messages1 = await client.assistants.list_messages(thread_id=thread.id) + assert messages1.data.__len__() == 1 + assert messages1.data[0].id == message1.id + + message2 = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) + assert message2.id + print("Created message, message ID", message2.id) + messages2 = await client.assistants.list_messages(thread_id=thread.id) + assert messages2.data.__len__() == 2 + assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + + message3 = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) + assert message3.id + print("Created message, message ID", message3.id) + messages3 = await client.assistants.list_messages(thread_id=thread.id) + assert messages3.data.__len__() == 3 + assert ( + messages3.data[0].id == message3.id + or messages3.data[1].id == message2.id + or messages3.data[2].id == message2.id + ) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test getting message in a thread + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_get_message(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", 
assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # get message + message2 = await client.assistants.get_message(thread_id=thread.id, message_id=message.id) + assert message2.id + assert message.id == message2.id + print("Got message, message ID", message.id) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + # test updating message in a thread without body + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_message(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # update message + message = await client.assistants.update_message( + thread_id=thread.id, message_id=message.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert message.metadata == {"key1": "value1", "key2": "value2"} + + # close client + await client.close() + + # test updating message in a thread with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_message_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) 
+ + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # create body for message + body = {"metadata": {"key1": "value1", "key2": "value2"}} + + # update message + message = await client.assistants.update_message(thread_id=thread.id, message_id=message.id, body=body) + assert message.metadata == {"key1": "value1", "key2": "value2"} + + # close client + await client.close() + + # test updating message in a thread with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_message_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", message.id) + + # create body for message + body = {"metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + + # update message + message = await client.assistants.update_message( + thread_id=thread.id, message_id=message.id, body=io.BytesIO(binary_body) + ) + assert message.metadata == {"key1": "value1", "key2": "value2"} + + # close client + await client.close() + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Run APIs + # # + # # ********************************************************************************** + + # test creating run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: 
+ print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test creating run without body + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_run_with_metadata(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.assistants.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test creating run with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_run_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + 
print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create body for run + body = {"assistant_id": assistant.id, "metadata": {"key1": "value1", "key2": "value2"}} + + # create run + run = await client.assistants.create_run(thread_id=thread.id, body=body) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test creating run with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_run_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create body for run + body = {"assistant_id": assistant.id, "metadata": {"key1": "value1", "key2": "value2"}} + binary_body = json.dumps(body).encode("utf-8") + + # create run + run = await client.assistants.create_run(thread_id=thread.id, body=io.BytesIO(binary_body)) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # delete assistant and close client + await 
client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test getting run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_get_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # get run + run2 = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + assert run2.id + assert run.id == run2.id + print("Got run, run ID", run2.id) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test sucessful run status + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_run_status(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + assert message.id + print("Created message, message ID", 
message.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + print("Run status:", run.status) + + assert run.status in ["cancelled", "failed", "completed", "expired"] + print("Run completed with status:", run.status) + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + """ + # TODO another, but check that the number of runs decreases after cancelling runs + # TODO can each thread only support one run? + # test listing runs + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_list_runs(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check list for current runs + runs0 = await client.assistants.list_runs(thread_id=thread.id) + assert runs0.data.__len__() == 0 + + # create run and check list + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + runs1 = await client.assistants.list_runs(thread_id=thread.id) + assert runs1.data.__len__() == 1 + assert 
runs1.data[0].id == run.id + + # create second run + run2 = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run2.id + print("Created run, run ID", run2.id) + runs2 = await client.assistants.list_runs(thread_id=thread.id) + assert runs2.data.__len__() == 2 + assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + """ + + # test updating run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # update run + while run.status in ["queued", "in_progress"]: + time.sleep(5) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.assistants.update_run( + thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.metadata == {"key1": "value1", "key2": "value2"} + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test updating run without body + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_run_with_metadata(self, **kwargs): + # create client + async with 
self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.assistants.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # update run + while run.status in ["queued", "in_progress"]: + time.sleep(5) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.assistants.update_run( + thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "newvalue2"} + ) + assert run.metadata == {"key1": "value1", "key2": "newvalue2"} + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test updating run with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_run_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.assistants.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", 
"key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # create body for run + body = {"metadata": {"key1": "value1", "key2": "newvalue2"}} + + # update run + while run.status in ["queued", "in_progress"]: + time.sleep(5) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.assistants.update_run(thread_id=thread.id, run_id=run.id, body=body) + assert run.metadata == {"key1": "value1", "key2": "newvalue2"} + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test updating run with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_update_run_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = await client.assistants.create_run( + thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} + ) + assert run.id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # create body for run + body = {"metadata": {"key1": "value1", "key2": "newvalue2"}} + binary_body = json.dumps(body).encode("utf-8") + + # update run + while run.status in ["queued", "in_progress"]: + time.sleep(5) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.assistants.update_run(thread_id=thread.id, run_id=run.id, 
body=io.BytesIO(binary_body)) + assert run.metadata == {"key1": "value1", "key2": "newvalue2"} + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test submitting tool outputs to run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_submit_tool_outputs_to_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # Initialize assistant tools + functions = FunctionTool(user_functions_recording) + # TODO add files for code interpreter tool + # code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + # toolset.add(code_interpreter) + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + await client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + await client.assistants.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + hour12 = time.strftime("%H") + hour24 = time.strftime("%I") + minute = time.strftime("%M") + assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute + print("Used tool_outputs") + + # delete assistant and close client + await 
client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + # test submitting tool outputs to run with body: JSON + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # Initialize assistant tools + functions = FunctionTool(user_functions_recording) + toolset = ToolSet() + toolset.add(functions) + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + await client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + body = {"tool_outputs": tool_outputs} + await client.assistants.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, body=body) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + # hour12 = time.strftime("%H") + # hour24 = time.strftime("%I") + # minute = time.strftime("%M") + # assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute + recorded_time = "12:30" + assert recorded_time in tool_message + 
print("Used tool_outputs") + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + # test submitting tool outputs to run with body: IO[bytes] + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # Initialize assistant tools + functions = FunctionTool(user_functions_recording) + toolset = ToolSet() + toolset.add(functions) + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) + print("Tool outputs:", tool_outputs) + if tool_outputs: + body = {"tool_outputs": tool_outputs} + binary_body = json.dumps(body).encode("utf-8") + await client.assistants.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, body=io.BytesIO(binary_body) + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + # hour12 = time.strftime("%H") + # hour24 = time.strftime("%I") + # minute = time.strftime("%M") + # assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute 
+ recorded_time = "12:30" + assert recorded_time in tool_message + print("Used tool_outputs") + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + """ + # DISABLED: rewrite to ensure run is not complete when cancel_run is called + # test cancelling run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_cancel_run(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool 
outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print( + "No tool calls provided - cancelling run" + ) # TODO how can i make sure that it wants tools? should i have some kind of error message? + await client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + break + + # submit tool outputs to run + tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here + print("Tool outputs:", tool_outputs) + if tool_outputs: + await client.assistants.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print("Current run status:", run.status) + + print("Run completed with status:", run.status) + + # check that messages used the tool + messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + tool_message = messages["data"][0]["content"][0]["text"]["value"] + hour12 = time.strftime("%H") + hour24 = time.strftime("%I") + minute = time.strftime("%M") + assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute + print("Used tool_outputs") + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + """ + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_create_parallel_tool_thread_true(self, **kwargs): + """Test creation of parallel runs.""" + await self._do_test_create_parallel_thread_runs(True, True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_create_parallel_tool_thread_false(self, **kwargs): + """Test creation of parallel runs.""" + await self._do_test_create_parallel_thread_runs(False, True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def 
test_create_parallel_tool_run_true(self, **kwargs): + """Test creation of parallel runs.""" + await self._do_test_create_parallel_thread_runs(True, False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_create_parallel_tool_run_false(self, **kwargs): + """Test creation of parallel runs.""" + await self._do_test_create_parallel_thread_runs(False, False, **kwargs) + + async def _wait_for_run(self, client, run, timeout=1): + """Wait while run will get to terminal state.""" + while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS, RunStatus.REQUIRES_ACTION]: + time.sleep(timeout) + run = await client.assistants.get_run(thread_id=run.thread_id, run_id=run.id) + return run + + async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_run, **kwargs): + """Test creation of parallel runs.""" + + # create client + client = self.create_client( + **kwargs, + ) + assert isinstance(client, AssistantsClient) + + # Initialize assistant tools + functions = FunctionTool(functions=user_functions_recording) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + assistant = await client.assistants.create_assistant( + model="gpt-4", + name="my-assistant", + instructions="You are helpful assistant", + toolset=toolset, + ) + assert assistant.id + + message = ThreadMessageOptions( + role="user", + content="Hello, what time is it?", + ) + + if create_thread_run: + run = await client.assistants.create_thread_and_run( + assistant_id=assistant.id, + parallel_tool_calls=use_parallel_runs, + ) + run = await self._wait_for_run(client, run) + else: + thread = await client.assistants.create_thread(messages=[message]) + assert thread.id + + run = await client.assistants.create_and_process_run( + thread_id=thread.id, + assistant_id=assistant.id, + parallel_tool_calls=use_parallel_runs, + ) + assert run.id + 
assert run.status == RunStatus.COMPLETED, run.last_error.message + assert run.parallel_tool_calls == use_parallel_runs + + assert (await client.assistants.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" + messages = await client.assistants.list_messages(thread_id=run.thread_id) + assert len(messages.data), "The data from the assistant was not received." + + """ + # DISABLED: rewrite to ensure run is not complete when cancel_run is called + # test cancelling run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_cancel_run(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AssistantsClient) + + # create assistant + assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + # check status and cancel + assert run.status in ["queued", "in_progress", "requires_action"] + client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + + while run.status in ["queued", "cancelling"]: + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + print("Current run status:", run.status) + assert run.status == "cancelled" + print("Run cancelled") + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + """ + + # test 
create thread and run + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_and_run(self, **kwargs): + time.sleep(26) + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread and run + run = await client.assistants.create_thread_and_run(assistant_id=assistant.id) + assert run.id + assert run.thread_id + print("Created run, run ID", run.id) + + # get thread + thread = await client.assistants.get_thread(run.thread_id) + assert thread.id + print("Created thread, thread ID", thread.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + # assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + + # test create thread and run with body: JSON + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_and_run_with_body(self, **kwargs): + # time.sleep(26) + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", 
assistant.id) + + # create body for thread + body = { + "assistant_id": assistant.id, + "metadata": {"key1": "value1", "key2": "value2"}, + } + + # create thread and run + run = await client.assistants.create_thread_and_run(body=body) + assert run.id + assert run.thread_id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # get thread + thread = await client.assistants.get_thread(run.thread_id) + assert thread.id + print("Created thread, thread ID", thread.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + # assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + # test create thread and run with body: IO[bytes] + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_and_run_with_iobytes(self, **kwargs): + # time.sleep(26) + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create body for thread + body = { + "assistant_id": assistant.id, + "metadata": {"key1": "value1", "key2": "value2"}, + } + binary_body = json.dumps(body).encode("utf-8") + + # create thread and run + run = await 
client.assistants.create_thread_and_run(body=io.BytesIO(binary_body)) + assert run.id + assert run.thread_id + assert run.metadata == {"key1": "value1", "key2": "value2"} + print("Created run, run ID", run.id) + + # get thread + thread = await client.assistants.get_thread(run.thread_id) + assert thread.id + print("Created thread, thread ID", thread.id) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + # assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + """ + # test listing run steps + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_list_run_step(self, **kwargs): + + time.sleep(50) + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + steps = await client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + # commenting assertion out below, do we know exactly when run starts? + # assert steps['data'].__len__() == 0 + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] + print("Run status:", run.status) + steps = await client.assistants.list_run_steps( + thread_id=thread.id, run_id=run.id + ) + assert steps["data"].__len__() > 0 + + assert run.status == "completed" + print("Run completed") + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + """ + + # test getting run step + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_get_run_step(self, **kwargs): + # create client + async with self.create_client(**kwargs) as client: + print("Created client") + + # create assistant + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + assert assistant.id + print("Created assistant, assistant ID", assistant.id) + + # create thread + thread = await client.assistants.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
+ ) + assert message.id + print("Created message, message ID", message.id) + + # create run + run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id + print("Created run, run ID", run.id) + + if run.status == "failed": + assert run.last_error + print(run.last_error) + print("FAILED HERE") + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + if run.status == "failed": + assert run.last_error + print(run.last_error) + print("FAILED HERE") + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] + print("Run status:", run.status) + + # list steps, check that get_run_step works with first step_id + steps = await client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + assert steps["data"].__len__() > 0 + step = steps["data"][0] + get_step = await client.assistants.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + assert step == get_step + + # delete assistant and close client + await client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await client.close() + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_azure(self, **kwargs): + """Test the assistant with vector store creation.""" + await self._do_test_create_vector_store(streaming=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_file_id(self, **kwargs): + """Test the assistant with vector store creation.""" + await self._do_test_create_vector_store(streaming=False, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def 
test_create_vector_store_azure_streaming(self, **kwargs): + """Test the assistant with vector store creation.""" + await self._do_test_create_vector_store(streaming=True, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_file_id_streaming(self, **kwargs): + """Test the assistant with vector store creation.""" + await self._do_test_create_vector_store(streaming=True, file_path=self._get_data_file(), **kwargs) + + async def _do_test_create_vector_store(self, streaming, **kwargs): + """Test the assistant with vector store creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = await self._get_file_id_maybe(ai_client, **kwargs) + file_ids = [file_id] if file_id else None + if file_ids: + ds = None + else: + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = await ai_client.assistants.create_vector_store_and_poll( + file_ids=file_ids, data_sources=ds, name="my_vectorstore" + ) + assert vector_store.id + await self._test_file_search(ai_client, vector_store, file_id, streaming) + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_add_file_file_id(self, **kwargs): + """Test adding single file to vector store withn file ID.""" + await self._do_test_create_vector_store_add_file(streaming=False, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_add_file_azure(self, **kwargs): + """Test adding single file to vector store with azure asset ID.""" + await self._do_test_create_vector_store_add_file(streaming=False, **kwargs) + + 
@assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_add_file_file_id_streaming(self, **kwargs): + """Test adding single file to vector store withn file ID.""" + await self._do_test_create_vector_store_add_file(streaming=True, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_add_file_azure_streaming(self, **kwargs): + """Test adding single file to vector store with azure asset ID.""" + await self._do_test_create_vector_store_add_file(streaming=True, **kwargs) + + async def _do_test_create_vector_store_add_file(self, streaming, **kwargs): + """Test adding single file to vector store.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = await self._get_file_id_maybe(ai_client, **kwargs) + if file_id: + ds = None + else: + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + vector_store = await ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + assert vector_store.id + vector_store_file = await ai_client.assistants.create_vector_store_file( + vector_store_id=vector_store.id, data_source=ds, file_id=file_id + ) + assert vector_store_file.id + await self._test_file_search(ai_client, vector_store, file_id, streaming) + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_batch_file_ids(self, **kwargs): + """Test adding multiple files to vector store with file IDs.""" + await self._do_test_create_vector_store_batch(streaming=False, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def 
test_create_vector_store_batch_azure(self, **kwargs): + """Test adding multiple files to vector store with azure asset IDs.""" + await self._do_test_create_vector_store_batch(streaming=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_vector_store_batch_file_ids_streaming(self, **kwargs): + """Test adding multiple files to vector store with file IDs.""" + await self._do_test_create_vector_store_batch(streaming=True, file_path=self._get_data_file(), **kwargs) + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_batch_azure_streaming(self, **kwargs): + """Test adding multiple files to vector store with azure asset IDs.""" + await self._do_test_create_vector_store_batch(streaming=True, **kwargs) + + async def _do_test_create_vector_store_batch(self, streaming, **kwargs): + """Test the assistant with vector store creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = await self._get_file_id_maybe(ai_client, **kwargs) + if file_id: + file_ids = [file_id] + ds = None + else: + file_ids = None + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = await ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + assert vector_store.id + vector_store_file_batch = await ai_client.assistants.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids + ) + assert vector_store_file_batch.id + await self._test_file_search(ai_client, vector_store, file_id, streaming) + + async def _test_file_search( + self, ai_client: AssistantsClient, vector_store: VectorStore, file_id: str, streaming: bool + ) -> None: + """Test the file search""" + 
file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + assistant = await ai_client.assistants.create_assistant( + model="gpt-4", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + thread = await ai_client.assistants.create_thread() + assert thread.id + # create message + message = await ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + if streaming: + thread_run = None + async with await ai_client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + async for _, event_data, _ in stream: + if isinstance(event_data, ThreadRun): + thread_run = event_data + elif ( + isinstance(event_data, RunStepDeltaChunk) + and isinstance(event_data.delta.step_details, RunStepDeltaToolCallObject) + and event_data.delta.step_details.tool_calls + ): + assert isinstance( + event_data.delta.step_details.tool_calls[0].file_search, RunStepFileSearchToolCallResults + ) + assert thread_run is not None + run = await ai_client.assistants.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) + assert run is not None + else: + run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + await ai_client.assistants.delete_vector_store(vector_store.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = await ai_client.assistants.list_messages(thread_id=thread.id) + assert len(messages) + await self._remove_file_maybe(file_id, ai_client) + # delete assistant and close client + await ai_client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + 
@recorded_by_proxy_async + async def test_message_attachement_azure(self, **kwargs): + """Test message attachment with azure ID.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + await self._do_test_message_attachment(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_message_attachement_file_ids(self, **kwargs): + """Test message attachment with file ID.""" + await self._do_test_message_attachment(file_path=self._get_data_file(), **kwargs) + + async def _do_test_message_attachment(self, **kwargs): + """Test assistant with the message attachment.""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = await self._get_file_id_maybe(ai_client, **kwargs) + + # Create assistant with file search tool + assistant = await ai_client.assistants.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + ) + assert assistant.id, "Assistant was not created" + + thread = await ai_client.assistants.create_thread() + assert thread.id, "The thread was not created." + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. + attachment = MessageAttachment( + file_id=file_id, + data_sources=kwargs.get("data_sources"), + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], + ) + message = await ai_client.assistants.create_message( + thread_id=thread.id, + role="user", + content="What does the attachment say?", + attachments=[attachment], + ) + assert message.id, "The message was not created." 
+ + run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." + await self._remove_file_maybe(file_id, ai_client) + await ai_client.assistants.delete_assistant(assistant.id) + + messages = await ai_client.assistants.list_messages(thread_id=thread.id) + assert len(messages), "No messages were created" + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("Failing with Http Response Errors.") + @recorded_by_proxy_async + async def test_vector_store_threads_file_search_azure(self, **kwargs): + """Test file search when azure asset ids are supplied during thread creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + fs = FileSearchToolResource( + vector_stores=[ + VectorStoreConfigurations( + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), + ) + ] + ) + file_search = FileSearchTool() + assistant = await ai_client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + + thread = await ai_client.assistants.create_thread(tool_resources=ToolResources(file_search=fs)) + assert thread.id + # create message + message = await ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." 
+ + run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = await ai_client.assistants.list_messages(thread.id) + assert len(messages) + await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy_async + async def test_create_assistant_with_interpreter_azure(self, **kwargs): + """Test Create assistant with code interpreter with azure asset ids.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + await self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_assistant_with_interpreter_file_ids(self, **kwargs): + """Test Create assistant with code interpreter with file IDs.""" + await self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs) + + async def _do_test_create_assistant_with_interpreter(self, **kwargs): + """Test create assistant with code interpreter and project asset id""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = await ai_client.assistants.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) + assert file.id, "The file was not uploaded." 
+ file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), + ) + tr = ToolResources(code_interpreter=cdr) + # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = await ai_client.assistants.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=code_interpreter.definitions, + tool_resources=tr, + ) + assert assistant.id, "Assistant was not created" + + thread = await ai_client.assistants.create_thread() + assert thread.id, "The thread was not created." + + message = await ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." 
+ await self._remove_file_maybe(file_id, ai_client) + assert run.status == "completed", f"Error in run: {run.last_error}" + await ai_client.assistants.delete_assistant(assistant.id) + messages = await ai_client.assistants.list_messages(thread_id=thread.id) + assert len(messages), "No messages were created" + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy_async + async def test_create_thread_with_interpreter_azure(self, **kwargs): + """Test Create assistant with code interpreter with azure asset ids.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + await self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_thread_with_interpreter_file_ids(self, **kwargs): + """Test Create assistant with code interpreter with file IDs.""" + await self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs) + + async def _do_test_create_thread_with_interpreter(self, **kwargs): + """Test create assistant with code interpreter and project asset id""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = await ai_client.assistants.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) + assert file.id, "The file was not uploaded." 
+ file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), + ) + tr = ToolResources(code_interpreter=cdr) + # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment + assistant = await ai_client.assistants.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + assert assistant.id, "Assistant was not created" + + thread = await ai_client.assistants.create_thread(tool_resources=tr) + assert thread.id, "The thread was not created." + + message = await ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.id, "The run was not created." 
+ await self._remove_file_maybe(file_id, ai_client) + assert run.status == "completed", f"Error in run: {run.last_error}" + await ai_client.assistants.delete_assistant(assistant.id) + messages = await ai_client.assistants.list_messages(thread.id) + assert len(messages) + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("Failing with Http Response Errors.") + @recorded_by_proxy_async + async def test_create_assistant_with_inline_vs_azure(self, **kwargs): + """Test creation of asistant with vector store inline.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + fs = FileSearchToolResource( + vector_stores=[ + VectorStoreConfigurations( + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), + ) + ] + ) + file_search = FileSearchTool() + assistant = await ai_client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=ToolResources(file_search=fs), + ) + assert assistant.id + + thread = await ai_client.assistants.create_thread() + assert thread.id + # create message + message = await ai_client.assistants.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." 
+ + run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = await ai_client.assistants.list_messages(thread.id) + assert len(messages) + await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.close() + + @assistantClientPreparer() + @pytest.mark.skip("The API is not supported yet.") + @recorded_by_proxy_async + async def test_create_attachment_in_thread_azure(self, **kwargs): + """Create thread with message attachment inline with azure asset IDs.""" + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + await self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("File ID issues with sanitization.") + @recorded_by_proxy_async + async def test_create_attachment_in_thread_file_ids(self, **kwargs): + """Create thread with message attachment inline with azure asset IDs.""" + await self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs) + + async def _do_test_create_attachment_in_thread_azure(self, **kwargs): + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AssistantsClient) + + file_id = await self._get_file_id_maybe(ai_client, **kwargs) + + file_search = FileSearchTool() + assistant = await ai_client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + ) + assert assistant.id + + # create message + attachment = MessageAttachment( + file_id=file_id, + data_sources=kwargs.get("data_sources"), + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], + ) + message = 
ThreadMessageOptions( + role="user", + content="What does the attachment say?", + attachments=[attachment], + ) + thread = await ai_client.assistants.create_thread(messages=[message]) + assert thread.id + + run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = await ai_client.assistants.list_messages(thread.id) + assert len(messages) + await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.close() + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_azure_function_call(self, **kwargs): + """Test calling Azure functions.""" + # Note: This test was recorded in westus region as for now + # 2025-02-05 it is not supported in test region (East US 2) + # create client + storage_queue = kwargs["azure_ai.assistants_assistants_tests_storage_queue"] + async with self.create_client(**kwargs) as client: + azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_queue, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_queue, + ), + ) + assistant = await client.assistants.create_assistant( + model="gpt-4", + name="azure-function-assistant-foo", + instructions=( + "You are a helpful support assistant. Use the provided function any " + "time the prompt contains the string 'What would foo say?'. When " + "you invoke the function, ALWAYS specify the output queue uri parameter as " + f"'{storage_queue}/azure-function-tool-output'" + '. 
Always responds with "Foo says" and then the response from the tool.' + ), + headers={"x-ms-enable-preview": "true"}, + tools=azure_function_tool.definitions, + ) + assert assistant.id, "The assistant was not created" + + # Create a thread + thread = await client.assistants.create_thread() + assert thread.id, "The thread was not created." + + # Create a message + message = await client.assistants.create_message( + thread_id=thread.id, + role="user", + content="What is the most prevalent element in the universe? What would foo say?", + ) + assert message.id, "The message was not created." + + run = await client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state." + + # Get messages from the thread + messages = await client.assistants.list_messages(thread_id=thread.id) + assert len(messages.text_messages) > 1, "No messages were received from assistant." + + # Chech that we have function response in at least one message. + assert any("bar" in msg.text.value.lower() for msg in messages.text_messages) + + # Delete the assistant once done + result = await client.assistants.delete_assistant(assistant.id) + assert result.deleted, "The assistant was not deleted." + + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_client_with_thread_messages(self, **kwargs): + """Test assistant with thread messages.""" + async with self.create_client(**kwargs) as client: + + # [START create_assistant] + assistant = await client.assistants.create_assistant( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + ) + assert assistant.id, "The assistant was not created." + thread = await client.assistants.create_thread() + assert thread.id, "Thread was not created" + + message = await client.assistants.create_message( + thread_id=thread.id, role="user", content="What is the equation of light energy?" 
+ ) + assert message.id, "The message was not created." + + additional_messages = [ + ThreadMessageOptions(role=MessageRole.ASSISTANT, content="E=mc^2"), + ThreadMessageOptions(role=MessageRole.USER, content="What is the impedance formula?"), + ] + run = await client.assistants.create_run( + thread_id=thread.id, assistant_id=assistant.id, additional_messages=additional_messages + ) + + # poll the run as long as run status is queued or in progress + while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS]: + # wait for a second + time.sleep(1) + run = await client.assistants.get_run( + thread_id=thread.id, + run_id=run.id, + ) + assert run.status in RunStatus.COMPLETED + + assert (await client.assistants.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" + messages = await client.assistants.list_messages(thread_id=thread.id) + assert len(messages.data), "The data from the assistant was not received." + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_include_file_search_results_no_stream(self, **kwargs): + """Test using include_file_search.""" + await self._do_test_include_file_search_results(use_stream=False, include_content=True, **kwargs) + await self._do_test_include_file_search_results(use_stream=False, include_content=False, **kwargs) + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_include_file_search_results_stream(self, **kwargs): + """Test using include_file_search with streaming.""" + await self._do_test_include_file_search_results(use_stream=True, include_content=True, **kwargs) + await self._do_test_include_file_search_results(use_stream=True, include_content=False, **kwargs) + + async def _do_test_include_file_search_results(self, use_stream, include_content, **kwargs): + """Run the test with file search results.""" + async with self.create_client(**kwargs) as ai_client: 
+ ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = await ai_client.assistants.create_vector_store_and_poll( + file_ids=[], data_sources=ds, name="my_vectorstore" + ) + # vector_store = await ai_client.assistants.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') + assert vector_store.id + + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + assistant = await ai_client.assistants.create_assistant( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert assistant.id + thread = await ai_client.assistants.create_thread() + assert thread.id + # create message + message = await ai_client.assistants.create_message( + thread_id=thread.id, + role="user", + # content="What does the attachment say?" + content="What Contoso Galaxy Innovations produces?", + ) + assert message.id, "The message was not created." + include = [RunAdditionalFieldList.FILE_SEARCH_CONTENTS] if include_content else None + + if use_stream: + run = None + async with await ai_client.assistants.create_stream( + thread_id=thread.id, assistant_id=assistant.id, include=include + ) as stream: + async for event_type, event_data, _ in stream: + if isinstance(event_data, ThreadRun): + run = event_data + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + break + else: + run = await ai_client.assistants.create_and_process_run( + thread_id=thread.id, assistant_id=assistant.id, include=include + ) + assert run.status == RunStatus.COMPLETED + assert run is not None + steps = await ai_client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) + # The 1st (not 0th) step is a tool call. 
+ step_id = steps.data[1].id + one_step = await ai_client.assistants.get_run_step( + thread_id=thread.id, run_id=run.id, step_id=step_id, include=include + ) + self._assert_file_search_valid(one_step.step_details.tool_calls[0], include_content) + self._assert_file_search_valid(steps.data[1].step_details.tool_calls[0], include_content) + + messages = await ai_client.assistants.list_messages(thread_id=thread.id) + assert len(messages) + + await ai_client.assistants.delete_vector_store(vector_store.id) + # delete assistant and close client + await ai_client.assistants.delete_assistant(assistant.id) + print("Deleted assistant") + await ai_client.close() + + def _assert_file_search_valid(self, tool_call: Any, include_content: bool) -> None: + """Test that file search result is properly populated.""" + assert isinstance(tool_call, RunStepFileSearchToolCall), f"Wrong type of tool call: {type(tool_call)}." + assert isinstance( + tool_call.file_search, RunStepFileSearchToolCallResults + ), f"Wrong type of search results: {type(tool_call.file_search)}." + assert isinstance( + tool_call.file_search.results[0], RunStepFileSearchToolCallResult + ), f"Wrong type of search result: {type(tool_call.file_search.results[0])}." 
+ assert tool_call.file_search.results + if include_content: + assert tool_call.file_search.results[0].content + assert isinstance(tool_call.file_search.results[0].content[0], FileSearchToolCallContent) + assert tool_call.file_search.results[0].content[0].type == "text" + assert tool_call.file_search.results[0].content[0].text + else: + assert tool_call.file_search.results[0].content is None + + @assistantClientPreparer() + @pytest.mark.skip("Recordings not yet implemented") + @recorded_by_proxy_async + async def test_assistants_with_json_schema(self, **kwargs): + """Test structured output from the assistant.""" + async with self.create_client(**kwargs) as ai_client: + assistant = await ai_client.assistants.create_assistant( + # Note only gpt-4o-mini-2024-07-18 and + # gpt-4o-2024-08-06 and later support structured output. + model="gpt-4o-mini", + name="my-assistant", + instructions="Extract the information about planets.", + headers={"x-ms-enable-preview": "true"}, + response_format=ResponseFormatJsonSchemaType( + json_schema=ResponseFormatJsonSchema( + name="planet_mass", + description="Extract planet mass.", + schema={ + "$defs": { + "Planets": {"enum": ["Earth", "Mars", "Jupyter"], "title": "Planets", "type": "string"} + }, + "properties": { + "planet": {"$ref": "#/$defs/Planets"}, + "mass": {"title": "Mass", "type": "number"}, + }, + "required": ["planet", "mass"], + "title": "Planet", + "type": "object", + }, + ) + ), + ) + assert assistant.id + + thread = await ai_client.assistants.create_thread() + assert thread.id + + message = await ai_client.assistants.create_message( + thread_id=thread.id, + role="user", + content=("The mass of the Mars is 6.4171E23 kg"), + ) + assert message.id + + run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + + assert run.status == RunStatus.COMPLETED, run.last_error.message + + del_assistant = await ai_client.assistants.delete_assistant(assistant.id) + assert 
del_assistant.deleted + + messages = await ai_client.assistants.list_messages(thread_id=thread.id) + + planet_info = [] + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + # We will only list assistant responses here. + if isinstance(last_message_content, MessageTextContent) and data_point.role == MessageRole.ASSISTANT: + planet_info.append(json.loads(last_message_content.text.value)) + assert len(planet_info) == 1 + assert len(planet_info[0]) == 2 + assert planet_info[0].get("mass") == pytest.approx(6.4171e23, 1e22) + assert planet_info[0].get("planet") == "Mars" + + async def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str: + """Return file id if kwargs has file path.""" + if "file_path" in kwargs: + file = await ai_client.assistants.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) + assert file.id, "The file was not uploaded." 
+ return file.id + return None + + async def _remove_file_maybe(self, file_id: str, ai_client: AssistantsClient) -> None: + """Remove file if we have file ID.""" + if file_id: + await ai_client.assistants.delete_file(file_id) + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Streaming APIs + # # + # # ********************************************************************************** + + # TODO + + # # ********************************************************************************** + # # + # # NEGATIVE TESTS + # # + # # ********************************************************************************** + + """ + # DISABLED, PASSES LIVE ONLY: recordings don't capture DNS lookup errors + # test assistant creation and deletion + @assistantClientPreparer() + @recorded_by_proxy_async + async def test_negative_create_delete_assistant(self, **kwargs): + # create client using bad endpoint + bad_connection_string = "https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm" + + credential = self.get_credential(AssistantsClient, is_async=False) + client = AssistantsClient.from_connection_string( + credential=credential, + connection=bad_connection_string, + ) + + # attempt to create assistant with bad client + exception_caught = False + try: + assistant = await client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) + # check for error (will not have a status code since it failed on request -- no response was recieved) + except (ServiceRequestError, HttpResponseError) as e: + exception_caught = True + if type(e) == ServiceRequestError: + assert e.message + assert "failed to resolve 'foo.bar.some-domain.ms'" in e.message.lower() + else: + assert "No such host is known" and "foo.bar.some-domain.ms" in str(e) + + # close client and confirm an exception was caught + await 
client.close() + assert exception_caught + """ diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py new file mode 100644 index 000000000000..2f9083f6ad26 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py @@ -0,0 +1,559 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +from typing import Any, Iterator, List, MutableMapping, Optional, Dict + +import json +import os +import pytest +from unittest.mock import MagicMock, Mock, patch + +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.models import ( + CodeInterpreterTool, + FunctionTool, + RequiredFunctionToolCall, + RequiredFunctionToolCallDetails, + RequiredToolCall, + RunStatus, + SubmitToolOutputsAction, + SubmitToolOutputsDetails, + ToolSet, + ToolOutput, +) + +from user_functions import user_functions + + +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object + + +def read_file(file_name: str) -> str: + with open(os.path.join(os.path.dirname(__file__), "assets", f"{file_name}.txt"), "r") as file: + return file.read() + + +main_stream_response = read_file("main_stream_response") +fetch_current_datetime_and_weather_stream_response = read_file("fetch_current_datetime_and_weather_stream_response") +send_email_stream_response = read_file("send_email_stream_response") + + +def convert_to_byte_iterator(main_stream_response: str) -> Iterator[bytes]: + yield main_stream_response.encode() + + +def function1(): + return "output from the first assistant" + + +def function2(): + return "output from the second assistant" + + +class TestAssistantsMock: + """Tests for assistant operations""" + + LOCAL_FN = {function1.__name__: function1, function2.__name__: function2} + + def get_mock_client(self) -> AssistantsClient: + """Return the fake project client""" + client = 
AssistantsClient( + endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", + subscription_id="00000000-0000-0000-0000-000000000000", + resource_group_name="non-existing-rg", + project_name="non-existing-project", + credential=MagicMock(), + ) + client.submit_tool_outputs_to_run = MagicMock() + client.submit_tool_outputs_to_stream = MagicMock() + return client + + def get_toolset(self, file_id: Optional[str], function: Optional[str]) -> Optional[ToolSet]: + """Get the tool set with given file id and function""" + if file_id is None or function is None: + return None + functions = FunctionTool({function}) + code_interpreter = CodeInterpreterTool(file_ids=[file_id]) + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + return toolset + + def _assert_pipeline_and_reset(self, mock_pipeline_run: MagicMock, tool_set: Optional[ToolSet]) -> None: + """Check that the pipeline has correct values of tools.""" + mock_pipeline_run.assert_called_once() + data = json.loads(mock_pipeline_run.call_args_list[0].args[0].body) + assert isinstance(data, dict), f"Wrong body JSON type expected dict, found {type(data)}" + if tool_set is not None: + assert "tool_resources" in data, "tool_resources must be in data" + assert "tools" in data, "tools must be in data" + expected_file_id = tool_set.resources.code_interpreter.file_ids[0] + expected_function_name = tool_set.definitions[0].function.name + # Check code interpreter file id. + assert data["tool_resources"], "Tools resources is empty." + assert "code_interpreter" in data["tool_resources"] + assert data["tool_resources"]["code_interpreter"], "Code interpreter section is empty." + assert "file_ids" in data["tool_resources"]["code_interpreter"] + assert ( + data["tool_resources"]["code_interpreter"]["file_ids"][0] == expected_file_id + ), f"{expected_file_id[0]=}, but found {data['tool_resources']['code_interpreter']['file_ids']}" + # Check tools. 
"""Read in the assistant JSON, so that we can assume the service returned it."""
"string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + "required": ["query"], + }, + }, + "input_binding": { + "type": "storage_queue", + "storage_queue": { + "queue_service_uri": "https://example.windows.net", + "queue_name": "azure-function-foo-input", + }, + }, + "output_binding": { + "type": "storage_queue", + "storage_queue": { + "queue_service_uri": "https://example.queue.core.windows.net", + "queue_name": "azure-function-tool-output", + }, + }, + }, + } + ) + if tool_set is not None: + tool_calls.append( + RequiredFunctionToolCall( + id="0", + function=RequiredFunctionToolCallDetails( + name=tool_set.definitions[0].function.name, + arguments="{}", + ), + ) + ) + definitions.extend(tool_set.definitions) + run_dict["tool_resources"] = tool_set.resources.as_dict() + if tool_calls: + sb = SubmitToolOutputsAction(submit_tool_outputs=SubmitToolOutputsDetails(tool_calls=tool_calls)) + run_dict["required_action"] = sb.as_dict() + run_dict["tools"] = definitions + return run_dict + + def _assert_tool_call(self, submit_tool_mock: MagicMock, run_id: str, tool_set: Optional[ToolSet]) -> None: + """Check that submit_tool_outputs_to_run was called with correct parameters or was not called""" + if tool_set is not None: + expected_out = TestAssistantsMock.LOCAL_FN[tool_set.definitions[0].function.name]() + submit_tool_mock.assert_called_once() + submit_tool_mock.assert_called_with( + thread_id="some_thread_id", + run_id=run_id, + tool_outputs=[{"tool_call_id": "0", "output": expected_out}], + ) + submit_tool_mock.reset_mock() + else: + submit_tool_mock.assert_not_called() + + def _assert_toolset_dict(self, assistants_client: AssistantsClient, assistant_id: str, toolset: Optional[ToolSet]): + """Check that the tool set dictionary state is as expected.""" + if toolset is None: + assert assistant_id not in assistants_client._toolset + else: + assert 
assistants_client._toolset.get(assistant_id) is not None + + @patch("azure.ai.assistants._client.PipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,file_assistant_2", + [ + ("file_for_assistant1", "file_for_assistant2"), + (None, "file_for_assistant2"), + ("file_for_assistant1", None), + (None, None), + ], + ) + def test_multiple_assistants_create( + self, + mock_pipeline_client_gen: MagicMock, + file_assistant_1: Optional[str], + file_assistant_2: Optional[str], + ) -> None: + """Test assistants can get correct toolset.""" + toolset1 = self.get_toolset(file_assistant_1, function1) + toolset2 = self.get_toolset(file_assistant_2, function2) + + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset1), + self._get_assistant_json("second", "456", toolset2), + self._get_run("run123", toolset1), # create_run + self._get_run("run123", toolset1), # get_run + self._get_run("run123", toolset1, is_complete=True), # get_run after resubmitting with tool results + self._get_run("run456", toolset2), # create_run + self._get_run("run456", toolset2), # get_run + self._get_run("run456", toolset2, is_complete=True), # get_run after resubmitting with tool results + "{}", # delete assistant 1 + "{}", # delete assistant 2 + ] + mock_pipeline_response = MagicMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = MagicMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + with assistants_client: + # Check that pipelines are created as expected. 
+ assistant1 = assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset1, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset1) + + assistant2 = assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="second", + instructions="You are a helpful assistant", + toolset=toolset2, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset2) + # Check that the new assistants are called with correct tool sets. + assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset1) + + assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant2.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run456", toolset2) + # Check the contents of a toolset + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + self._assert_toolset_dict(assistants_client, assistant2.id, toolset2) + # Check that we cleanup tools after deleting assistant. 
+ assistants_client.delete_assistant(assistant1.id) + self._assert_toolset_dict(assistants_client, assistant1.id, None) + self._assert_toolset_dict(assistants_client, assistant2.id, toolset2) + assistants_client.delete_assistant(assistant2.id) + self._assert_toolset_dict(assistants_client, assistant1.id, None) + self._assert_toolset_dict(assistants_client, assistant2.id, None) + + @patch("azure.ai.assistants._client.PipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,file_assistant_2", + [ + ("file_for_assistant1", "file_for_assistant2"), + (None, "file_for_assistant2"), + ("file_for_assistant1", None), + (None, None), + ], + ) + def test_update_assistant_tools( + self, + mock_pipeline_client_gen: MagicMock, + file_assistant_1: Optional[str], + file_assistant_2: Optional[str], + ) -> None: + """Test that tools are properly updated.""" + toolset1 = self.get_toolset(file_assistant_1, function1) + toolset2 = self.get_toolset(file_assistant_2, function2) + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset1), + self._get_assistant_json("first", "123", toolset2), + ] + mock_pipeline_response = MagicMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = MagicMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + with assistants_client: + # Check that pipelines are created as expected. 
"""Test that if the user has set a tool set in the create_and_process_run method, those tools are used."""
+ mock_response.json.side_effect = side_effect + mock_pipeline_response = MagicMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = MagicMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + with assistants_client: + # Check that pipelines are created as expected. + assistant1 = assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset1, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset1) + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + + # Create run with new tool set, which also can be none. + assistants_client.create_and_process_run( + thread_id="some_thread_id", assistant_id=assistant1.id, toolset=toolset2 + ) + if toolset2 is not None: + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset2) + else: + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset1) + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + + @patch("azure.ai.assistants._client.PipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,add_azure_fn", + [ + ("file_for_assistant1", True), + (None, True), + ("file_for_assistant1", False), + (None, False), + ], + ) + def test_with_azure_function( + self, + mock_pipeline_client_gen: MagicMock, + file_assistant_1: Optional[str], + add_azure_fn: bool, + ) -> None: + """Test azure function with toolset.""" + toolset = self.get_toolset(file_assistant_1, function1) + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset), + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # create_run + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # get_run + 
self._get_run( + "run123", toolset, add_azure_fn=add_azure_fn, is_complete=True + ), # get_run after resubmitting with tool results + ] + mock_pipeline_response = MagicMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = MagicMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + with assistants_client: + # Check that pipelines are created as expected. + assistant1 = assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # Create run with new tool set, which also can be none. + assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset) + + def _assert_stream_call(self, submit_tool_mock: MagicMock, run_id: str, tool_set: Optional[ToolSet]) -> None: + """Assert that stream has received the correct values.""" + if tool_set is not None: + expected_out = TestAssistantsMock.LOCAL_FN[tool_set.definitions[0].function.name]() + submit_tool_mock.assert_called_once() + submit_tool_mock.assert_called_with( + thread_id="some_thread_id", + run_id=run_id, + tool_outputs=[{"tool_call_id": "0", "output": expected_out}], + event_handler=None, + ) + submit_tool_mock.reset_mock() + else: + submit_tool_mock.assert_not_called() + + @patch("azure.ai.assistants._client.PipelineClient") + @pytest.mark.skip("Recordings not yet available") + @pytest.mark.parametrize( + "file_assistant_1,add_azure_fn", + [ + ("file_for_assistant1", True), + (None, True), + ("file_for_assistant1", False), + (None, False), + ], + ) + def test_handle_submit_tool_outputs( + self, + mock_pipeline_client_gen: MagicMock, + file_assistant_1: Optional[str], + add_azure_fn: bool, + ) -> None: + """Test handling of stream tools response.""" + toolset 
= self.get_toolset(file_assistant_1, function1) + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset), + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # create_run + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # get_run + self._get_run( + "run123", toolset, add_azure_fn=add_azure_fn, is_complete=True + ), # get_run after resubmitting with tool results + ] + mock_pipeline_response = MagicMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = MagicMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + with assistants_client: + # Check that pipelines are created as expected. + assistant1 = assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # Create run with new tool set, which also can be none. 
+ run = assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset) + assistants_client._handle_submit_tool_outputs(run) + self._assert_stream_call(assistants_client.submit_tool_outputs_to_stream, "run123", toolset) + + +class TestIntegrationAssistantsMock: + + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, *, tool_outputs: List[ToolOutput], stream_parameter: bool, stream: bool + ) -> Iterator[bytes]: + assert thread_id == "thread_01" + assert run_id == "run_01" + assert stream_parameter == True + assert stream == True + if ( + len(tool_outputs) == 2 + and tool_outputs[0]["tool_call_id"] == "call_01" + and tool_outputs[1]["tool_call_id"] == "call_02" + ): + return convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response) + elif len(tool_outputs) == 1 and tool_outputs[0]["tool_call_id"] == "call_03": + return convert_to_byte_iterator(send_email_stream_response) + raise ValueError("Unexpected tool outputs") + + @patch( + "azure.ai.assistants._operations.AssistantsClientOperationsMixin.create_run", + return_value=convert_to_byte_iterator(main_stream_response), + ) + @patch("azure.ai.assistants.AssistantsClient.__init__", return_value=None) + @patch( + "azure.ai.assistants.AssistantsClient.submit_tool_outputs_to_run", + ) + def test_create_stream_with_tool_calls(self, mock_submit_tool_outputs_to_run: Mock, *args): + mock_submit_tool_outputs_to_run.side_effect = self.submit_tool_outputs_to_run + functions = FunctionTool(user_functions) + toolset = ToolSet() + toolset.add(functions) + + operation = AssistantsClient() + operation._toolset = {"asst_01": toolset} + count = 0 + + with operation.create_stream(thread_id="thread_id", assistant_id="asst_01") as stream: + for _ in stream: + count += 1 + # TODO: Fix this test; it does not submit the tool output and hence cannot return all output. 
+ assert count == ( + main_stream_response.count("event:") + # + fetch_current_datetime_and_weather_stream_response.count("event:") + # + send_email_stream_response.count("event:") + ) diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py new file mode 100644 index 000000000000..51351b93b624 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py @@ -0,0 +1,568 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +from typing import Any, MutableMapping, Optional, Dict, List, AsyncIterator + +import json +import os +import pytest +from unittest.mock import AsyncMock, MagicMock, Mock, patch + +from azure.ai.assistants.aio import AssistantsClient +from azure.ai.assistants.models import ( + AsyncFunctionTool, + AsyncToolSet, + CodeInterpreterTool, + RequiredFunctionToolCall, + RequiredFunctionToolCallDetails, + RequiredToolCall, + RunStatus, + SubmitToolOutputsAction, + SubmitToolOutputsDetails, + ToolOutput, +) + +from user_functions import user_functions + + +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object + + +def read_file(file_name: str) -> str: + with open(os.path.join(os.path.dirname(__file__), "assets", f"{file_name}.txt"), "r") as file: + return file.read() + + +main_stream_response = read_file("main_stream_response") +fetch_current_datetime_and_weather_stream_response = read_file("fetch_current_datetime_and_weather_stream_response") +send_email_stream_response = read_file("send_email_stream_response") + + +async def convert_to_byte_iterator(main_stream_response: str) -> AsyncIterator[bytes]: + yield main_stream_response.encode() + + +def function1(): + return "output from the first assistant" + + +def function2(): + return "output from the second assistant" + + +class TestAssistantsMock: + """Tests for assistant 
operations""" + + LOCAL_FN = {function1.__name__: function1, function2.__name__: function2} + + def get_mock_client(self) -> AssistantsClient: + """Return the fake project client""" + client = AssistantsClient( + endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", + subscription_id="00000000-0000-0000-0000-000000000000", + resource_group_name="non-existing-rg", + project_name="non-existing-project", + credential=AsyncMock(), + ) + client.submit_tool_outputs_to_run = AsyncMock() + client.submit_tool_outputs_to_stream = AsyncMock() + # Set sync method to avoid warning. + client._client.format_url = MagicMock() + return client + + def get_toolset(self, file_id: Optional[str], function: Optional[str]) -> Optional[AsyncToolSet]: + """Get the tool set with given file id and function""" + if file_id is None or function is None: + return None + functions = AsyncFunctionTool({function}) + code_interpreter = CodeInterpreterTool(file_ids=[file_id]) + toolset = AsyncToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + return toolset + + def _assert_pipeline_and_reset(self, mock_pipeline_run: AsyncMock, tool_set: Optional[AsyncToolSet]) -> None: + """Check that the pipeline has correct values of tools.""" + mock_pipeline_run.assert_called_once() + data = json.loads(mock_pipeline_run.call_args_list[0].args[0].body) + assert isinstance(data, dict), f"Wrong body JSON type expected dict, found {type(data)}" + if tool_set is not None: + assert "tool_resources" in data, "tool_resources must be in data" + assert "tools" in data, "tools must be in data" + expected_file_id = tool_set.resources.code_interpreter.file_ids[0] + expected_function_name = tool_set.definitions[0].function.name + # Check code interpreter file id. + assert data["tool_resources"], "Tools resources is empty." + assert "code_interpreter" in data["tool_resources"] + assert data["tool_resources"]["code_interpreter"], "Code interpreter section is empty." 
"""Read in the assistant JSON, so that we can assume the service returned it."""
tool_calls.append(RequiredToolCall(id="1", type="azure_function")) + definitions.append( + { + "type": "azure_function", + "azure_function": { + "function": { + "name": "foo", + "description": "Get answers from the foo bot.", + "parameters": { + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + "required": ["query"], + }, + }, + "input_binding": { + "type": "storage_queue", + "storage_queue": { + "queue_service_uri": "https://example.windows.net", + "queue_name": "azure-function-foo-input", + }, + }, + "output_binding": { + "type": "storage_queue", + "storage_queue": { + "queue_service_uri": "https://example.queue.core.windows.net", + "queue_name": "azure-function-tool-output", + }, + }, + }, + } + ) + if tool_set is not None: + tool_calls.append( + RequiredFunctionToolCall( + id="0", + function=RequiredFunctionToolCallDetails( + name=tool_set.definitions[0].function.name, + arguments="{}", + ), + ) + ) + definitions.extend(tool_set.definitions) + run_dict["tool_resources"] = tool_set.resources.as_dict() + if tool_calls: + sb = SubmitToolOutputsAction(submit_tool_outputs=SubmitToolOutputsDetails(tool_calls=tool_calls)) + run_dict["required_action"] = sb.as_dict() + run_dict["tools"] = definitions + return run_dict + + def _assert_tool_call(self, submit_tool_mock: AsyncMock, run_id: str, tool_set: Optional[AsyncToolSet]) -> None: + """Check that submit_tool_outputs_to_run was called with correct parameters or was not called""" + if tool_set is not None: + expected_out = TestAssistantsMock.LOCAL_FN[tool_set.definitions[0].function.name]() + submit_tool_mock.assert_called_once() + submit_tool_mock.assert_called_with( + thread_id="some_thread_id", + run_id=run_id, + tool_outputs=[{"tool_call_id": "0", "output": expected_out}], + ) + submit_tool_mock.reset_mock() + else: + submit_tool_mock.assert_not_called() + + def 
_assert_toolset_dict(self, assistants_client: AssistantsClient, assistant_id: str, toolset: Optional[AsyncToolSet]): + """Check that the tool set dictionary state is as expected.""" + if toolset is None: + assert assistant_id not in assistants_client._toolset + else: + assert assistants_client._toolset.get(assistant_id) is not None + + @pytest.mark.asyncio + @patch("azure.ai.assistants.aio._client.AsyncPipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,file_assistant_2", + [ + # ("file_for_assistant1", "file_for_assistant2"), + (None, "file_for_assistant2"), + # ("file_for_assistant1", None), + # (None, None), + ], + ) + async def test_multiple_assistants_create( + self, + mock_pipeline_client_gen: AsyncMock, + file_assistant_1: Optional[str], + file_assistant_2: Optional[str], + ) -> None: + """Test assistants can get correct toolset.""" + toolset1 = self.get_toolset(file_assistant_1, function1) + toolset2 = self.get_toolset(file_assistant_2, function2) + + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset1), + self._get_assistant_json("second", "456", toolset2), + self._get_run("run123", toolset1), # create_run + self._get_run("run123", toolset1), # get_run + self._get_run("run123", toolset1, is_complete=True), # get_run after resubmitting with tool results + self._get_run("run456", toolset2), # create_run + self._get_run("run456", toolset2), # get_run + self._get_run("run456", toolset2, is_complete=True), # get_run after resubmitting with tool results + "{}", # delete assistant 1 + "{}", # delete assistant 2 + ] + mock_pipeline_response = AsyncMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = AsyncMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + async with assistants_client: + # Check that 
pipelines are created as expected. + assistant1 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset1, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset1) + + assistant2 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="second", + instructions="You are a helpful assistant", + toolset=toolset2, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset2) + # Check that the new assistants are called with correct tool sets. + await assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset1) + + await assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant2.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run456", toolset2) + # Check the contents of a toolset + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + self._assert_toolset_dict(assistants_client, assistant2.id, toolset2) + # Check that we cleanup tools after deleting assistant. 
+ await assistants_client.delete_assistant(assistant1.id) + self._assert_toolset_dict(assistants_client, assistant1.id, None) + self._assert_toolset_dict(assistants_client, assistant2.id, toolset2) + await assistants_client.delete_assistant(assistant2.id) + self._assert_toolset_dict(assistants_client, assistant1.id, None) + self._assert_toolset_dict(assistants_client, assistant2.id, None) + + @pytest.mark.asyncio + @patch("azure.ai.assistants.aio._client.AsyncPipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,file_assistant_2", + [ + ("file_for_assistant1", "file_for_assistant2"), + (None, "file_for_assistant2"), + ("file_for_assistant1", None), + (None, None), + ], + ) + async def test_update_assistant_tools( + self, + mock_pipeline_client_gen: AsyncMock, + file_assistant_1: Optional[str], + file_assistant_2: Optional[str], + ) -> None: + """Test that tools are properly updated.""" + toolset1 = self.get_toolset(file_assistant_1, function1) + toolset2 = self.get_toolset(file_assistant_2, function2) + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset1), + self._get_assistant_json("first", "123", toolset2), + ] + mock_pipeline_response = AsyncMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = AsyncMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + async with assistants_client: + # Check that pipelines are created as expected. 
+ assistant1 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset1, + ) + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + await assistants_client.update_assistant(assistant1.id, toolset=toolset2) + if toolset2 is None: + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + else: + self._assert_toolset_dict(assistants_client, assistant1.id, toolset2) + + @pytest.mark.asyncio + @patch("azure.ai.assistants.aio._client.AsyncPipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,file_assistant_2", + [ + ("file_for_assistant1", "file_for_assistant2"), + (None, "file_for_assistant2"), + ("file_for_assistant1", None), + (None, None), + ], + ) + async def test_create_run_tools_override( + self, + mock_pipeline_client_gen: AsyncMock, + file_assistant_1: Optional[str], + file_assistant_2: Optional[str], + ) -> None: + """Test that if user have set tool set in create create_and_process_run method, that tools are used.""" + toolset1 = self.get_toolset(file_assistant_1, function1) + toolset2 = self.get_toolset(file_assistant_2, function2) + mock_response = MagicMock() + mock_response.status_code = 200 + side_effect = [self._get_assistant_json("first", "123", toolset1)] + if toolset1 is not None or toolset2 is not None: + toolset = toolset2 if toolset2 is not None else toolset1 + side_effect.append(self._get_run("run123", toolset)) # create_run + side_effect.append(self._get_run("run123", toolset)) # get_run + side_effect.append( + self._get_run("run123", toolset, is_complete=True) + ) # get_run after resubmitting with tool results + else: + side_effect.append( + self._get_run("run123", None, is_complete=True) + ) # Run must be marked as completed in this case. 
+ mock_response.json.side_effect = side_effect + mock_pipeline_response = AsyncMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = AsyncMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + async with assistants_client: + # Check that pipelines are created as expected. + assistant1 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset1, + ) + self._assert_pipeline_and_reset(mock_pipeline._pipeline.run, tool_set=toolset1) + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + + # Create run with new tool set, which also can be none. + await assistants_client.create_and_process_run( + thread_id="some_thread_id", assistant_id=assistant1.id, toolset=toolset2 + ) + if toolset2 is not None: + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset2) + else: + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset1) + self._assert_toolset_dict(assistants_client, assistant1.id, toolset1) + + @pytest.mark.asyncio + @patch("azure.ai.assistants.aio._client.AsyncPipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,add_azure_fn", + [ + ("file_for_assistant1", True), + (None, True), + ("file_for_assistant1", False), + (None, False), + ], + ) + async def test_with_azure_function( + self, + mock_pipeline_client_gen: AsyncMock, + file_assistant_1: Optional[str], + add_azure_fn: bool, + ) -> None: + """Test azure function with toolset.""" + toolset = self.get_toolset(file_assistant_1, function1) + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset), + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # create_run + 
self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # get_run + self._get_run( + "run123", toolset, add_azure_fn=add_azure_fn, is_complete=True + ), # get_run after resubmitting with tool results + ] + mock_pipeline_response = AsyncMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = AsyncMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + async with assistants_client: + # Check that pipelines are created as expected. + assistant1 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # Create run with new tool set, which also can be none. + await assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset) + + def _assert_stream_call(self, submit_tool_mock: AsyncMock, run_id: str, tool_set: Optional[AsyncToolSet]) -> None: + """Assert that stream has received the correct values.""" + if tool_set is not None: + expected_out = TestAssistantsMock.LOCAL_FN[tool_set.definitions[0].function.name]() + submit_tool_mock.assert_called_once() + submit_tool_mock.assert_called_with( + thread_id="some_thread_id", + run_id=run_id, + tool_outputs=[{"tool_call_id": "0", "output": expected_out}], + event_handler=None, + ) + submit_tool_mock.reset_mock() + else: + submit_tool_mock.assert_not_called() + + @pytest.mark.asyncio + @pytest.mark.skip("Recordings not yet available") + @patch("azure.ai.assistants.aio._client.AsyncPipelineClient") + @pytest.mark.parametrize( + "file_assistant_1,add_azure_fn", + [ + ("file_for_assistant1", True), + (None, True), + ("file_for_assistant1", False), + (None, False), + ], + ) + async def test_handle_submit_tool_outputs( + self, + mock_pipeline_client_gen: 
AsyncMock, + file_assistant_1: Optional[str], + add_azure_fn: bool, + ) -> None: + """Test handling of stream tools response.""" + toolset = self.get_toolset(file_assistant_1, function1) + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = [ + self._get_assistant_json("first", "123", toolset), + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # create_run + self._get_run("run123", toolset, add_azure_fn=add_azure_fn), # get_run + self._get_run( + "run123", toolset, add_azure_fn=add_azure_fn, is_complete=True + ), # get_run after resubmitting with tool results + ] + mock_pipeline_response = AsyncMock() + mock_pipeline_response.http_response = mock_response + mock_pipeline = AsyncMock() + mock_pipeline._pipeline.run.return_value = mock_pipeline_response + mock_pipeline_client_gen.return_value = mock_pipeline + assistants_client = self.get_mock_client() + async with assistants_client: + # Check that pipelines are created as expected. + assistant1 = await assistants_client.create_assistant( + model="gpt-4-1106-preview", + name="first", + instructions="You are a helpful assistant", + toolset=toolset, + ) + # Create run with new tool set, which also can be none. 
+ run = await assistants_client.create_and_process_run(thread_id="some_thread_id", assistant_id=assistant1.id) + self._assert_tool_call(assistants_client.submit_tool_outputs_to_run, "run123", toolset) + await assistants_client._handle_submit_tool_outputs(run) + self._assert_stream_call(assistants_client.submit_tool_outputs_to_stream, "run123", toolset) + + +class TestIntegrationAssistantsClient: + + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, *, tool_outputs: List[ToolOutput], stream_parameter: bool, stream: bool + ) -> AsyncIterator[bytes]: + assert thread_id == "thread_01" + assert run_id == "run_01" + assert stream_parameter == True + assert stream == True + if ( + len(tool_outputs) == 2 + and tool_outputs[0]["tool_call_id"] == "call_01" + and tool_outputs[1]["tool_call_id"] == "call_02" + ): + return convert_to_byte_iterator(fetch_current_datetime_and_weather_stream_response) + elif len(tool_outputs) == 1 and tool_outputs[0]["tool_call_id"] == "call_03": + return convert_to_byte_iterator(send_email_stream_response) + raise ValueError("Unexpected tool outputs") + + @pytest.mark.asyncio + @patch( + "azure.ai.assistants.aio._operations.AssistantsClientOperationsMixin.create_run", + return_value=convert_to_byte_iterator(main_stream_response), + ) + @patch("azure.ai.assistants.aio.AssistantsClient.__init__", return_value=None) + @patch( + "azure.ai.assistants.aio.AssistantsClient.submit_tool_outputs_to_run", + ) + async def test_create_stream_with_tool_calls(self, mock_submit_tool_outputs_to_run: Mock, *args): + mock_submit_tool_outputs_to_run.side_effect = self.submit_tool_outputs_to_run + functions = AsyncFunctionTool(user_functions) + toolset = AsyncToolSet() + toolset.add(functions) + + operation = AssistantsClient() + operation._toolset = {"asst_01": toolset} + count = 0 + + async with await operation.create_stream(thread_id="thread_id", assistant_id="asst_01") as stream: + async for _ in stream: + count += 1 + # TODO: Fix this test; it 
does not submit the tool output and hence cannot return all output. + assert count == ( + main_stream_response.count("event:") + + # + fetch_current_datetime_and_weather_stream_response.count("event:") + # + send_email_stream_response.count("event:") + ) diff --git a/sdk/ai/azure-ai-assistants/tests/test_data/assistant.json b/sdk/ai/azure-ai-assistants/tests/test_data/assistant.json new file mode 100644 index 000000000000..7221baa942bc --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_data/assistant.json @@ -0,0 +1,11 @@ +{ + "id": "{agent_id_placeholder}", + "object": "assistant", + "created_at": 1731539287, + "name": "{agent_name_placeholder}", + "description": null, + "model": "gpt-4-1106-preview", + "instructions": "You are a helpful assistant", + "metadata": {}, + "response_format": "auto" +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/tests/test_data/product_info_1.md b/sdk/ai/azure-ai-assistants/tests/test_data/product_info_1.md new file mode 100644 index 000000000000..041155831d53 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_data/product_info_1.md @@ -0,0 +1,51 @@ +# Information about product item_number: 1 + +## Brand +Contoso Galaxy Innovations + +## Category +Smart Eyewear + +## Features +- Augmented Reality interface +- Voice-controlled AI assistant +- HD video recording with 3D audio +- UV protection and blue light filtering +- Wireless charging with extended battery life + +## User Guide + +### 1. Introduction +Introduction to your new SmartView Glasses + +### 2. Product Overview +Overview of features and controls + +### 3. Sizing and Fit +Finding your perfect fit and style adjustments + +### 4. Proper Care and Maintenance +Cleaning and caring for your SmartView Glasses + +### 5. Break-in Period +Adjusting to the augmented reality experience + +### 6. Safety Tips +Safety guidelines for public and private spaces + +### 7. 
Troubleshooting +Quick fixes for common issues + +## Warranty Information +Two-year limited warranty on all electronic components + +## Contact Information +Customer Support at support@contoso-galaxy-innovations.com + +## Return Policy +30-day return policy with no questions asked + +## FAQ +- How to sync your SmartView Glasses with your devices +- Troubleshooting connection issues +- Customizing your augmented reality environment diff --git a/sdk/ai/azure-ai-assistants/tests/test_data/thread_run.json b/sdk/ai/azure-ai-assistants/tests/test_data/thread_run.json new file mode 100644 index 000000000000..37c431735d97 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_data/thread_run.json @@ -0,0 +1,31 @@ +{ + "id": "thread_id_placeholder", + "object": "thread.run", + "created_at": 1731542794, + "assistant_id": "some_id", + "thread_id": "some_thread_id", + "status": "requires_action", + "started_at": null, + "expires_at": 1731543394, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "required_action": null, + "last_error": null, + "model": "gpt-4-1106-preview", + "instructions": "You are a helpful assistant", + "metadata": {}, + "temperature": 1.0, + "top_p": 1.0, + "max_completion_tokens": null, + "max_prompt_tokens": null, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "incomplete_details": null, + "usage": null, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/tests/test_deserialization.py b/sdk/ai/azure-ai-assistants/tests/test_deserialization.py new file mode 100644 index 000000000000..9c22e5e9c24d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_deserialization.py @@ -0,0 +1,93 @@ +# # ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +import copy +import datetime +import pytest + +from azure.ai.assistants.models._models import ThreadRun, RunStep, ThreadMessage +from azure.ai.assistants.models._patch import _safe_instantiate, _filter_parameters + + +class TestDeserialization: + """Tests for deserialization of sse responses.""" + + @pytest.mark.parametrize( + "valid_params,model_cls", + [ + ( + { + "id": "12345", + "object": "thread.run", + "thread_id": "6789", + "assistant_id": "101112", + "status": "in_progress", + "required_action": "test", + "last_error": "none", + "model": "gpt-4", + "instructions": "Test instruction", + "tools": "Test function", + "created_at": datetime.datetime(2024, 11, 14), + "expires_at": datetime.datetime(2024, 11, 17), + "started_at": datetime.datetime(2024, 11, 15), + "completed_at": datetime.datetime(2024, 11, 16), + "cancelled_at": datetime.datetime(2024, 11, 16), + "failed_at": datetime.datetime(2024, 11, 16), + "incomplete_details": "max_completion_tokens", + "usage": "in_progress", + "temperature": 1.0, + "top_p": 1.0, + "max_completion_tokens": 1000, + "truncation_strategy": "test", + "tool_choice": "tool name", + "response_format": "json", + "metadata": {"foo": "bar"}, + "tool_resources": "test", + "parallel_tool_calls": True, + }, + ThreadRun, + ), + ( + { + "id": "1233", + "object": "thread.message", + "created_at": datetime.datetime(2024, 11, 14), + "thread_id": "5678", + "status": "incomplete", + "incomplete_details": "test", + "completed_at": datetime.datetime(2024, 11, 16), + "incomplete_at": datetime.datetime(2024, 11, 16), + "role": "assistant", + "content": "Test", + "assistant_id": "9911", + "run_id": "11", + "attachments": ["4", "8", "15", "16", "23", "42"], + "metadata": {"foo", "bar"}, + }, + ThreadMessage, + ), + ], + ) + def test_correct_thread_params(self, valid_params, model_cls): + """Test that if service returned extra parameter in SSE response, it does not create issues.""" + + bad_params = {"foo": 
"bar"} + params = copy.deepcopy(valid_params) + params.update(bad_params) + # We should bot e able to create Thread Run with bad parameters. + with pytest.raises(TypeError): + model_cls(**params) + filtered_params = _filter_parameters(model_cls, params) + for k in valid_params: + assert k in filtered_params, f"{k} not in {list(filtered_params.keys())}" + for k in bad_params: + assert k not in filtered_params + # Implicitly check that we can create object with the filtered parameters. + model_cls(**filtered_params) + # Check safe initialization. + assert isinstance(_safe_instantiate(model_cls, params), model_cls) + + def test_safe_instantiate_non_dict(self): + """Test that safe_instantiate method when user supplies not a dictionary.""" + assert _safe_instantiate(RunStep, 42) == 42 diff --git a/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py b/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py new file mode 100644 index 000000000000..3f7c8b343e18 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py @@ -0,0 +1,25 @@ +import unittest +import pytest +from azure.ai.assistants import AssistantsClient +from azure.ai.assistants.aio import AssistantsClient as AsyncAssistantsOperations +from overload_assert_utils import OverloadAssertion, assert_same_http_requests + + +class TestDeclarator(unittest.TestCase): + + @pytest.mark.asyncio + @assert_same_http_requests + async def test_assert_errors(self, assistant: AssistantsClient, _: AsyncAssistantsOperations, assertion: OverloadAssertion): + # This is a special test case tested verified the decorator assert name field presents in one call but not another + model = "gpt-4-1106-preview" + name = "first" + instructions = "You are a helpful assistant" + body = {"model": model, "name": name, "instructions": instructions} + + assistant.create_agent(model=model, instructions=instructions) + assistant.create_agent(body=body) + + # Expect failure because the name field is missing in the second call 
+ # If it doesn't assert, it means the decorator is not working and the test is failing here + with pytest.raises(AssertionError): + assertion.same_http_requests_from(operation_count=2, api_per_operation_count=1) diff --git a/sdk/ai/azure-ai-assistants/tests/test_vector_store.py b/sdk/ai/azure-ai-assistants/tests/test_vector_store.py new file mode 100644 index 000000000000..1c741bfca2bd --- /dev/null +++ b/sdk/ai/azure-ai-assistants/tests/test_vector_store.py @@ -0,0 +1,43 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +# cSpell:disable +import unittest +from azure.ai.assistants._model_base import _deserialize +from azure.ai.assistants.models import _models + + +class Test(unittest.TestCase): + + def testName(self): + val = { + "id": "vs_OQpX6y9YM368EBZ5GmF45kRO", + "object": "vector_store", + "name": "TV Support FAQ", + "status": "completed", + "usage_bytes": 0, + "created_at": 1729730726, + "file_counts": {"in_progress": 0, "completed": 0, "failed": 0, "cancelled": 0, "total": 0}, + "metadata": {"source": "Assistant API Tests"}, + "expires_after": None, + "expires_at": None, + "last_active_at": 1729730726, + "configuration": { + "data_sources": [ + { + "type": "uri_asset", + "uri": "azureml://subscriptions/10e1de13-9717-4242-acf5-3e241940d326/resourcegroups/rg-sawidderai/workspaces/sawidder-0278/datastores/workspaceblobstore/paths/UI/2024-10-01_001042_UTC/unit-test.txt", + } + ] + }, + "configuration1": {}, + } + # json_val = json.dumps(val) + vct = _deserialize(_models.VectorStore, val) + + +if __name__ == "__main__": + # import sys;sys.argv = ['', 'Test.testName'] + unittest.main() diff --git a/sdk/ai/azure-ai-assistants/tests/user_functions.py b/sdk/ai/azure-ai-assistants/tests/user_functions.py new file mode 100644 index 000000000000..883fd2fa8e32 --- /dev/null +++ 
b/sdk/ai/azure-ai-assistants/tests/user_functions.py @@ -0,0 +1,228 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime +from typing import Any, Callable, Set, Dict, List, Optional + +# These are the user-defined functions that can be called by the agent. + + +def fetch_current_datetime(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. + :rtype: str + """ + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. + mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +def send_email(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Email address of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. 
+ :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. + print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +def calculate_sum(a: int, b: int) -> str: + """Calculates the sum of two integers. + + :param a (int): First integer. + :rtype: int + :param b (int): Second integer. + :rtype: int + + :return: The sum of the two integers. + :rtype: str + """ + result = a + b + return json.dumps({"result": result}) + + +def convert_temperature(celsius: float) -> str: + """Converts temperature from Celsius to Fahrenheit. + + :param celsius (float): Temperature in Celsius. + :rtype: float + + :return: Temperature in Fahrenheit. + :rtype: str + """ + fahrenheit = (celsius * 9 / 5) + 32 + return json.dumps({"fahrenheit": fahrenheit}) + + +def toggle_flag(flag: bool) -> str: + """Toggles a boolean flag. + + :param flag (bool): The flag to toggle. + :rtype: bool + + :return: The toggled flag. + :rtype: str + """ + toggled = not flag + return json.dumps({"toggled_flag": toggled}) + + +def merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> str: + """Merges two dictionaries. + + :param dict1 (Dict[str, Any]): First dictionary. + :rtype: dict + :param dict2 (Dict[str, Any]): Second dictionary. + :rtype: dict + + :return: The merged dictionary. + :rtype: str + """ + merged = dict1.copy() + merged.update(dict2) + return json.dumps({"merged_dict": merged}) + + +def get_user_info(user_id: int) -> str: + """Retrieves user information based on user ID. + + :param user_id (int): ID of the user. + :rtype: int + + :return: User information as a JSON string. 
+ :rtype: str + """ + mock_users = { + 1: {"name": "Alice", "email": "alice@example.com"}, + 2: {"name": "Bob", "email": "bob@example.com"}, + 3: {"name": "Charlie", "email": "charlie@example.com"}, + } + user_info = mock_users.get(user_id, {"error": "User not found."}) + return json.dumps({"user_info": user_info}) + + +def longest_word_in_sentences(sentences: List[str]) -> str: + """Finds the longest word in each sentence. + + :param sentences (List[str]): A list of sentences. + :return: A JSON string mapping each sentence to its longest word. + :rtype: str + """ + if not sentences: + return json.dumps({"error": "The list of sentences is empty"}) + + longest_words = {} + for sentence in sentences: + # Split sentence into words + words = sentence.split() + if words: + # Find the longest word + longest_word = max(words, key=len) + longest_words[sentence] = longest_word + else: + longest_words[sentence] = "" + + return json.dumps({"longest_words": longest_words}) + + +def process_records(records: List[Dict[str, int]]) -> str: + """ + Process a list of records, where each record is a dictionary with string keys and integer values. + + :param records: A list containing dictionaries that map strings to integers. + :return: A list of sums of the integer values in each record. + """ + sums = [] + for record in records: + # Sum up all the values in each dictionary and append the result to the sums list + total = sum(record.values()) + sums.append(total) + return json.dumps({"sums": sums}) + + +# Example User Input for Each Function +# 1. Fetch Current DateTime +# User Input: "What is the current date and time?" +# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" + +# 2. Fetch Weather +# User Input: "Can you provide the weather information for New York?" + +# 3. Send Email +# User Input: "Send an email to john.doe@example.com with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" + +# 4. 
Calculate Sum +# User Input: "What is the sum of 45 and 55?" + +# 5. Convert Temperature +# User Input: "Convert 25 degrees Celsius to Fahrenheit." + +# 6. Toggle Flag +# User Input: "Toggle the flag True." + +# 7. Merge Dictionaries +# User Input: "Merge these two dictionaries: {'name': 'Alice'} and {'age': 30}." + +# 8. Get User Info +# User Input: "Retrieve user information for user ID 1." + +# 9. Longest Word in Sentences +# User Input: "Find the longest word in each of these sentences: ['The quick brown fox jumps over the lazy dog', 'Python is an amazing programming language', 'Azure AI capabilities are impressive']." + +# 10. Process Records +# User Input: "Process the following records: [{'a': 10, 'b': 20}, {'x': 5, 'y': 15, 'z': 25}, {'m': 30}]." + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_current_datetime, + fetch_weather, + send_email, + calculate_sum, + convert_temperature, + toggle_flag, + merge_dicts, + get_user_info, + longest_word_in_sentences, + process_records, +} From 86931ecb82fe6060e61283e9aa6e5adc61d2319d Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Thu, 10 Apr 2025 08:33:48 -0700 Subject: [PATCH 4/7] Add telenetry --- .../azure/ai/assistants/telemetry/__init__.py | 17 + .../telemetry/_ai_assistants_instrumentor.py | 1907 +++++++++++++++++ .../assistants/telemetry/_trace_function.py | 204 ++ .../azure/ai/assistants/telemetry/_utils.py | 292 +++ 4 files changed, 2420 insertions(+) create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py create mode 100644 sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py 
b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py new file mode 100644 index 000000000000..cc45b34ae7d5 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py @@ -0,0 +1,17 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._ai_assistants_instrumentor import AIAssistantsInstrumentor +from ._utils import enable_telemetry +from ._trace_function import trace_function + +__all__ = [ + "AIAssistantsInstrumentor", + "enable_telemetry", + "trace_function", +] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py new file mode 100644 index 000000000000..8729f04d1f6e --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py @@ -0,0 +1,1907 @@ +# pylint: disable=too-many-lines,line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License.
+# ------------------------------------ +import copy +import functools +import importlib +import json +import logging +import os +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast +from urllib.parse import urlparse + +from azure.ai.assistants import _types +from azure.ai.assistants.models import AssistantRunStream, AsyncAssistantRunStream, _models +from azure.ai.assistants.models._enums import AssistantsApiResponseFormatMode, MessageRole, RunStepStatus +from azure.ai.assistants.models import ( + MessageAttachment, + MessageDeltaChunk, + MessageIncompleteDetails, + RequiredFunctionToolCall, + RunStep, + RunStepDeltaChunk, + RunStepFunctionToolCall, + RunStepToolCallDetails, + SubmitToolOutputsAction, + ThreadMessage, + ThreadRun, + ToolDefinition, + ToolOutput, + ToolResources, +) +from azure.ai.assistants.models._patch import AssistantEventHandler, AsyncAssistantEventHandler, ToolSet +from azure.ai.assistants.telemetry._utils import ( + AZ_AI_ASSISTANT_SYSTEM, + ERROR_TYPE, + GEN_AI_ASSISTANT_DESCRIPTION, + GEN_AI_ASSISTANT_ID, + GEN_AI_ASSISTANT_NAME, + GEN_AI_EVENT_CONTENT, + GEN_AI_MESSAGE_ID, + GEN_AI_MESSAGE_STATUS, + GEN_AI_RESPONSE_MODEL, + GEN_AI_SYSTEM, + GEN_AI_SYSTEM_MESSAGE, + GEN_AI_THREAD_ID, + GEN_AI_THREAD_RUN_ID, + GEN_AI_THREAD_RUN_STATUS, + GEN_AI_USAGE_INPUT_TOKENS, + GEN_AI_USAGE_OUTPUT_TOKENS, + OperationName, + start_span, +) +from azure.core import CaseInsensitiveEnumMeta # type: ignore +from azure.core.settings import settings +from azure.core.tracing import AbstractSpan + +_Unset: Any = object() + +try: + # pylint: disable = no-name-in-module + from opentelemetry.trace import Span, StatusCode + + _tracing_library_available = True +except ModuleNotFoundError: + _tracing_library_available = False + + +__all__ = [ + "AIAssistantsInstrumentor", +] + + +_assistants_traces_enabled: bool = False +_trace_assistants_content: bool = False + + +class TraceType(str, Enum, 
metaclass=CaseInsensitiveEnumMeta): # pylint: disable=C4747 + """An enumeration class to represent different types of traces.""" + + AssistantS = "Assistants" + + +class AIAssistantsInstrumentor: + """ + A class for managing the trace instrumentation of AI Assistants. + + This class allows enabling or disabling tracing for AI Assistants. + and provides functionality to check whether instrumentation is active. + + """ + + def __init__(self): + if not _tracing_library_available: + raise ModuleNotFoundError( + "Azure Core Tracing Opentelemetry is not installed. " + "Please install it using 'pip install azure-core-tracing-opentelemetry'" + ) + # In the future we could support different versions from the same library + # and have a parameter that specifies the version to use. + self._impl = _AIAssistantsInstrumentorPreview() + + def instrument(self, enable_content_recording: Optional[bool] = None) -> None: + """ + Enable trace instrumentation for AI Assistants. + + :param enable_content_recording: Whether content recording is enabled as part + of the traces or not. Content in this context refers to chat message content + and function call tool related function names, function parameter names and + values. True will enable content recording, False will disable it. If no value + is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable + is not found, then the value will default to False. Please note that successive calls + to instrument will always apply the content recording value provided with the most + recent call to instrument (including applying the environment variable if no value is + provided and defaulting to false if the environment variable is not found), even if + instrument was already previously called without uninstrument being called in between + the instrument calls. 
+ :type enable_content_recording: bool, optional + + """ + self._impl.instrument(enable_content_recording) + + def uninstrument(self) -> None: + """ + Remove trace instrumentation for AI Assistants. + + This method removes any active instrumentation, stopping the tracing + of AI Assistants. + """ + self._impl.uninstrument() + + def is_instrumented(self) -> bool: + """ + Check if trace instrumentation for AI Assistants is currently enabled. + + :return: True if instrumentation is active, False otherwise. + :rtype: bool + """ + return self._impl.is_instrumented() + + def is_content_recording_enabled(self) -> bool: + """This function gets the content recording value. + + :return: A bool value indicating whether content recording is enabled. + :rtype: bool + """ + return self._impl.is_content_recording_enabled() + + +class _AIAssistantsInstrumentorPreview: + # pylint: disable=R0904 + """ + A class for managing the trace instrumentation of AI Assistants. + + This class allows enabling or disabling tracing for AI Assistants. + and provides functionality to check whether instrumentation is active. + """ + + def _str_to_bool(self, s): + if s is None: + return False + return str(s).lower() == "true" + + def instrument(self, enable_content_recording: Optional[bool] = None): + """ + Enable trace instrumentation for AI Assistants. + + :param enable_content_recording: Whether content recording is enabled as part + of the traces or not. Content in this context refers to chat message content + and function call tool related function names, function parameter names and + values. True will enable content recording, False will disable it. If no value + is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable + is not found, then the value will default to False. 
+ + :type enable_content_recording: bool, optional + """ + if enable_content_recording is None: + var_value = os.environ.get("AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED") + enable_content_recording = self._str_to_bool(var_value) + if not self.is_instrumented(): + self._instrument_assistants(enable_content_recording) + else: + self._set_enable_content_recording(enable_content_recording=enable_content_recording) + + def uninstrument(self): + """ + Disable trace instrumentation for AI Assistants. + + This method removes any active instrumentation, stopping the tracing + of AI Assistants. + """ + if self.is_instrumented(): + self._uninstrument_assistants() + + def is_instrumented(self): + """ + Check if trace instrumentation for AI Assistants is currently enabled. + + :return: True if instrumentation is active, False otherwise. + :rtype: bool + """ + return self._is_instrumented() + + def set_enable_content_recording(self, enable_content_recording: bool = False) -> None: + """This function sets the content recording value. + + :param enable_content_recording: Indicates whether tracing of message content should be enabled. + This also controls whether function call tool function names, + parameter names and parameter values are traced. + :type enable_content_recording: bool + """ + self._set_enable_content_recording(enable_content_recording=enable_content_recording) + + def is_content_recording_enabled(self) -> bool: + """This function gets the content recording value. + + :return: A bool value indicating whether content tracing is enabled. 
:rtype: bool
content_details = {"value": self._get_field(typed_content, "value")} + annotations = self._get_field(typed_content, "annotations") + if annotations: + content_details["annotations"] = [a.as_dict() for a in annotations] + content_body[content.type] = content_details + + self._add_message_event( + span, + self._get_role(message.role), + content_body, + attachments=message.attachments, + thread_id=message.thread_id, + assistant_id=message.assistant_id, + message_id=message.id, + thread_run_id=message.run_id, + message_status=message.status, + incomplete_details=message.incomplete_details, + usage=usage, + ) + + def _add_message_event( + self, + span, + role: str, + content: Any, + attachments: Any = None, # Optional[List[MessageAttachment]] or dict + thread_id: Optional[str] = None, + assistant_id: Optional[str] = None, + message_id: Optional[str] = None, + thread_run_id: Optional[str] = None, + message_status: Optional[str] = None, + incomplete_details: Optional[MessageIncompleteDetails] = None, + usage: Optional[_models.RunStepCompletionUsage] = None, + ) -> None: + # TODO document new fields + + event_body = {} + if _trace_assistants_content: + event_body["content"] = content + if attachments: + event_body["attachments"] = [] + for attachment in attachments: + attachment_body = {"id": attachment.file_id} + if attachment.tools: + attachment_body["tools"] = [self._get_field(tool, "type") for tool in attachment.tools] + event_body["attachments"].append(attachment_body) + + if incomplete_details: + event_body["incomplete_details"] = incomplete_details + event_body["role"] = role + + attributes = self._create_event_attributes( + thread_id=thread_id, + assistant_id=assistant_id, + thread_run_id=thread_run_id, + message_id=message_id, + message_status=message_status, + usage=usage, + ) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body, ensure_ascii=False) + span.span_instance.add_event(name=f"gen_ai.{role}.message", attributes=attributes) + + def _get_field(self, 
obj: Any, field: str) -> Any: + if not obj: + return None + + if isinstance(obj, dict): + return obj.get(field, None) + + return getattr(obj, field, None) + + def _add_instructions_event( + self, + span: "AbstractSpan", + instructions: Optional[str], + additional_instructions: Optional[str], + assistant_id: Optional[str] = None, + thread_id: Optional[str] = None, + ) -> None: + if not instructions: + return + + event_body: Dict[str, Any] = {} + if _trace_assistants_content and (instructions or additional_instructions): + if instructions and additional_instructions: + event_body["content"] = f"{instructions} {additional_instructions}" + else: + event_body["content"] = instructions or additional_instructions + + attributes = self._create_event_attributes(assistant_id=assistant_id, thread_id=thread_id) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body, ensure_ascii=False) + span.span_instance.add_event(name=GEN_AI_SYSTEM_MESSAGE, attributes=attributes) + + def _get_role(self, role: Optional[Union[str, MessageRole]]) -> str: + if role is None or role is _Unset: + return "user" + + if isinstance(role, MessageRole): + return role.value + + return role + + def _status_to_string(self, status: Any) -> str: + return status.value if hasattr(status, "value") else status + + def _add_tool_assistant_message_event(self, span, step: RunStep) -> None: + tool_calls = [ + { + "id": t.id, + "type": t.type, + "function": ( + {"name": t.function.name, "arguments": json.loads(t.function.arguments)} + if isinstance(t, RunStepFunctionToolCall) + else None + ), + } + for t in cast(RunStepToolCallDetails, step.step_details).tool_calls + ] + + attributes = self._create_event_attributes( + thread_id=step.thread_id, + assistant_id=step.assistant_id, + thread_run_id=step.run_id, + message_status=step.status, + usage=step.usage, + ) + + if _trace_assistants_content: + attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}, ensure_ascii=False) + else: + 
tool_calls_non_recording = self._remove_function_call_names_and_arguments(tool_calls=tool_calls) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls_non_recording}, ensure_ascii=False) + span.span_instance.add_event(name="gen_ai.assistant.message", attributes=attributes) + + def _add_tool_event_from_thread_run(self, span, run: ThreadRun) -> None: + tool_calls = [] + + for t in run.required_action.submit_tool_outputs.tool_calls: # type: ignore + try: + parsed_arguments = json.loads(t.function.arguments) + except json.JSONDecodeError: + parsed_arguments = {} + + tool_call = { + "id": t.id, + "type": t.type, + "function": ( + {"name": t.function.name, "arguments": parsed_arguments} + if isinstance(t, RequiredFunctionToolCall) + else None + ), + } + tool_calls.append(tool_call) + + attributes = self._create_event_attributes( + thread_id=run.thread_id, + assistant_id=run.assistant_id, + thread_run_id=run.id, + message_status=run.status, + ) + + if _trace_assistants_content: + attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}) + else: + tool_calls_non_recording = self._remove_function_call_names_and_arguments(tool_calls=tool_calls) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls_non_recording}) + span.span_instance.add_event(name="gen_ai.assistant.message", attributes=attributes) + + def set_end_run(self, span: "AbstractSpan", run: Optional[ThreadRun]) -> None: + if run and span and span.span_instance.is_recording: + span.add_attribute(GEN_AI_THREAD_RUN_STATUS, self._status_to_string(run.status)) + span.add_attribute(GEN_AI_RESPONSE_MODEL, run.model) + if run and run.usage: + span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, run.usage.prompt_tokens) + span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, run.usage.completion_tokens) + + @staticmethod + def assistant_api_response_to_str(response_format: Any) -> Optional[str]: + """ + Convert response_format to string. 
+ + :param response_format: The response format. + :type response_format: ~azure.ai.assistants._types.AssistantsApiResponseFormatOption + :returns: string for the response_format. + :rtype: Optional[str] + :raises: Value error if response_format is not of type AssistantsApiResponseFormatOption. + """ + if isinstance(response_format, str) or response_format is None: + return response_format + if isinstance(response_format, AssistantsApiResponseFormatMode): + return response_format.value + if isinstance(response_format, _models.AssistantsApiResponseFormat): + return response_format.type + if isinstance(response_format, _models.ResponseFormatJsonSchemaType): + return response_format.type + raise ValueError(f"Unknown response format {type(response_format)}") + + def start_thread_run_span( + self, + operation_name: OperationName, + project_name: str, + thread_id: Optional[str] = None, + assistant_id: Optional[str] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[ThreadMessage]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + _tools: Optional[List[ToolDefinition]] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + ) -> "Optional[AbstractSpan]": + span = start_span( + operation_name, + project_name, + thread_id=thread_id, + assistant_id=assistant_id, + model=model, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=_AIAssistantsInstrumentorPreview.assistant_api_response_to_str(response_format), + ) + if span and span.span_instance.is_recording and instructions and additional_instructions: + self._add_instructions_event( + span, instructions, additional_instructions, thread_id=thread_id, 
assistant_id=assistant_id + ) + + if additional_messages: + for message in additional_messages: + self.add_thread_message_event(span, message) + return span + + def start_submit_tool_outputs_span( + self, + project_name: str, + thread_id: Optional[str] = None, + run_id: Optional[str] = None, + tool_outputs: Optional[List[ToolOutput]] = None, + event_handler: Optional[Union[AssistantEventHandler, AsyncAssistantEventHandler]] = None, + ) -> "Optional[AbstractSpan]": + run_span = event_handler.span if isinstance(event_handler, _AssistantEventHandlerTraceWrapper) else None + if run_span is None: + run_span = event_handler.span if isinstance(event_handler, _AsyncAssistantEventHandlerTraceWrapper) else None + + if run_span: + recorded = self._add_tool_message_events(run_span, tool_outputs) + else: + recorded = False + + span = start_span(OperationName.SUBMIT_TOOL_OUTPUTS, project_name, thread_id=thread_id, run_id=run_id) + if not recorded: + self._add_tool_message_events(span, tool_outputs) + return span + + def _add_tool_message_events( + self, span: "Optional[AbstractSpan]", tool_outputs: Optional[List[ToolOutput]] + ) -> bool: + if span and span.span_instance.is_recording and tool_outputs: + for tool_output in tool_outputs: + if _trace_assistants_content: + body = {"content": tool_output["output"], "id": tool_output["tool_call_id"]} + else: + body = {"content": "", "id": tool_output["tool_call_id"]} + span.span_instance.add_event( + "gen_ai.tool.message", {"gen_ai.event.content": json.dumps(body, ensure_ascii=False)} + ) + return True + + return False + + def start_create_assistant_span( + self, + project_name: str, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + _tools: Optional[List[ToolDefinition]] = None, + _tool_resources: Optional[ToolResources] = None, + _toolset: Optional[ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + 
response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, + ) -> "Optional[AbstractSpan]": + span = start_span( + OperationName.CREATE_ASSISTANT, + project_name, + span_name=f"{OperationName.CREATE_ASSISTANT.value} {name}", + model=model, + temperature=temperature, + top_p=top_p, + response_format=_AIAssistantsInstrumentorPreview.assistant_api_response_to_str(response_format), + ) + if span and span.span_instance.is_recording: + if name: + span.add_attribute(GEN_AI_ASSISTANT_NAME, name) + if description: + span.add_attribute(GEN_AI_ASSISTANT_DESCRIPTION, description) + self._add_instructions_event(span, instructions, None) + + return span + + def start_create_thread_span( + self, + project_name: str, + messages: Optional[List[ThreadMessage]] = None, + _tool_resources: Optional[ToolResources] = None, + ) -> "Optional[AbstractSpan]": + span = start_span(OperationName.CREATE_THREAD, project_name) + if span and span.span_instance.is_recording: + for message in messages or []: + self.add_thread_message_event(span, message) + + return span + + def start_list_messages_span(self, project_name: str, thread_id: Optional[str] = None) -> "Optional[AbstractSpan]": + return start_span(OperationName.LIST_MESSAGES, project_name, thread_id=thread_id) + + def trace_create_assistant(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + name = kwargs.get("name") + model = kwargs.get("model") + description = kwargs.get("description") + instructions = kwargs.get("instructions") + tools = kwargs.get("tools") + tool_resources = kwargs.get("tool_resources") + toolset = kwargs.get("toolset") + temperature = kwargs.get("temperature") + top_p = kwargs.get("top_p") + response_format = kwargs.get("response_format") + + span = self.start_create_assistant_span( + project_name=project_name, + name=name, + model=model, + description=description, + 
instructions=instructions, + _tools=tools, + _tool_resources=tool_resources, + _toolset=toolset, + temperature=temperature, + top_p=top_p, + response_format=response_format, + ) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + span.add_attribute(GEN_AI_ASSISTANT_ID, result.id) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_assistant_async(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + name = kwargs.get("name") + model = kwargs.get("model") + description = kwargs.get("description") + instructions = kwargs.get("instructions") + tools = kwargs.get("tools") + tool_resources = kwargs.get("tool_resources") + toolset = kwargs.get("toolset") + temperature = kwargs.get("temperature") + top_p = kwargs.get("top_p") + response_format = kwargs.get("response_format") + + span = self.start_create_assistant_span( + project_name=project_name, + name=name, + model=model, + description=description, + instructions=instructions, + _tools=tools, + _tool_resources=tool_resources, + _toolset=toolset, + temperature=temperature, + top_p=top_p, + response_format=response_format, + ) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + span.add_attribute(GEN_AI_ASSISTANT_ID, result.id) + except Exception as exc: + # Set 
the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_thread(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + messages = kwargs.get("messages") + + span = self.start_create_thread_span(project_name=project_name, messages=messages) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_thread_async(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + messages = kwargs.get("messages") + + span = self.start_create_thread_span(project_name=project_name, messages=messages) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + 
span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_message(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + role = kwargs.get("role") + content = kwargs.get("content") + attachments = kwargs.get("attachments") + + span = self.start_create_message_span( + project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments + ) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_message_async(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + 
]._config.project_name + thread_id = kwargs.get("thread_id") + role = kwargs.get("role") + content = kwargs.get("content") + attachments = kwargs.get("attachments") + + span = self.start_create_message_span( + project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments + ) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_run(self, operation_name, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + + span = self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + 
additional_messages=additional_messages, + temperature=temperature, + _tools=tools, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format, + ) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + self.set_end_run(span, result) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_run_async(self, operation_name, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + + span = self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + temperature=temperature, + _tools=tools, + top_p=top_p, + 
max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format, + ) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + if span.span_instance.is_recording: + span.add_attribute(GEN_AI_THREAD_RUN_STATUS, self._status_to_string(result.status)) + span.add_attribute(GEN_AI_RESPONSE_MODEL, result.model) + if result.usage: + span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, result.usage.prompt_tokens) + span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, result.usage.completion_tokens) + span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + run_id = kwargs.get("run_id") + tool_outputs = kwargs.get("tool_outputs") + event_handler = kwargs.get("event_handler") + + span = self.start_submit_tool_outputs_span( + project_name=project_name, + thread_id=thread_id, + run_id=run_id, + tool_outputs=tool_outputs, + event_handler=event_handler, + ) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + if stream and event_handler: + kwargs["event_handler"] = self.wrap_handler(event_handler, span) + + result = function(*args, **kwargs) + if not 
isinstance(result, AssistantRunStream): + self.set_end_run(span, result) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_submit_tool_outputs_async(self, stream, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + run_id = kwargs.get("run_id") + tool_outputs = kwargs.get("tool_outputs") + event_handler = kwargs.get("event_handler") + + span = self.start_submit_tool_outputs_span( + project_name=project_name, + thread_id=thread_id, + run_id=run_id, + tool_outputs=tool_outputs, + event_handler=event_handler, + ) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + if stream: + kwargs["event_handler"] = self.wrap_async_handler(event_handler, span) + + result = await function(*args, **kwargs) + if not isinstance(result, AsyncAssistantRunStream): + self.set_end_run(span, result) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + 
raise + + return result + + def trace_handle_submit_tool_outputs(self, function, *args, **kwargs): + event_handler = kwargs.get("event_handler") + if event_handler is None: + event_handler = args[2] + span = getattr(event_handler, "span", None) + + if span is None: + return function(*args, **kwargs) + + with span.change_context(span.span_instance): + try: + result = function(*args, **kwargs) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_handle_submit_tool_outputs_async(self, function, *args, **kwargs): + event_handler = kwargs.get("event_handler") + if event_handler is None: + event_handler = args[2] + span = getattr(event_handler, "span", None) + + if span is None: + return await function(*args, **kwargs) + + with span.change_context(span.span_instance): + try: + result = await function(*args, **kwargs) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_stream(self, function, *args, **kwargs): + operation_name = OperationName.PROCESS_THREAD_RUN + 
project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + event_handler = kwargs.get("event_handler") + + span = self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + temperature=temperature, + _tools=tools, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format, + ) + + if span is None: + return function(*args, **kwargs) + + with span.change_context(span.span_instance): + try: + kwargs["event_handler"] = self.wrap_handler(event_handler, span) + result = function(*args, **kwargs) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_stream_async(self, function, *args, **kwargs): + operation_name = 
OperationName.PROCESS_THREAD_RUN + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + event_handler = kwargs.get("event_handler") + + span = self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + temperature=temperature, + _tools=tools, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format, + ) + + if span is None: + return await function(*args, **kwargs) + + # TODO: how to keep span active in the current context without existing? 
+ # TODO: dummy span for none + with span.change_context(span.span_instance): + try: + kwargs["event_handler"] = self.wrap_async_handler(event_handler, span) + result = await function(*args, **kwargs) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_list_messages(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name + thread_id = kwargs.get("thread_id") + + span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id) + + if span is None: + return function(*args, **kwargs) + + with span: + try: + result = function(*args, **kwargs) + for message in result.data: + self.add_thread_message_event(span, message) + + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_list_messages_async(self, function, *args, **kwargs): + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + 
]._config.project_name + thread_id = kwargs.get("thread_id") + + span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id) + + if span is None: + return await function(*args, **kwargs) + + with span: + try: + result = await function(*args, **kwargs) + for message in result.data: + self.add_thread_message_event(span, message) + + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def handle_run_stream_exit(self, _function, *args, **kwargs): + assistant_run_stream = args[0] + exc_type = kwargs.get("exc_type") + exc_val = kwargs.get("exc_val") + exc_tb = kwargs.get("exc_tb") + # TODO: is it a good idea? + # if not, we'll need to wrap stream and call exit + if ( + assistant_run_stream.event_handler + and assistant_run_stream.event_handler.__class__.__name__ == "_AssistantEventHandlerTraceWrapper" + ): + assistant_run_stream.event_handler.__exit__(exc_type, exc_val, exc_tb) + elif ( + assistant_run_stream.event_handler + and assistant_run_stream.event_handler.__class__.__name__ == "_AsyncAssistantEventHandlerTraceWrapper" + ): + assistant_run_stream.event_handler.__aexit__(exc_type, exc_val, exc_tb) + + def wrap_handler( + self, handler: "Optional[AssistantEventHandler]" = None, span: "Optional[AbstractSpan]" = None + ) -> "Optional[AssistantEventHandler]": + # Do not create a handler wrapper if we do not have handler in the first place. 
+ if not handler: + return None + + if isinstance(handler, _AssistantEventHandlerTraceWrapper): + return handler + + if span and span.span_instance.is_recording: + return _AssistantEventHandlerTraceWrapper(self, span, handler) + + return handler + + def wrap_async_handler( + self, handler: "Optional[AsyncAssistantEventHandler]" = None, span: "Optional[AbstractSpan]" = None + ) -> "Optional[AsyncAssistantEventHandler]": + # Do not create a handler wrapper if we do not have handler in the first place. + if not handler: + return None + + if isinstance(handler, _AsyncAssistantEventHandlerTraceWrapper): + return handler + + if span and span.span_instance.is_recording: + return _AsyncAssistantEventHandlerTraceWrapper(self, span, handler) + + return handler + + def start_create_message_span( + self, + project_name: str, + thread_id: Optional[str] = None, + content: Optional[str] = None, + role: Optional[Union[str, MessageRole]] = None, + attachments: Optional[List[MessageAttachment]] = None, + ) -> "Optional[AbstractSpan]": + role_str = self._get_role(role) + span = start_span(OperationName.CREATE_MESSAGE, project_name, thread_id=thread_id) + if span and span.span_instance.is_recording: + self._add_message_event(span, role_str, content, attachments=attachments, thread_id=thread_id) + return span + + def _trace_sync_function( + self, + function: Callable, + *, + _args_to_ignore: Optional[List[str]] = None, + _trace_type=TraceType.AssistantS, + _name: Optional[str] = None, + ) -> Callable: + """ + Decorator that adds tracing to a synchronous function. + + :param function: The function to be traced. + :type function: Callable + :param args_to_ignore: A list of argument names to be ignored in the trace. + Defaults to None. + :type: args_to_ignore: [List[str]], optional + :param trace_type: The type of the trace. Defaults to TraceType.AssistantS. + :type trace_type: TraceType, optional + :param name: The name of the trace, will set to func name if not provided. 
+ :type name: str, optional + :return: The traced function. + :rtype: Callable + """ + + @functools.wraps(function) + def inner(*args, **kwargs): # pylint: disable=R0911 + span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 + if span_impl_type is None: + return function(*args, **kwargs) + + class_function_name = function.__qualname__ + + if class_function_name.startswith("AssistantsClient.create_assistant"): + kwargs.setdefault("merge_span", True) + return self.trace_create_assistant(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClientOperationsMixin.create_thread"): + kwargs.setdefault("merge_span", True) + return self.trace_create_thread(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClientOperationsMixin.create_message"): + kwargs.setdefault("merge_span", True) + return self.trace_create_message(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.create_run"): + kwargs.setdefault("merge_span", True) + return self.trace_create_run(OperationName.START_THREAD_RUN, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.create_and_process_run"): + kwargs.setdefault("merge_span", True) + return self.trace_create_run(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.submit_tool_outputs_to_run"): + kwargs.setdefault("merge_span", True) + return self.trace_submit_tool_outputs(False, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.submit_tool_outputs_to_stream"): + kwargs.setdefault("merge_span", True) + return self.trace_submit_tool_outputs(True, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient._handle_submit_tool_outputs"): + return self.trace_handle_submit_tool_outputs(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.create_stream"): + kwargs.setdefault("merge_span", True) + 
return self.trace_create_stream(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClientOperationsMixin.list_messages"): + kwargs.setdefault("merge_span", True) + return self.trace_list_messages(function, *args, **kwargs) + if class_function_name.startswith("AssistantRunStream.__exit__"): + return self.handle_run_stream_exit(function, *args, **kwargs) + # Handle the default case (if the function name does not match) + return None # Ensure all paths return + + return inner + + def _trace_async_function( + self, + function: Callable, + *, + _args_to_ignore: Optional[List[str]] = None, + _trace_type=TraceType.AssistantS, + _name: Optional[str] = None, + ) -> Callable: + """ + Decorator that adds tracing to an asynchronous function. + + :param function: The function to be traced. + :type function: Callable + :param args_to_ignore: A list of argument names to be ignored in the trace. + Defaults to None. + :type: args_to_ignore: [List[str]], optional + :param trace_type: The type of the trace. Defaults to TraceType.AssistantS. + :type trace_type: TraceType, optional + :param name: The name of the trace, will set to func name if not provided. + :type name: str, optional + :return: The traced function. 
+ :rtype: Callable + """ + + @functools.wraps(function) + async def inner(*args, **kwargs): # pylint: disable=R0911 + span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 + if span_impl_type is None: + return function(*args, **kwargs) + + class_function_name = function.__qualname__ + + if class_function_name.startswith("AssistantsClient.create_assistant"): + kwargs.setdefault("merge_span", True) + return await self.trace_create_assistant_async(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClientOperationsMixin.create_thread"): + kwargs.setdefault("merge_span", True) + return await self.trace_create_thread_async(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClientOperationsMixin.create_message"): + kwargs.setdefault("merge_span", True) + return await self.trace_create_message_async(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.create_run"): + kwargs.setdefault("merge_span", True) + return await self.trace_create_run_async(OperationName.START_THREAD_RUN, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.create_and_process_run"): + kwargs.setdefault("merge_span", True) + return await self.trace_create_run_async(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.submit_tool_outputs_to_run"): + kwargs.setdefault("merge_span", True) + return await self.trace_submit_tool_outputs_async(False, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.submit_tool_outputs_to_stream"): + kwargs.setdefault("merge_span", True) + return await self.trace_submit_tool_outputs_async(True, function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient._handle_submit_tool_outputs"): + return await self.trace_handle_submit_tool_outputs_async(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClient.create_stream"): + 
kwargs.setdefault("merge_span", True) + return await self.trace_create_stream_async(function, *args, **kwargs) + if class_function_name.startswith("AssistantsClientOperationsMixin.list_messages"): + kwargs.setdefault("merge_span", True) + return await self.trace_list_messages_async(function, *args, **kwargs) + if class_function_name.startswith("AsyncAssistantRunStream.__aexit__"): + return self.handle_run_stream_exit(function, *args, **kwargs) + # Handle the default case (if the function name does not match) + return None # Ensure all paths return + + return inner + + def _inject_async(self, f, _trace_type, _name): + wrapper_fun = self._trace_async_function(f) + wrapper_fun._original = f # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + return wrapper_fun + + def _inject_sync(self, f, _trace_type, _name): + wrapper_fun = self._trace_sync_function(f) + wrapper_fun._original = f # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + return wrapper_fun + + def _assistants_apis(self): + sync_apis = ( + ("azure.ai.assistants", "AssistantsClient", "create_assistant", TraceType.AssistantS, "assistant_create"), + ("azure.ai.assistants", "AssistantsClient", "create_thread", TraceType.AssistantS, "thread_create"), + ("azure.ai.assistants", "AssistantsClient", "create_message", TraceType.AssistantS, "message_create"), + ("azure.ai.assistants", "AssistantsClient", "create_run", TraceType.AssistantS, "create_run"), + ( + "azure.ai.assistants", + "AssistantsClient", + "create_and_process_run", + TraceType.AssistantS, + "create_and_process_run", + ), + ( + "azure.ai.assistants", + "AssistantsClient", + "submit_tool_outputs_to_run", + TraceType.AssistantS, + "submit_tool_outputs_to_run", + ), + ( + "azure.ai.assistants", + "AssistantsClient", + "submit_tool_outputs_to_stream", + TraceType.AssistantS, + "submit_tool_outputs_to_stream", + ), + ( + "azure.ai.assistants", + "AssistantsClient", + 
"_handle_submit_tool_outputs", + TraceType.AssistantS, + "_handle_submit_tool_outputs", + ), + ("azure.ai.assistants", "AssistantsClient", "create_stream", TraceType.AssistantS, "create_stream"), + ("azure.ai.assistants", "AssistantsClient", "list_messages", TraceType.AssistantS, "list_messages"), + ("azure.ai.assistants.models", "AssistantRunStream", "__exit__", TraceType.AssistantS, "__exit__"), + ) + async_apis = ( + ("azure.ai.assistants.aio", "AssistantsClient", "create_assistant", TraceType.AssistantS, "assistant_create"), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_thread", + TraceType.AssistantS, + "assistants_thread_create", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_message", + TraceType.AssistantS, + "assistants_thread_message", + ), + ("azure.ai.assistants.aio", "AssistantsClient", "create_run", TraceType.AssistantS, "create_run"), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_and_process_run", + TraceType.AssistantS, + "create_and_process_run", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "submit_tool_outputs_to_run", + TraceType.AssistantS, + "submit_tool_outputs_to_run", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "submit_tool_outputs_to_stream", + TraceType.AssistantS, + "submit_tool_outputs_to_stream", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "_handle_submit_tool_outputs", + TraceType.AssistantS, + "_handle_submit_tool_outputs", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_stream", + TraceType.AssistantS, + "create_stream", + ), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "list_messages", + TraceType.AssistantS, + "list_messages", + ), + ("azure.ai.assistants.models", "AsyncAssistantRunStream", "__aexit__", TraceType.AssistantS, "__aexit__"), + ) + return sync_apis, async_apis + + def _assistants_api_list(self): + sync_apis, async_apis = self._assistants_apis() + yield sync_apis, 
self._inject_sync + yield async_apis, self._inject_async + + def _generate_api_and_injector(self, apis): + for api, injector in apis: + for module_name, class_name, method_name, trace_type, name in api: + try: + module = importlib.import_module(module_name) + api = getattr(module, class_name) + if hasattr(api, method_name): + yield api, method_name, trace_type, injector, name + except AttributeError as e: + # Log the attribute exception with the missing class information + logging.warning( + "AttributeError: The module '%s' does not have the class '%s'. %s", + module_name, + class_name, + str(e), + ) + except Exception as e: # pylint: disable=broad-except + # Log other exceptions as a warning, as we are not sure what they might be + logging.warning("An unexpected error occurred: '%s'", str(e)) + + def _available_assistants_apis_and_injectors(self): + """ + Generates a sequence of tuples containing Assistants API classes, method names, and + corresponding injector functions. + + :return: A generator yielding tuples. + :rtype: tuple + """ + yield from self._generate_api_and_injector(self._assistants_api_list()) + + def _instrument_assistants(self, enable_content_tracing: bool = False): + """This function modifies the methods of the Assistants API classes to + inject logic before calling the original methods. + The original methods are stored as _original attributes of the methods. + + :param enable_content_tracing: Indicates whether tracing of message content should be enabled. + This also controls whether function call tool function names, + parameter names and parameter values are traced. 
+ :type enable_content_tracing: bool + """ + # pylint: disable=W0603 + global _assistants_traces_enabled + global _trace_assistants_content + if _assistants_traces_enabled: + raise RuntimeError("Traces already started for AI Assistants") + _assistants_traces_enabled = True + _trace_assistants_content = enable_content_tracing + for ( + api, + method, + trace_type, + injector, + name, + ) in self._available_assistants_apis_and_injectors(): + # Check if the method of the api class has already been modified + if not hasattr(getattr(api, method), "_original"): + setattr(api, method, injector(getattr(api, method), trace_type, name)) + + def _uninstrument_assistants(self): + """This function restores the original methods of the Assistants API classes + by assigning them back from the _original attributes of the modified methods. + """ + # pylint: disable=W0603 + global _assistants_traces_enabled + global _trace_assistants_content + _trace_assistants_content = False + for api, method, _, _, _ in self._available_assistants_apis_and_injectors(): + if hasattr(getattr(api, method), "_original"): + setattr(api, method, getattr(getattr(api, method), "_original")) + _assistants_traces_enabled = False + + def _is_instrumented(self): + """This function returns True if Assistants API has already been instrumented + for tracing and False if it has not been instrumented. + + :return: A value indicating whether the Assistants API is currently instrumented or not. + :rtype: bool + """ + return _assistants_traces_enabled + + def _set_enable_content_recording(self, enable_content_recording: bool = False) -> None: + """This function sets the content recording value. + + :param enable_content_recording: Indicates whether tracing of message content should be enabled. + This also controls whether function call tool function names, + parameter names and parameter values are traced. 
+ :type enable_content_recording: bool + """ + global _trace_assistants_content # pylint: disable=W0603 + _trace_assistants_content = enable_content_recording + + def _is_content_recording_enabled(self) -> bool: + """This function gets the content recording value. + + :return: A bool value indicating whether content tracing is enabled. + :rtype bool + """ + return _trace_assistants_content + + +class _AssistantEventHandlerTraceWrapper(AssistantEventHandler): + def __init__( + self, + instrumentor: _AIAssistantsInstrumentorPreview, + span: "AbstractSpan", + inner_handler: Optional[AssistantEventHandler] = None, + ): + super().__init__() + self.span = span + self.inner_handler = inner_handler + self.ended = False + self.last_run: Optional[ThreadRun] = None + self.last_message: Optional[ThreadMessage] = None + self.instrumentor = instrumentor + + def initialize( + self, + response_iterator, + submit_tool_outputs, + ) -> None: + self.submit_tool_outputs = submit_tool_outputs + if self.inner_handler: + self.inner_handler.initialize(response_iterator=response_iterator, submit_tool_outputs=submit_tool_outputs) + + def __next__(self) -> Any: + if self.inner_handler: + event_bytes = self.inner_handler.__next_impl__() + return self._process_event(event_bytes.decode("utf-8")) + return None + + # pylint: disable=R1710 + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_message_delta(delta) # type: ignore + + def on_thread_message(self, message: "ThreadMessage") -> None: # type: ignore[func-returns-value] + retval = None + if self.inner_handler: + retval = self.inner_handler.on_thread_message(message) # type: ignore + + if message.status in {"completed", "incomplete"}: + self.last_message = message + + return retval # type: ignore + + def on_thread_run(self, run: "ThreadRun") -> None: # type: ignore[func-returns-value] + retval = None + + if run.status == "requires_action" 
and isinstance(run.required_action, SubmitToolOutputsAction): + self.instrumentor._add_tool_event_from_thread_run( # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + self.span, run + ) + + if self.inner_handler: + retval = self.inner_handler.on_thread_run(run) # type: ignore + self.last_run = run + + return retval # type: ignore + + def on_run_step(self, step: "RunStep") -> None: # type: ignore[func-returns-value] + retval = None + if self.inner_handler: + retval = self.inner_handler.on_run_step(step) # type: ignore + + # todo - report errors for failure statuses here and in run ? + if step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: + self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage) + self.last_message = None + + return retval # type: ignore + + def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_run_step_delta(delta) # type: ignore + + def on_error(self, data: str) -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_error(data) # type: ignore + + def on_done(self) -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_done() # type: ignore + # it could be called multiple tines (for each step) __exit__ + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return self.inner_handler.on_unhandled_event(event_type, event_data) # type: ignore + + # pylint: enable=R1710 + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.ended: + self.ended = True + self.instrumentor.set_end_run(self.span, self.last_run) + + if self.last_run and self.last_run.last_error: + self.span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + 
self.last_run.last_error.message, + ) + self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) + + self.span.__exit__(exc_type, exc_val, exc_tb) + self.span.finish() + + +class _AsyncAssistantEventHandlerTraceWrapper(AsyncAssistantEventHandler): + def __init__( + self, + instrumentor: _AIAssistantsInstrumentorPreview, + span: "AbstractSpan", + inner_handler: Optional[AsyncAssistantEventHandler] = None, + ): + super().__init__() + self.span = span + self.inner_handler = inner_handler + self.ended = False + self.last_run: Optional[ThreadRun] = None + self.last_message: Optional[ThreadMessage] = None + self.instrumentor = instrumentor + + def initialize( + self, + response_iterator, + submit_tool_outputs, + ) -> None: + self.submit_tool_outputs = submit_tool_outputs + if self.inner_handler: + self.inner_handler.initialize(response_iterator=response_iterator, submit_tool_outputs=submit_tool_outputs) + + # cspell:disable-next-line + async def __anext__(self) -> Any: + if self.inner_handler: + # cspell:disable-next-line + event_bytes = await self.inner_handler.__anext_impl__() + return await self._process_event(event_bytes.decode("utf-8")) + + # pylint: disable=R1710 + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return await self.inner_handler.on_message_delta(delta) # type: ignore + + async def on_thread_message(self, message: "ThreadMessage") -> None: # type: ignore[func-returns-value] + retval = None + if self.inner_handler: + retval = await self.inner_handler.on_thread_message(message) # type: ignore + + if message.status in {"completed", "incomplete"}: + self.last_message = message + + return retval # type: ignore + + async def on_thread_run(self, run: "ThreadRun") -> None: # type: ignore[func-returns-value] + retval = None + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + 
self.instrumentor._add_tool_event_from_thread_run( # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + self.span, run + ) + + if self.inner_handler: + retval = await self.inner_handler.on_thread_run(run) # type: ignore + self.last_run = run + + return retval # type: ignore + + async def on_run_step(self, step: "RunStep") -> None: # type: ignore[func-returns-value] + retval = None + if self.inner_handler: + retval = await self.inner_handler.on_run_step(step) # type: ignore + + # todo - report errors for failure statuses here and in run ? + if step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: + self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage) + self.last_message = None + + return retval # type: ignore + + async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return await self.inner_handler.on_run_step_delta(delta) # type: ignore + + async def on_error(self, data: str) -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return await self.inner_handler.on_error(data) # type: ignore + + async def on_done(self) -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return await self.inner_handler.on_done() # type: ignore + # it could be called multiple tines (for each step) __exit__ + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: # type: ignore[func-returns-value] + if self.inner_handler: + return await self.inner_handler.on_unhandled_event(event_type, event_data) # type: ignore + + # pylint: enable=R1710 + + def __aexit__(self, exc_type, exc_val, exc_tb): + if not self.ended: + self.ended = True + self.instrumentor.set_end_run(self.span, self.last_run) + + if self.last_run and self.last_run.last_error: + self.span.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + 
self.last_run.last_error.message, + ) + self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) + + self.span.__exit__(exc_type, exc_val, exc_tb) + self.span.finish() diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py new file mode 100644 index 000000000000..1890a6f1e88d --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py @@ -0,0 +1,204 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import functools +import asyncio +from typing import Any, Callable, Optional, Dict + +try: + # pylint: disable = no-name-in-module + from opentelemetry import trace as opentelemetry_trace + + tracer = opentelemetry_trace.get_tracer(__name__) + _tracing_library_available = True +except ModuleNotFoundError: + _tracing_library_available = False + +if _tracing_library_available: + + def trace_function(span_name: Optional[str] = None): + """ + A decorator for tracing function calls using OpenTelemetry. + + This decorator handles various data types for function parameters and return values, + and records them as attributes in the trace span. The supported data types include: + - Basic data types: str, int, float, bool + - Collections: list, dict, tuple, set + + Special handling for collections: + - If a collection (list, dict, tuple, set) contains nested collections, the entire collection + is converted to a string before being recorded as an attribute. + - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes. + + Object types are omitted, and the corresponding parameter is not traced. + + :param span_name: The name of the span. If not provided, the function name is used. + :type span_name: Optional[str] + :return: The decorated function with tracing enabled. 
+ :rtype: Callable + """ + + def decorator(func: Callable) -> Callable: + @functools.wraps(func) + async def async_wrapper(*args: Any, **kwargs: Any) -> Any: + """ + Wrapper function for asynchronous functions. + + :param args: Positional arguments passed to the function. + :type args: Tuple[Any] + :return: The result of the decorated asynchronous function. + :rtype: Any + """ + name = span_name if span_name else func.__name__ + with tracer.start_as_current_span(name) as span: + try: + # Sanitize parameters and set them as attributes + sanitized_params = sanitize_parameters(func, *args, **kwargs) + span.set_attributes(sanitized_params) + result = await func(*args, **kwargs) + sanitized_result = sanitize_for_attributes(result) + if sanitized_result is not None: + if isinstance(sanitized_result, (list, dict, tuple, set)): + if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): + sanitized_result = str(sanitized_result) + span.set_attribute("code.function.return.value", sanitized_result) # type: ignore + return result + except Exception as e: + span.record_exception(e) + span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore + raise + + @functools.wraps(func) + def sync_wrapper(*args: Any, **kwargs: Any) -> Any: + """ + Wrapper function for synchronous functions. + + :param args: Positional arguments passed to the function. + :type args: Tuple[Any] + :return: The result of the decorated synchronous function. 
+ :rtype: Any + """ + name = span_name if span_name else func.__name__ + with tracer.start_as_current_span(name) as span: + try: + # Sanitize parameters and set them as attributes + sanitized_params = sanitize_parameters(func, *args, **kwargs) + span.set_attributes(sanitized_params) + result = func(*args, **kwargs) + sanitized_result = sanitize_for_attributes(result) + if sanitized_result is not None: + if isinstance(sanitized_result, (list, dict, tuple, set)): + if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): + sanitized_result = str(sanitized_result) + span.set_attribute("code.function.return.value", sanitized_result) # type: ignore + return result + except Exception as e: + span.record_exception(e) + span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore + raise + + # Determine if the function is async + if asyncio.iscoroutinefunction(func): + return async_wrapper + return sync_wrapper + + return decorator + +else: + # Define a no-op decorator if OpenTelemetry is not available + def trace_function(span_name: Optional[str] = None): # pylint: disable=unused-argument + """ + A no-op decorator for tracing function calls when OpenTelemetry is not available. + + :param span_name: Not used in this version. + :type span_name: Optional[str] + :return: The original function. + :rtype: Callable + """ + + def decorator(func: Callable) -> Callable: + return func + + return decorator + + +def sanitize_parameters(func, *args, **kwargs) -> Dict[str, Any]: + """ + Sanitize function parameters to include only built-in data types. + + :param func: The function being decorated. + :type func: Callable + :param args: Positional arguments passed to the function. + :type args: Tuple[Any] + :return: A dictionary of sanitized parameters. 
+ :rtype: Dict[str, Any] + """ + import inspect + + params = inspect.signature(func).parameters + sanitized_params = {} + + for i, (name, param) in enumerate(params.items()): + if param.default == inspect.Parameter.empty and i < len(args): + value = args[i] + else: + value = kwargs.get(name, param.default) + + sanitized_value = sanitize_for_attributes(value) + # Check if the collection has nested collections + if isinstance(sanitized_value, (list, dict, tuple, set)): + if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_value): + sanitized_value = str(sanitized_value) + if sanitized_value is not None: + sanitized_params["code.function.parameter." + name] = sanitized_value + + return sanitized_params + + +# pylint: disable=R0911 +def sanitize_for_attributes(value: Any, is_recursive: bool = False) -> Any: + """ + Sanitize a value to be used as an attribute. + + :param value: The value to sanitize. + :type value: Any + :param is_recursive: Indicates if the function is being called recursively. Default is False. + :type is_recursive: bool + :return: The sanitized value or None if the value is not a supported type. 
+ :rtype: Any + """ + if isinstance(value, (str, int, float, bool)): + return value + if isinstance(value, list): + return [ + sanitize_for_attributes(item, True) + for item in value + if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) + ] + if isinstance(value, dict): + retval = { + k: sanitize_for_attributes(v, True) + for k, v in value.items() + if isinstance(v, (str, int, float, bool, list, dict, tuple, set)) + } + # dict to compatible with span attribute, so return it as a string + if is_recursive: + return retval + return str(retval) + if isinstance(value, tuple): + return tuple( + sanitize_for_attributes(item, True) + for item in value + if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) + ) + if isinstance(value, set): + retval_set = { + sanitize_for_attributes(item, True) + for item in value + if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) + } + if is_recursive: + return retval_set + return str(retval_set) + return None diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py new file mode 100644 index 000000000000..3d897fd1c874 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py @@ -0,0 +1,292 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +from typing import Any, Optional, TextIO, Union, cast + +import io +import logging +import sys + +from enum import Enum + +from azure.core.tracing import AbstractSpan, SpanKind # type: ignore +from azure.core.settings import settings # type: ignore + +try: + from opentelemetry.trace import StatusCode, Span # noqa: F401 # pylint: disable=unused-import + + _span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable +except ModuleNotFoundError: + _span_impl_type = None + +logger = logging.getLogger(__name__) + + +GEN_AI_MESSAGE_ID = "gen_ai.message.id" +GEN_AI_MESSAGE_STATUS = "gen_ai.message.status" +GEN_AI_THREAD_ID = "gen_ai.thread.id" +GEN_AI_THREAD_RUN_ID = "gen_ai.thread.run.id" +GEN_AI_ASSISTANT_ID = "gen_ai.assistant.id" +GEN_AI_ASSISTANT_NAME = "gen_ai.assistant.name" +GEN_AI_ASSISTANT_DESCRIPTION = "gen_ai.assistant.description" +GEN_AI_OPERATION_NAME = "gen_ai.operation.name" +GEN_AI_THREAD_RUN_STATUS = "gen_ai.thread.run.status" +GEN_AI_REQUEST_MODEL = "gen_ai.request.model" +GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature" +GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p" +GEN_AI_REQUEST_MAX_INPUT_TOKENS = "gen_ai.request.max_input_tokens" +GEN_AI_REQUEST_MAX_OUTPUT_TOKENS = "gen_ai.request.max_output_tokens" +GEN_AI_RESPONSE_MODEL = "gen_ai.response.model" +GEN_AI_SYSTEM = "gen_ai.system" +SERVER_ADDRESS = "server.address" +AZ_AI_ASSISTANT_SYSTEM = "az.ai.assistants" +GEN_AI_TOOL_NAME = "gen_ai.tool.name" +GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id" +GEN_AI_REQUEST_RESPONSE_FORMAT = "gen_ai.request.response_format" +GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens" +GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens" +GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message" +GEN_AI_EVENT_CONTENT = "gen_ai.event.content" +ERROR_TYPE = "error.type" + + +class OperationName(Enum): + CREATE_ASSISTANT = "create_assistant" + CREATE_THREAD = "create_thread" + CREATE_MESSAGE = 
"create_message" + START_THREAD_RUN = "start_thread_run" + EXECUTE_TOOL = "execute_tool" + LIST_MESSAGES = "list_messages" + SUBMIT_TOOL_OUTPUTS = "submit_tool_outputs" + PROCESS_THREAD_RUN = "process_thread_run" + + +def trace_tool_execution( + tool_call_id: str, + tool_name: str, + thread_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow + assistant_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow + run_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow +) -> "Optional[AbstractSpan]": + span = start_span( + OperationName.EXECUTE_TOOL, + server_address=None, + span_name=f"execute_tool {tool_name}", + thread_id=thread_id, + assistant_id=assistant_id, + run_id=run_id, + gen_ai_system=None, + ) # it's a client code execution, not GenAI span + if span is not None and span.span_instance.is_recording: + span.add_attribute(GEN_AI_TOOL_CALL_ID, tool_call_id) + span.add_attribute(GEN_AI_TOOL_NAME, tool_name) + + return span + + +def start_span( + operation_name: OperationName, + server_address: Optional[str], + span_name: Optional[str] = None, + thread_id: Optional[str] = None, + assistant_id: Optional[str] = None, + run_id: Optional[str] = None, + model: Optional[str] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + response_format: Optional[str] = None, + gen_ai_system: Optional[str] = AZ_AI_ASSISTANT_SYSTEM, + kind: SpanKind = SpanKind.CLIENT, +) -> "Optional[AbstractSpan]": + if _span_impl_type is None: + return None + + span = _span_impl_type(name=span_name or operation_name.value, kind=kind) + + if span and span.span_instance.is_recording: + if gen_ai_system: + span.add_attribute(GEN_AI_SYSTEM, AZ_AI_ASSISTANT_SYSTEM) + + span.add_attribute(GEN_AI_OPERATION_NAME, operation_name.value) + + if server_address: + 
span.add_attribute(SERVER_ADDRESS, server_address) + + if thread_id: + span.add_attribute(GEN_AI_THREAD_ID, thread_id) + + if assistant_id: + span.add_attribute(GEN_AI_ASSISTANT_ID, assistant_id) + + if run_id: + span.add_attribute(GEN_AI_THREAD_RUN_ID, run_id) + + if model: + span.add_attribute(GEN_AI_REQUEST_MODEL, model) + + if temperature: + span.add_attribute(GEN_AI_REQUEST_TEMPERATURE, str(temperature)) + + if top_p: + span.add_attribute(GEN_AI_REQUEST_TOP_P, str(top_p)) + + if max_prompt_tokens: + span.add_attribute(GEN_AI_REQUEST_MAX_INPUT_TOKENS, max_prompt_tokens) + + if max_completion_tokens: + span.add_attribute(GEN_AI_REQUEST_MAX_OUTPUT_TOKENS, max_completion_tokens) + + if response_format: + span.add_attribute(GEN_AI_REQUEST_RESPONSE_FORMAT, response_format) + + return span + + +# Internal helper functions to enable OpenTelemetry, used by both sync and async clients +def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: + if isinstance(destination, str): + # `destination` is the OTLP endpoint + # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage + try: + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter # type: ignore + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "OpenTelemetry OTLP exporter is not installed. " + + "Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" + ) from e + return OTLPSpanExporter(endpoint=destination) + + if isinstance(destination, io.TextIOWrapper): + if destination is sys.stdout: + # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long + try: + from opentelemetry.sdk.trace.export import ConsoleSpanExporter + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "OpenTelemetry SDK is not installed. 
Please install it using 'pip install opentelemetry-sdk'" + ) from e + + return ConsoleSpanExporter() + raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") + + return None + + +def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: + if isinstance(destination, str): + # `destination` is the OTLP endpoint + # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage + try: + # _logs are considered beta (not internal) in OpenTelemetry Python API/SDK. + # So it's ok to use it for local development, but we'll swallow + # any errors in case of any breaking changes on OTel side. + from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter # type: ignore # pylint: disable=import-error,no-name-in-module + except Exception as ex: # pylint: disable=broad-exception-caught + # since OTel logging is still in beta in Python, we're going to swallow any errors + # and just warn about them. + logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) + return None + + return OTLPLogExporter(endpoint=destination) + + if isinstance(destination, io.TextIOWrapper): + if destination is sys.stdout: + # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long + try: + from opentelemetry.sdk._logs.export import ConsoleLogExporter + + return ConsoleLogExporter() + except ModuleNotFoundError as ex: + # since OTel logging is still in beta in Python, we're going to swallow any errors + # and just warn about them. 
+ logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) + return None + raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") + + return None + + +def _configure_tracing(span_exporter: Any) -> None: + if span_exporter is None: + return + + try: + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import SimpleSpanProcessor + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" + ) from e + + # if tracing was not setup before, we need to create a new TracerProvider + if not isinstance(trace.get_tracer_provider(), TracerProvider): + # If the provider is NoOpTracerProvider, we need to create a new TracerProvider + provider = TracerProvider() + trace.set_tracer_provider(provider) + + # get_tracer_provider returns opentelemetry.trace.TracerProvider + # however, we have opentelemetry.sdk.trace.TracerProvider, which implements + # add_span_processor method, though we need to cast it to fix type checking. + provider = cast(TracerProvider, trace.get_tracer_provider()) + provider.add_span_processor(SimpleSpanProcessor(span_exporter)) + + +def _configure_logging(log_exporter: Any) -> None: + if log_exporter is None: + return + + try: + # _events and _logs are considered beta (not internal) in + # OpenTelemetry Python API/SDK. + # So it's ok to use them for local development, but we'll swallow + # any errors in case of any breaking changes on OTel side. 
+ from opentelemetry import _logs, _events + from opentelemetry.sdk._logs import LoggerProvider # pylint: disable=import-error,no-name-in-module + from opentelemetry.sdk._events import EventLoggerProvider # pylint: disable=import-error,no-name-in-module + from opentelemetry.sdk._logs.export import ( + SimpleLogRecordProcessor, + ) # pylint: disable=import-error,no-name-in-module + + if not isinstance(_logs.get_logger_provider(), LoggerProvider): + logger_provider = LoggerProvider() + _logs.set_logger_provider(logger_provider) + + # get_logger_provider returns opentelemetry._logs.LoggerProvider + # however, we have opentelemetry.sdk._logs.LoggerProvider, which implements + # add_log_record_processor method, though we need to cast it to fix type checking. + logger_provider = cast(LoggerProvider, _logs.get_logger_provider()) + logger_provider.add_log_record_processor(SimpleLogRecordProcessor(log_exporter)) + _events.set_event_logger_provider(EventLoggerProvider(logger_provider)) + except Exception as ex: # pylint: disable=broad-exception-caught + # since OTel logging is still in beta in Python, we're going to swallow any errors + # and just warn about them. + logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) + + +def enable_telemetry(destination: Union[TextIO, str, None] = None, **kwargs) -> None: # pylint: disable=unused-argument + """Enable tracing and logging to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. + + :param destination: `sys.stdout` to print telemetry to console or a string holding the + OpenTelemetry protocol (OTLP) endpoint. + If not provided, this method enables instrumentation, but does not configure OpenTelemetry + SDK to export traces and logs. 
+ :type destination: Union[TextIO, str, None] + """ + span_exporter = _get_trace_exporter(destination) + _configure_tracing(span_exporter) + + log_exporter = _get_log_exporter(destination) + _configure_logging(log_exporter) + + try: + from azure.ai.assistants.telemetry import AIAssistantsInstrumentor + + assistants_instrumentor = AIAssistantsInstrumentor() + if not assistants_instrumentor.is_instrumented(): + assistants_instrumentor.instrument() + except Exception as exc: # pylint: disable=broad-exception-caught + logger.warning("Could not call `AIAssistantsInstrumentor().instrument()`", exc_info=exc) From d4dbca7fdeae1510bfaf48cf76c1e1f556b5a970 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Thu, 10 Apr 2025 13:48:48 -0700 Subject: [PATCH 5/7] Fix tests --- sdk/ai/azure-ai-assistants/pyproject.toml | 36 +++++++++++++++++++ ...t_datetime_and_weather_stream_response.txt | 12 +++---- .../tests/assets/main_stream_response.txt | 12 +++---- .../assets/send_email_stream_response.txt | 20 +++++------ .../tests/test_assistants_mock.py | 7 ++-- .../tests/test_assistants_mock_async.py | 8 ++--- 6 files changed, 64 insertions(+), 31 deletions(-) create mode 100644 sdk/ai/azure-ai-assistants/pyproject.toml diff --git a/sdk/ai/azure-ai-assistants/pyproject.toml b/sdk/ai/azure-ai-assistants/pyproject.toml new file mode 100644 index 000000000000..9bbdfd71420c --- /dev/null +++ b/sdk/ai/azure-ai-assistants/pyproject.toml @@ -0,0 +1,36 @@ +[tool.mypy] +python_version = "3.10" +exclude = [ + "downloaded", + # In run_mypy.py python version is hardcoded to 3.8. It does not allow + # obligatory named parameters as fun(a, *, b=1, c=2). + "sample_assistants_vector_store_batch_enterprise_file_search_async\\.py", + # Error in typing caused by the typespec. 
+ "sample_assistants_with_file_search_attachment\\.py", + "sample_assistants_with_code_interpreter_file_attachment\\.py", + "sample_assistants_code_interpreter_attachment_enterprise_search\\.py", + "sample_assistants_with_file_search_attachment_async\\.py", + "sample_assistants_code_interpreter_attachment_enterprise_search_async\\.py", + "sample_assistants_code_interpreter_attachment_enterprise_search_async\\.py", + "sample_assistants_code_interpreter_attachment_async\\.py", +] +warn_unused_configs = true +ignore_missing_imports = true +follow_imports_for_stubs = false + +[tool.isort] +profile = "black" +line_length = 120 +known_first_party = ["azure"] +filter_files=true +extend_skip_glob = [ + "*/_vendor/*", + "*/_generated/*", + "*/_restclient/*", + "*/doc/*", + "*/.tox/*", +] + +[tool.azure-sdk-build] +whl_no_aio= false + diff --git a/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt b/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt index 138c8eda465e..f5a1bb4c3ac7 100644 --- a/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt +++ b/sdk/ai/azure-ai-assistants/tests/assets/fetch_current_datetime_and_weather_stream_response.txt @@ -1,17 +1,17 @@ event: thread.run.step.completed -data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1735945046,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"{}","output":"{\"current_time\": \"2025-01-03 14:57:24\"}"}},{"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"{\"location\": \"New York\"}","output":"{\"weather\": \"Sunny, 
25\\u00b0C\"}"}}]},"usage":{"prompt_tokens":648,"completion_tokens":71,"total_tokens":719}} +data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1735945046,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"{}","output":"{\"current_time\": \"2025-01-03 14:57:24\"}"}},{"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"{\"location\": \"New York\"}","output":"{\"weather\": \"Sunny, 25\\u00b0C\"}"}}]},"usage":{"prompt_tokens":648,"completion_tokens":71,"total_tokens":719}} event: thread.run.queued -data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.in_progress -data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.step.created -data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} +data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} event: thread.run.step.in_progress -data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} +data: 
{"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} event: thread.run.step.delta data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_03","type":"function","function":{"name":"send_email","arguments":"","output":null}}]}}} @@ -248,7 +248,7 @@ event: thread.run.step.delta data: {"id":"step_02","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"}"}}]}}} event: thread.run.requires_action -data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"requires_action","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_03","type":"function","function":{"name":"send_email","arguments":"{\n \"recipient\": \"user@example.com\",\n \"subject\": \"Current New York Weather and DateTime Information\",\n \"body\": \"Hello,\\n\\nHere are the details you requested:\\n\\n- Date and Time: 2025-01-03 14:57:24\\n- Weather in New York: Sunny, 25�C\\n\\nBest regards.\"\n}"}}]}},"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts 
temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two 
integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"requires_action","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_03","type":"function","function":{"name":"send_email","arguments":"{\n \"recipient\": \"user@example.com\",\n \"subject\": \"Current New York Weather and DateTime Information\",\n \"body\": \"Hello,\\n\\nHere are the details you requested:\\n\\n- Date and Time: 2025-01-03 14:57:24\\n- Weather in New York: Sunny, 25�C\\n\\nBest regards.\"\n}"}}]}},"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful 
assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to 
integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: done data: [DONE] diff --git a/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt b/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt index 14d56a0f74a1..de654aec55ee 100644 --- a/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt +++ b/sdk/ai/azure-ai-assistants/tests/assets/main_stream_response.txt @@ -1,17 +1,17 @@ event: thread.run.created -data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":null,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":null,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.queued -data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":null,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":null,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.in_progress -data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of 
sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather 
for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.step.created -data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} +data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} event: thread.run.step.in_progress -data: {"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} +data: 
{"id":"step_01","object":"thread.run.step","created_at":1735945043,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} event: thread.run.step.delta data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"","output":null}}]}}} @@ -38,7 +38,7 @@ event: thread.run.step.delta data: {"id":"step_01","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":1,"type":"function","function":{"arguments":"ork\"}"}}]}}} event: thread.run.requires_action -data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"requires_action","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"{}"}},{"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"{\"location\": \"New York\"}"}}]}},"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to 
Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two 
integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"requires_action","started_at":1735945041,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_01","type":"function","function":{"name":"fetch_current_datetime","arguments":"{}"}},{"id":"call_02","type":"function","function":{"name":"fetch_weather","arguments":"{\"location\": \"New York\"}"}}]}},"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user 
ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First 
dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: done data: [DONE] diff --git a/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt b/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt index fe3afe7a9ba2..c8bd94f9005f 100644 --- a/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt +++ b/sdk/ai/azure-ai-assistants/tests/assets/send_email_stream_response.txt @@ -1,23 +1,23 @@ event: thread.run.step.completed -data: 
{"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1735945059,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_03","type":"function","function":{"name":"send_email","arguments":"{\n \"recipient\": \"user@example.com\",\n \"subject\": \"Current New York Weather and DateTime Information\",\n \"body\": \"Hello,\\n\\nHere are the details you requested:\\n\\n- Date and Time: 2025-01-03 14:57:24\\n- Weather in New York: Sunny, 25�C\\n\\nBest regards.\"\n}","output":"{\"message\": \"Email successfully sent to user@example.com.\"}"}}]},"usage":{"prompt_tokens":735,"completion_tokens":87,"total_tokens":822}} +data: {"id":"step_02","object":"thread.run.step","created_at":1735945052,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1735945059,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_03","type":"function","function":{"name":"send_email","arguments":"{\n \"recipient\": \"user@example.com\",\n \"subject\": \"Current New York Weather and DateTime Information\",\n \"body\": \"Hello,\\n\\nHere are the details you requested:\\n\\n- Date and Time: 2025-01-03 14:57:24\\n- Weather in New York: Sunny, 25�C\\n\\nBest regards.\"\n}","output":"{\"message\": \"Email successfully sent to user@example.com.\"}"}}]},"usage":{"prompt_tokens":735,"completion_tokens":87,"total_tokens":822}} event: thread.run.queued -data: 
{"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each 
record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"queued","started_at":1735945048,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the 
recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.in_progress -data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945059,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and 
body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"in_progress","started_at":1735945059,"expires_at":1735945641,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email with the specified subject and body to the 
recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: thread.run.step.created -data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":null} +data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":null} event: thread.run.step.in_progress -data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":null} +data: 
{"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":null} event: thread.message.created -data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"agent_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"attachments":[],"metadata":{}} +data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"assistant_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"attachments":[],"metadata":{}} event: thread.message.in_progress -data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"agent_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"attachments":[],"metadata":{}} +data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"assistant_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"attachments":[],"metadata":{}} event: thread.message.delta data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} @@ -200,13 +200,13 @@ event: thread.message.delta data: {"id":"msg_01","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} event: thread.message.completed -data: 
{"id":"msg_01","object":"thread.message","created_at":1735945060,"agent_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1735945061,"role":"assistant","content":[{"type":"text","text":{"value":"The email has been successfully sent to the recipient with the following details:\n\n- Date and Time: 2025-01-03 14:57:24\n- Weather in New York: Sunny, 25�C\n\nIf you need any further assistance or information, please feel free to ask.","annotations":[]}}],"attachments":[],"metadata":{}} +data: {"id":"msg_01","object":"thread.message","created_at":1735945060,"assistant_id":"asst_01","thread_id":"thread_01","run_id":"run_01","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1735945061,"role":"assistant","content":[{"type":"text","text":{"value":"The email has been successfully sent to the recipient with the following details:\n\n- Date and Time: 2025-01-03 14:57:24\n- Weather in New York: Sunny, 25�C\n\nIf you need any further assistance or information, please feel free to ask.","annotations":[]}}],"attachments":[],"metadata":{}} event: thread.run.step.completed -data: {"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","agent_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1735945061,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":{"prompt_tokens":834,"completion_tokens":62,"total_tokens":896}} +data: 
{"id":"step_03","object":"thread.run.step","created_at":1735945060,"run_id":"run_01","assistant_id":"asst_01","thread_id":"thread_01","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1735945061,"expires_at":1735945641,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_01"}},"usage":{"prompt_tokens":834,"completion_tokens":62,"total_tokens":896}} event: thread.run.completed -data: {"id":"run_01","object":"thread.run","created_at":1735945041,"agent_id":"asst_01","thread_id":"thread_01","status":"completed","started_at":1735945059,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1735945061,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email 
with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":2217,"completion_tokens":220,"total_tokens":2437},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} +data: {"id":"run_01","object":"thread.run","created_at":1735945041,"assistant_id":"asst_01","thread_id":"thread_01","status":"completed","started_at":1735945059,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1735945061,"required_action":null,"last_error":null,"model":"gpt-4-1106-preview","instructions":"You are a helpful assistant","tools":[{"type":"function","function":{"name":"get_user_info","description":"Retrieves user information based on user ID.","parameters":{"type":"object","properties":{"user_id":{"type":"integer","description":"ID of the user."}},"required":["user_id"]},"strict":false}},{"type":"function","function":{"name":"convert_temperature","description":"Converts temperature from Celsius to Fahrenheit.","parameters":{"type":"object","properties":{"celsius":{"type":"number","description":"Temperature in Celsius."}},"required":["celsius"]},"strict":false}},{"type":"function","function":{"name":"longest_word_in_sentences","description":"Finds the longest word in each sentence.","parameters":{"type":"object","properties":{"sentences":{"type":"array","items":{"type":"string"},"description":"A list of sentences."}},"required":["sentences"]},"strict":false}},{"type":"function","function":{"name":"toggle_flag","description":"Toggles a boolean flag.","parameters":{"type":"object","properties":{"flag":{"type":"boolean","description":"The flag to toggle."}},"required":["flag"]},"strict":false}},{"type":"function","function":{"name":"send_email","description":"Sends an email 
with the specified subject and body to the recipient.","parameters":{"type":"object","properties":{"recipient":{"type":"string","description":"Email address of the recipient."},"subject":{"type":"string","description":"Subject of the email."},"body":{"type":"string","description":"Body content of the email."}},"required":["recipient","subject","body"]},"strict":false}},{"type":"function","function":{"name":"process_records","description":"Process a list of records, where each record is a dictionary with string keys and integer values.","parameters":{"type":"object","properties":{"records":{"type":"array","items":{"type":"object"},"description":"A list containing dictionaries that map strings to integers."}},"required":["records"]},"strict":false}},{"type":"function","function":{"name":"merge_dicts","description":"Merges two dictionaries.","parameters":{"type":"object","properties":{"dict1":{"type":"object","description":"First dictionary."},"dict2":{"type":"object","description":"Second dictionary."}},"required":["dict1","dict2"]},"strict":false}},{"type":"function","function":{"name":"calculate_sum","description":"Calculates the sum of two integers.","parameters":{"type":"object","properties":{"a":{"type":"integer","description":"First integer."},"b":{"type":"integer","description":"Second integer."}},"required":["a","b"]},"strict":false}},{"type":"function","function":{"name":"fetch_weather","description":"Fetches the weather information for the specified location.","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The location to fetch weather for."}},"required":["location"]},"strict":false}},{"type":"function","function":{"name":"fetch_current_datetime","description":"Get the current time as a JSON string, optionally formatted.","parameters":{"type":"object","properties":{"format":{"type":["string","null"],"description":"The format in which to return the current time. 
Defaults to None, which uses a standard format."}},"required":[]},"strict":false}}],"tool_resources":{"code_interpreter":{"file_ids":[]}},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":2217,"completion_tokens":220,"total_tokens":2437},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} event: done data: [DONE] diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py index 2f9083f6ad26..67359b64dae4 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py @@ -536,7 +536,7 @@ def submit_tool_outputs_to_run( ) @patch("azure.ai.assistants.AssistantsClient.__init__", return_value=None) @patch( - "azure.ai.assistants.AssistantsClient.submit_tool_outputs_to_run", + "azure.ai.assistants._operations.AssistantsClientOperationsMixin.submit_tool_outputs_to_run", ) def test_create_stream_with_tool_calls(self, mock_submit_tool_outputs_to_run: Mock, *args): mock_submit_tool_outputs_to_run.side_effect = self.submit_tool_outputs_to_run @@ -551,9 +551,8 @@ def test_create_stream_with_tool_calls(self, mock_submit_tool_outputs_to_run: Mo with operation.create_stream(thread_id="thread_id", assistant_id="asst_01") as stream: for _ in stream: count += 1 - # TODO: Fix this test; it does not submit the tool output and hence cannot return all output. 
assert count == ( main_stream_response.count("event:") - # + fetch_current_datetime_and_weather_stream_response.count("event:") - # + send_email_stream_response.count("event:") + + fetch_current_datetime_and_weather_stream_response.count("event:") + + send_email_stream_response.count("event:") ) diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py index 51351b93b624..de9bfa0b8763 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py @@ -544,7 +544,7 @@ def submit_tool_outputs_to_run( ) @patch("azure.ai.assistants.aio.AssistantsClient.__init__", return_value=None) @patch( - "azure.ai.assistants.aio.AssistantsClient.submit_tool_outputs_to_run", + "azure.ai.assistants.aio._operations.AssistantsClientOperationsMixin.submit_tool_outputs_to_run", ) async def test_create_stream_with_tool_calls(self, mock_submit_tool_outputs_to_run: Mock, *args): mock_submit_tool_outputs_to_run.side_effect = self.submit_tool_outputs_to_run @@ -559,10 +559,8 @@ async def test_create_stream_with_tool_calls(self, mock_submit_tool_outputs_to_r async with await operation.create_stream(thread_id="thread_id", assistant_id="asst_01") as stream: async for _ in stream: count += 1 - # TODO: Fix this test; it does not submit the tool output and hence cannot return all output. 
assert count == ( main_stream_response.count("event:") - - # + fetch_current_datetime_and_weather_stream_response.count("event:") - # + send_email_stream_response.count("event:") + + fetch_current_datetime_and_weather_stream_response.count("event:") + + send_email_stream_response.count("event:") ) From 7ad0242cbc0bc3396b4d44a2f2f9bbf4c96258da Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Tue, 15 Apr 2025 16:58:45 -0700 Subject: [PATCH 6/7] Allow image input --- .../apiview-properties.json | 10 +- .../azure/ai/assistants/_client.py | 5 +- .../azure/ai/assistants/_configuration.py | 7 +- .../ai/assistants/_operations/_operations.py | 147 +++++----- .../azure/ai/assistants/_patch.py | 39 +-- .../azure/ai/assistants/_types.py | 3 +- .../azure/ai/assistants/aio/_client.py | 5 +- .../azure/ai/assistants/aio/_configuration.py | 7 +- .../assistants/aio/_operations/_operations.py | 69 ++--- .../ai/assistants/aio/_operations/_patch.py | 24 +- .../azure/ai/assistants/aio/_patch.py | 38 +-- .../azure/ai/assistants/models/__init__.py | 18 +- .../azure/ai/assistants/models/_enums.py | 24 ++ .../azure/ai/assistants/models/_models.py | 259 ++++++++++++++++-- .../azure/ai/assistants/models/_patch.py | 21 +- .../azure/ai/assistants/telemetry/__init__.py | 6 +- .../telemetry/_ai_assistants_instrumentor.py | 12 +- .../azure/ai/assistants/telemetry/_utils.py | 3 +- ...basics_async_with_azure_monitor_tracing.py | 2 +- ...ample_assistants_code_interpreter_async.py | 11 +- ...tants_code_interpreter_attachment_async.py | 7 +- ...ple_assistants_image_input_base64_async.py | 113 ++++++++ ...ample_assistants_image_input_file_async.py | 94 +++++++ ...sample_assistants_image_input_url_async.py | 93 +++++++ ...tream_eventhandler_with_functions_async.py | 4 +- ...m_with_base_override_eventhandler_async.py | 1 + ...tore_batch_enterprise_file_search_async.py | 4 +- ...ts_vector_store_batch_file_search_async.py | 4 +- .../samples/image_file.png | Bin 0 -> 
183951 bytes .../samples/multiassistant/assistant_team.py | 9 +- .../assistant_trace_configurator.py | 4 +- .../sample_assistants_azure_ai_search.py | 6 +- .../samples/sample_assistants_basics.py | 6 +- ...tants_basics_with_azure_monitor_tracing.py | 2 +- ..._assistants_basics_with_console_tracing.py | 4 +- ..._with_console_tracing_custom_attributes.py | 4 +- .../samples/sample_assistants_fabric.py | 2 +- ...ts_functions_with_azure_monitor_tracing.py | 2 +- .../sample_assistants_image_input_base64.py | 110 ++++++++ .../sample_assistants_image_input_file.py | 91 ++++++ .../sample_assistants_image_input_url.py | 90 ++++++ .../samples/sample_assistants_openapi.py | 1 - ...mple_assistants_openapi_connection_auth.py | 14 +- ...eventhandler_with_azure_monitor_tracing.py | 6 +- ...stream_eventhandler_with_bing_grounding.py | 1 + ...tream_eventhandler_with_console_tracing.py | 4 +- ...ts_stream_iteration_with_bing_grounding.py | 3 +- .../tests/overload_assert_utils.py | 1 + .../tests/test_assistant_mock_overloads.py | 4 +- .../tests/test_assistant_models.py | 1 + .../tests/test_assistant_models_async.py | 1 + .../tests/test_assistants_client.py | 171 ++++++++---- .../tests/test_assistants_client_async.py | 13 +- .../tests/test_assistants_mock_async.py | 4 +- .../tests/test_overload_assert.py | 5 +- sdk/ai/azure-ai-assistants/tsp-location.yaml | 2 +- 56 files changed, 1238 insertions(+), 353 deletions(-) create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py create mode 100644 sdk/ai/azure-ai-assistants/samples/image_file.png create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py create mode 100644 
sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py create mode 100644 sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py diff --git a/sdk/ai/azure-ai-assistants/apiview-properties.json b/sdk/ai/azure-ai-assistants/apiview-properties.json index 407356d45103..15de3b3ab8d4 100644 --- a/sdk/ai/azure-ai-assistants/apiview-properties.json +++ b/sdk/ai/azure-ai-assistants/apiview-properties.json @@ -48,7 +48,13 @@ "azure.ai.assistants.models.MessageDeltaTextUrlCitationDetails": "Azure.AI.Assistants.MessageDeltaTextUrlCitationDetails", "azure.ai.assistants.models.MessageImageFileContent": "Azure.AI.Assistants.MessageImageFileContent", "azure.ai.assistants.models.MessageImageFileDetails": "Azure.AI.Assistants.MessageImageFileDetails", + "azure.ai.assistants.models.MessageImageFileParam": "Azure.AI.Assistants.MessageImageFileParam", + "azure.ai.assistants.models.MessageImageUrlParam": "Azure.AI.Assistants.MessageImageUrlParam", "azure.ai.assistants.models.MessageIncompleteDetails": "Azure.AI.Assistants.MessageIncompleteDetails", + "azure.ai.assistants.models.MessageInputContentBlock": "Azure.AI.Assistants.MessageInputContentBlock", + "azure.ai.assistants.models.MessageInputImageFileBlock": "Azure.AI.Assistants.MessageInputImageFileBlock", + "azure.ai.assistants.models.MessageInputImageUrlBlock": "Azure.AI.Assistants.MessageInputImageUrlBlock", + "azure.ai.assistants.models.MessageInputTextBlock": "Azure.AI.Assistants.MessageInputTextBlock", "azure.ai.assistants.models.MessageTextAnnotation": "Azure.AI.Assistants.MessageTextAnnotation", "azure.ai.assistants.models.MessageTextContent": "Azure.AI.Assistants.MessageTextContent", "azure.ai.assistants.models.MessageTextDetails": "Azure.AI.Assistants.MessageTextDetails", @@ -140,7 +146,6 @@ "azure.ai.assistants.models.UpdateCodeInterpreterToolResourceOptions": "Azure.AI.Assistants.UpdateCodeInterpreterToolResourceOptions", "azure.ai.assistants.models.UpdateFileSearchToolResourceOptions": 
"Azure.AI.Assistants.UpdateFileSearchToolResourceOptions", "azure.ai.assistants.models.UpdateToolResourcesOptions": "Azure.AI.Assistants.UpdateToolResourcesOptions", - "azure.ai.assistants.models.UploadFileRequest": "Azure.AI.Assistants.uploadFile.Request.anonymous", "azure.ai.assistants.models.VectorStore": "Azure.AI.Assistants.VectorStore", "azure.ai.assistants.models.VectorStoreChunkingStrategyRequest": "Azure.AI.Assistants.VectorStoreChunkingStrategyRequest", "azure.ai.assistants.models.VectorStoreAutoChunkingStrategyRequest": "Azure.AI.Assistants.VectorStoreAutoChunkingStrategyRequest", @@ -166,6 +171,8 @@ "azure.ai.assistants.models.ResponseFormat": "Azure.AI.Assistants.ResponseFormat", "azure.ai.assistants.models.ListSortOrder": "Azure.AI.Assistants.ListSortOrder", "azure.ai.assistants.models.MessageRole": "Azure.AI.Assistants.MessageRole", + "azure.ai.assistants.models.MessageBlockType": "Azure.AI.Assistants.MessageBlockType", + "azure.ai.assistants.models.ImageDetailLevel": "Azure.AI.Assistants.ImageDetailLevel", "azure.ai.assistants.models.MessageStatus": "Azure.AI.Assistants.MessageStatus", "azure.ai.assistants.models.MessageIncompleteDetailsReason": "Azure.AI.Assistants.MessageIncompleteDetailsReason", "azure.ai.assistants.models.RunStatus": "Azure.AI.Assistants.RunStatus", @@ -217,7 +224,6 @@ "azure.ai.assistants.AssistantsClient.get_run_step": "Azure.AI.Assistants.getRunStep", "azure.ai.assistants.AssistantsClient.list_run_steps": "Azure.AI.Assistants.listRunSteps", "azure.ai.assistants.AssistantsClient.list_files": "Azure.AI.Assistants.listFiles", - "azure.ai.assistants.AssistantsClient.upload_file": "Azure.AI.Assistants.uploadFile", "azure.ai.assistants.AssistantsClient.delete_file": "Azure.AI.Assistants.deleteFile", "azure.ai.assistants.AssistantsClient.get_file": "Azure.AI.Assistants.getFile", "azure.ai.assistants.AssistantsClient.list_vector_stores": "Azure.AI.Assistants.listVectorStores", diff --git 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py index 433f72ca45b5..6bd70ebaba05 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py @@ -43,8 +43,9 @@ class AssistantsClient(AssistantsClientOperationsMixin): credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "latest". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. :paramtype api_version: str """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py index 2b2e6944f1dd..fcf3bd499866 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py @@ -39,8 +39,9 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "latest". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. 
:paramtype api_version: str """ @@ -53,7 +54,7 @@ def __init__( credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any, ) -> None: - api_version: str = kwargs.pop("api_version", "latest") + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py index bf3858e5c3c9..c962d3131cef 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py @@ -52,7 +52,7 @@ def build_assistants_create_assistant_request(**kwargs: Any) -> HttpRequest: # _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -80,7 +80,7 @@ def build_assistants_list_assistants_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -107,7 +107,7 @@ def build_assistants_get_assistant_request(assistant_id: str, **kwargs: Any) -> _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str 
= kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -134,7 +134,7 @@ def build_assistants_update_assistant_request( # pylint: disable=name-too-long _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -162,7 +162,7 @@ def build_assistants_delete_assistant_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -187,7 +187,7 @@ def build_assistants_create_thread_request(**kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -208,7 +208,7 @@ def build_assistants_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRe _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -233,7 +233,7 @@ def build_assistants_update_thread_request(thread_id: str, **kwargs: Any) -> Htt _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -259,7 +259,7 @@ def build_assistants_delete_thread_request(thread_id: str, **kwargs: Any) -> Htt _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -284,7 +284,7 @@ def build_assistants_create_message_request(thread_id: str, **kwargs: Any) -> Ht _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -319,7 +319,7 @@ def build_assistants_list_messages_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = 
_headers.pop("Accept", "application/json") # Construct URL @@ -353,7 +353,7 @@ def build_assistants_get_message_request(thread_id: str, message_id: str, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -379,7 +379,7 @@ def build_assistants_update_message_request(thread_id: str, message_id: str, **k _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -409,7 +409,7 @@ def build_assistants_create_run_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -445,7 +445,7 @@ def build_assistants_list_runs_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -477,7 +477,7 @@ def 
build_assistants_get_run_request(thread_id: str, run_id: str, **kwargs: Any) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -503,7 +503,7 @@ def build_assistants_update_run_request(thread_id: str, run_id: str, **kwargs: A _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -533,7 +533,7 @@ def build_assistants_submit_tool_outputs_to_run_request( # pylint: disable=name _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -560,7 +560,7 @@ def build_assistants_cancel_run_request(thread_id: str, run_id: str, **kwargs: A _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -586,7 +586,7 @@ def 
build_assistants_create_thread_and_run_request(**kwargs: Any) -> HttpRequest _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -614,7 +614,7 @@ def build_assistants_get_run_step_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -652,7 +652,7 @@ def build_assistants_list_run_steps_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -689,7 +689,7 @@ def build_assistants_list_files_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -710,7 +710,7 @@ def build_assistants_upload_file_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -729,7 +729,7 @@ def build_assistants_delete_file_request(file_id: str, **kwargs: Any) -> HttpReq _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -753,7 +753,7 @@ def build_assistants_get_file_request(file_id: str, **kwargs: Any) -> HttpReques _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -779,7 +779,7 @@ def build_assistants_get_file_content_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/octet-stream") # Construct URL @@ -810,7 +810,7 @@ def build_assistants_list_vector_stores_request( # pylint: disable=name-too-lon _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: 
str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -838,7 +838,7 @@ def build_assistants_create_vector_store_request(**kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -861,7 +861,7 @@ def build_assistants_get_vector_store_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -888,7 +888,7 @@ def build_assistants_modify_vector_store_request( # pylint: disable=name-too-lo _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -916,7 +916,7 @@ def build_assistants_delete_vector_store_request( # pylint: disable=name-too-lo _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -949,7 +949,7 @@ def build_assistants_list_vector_store_files_request( # pylint: disable=name-to _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -986,7 +986,7 @@ def build_assistants_create_vector_store_file_request( # pylint: disable=name-t _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1014,7 +1014,7 @@ def build_assistants_get_vector_store_file_request( # pylint: disable=name-too- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1041,7 +1041,7 @@ def build_assistants_delete_vector_store_file_request( # pylint: disable=name-t _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1069,7 +1069,7 @@ def build_assistants_create_vector_store_file_batch_request( # pylint: disable= _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1097,7 +1097,7 @@ def build_assistants_get_vector_store_file_batch_request( # pylint: disable=nam _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1124,7 +1124,7 @@ def build_assistants_cancel_vector_store_file_batch_request( # pylint: disable= _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1159,7 +1159,7 @@ def build_assistants_list_vector_store_file_batch_files_request( # pylint: disa _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + 
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2413,7 +2413,7 @@ def create_message( thread_id: str, *, role: Union[str, _models.MessageRole], - content: str, + content: "_types.MessageInputContent", content_type: str = "application/json", attachments: Optional[List[_models.MessageAttachment]] = None, metadata: Optional[Dict[str, str]] = None, @@ -2424,17 +2424,16 @@ def create_message( :param thread_id: Identifier of the thread. Required. :type thread_id: str :keyword role: The role of the entity that is creating the message. Allowed values include: - - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Known values are: "user" and "assistant". Required. + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the agent (use this value to insert + messages from the agent into the conversation). Known values are: "user" and "assistant". + Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str + :keyword content: The content of the initial message. This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). Is either a str type or a [MessageInputContentBlock] type. Required. 
+ :paramtype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -2494,7 +2493,7 @@ def create_message( body: Union[JSON, IO[bytes]] = _Unset, *, role: Union[str, _models.MessageRole] = _Unset, - content: str = _Unset, + content: "_types.MessageInputContent" = _Unset, attachments: Optional[List[_models.MessageAttachment]] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any @@ -2506,17 +2505,16 @@ def create_message( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword role: The role of the entity that is creating the message. Allowed values include: - - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Known values are: "user" and "assistant". Required. + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the agent (use this value to insert + messages from the agent into the conversation). Known values are: "user" and "assistant". + Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str + :keyword content: The content of the initial message. This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). 
Is either a str type or a [MessageInputContentBlock] type. Required. + :paramtype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock] :keyword attachments: A list of files attached to the message, and the tools they should be added to. Default value is None. :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment] @@ -4525,33 +4523,16 @@ def list_files( return deserialized # type: ignore @overload - def upload_file(self, body: _models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Multipart body. Required. - :type body: ~azure.ai.assistants.models.UploadFileRequest - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - + def _upload_file(self, body: _models._models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile: ... @overload - def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Multipart body. Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ + def _upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: ... @distributed_trace - def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwargs: Any) -> _models.OpenAIFile: + def _upload_file(self, body: Union[_models._models.UploadFileRequest, JSON], **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. :param body: Multipart body. Is either a UploadFileRequest type or a JSON type. Required. - :type body: ~azure.ai.assistants.models.UploadFileRequest or JSON + :type body: ~azure.ai.assistants.models._models.UploadFileRequest or JSON :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py index de107d2f836f..9bd7d46ac371 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -184,7 +185,9 @@ def create_assistant( # pylint: disable=arguments-differ """ @overload - def create_assistant(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Assistant: + def create_assistant( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: """Creates a new assistant. :param body: Required. @@ -198,7 +201,9 @@ def create_assistant(self, body: JSON, *, content_type: str = "application/json" """ @overload - def create_assistant(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Assistant: + def create_assistant( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: """Creates a new assistant. :param body: Required. 
@@ -1728,7 +1733,9 @@ def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload event_handler.initialize(response_iterator, self._handle_submit_tool_outputs) - def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: _models.BaseAssistantEventHandler) -> None: + def _handle_submit_tool_outputs( + self, run: _models.ThreadRun, event_handler: _models.BaseAssistantEventHandler + ) -> None: if isinstance(run.required_action, _models.SubmitToolOutputsAction): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: @@ -1804,7 +1811,7 @@ def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: @distributed_trace def upload_file( self, - body: Optional[Union[_models.UploadFileRequest, JSON]] = None, + body: Optional[JSON] = None, *, file: Optional[FileType] = None, file_path: Optional[str] = None, @@ -1832,20 +1839,19 @@ def upload_file( :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. 
""" + # If a JSON body is provided directly, pass it along if body is not None: - return super().upload_file(body=body, **kwargs) + return super()._upload_file(body=body, **kwargs) + # Convert FilePurpose enum to string if necessary if isinstance(purpose, FilePurpose): purpose = purpose.value + # If file content is passed in directly if file is not None and purpose is not None: - file_body = _models.UploadFileRequest( - file=file, - purpose=purpose, - filename=filename - ) - return super().upload_file(body=file_body, **kwargs) + return super()._upload_file(body={"file": file, "purpose": purpose, "filename": filename}, **kwargs) + # If a file path is provided if file_path is not None and purpose is not None: if not os.path.isfile(file_path): raise FileNotFoundError(f"The file path provided does not exist: {file_path}") @@ -1854,16 +1860,11 @@ def upload_file( with open(file_path, "rb") as f: content = f.read() - # Determine filename and create correct FileType + # If no explicit filename is provided, use the base name base_filename = filename or os.path.basename(file_path) file_content: FileType = (base_filename, content) - file_body = _models.UploadFileRequest( - file=file_content, - purpose=purpose, - filename=filename - ) - return super().upload_file(body=file_body, **kwargs) + return super()._upload_file(body={"file": file_content, "purpose": purpose}, **kwargs) except IOError as e: raise IOError(f"Unable to read file: {file_path}") from e @@ -2610,7 +2611,7 @@ def scope(self) -> Dict[str, str]: } -__all__: List[str] = ['AssistantsClient'] # Add all objects you want publicly available to users at this package level +__all__: List[str] = ["AssistantsClient"] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py index a3cd7f954106..af5212be9e0f 100644 --- 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_types.py @@ -6,7 +6,7 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import TYPE_CHECKING, Union +from typing import List, TYPE_CHECKING, Union if TYPE_CHECKING: from . import models as _models @@ -17,6 +17,7 @@ "_models.AssistantsApiResponseFormat", "_models.ResponseFormatJsonSchemaType", ] +MessageInputContent = Union[str, List["_models.MessageInputContentBlock"]] MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"] AssistantsApiToolChoiceOption = Union[ str, str, "_models.AssistantsApiToolChoiceOptionMode", "_models.AssistantsNamedToolChoice" diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py index 5efb9a86adcc..fd0dcd3fbe4e 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py @@ -43,8 +43,9 @@ class AssistantsClient(AssistantsClientOperationsMixin): credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "latest". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. 
:paramtype api_version: str """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py index b5a7bf17f277..d991c5beafcb 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py @@ -39,8 +39,9 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "latest". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. :paramtype api_version: str """ @@ -53,7 +54,7 @@ def __init__( credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any, ) -> None: - api_version: str = kwargs.pop("api_version", "latest") + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py index 5db7010cfdf3..172b51ab51a9 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py @@ -1309,7 +1309,7 @@ async def create_message( thread_id: str, *, role: Union[str, _models.MessageRole], - content: str, + content: "_types.MessageInputContent", content_type: str = "application/json", attachments: Optional[List[_models.MessageAttachment]] = 
None, metadata: Optional[Dict[str, str]] = None, @@ -1320,17 +1320,16 @@ async def create_message( :param thread_id: Identifier of the thread. Required. :type thread_id: str :keyword role: The role of the entity that is creating the message. Allowed values include: - - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Known values are: "user" and "assistant". Required. + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the agent (use this value to insert + messages from the agent into the conversation). Known values are: "user" and "assistant". + Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str + :keyword content: The content of the initial message. This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). Is either a str type or a [MessageInputContentBlock] type. Required. + :paramtype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -1390,7 +1389,7 @@ async def create_message( body: Union[JSON, IO[bytes]] = _Unset, *, role: Union[str, _models.MessageRole] = _Unset, - content: str = _Unset, + content: "_types.MessageInputContent" = _Unset, attachments: Optional[List[_models.MessageAttachment]] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any @@ -1402,17 +1401,16 @@ async def create_message( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword role: The role of the entity that is creating the message. Allowed values include: - - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Known values are: "user" and "assistant". Required. + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the agent (use this value to insert + messages from the agent into the conversation). Known values are: "user" and "assistant". + Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str + :keyword content: The content of the initial message. This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). Is either a str type or a [MessageInputContentBlock] type. Required. 
+ :paramtype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock] :keyword attachments: A list of files attached to the message, and the tools they should be added to. Default value is None. :paramtype attachments: list[~azure.ai.assistants.models.MessageAttachment] @@ -3421,33 +3419,18 @@ async def list_files( return deserialized # type: ignore @overload - async def upload_file(self, body: _models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Multipart body. Required. - :type body: ~azure.ai.assistants.models.UploadFileRequest - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - + async def _upload_file(self, body: _models._models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile: ... @overload - async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Multipart body. Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.assistants.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ + async def _upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: ... @distributed_trace_async - async def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwargs: Any) -> _models.OpenAIFile: + async def _upload_file( + self, body: Union[_models._models.UploadFileRequest, JSON], **kwargs: Any + ) -> _models.OpenAIFile: """Uploads a file for use by other operations. :param body: Multipart body. Is either a UploadFileRequest type or a JSON type. Required. - :type body: ~azure.ai.assistants.models.UploadFileRequest or JSON + :type body: ~azure.ai.assistants.models._models.UploadFileRequest or JSON :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py index 292578c140f1..8e56156b502f 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py @@ -36,16 +36,20 @@ def get_token( enable_cae: bool = False, **kwargs: Any, ) -> "AccessToken": - return concurrent.futures.ThreadPoolExecutor().submit( - asyncio.run, - self._async_credential.get_token( - *scopes, - claims=claims, - tenant_id=tenant_id, - enable_cae=enable_cae, - **kwargs, - ), - ).result() + return ( + concurrent.futures.ThreadPoolExecutor() + .submit( + asyncio.run, + self._async_credential.get_token( + *scopes, + claims=claims, + tenant_id=tenant_id, + enable_cae=enable_cae, + **kwargs, + ), + ) + .result() + ) __all__: List[str] = [] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py index fa880f254f35..ccdad560d92f 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py @@ -1,8 +1,9 @@ +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ -from openai.types import file_purpose + """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize @@ -54,10 +55,7 @@ class AssistantsClient(AssistantsClientGenerated): - - - - + def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._toolset: Dict[str, _models.AsyncToolSet] = {} @@ -186,7 +184,9 @@ async def create_assistant( # pylint: disable=arguments-differ """ @overload - async def create_assistant(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Assistant: + async def create_assistant( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: """Creates a new assistant. :param body: Required. @@ -1796,7 +1796,7 @@ async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: @distributed_trace_async async def upload_file( self, - body: Optional[Union[_models.UploadFileRequest, JSON]] = None, + body: Optional[JSON] = None, *, file: Optional[FileType] = None, file_path: Optional[str] = None, @@ -1824,19 +1824,16 @@ async def upload_file( :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. 
""" + # If a JSON body is provided directly, pass it along if body is not None: - return await super().upload_file(body=body, **kwargs) + return await super()._upload_file(body=body, **kwargs) + # Convert FilePurpose enum to string if necessary if isinstance(purpose, FilePurpose): purpose = purpose.value if file is not None and purpose is not None: - file_body = _models.UploadFileRequest( - file=file, - purpose=purpose, - filename=filename - ) - return await super().upload_file(body=file_body, **kwargs) + return await super()._upload_file(body={"file": file, "purpose": purpose, "filename": filename}, **kwargs) if file_path is not None and purpose is not None: if not os.path.isfile(file_path): @@ -1846,16 +1843,11 @@ async def upload_file( with open(file_path, "rb") as f: content = f.read() - # Determine filename and create correct FileType + # If no explicit filename is provided, use the base name base_filename = filename or os.path.basename(file_path) file_content: FileType = (base_filename, content) - file_body = _models.UploadFileRequest( - file=file_content, - purpose=purpose, - filename=filename - ) - return await super().upload_file(body=file_body, **kwargs) + return await super()._upload_file(body={"file": file_content, "purpose": purpose}, **kwargs) except IOError as e: raise IOError(f"Unable to read file: {file_path}.") from e @@ -2522,7 +2514,7 @@ def write_file(collected_chunks: list): except (ValueError, RuntimeError, TypeError, IOError) as e: logger.error("An error occurred in save_file: %s", e) raise - + @classmethod def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> Self: """ @@ -2612,7 +2604,7 @@ def scope(self) -> Dict[str, str]: } -__all__: List[str] = ['AssistantsClient'] # Add all objects you want publicly available to users at this package level +__all__: List[str] = ["AssistantsClient"] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py index 336ad4ce9f17..5a3c3ab7000b 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/__init__.py @@ -60,7 +60,13 @@ MessageDeltaTextUrlCitationDetails, MessageImageFileContent, MessageImageFileDetails, + MessageImageFileParam, + MessageImageUrlParam, MessageIncompleteDetails, + MessageInputContentBlock, + MessageInputImageFileBlock, + MessageInputImageUrlBlock, + MessageInputTextBlock, MessageTextAnnotation, MessageTextContent, MessageTextDetails, @@ -153,7 +159,6 @@ UpdateCodeInterpreterToolResourceOptions, UpdateFileSearchToolResourceOptions, UpdateToolResourcesOptions, - UploadFileRequest, VectorStore, VectorStoreAutoChunkingStrategyRequest, VectorStoreAutoChunkingStrategyResponse, @@ -184,8 +189,10 @@ ErrorEvent, FilePurpose, FileState, + ImageDetailLevel, IncompleteDetailsReason, ListSortOrder, + MessageBlockType, MessageIncompleteDetailsReason, MessageRole, MessageStatus, @@ -262,7 +269,13 @@ "MessageDeltaTextUrlCitationDetails", "MessageImageFileContent", "MessageImageFileDetails", + "MessageImageFileParam", + "MessageImageUrlParam", "MessageIncompleteDetails", + "MessageInputContentBlock", + "MessageInputImageFileBlock", + "MessageInputImageUrlBlock", + "MessageInputTextBlock", "MessageTextAnnotation", "MessageTextContent", "MessageTextDetails", @@ -355,7 +368,6 @@ "UpdateCodeInterpreterToolResourceOptions", "UpdateFileSearchToolResourceOptions", "UpdateToolResourcesOptions", - "UploadFileRequest", "VectorStore", "VectorStoreAutoChunkingStrategyRequest", "VectorStoreAutoChunkingStrategyResponse", @@ -383,8 +395,10 @@ "ErrorEvent", "FilePurpose", "FileState", + "ImageDetailLevel", "IncompleteDetailsReason", "ListSortOrder", + "MessageBlockType", "MessageIncompleteDetailsReason", "MessageRole", "MessageStatus", diff --git 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py index 3f2e6561663c..667871cc972b 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_enums.py @@ -205,6 +205,17 @@ class FileState(str, Enum, metaclass=CaseInsensitiveEnumMeta): terminal state.""" +class ImageDetailLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies an image's detail level. Can be 'auto', 'low', 'high', or an unknown future value.""" + + AUTO = "auto" + """Automatically select an appropriate detail level.""" + LOW = "low" + """Use a lower detail level to reduce bandwidth or cost.""" + HIGH = "high" + """Use a higher detail level—potentially more resource-intensive.""" + + class IncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. @@ -225,6 +236,19 @@ class ListSortOrder(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies a descending sort order.""" +class MessageBlockType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies the kind of content block within a message. Could be text, an image file, an external + image URL, or an unknown future type. 
+ """ + + TEXT = "text" + """Indicates a block containing text content.""" + IMAGE_FILE = "image_file" + """Indicates a block referencing an internally uploaded image file.""" + IMAGE_URL = "image_url" + """Indicates a block referencing an external image URL.""" + + class MessageIncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): """A set of reasons describing why a message is marked as incomplete.""" diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py index 3d4ce4433874..3a916bf99252 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py @@ -15,6 +15,7 @@ from .._model_base import rest_discriminator, rest_field from .._vendor import FileType from ._enums import ( + MessageBlockType, OpenApiAuthType, RunStepType, VectorStoreChunkingStrategyRequestType, @@ -1903,6 +1904,80 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +class MessageImageFileParam(_model_base.Model): + """Defines how an internally uploaded image file is referenced when creating an image-file block. + + :ivar file_id: The ID of the previously uploaded image file. Required. + :vartype file_id: str + :ivar detail: Optional detail level for the image (auto, low, or high). Known values are: + "auto", "low", and "high". + :vartype detail: str or ~azure.ai.assistants.models.ImageDetailLevel + """ + + file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the previously uploaded image file. Required.""" + detail: Optional[Union[str, "_models.ImageDetailLevel"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Optional detail level for the image (auto, low, or high). 
Known values are: \"auto\", \"low\", + and \"high\".""" + + @overload + def __init__( + self, + *, + file_id: str, + detail: Optional[Union[str, "_models.ImageDetailLevel"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageImageUrlParam(_model_base.Model): + """Defines how an external image URL is referenced when creating an image-URL block. + + :ivar url: The publicly accessible URL of the external image. Required. + :vartype url: str + :ivar detail: Optional detail level for the image (auto, low, or high). Defaults to 'auto' if + not specified. Known values are: "auto", "low", and "high". + :vartype detail: str or ~azure.ai.assistants.models.ImageDetailLevel + """ + + url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The publicly accessible URL of the external image. Required.""" + detail: Optional[Union[str, "_models.ImageDetailLevel"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Optional detail level for the image (auto, low, or high). Defaults to 'auto' if not specified. + Known values are: \"auto\", \"low\", and \"high\".""" + + @overload + def __init__( + self, + *, + url: str, + detail: Optional[Union[str, "_models.ImageDetailLevel"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class MessageIncompleteDetails(_model_base.Model): """Information providing additional detail about a message entering an incomplete status. 
@@ -1937,6 +2012,146 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +class MessageInputContentBlock(_model_base.Model): + """Defines a single content block when creating a message. The 'type' field determines whether it + is text, an image file, or an external image URL, etc. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageInputImageFileBlock, MessageInputImageUrlBlock, MessageInputTextBlock + + :ivar type: Specifies which kind of content block this is (text, image_file, image_url, etc.). + Required. Known values are: "text", "image_file", and "image_url". + :vartype type: str or ~azure.ai.assistants.models.MessageBlockType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) + """Specifies which kind of content block this is (text, image_file, image_url, etc.). Required. + Known values are: \"text\", \"image_file\", and \"image_url\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageInputImageFileBlock(MessageInputContentBlock, discriminator="image_file"): + """An image-file block in a new message, referencing an internally uploaded image by file ID. + + :ivar type: Must be 'image_file' for an internally uploaded image block. Required. Indicates a + block referencing an internally uploaded image file. + :vartype type: str or ~azure.ai.assistants.models.IMAGE_FILE + :ivar image_file: Information about the referenced image file, including file ID and optional + detail level. Required. 
+ :vartype image_file: ~azure.ai.assistants.models.MessageImageFileParam + """ + + type: Literal[MessageBlockType.IMAGE_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Must be 'image_file' for an internally uploaded image block. Required. Indicates a block + referencing an internally uploaded image file.""" + image_file: "_models.MessageImageFileParam" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Information about the referenced image file, including file ID and optional detail level. + Required.""" + + @overload + def __init__( + self, + *, + image_file: "_models.MessageImageFileParam", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=MessageBlockType.IMAGE_FILE, **kwargs) + + +class MessageInputImageUrlBlock(MessageInputContentBlock, discriminator="image_url"): + """An image-URL block in a new message, referencing an external image by URL. + + :ivar type: Must be 'image_url' for an externally hosted image block. Required. Indicates a + block referencing an external image URL. + :vartype type: str or ~azure.ai.assistants.models.IMAGE_URL + :ivar image_url: Information about the external image URL, including the URL and optional + detail level. Required. + :vartype image_url: ~azure.ai.assistants.models.MessageImageUrlParam + """ + + type: Literal[MessageBlockType.IMAGE_URL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Must be 'image_url' for an externally hosted image block. Required. 
Indicates a block + referencing an external image URL.""" + image_url: "_models.MessageImageUrlParam" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Information about the external image URL, including the URL and optional detail level. + Required.""" + + @overload + def __init__( + self, + *, + image_url: "_models.MessageImageUrlParam", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=MessageBlockType.IMAGE_URL, **kwargs) + + +class MessageInputTextBlock(MessageInputContentBlock, discriminator="text"): + """A text block in a new message, containing plain text content. + + :ivar type: Must be 'text' for a text block. Required. Indicates a block containing text + content. + :vartype type: str or ~azure.ai.assistants.models.TEXT + :ivar text: The plain text content for this block. Required. + :vartype text: str + """ + + type: Literal[MessageBlockType.TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Must be 'text' for a text block. Required. Indicates a block containing text content.""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The plain text content for this block. Required.""" + + @overload + def __init__( + self, + *, + text: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=MessageBlockType.TEXT, **kwargs) + + class MessageTextAnnotation(_model_base.Model): """An abstract representation of an annotation to text thread message content. 
@@ -5232,21 +5447,20 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ThreadMessageOptions(_model_base.Model): - """A single message within an assistant thread, as provided during that thread's creation for its - initial state. + """A single message within an agent thread, + as provided during that thread's creation for its initial state. :ivar role: The role of the entity that is creating the message. Allowed values include: - - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Required. Known values are: "user" and "assistant". + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the agent (use this value to insert + messages from the agent into the conversation). Required. Known values are: "user" and + "assistant". :vartype role: str or ~azure.ai.assistants.models.MessageRole - :ivar content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :vartype content: str + :ivar content: The content of the initial message. This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). Required. Is either a str type or a [MessageInputContentBlock] type. + :vartype content: str or list[~azure.ai.assistants.models.MessageInputContentBlock] :ivar attachments: A list of files attached to the message, and the tools they should be added to. 
:vartype attachments: list[~azure.ai.assistants.models.MessageAttachment] @@ -5258,16 +5472,15 @@ class ThreadMessageOptions(_model_base.Model): role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The role of the entity that is creating the message. Allowed values include: - - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Required. Known values are: \"user\" and \"assistant\".""" - content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The textual content of the initial message. Currently, robust input including images and - annotated text may only be provided via - a separate call to the create message API. Required.""" + ``user``, which indicates the message is sent by an actual user (and should be + used in most cases to represent user-generated messages), and ``assistant``, + which indicates the message is generated by the agent (use this value to insert + messages from the agent into the conversation). Required. Known values are: \"user\" and + \"assistant\".""" + content: "_types.MessageInputContent" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The content of the initial message. This may be a basic string (if you only + need text) or an array of typed content blocks (for example, text, image_file, + image_url, and so on). Required. 
Is either a str type or a [MessageInputContentBlock] type.""" attachments: Optional[List["_models.MessageAttachment"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -5282,7 +5495,7 @@ def __init__( self, *, role: Union[str, "_models.MessageRole"], - content: str, + content: "_types.MessageInputContent", attachments: Optional[List["_models.MessageAttachment"]] = None, metadata: Optional[Dict[str, str]] = None, ) -> None: ... diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py index ba79c2794d4e..458875b29d44 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -675,7 +676,7 @@ def __init__( :type index_connection_id: str :param index_name: Name of Index in search resource to be used by tool. :type index_name: str - :param query_type: Type of query in an AIIndexResource attached to this assistant. + :param query_type: Type of query in an AIIndexResource attached to this assistant. Default value is AzureAISearchQueryType.SIMPLE. :type query_type: AzureAISearchQueryType :param filter: Odata filter string for search resource. 
@@ -1271,9 +1272,9 @@ class BaseAsyncAssistantEventHandler(AsyncIterator[T]): def __init__(self) -> None: self.response_iterator: Optional[AsyncIterator[bytes]] = None - self.submit_tool_outputs: Optional[Callable[[ThreadRun, "BaseAsyncAssistantEventHandler[T]"], Awaitable[None]]] = ( - None - ) + self.submit_tool_outputs: Optional[ + Callable[[ThreadRun, "BaseAsyncAssistantEventHandler[T]"], Awaitable[None]] + ] = None self.buffer: Optional[bytes] = None def initialize( @@ -1394,7 +1395,9 @@ def until_done(self) -> None: pass -class AsyncAssistantEventHandler(BaseAsyncAssistantEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]): +class AsyncAssistantEventHandler( + BaseAsyncAssistantEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]] +): async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: event_type, event_data_obj = _parse_event(event_data_str) @@ -1403,9 +1406,9 @@ async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventDat and event_data_obj.status == "requires_action" and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) ): - await cast(Callable[[ThreadRun, "BaseAsyncAssistantEventHandler"], Awaitable[None]], self.submit_tool_outputs)( - event_data_obj, self - ) + await cast( + Callable[[ThreadRun, "BaseAsyncAssistantEventHandler"], Awaitable[None]], self.submit_tool_outputs + )(event_data_obj, self) func_rt: Optional[EventFunctionReturnT] = None try: @@ -1762,7 +1765,7 @@ def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTe "MessageTextFileCitationAnnotation", "MessageDeltaChunk", "MessageAttachment", -] +] def patch_sdk(): diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py index cc45b34ae7d5..b9c4c0f6003a 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py 
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py @@ -10,8 +10,4 @@ from ._utils import enable_telemetry from ._trace_function import trace_function -__all__ = [ - "AIAssistantsInstrumentor", - "enable_telemetry" - "trace_function" -] +__all__ = ["AIAssistantsInstrumentor", "enable_telemetry", "trace_function"] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py index 8729f04d1f6e..16fb9520212d 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py @@ -532,7 +532,9 @@ def start_submit_tool_outputs_span( ) -> "Optional[AbstractSpan]": run_span = event_handler.span if isinstance(event_handler, _AssistantEventHandlerTraceWrapper) else None if run_span is None: - run_span = event_handler.span if isinstance(event_handler, _AsyncAssistantEventHandlerTraceWrapper) else None + run_span = ( + event_handler.span if isinstance(event_handler, _AsyncAssistantEventHandlerTraceWrapper) else None + ) if run_span: recorded = self._add_tool_message_events(run_span, tool_outputs) @@ -1527,7 +1529,13 @@ def _assistants_apis(self): ("azure.ai.assistants.models", "AssistantRunStream", "__exit__", TraceType.AssistantS, "__exit__"), ) async_apis = ( - ("azure.ai.assistants.aio", "AssistantsClient", "create_assistant", TraceType.AssistantS, "assistant_create"), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_assistant", + TraceType.AssistantS, + "assistant_create", + ), ( "azure.ai.assistants.aio", "AssistantsClient", diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py index 3d897fd1c874..424771f27914 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py 
+++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -281,7 +282,7 @@ def enable_telemetry(destination: Union[TextIO, str, None] = None, **kwargs) -> log_exporter = _get_log_exporter(destination) _configure_logging(log_exporter) - + try: from azure.ai.assistants.telemetry import AIAssistantsInstrumentor diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py index 29cbdc66d7a7..3f6ea212157d 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py @@ -43,7 +43,7 @@ async def main() -> None: ) # Enable Azure Monitor tracing - application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] + application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] configure_azure_monitor(connection_string=application_insights_connection_string) # enable additional instrumentations diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py index 90c8ce2eacbc..e5873c4115c0 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py @@ -22,12 +22,7 @@ import asyncio from azure.ai.assistants.aio import AssistantsClient -from azure.ai.assistants.models import ( - CodeInterpreterTool, - FilePurpose, - 
ListSortOrder, - MessageRole -) +from azure.ai.assistants.models import CodeInterpreterTool, FilePurpose, ListSortOrder, MessageRole from azure.identity.aio import DefaultAzureCredential from pathlib import Path @@ -96,9 +91,7 @@ async def main() -> None: print(f"Start Index: {file_path_annotation.start_index}") print(f"End Index: {file_path_annotation.end_index}") file_name = Path(file_path_annotation.text).name - await assistants_client.save_file( - file_id=file_path_annotation.file_path.file_id, file_name=file_name - ) + await assistants_client.save_file(file_id=file_path_annotation.file_path.file_id, file_name=file_name) print(f"Saved image file to: {Path.cwd() / file_name}") await assistants_client.delete_assistant(assistant.id) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py index c3f2b74b7d6d..ff6cee650009 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py @@ -22,12 +22,7 @@ import asyncio import os from azure.ai.assistants.aio import AssistantsClient -from azure.ai.assistants.models import ( - CodeInterpreterTool, - FilePurpose, - MessageAttachment, - ListSortOrder -) +from azure.ai.assistants.models import CodeInterpreterTool, FilePurpose, MessageAttachment, ListSortOrder from azure.identity.aio import DefaultAzureCredential diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py new file mode 100644 index 000000000000..e56e1311aae5 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py @@ -0,0 +1,113 @@ +# 
------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations using image file input for + the Azure Assistants service using an asynchronous client. + +USAGE: + python sample_assistants_image_input_base64_async.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio +import os, time, base64 +from typing import List +from azure.ai.assistants.aio import AssistantsClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.assistants.models import ( + MessageTextContent, + MessageInputContentBlock, + MessageImageUrlParam, + MessageInputTextBlock, + MessageInputImageUrlBlock, +) + + +def image_to_base64(image_path: str) -> str: + """ + Convert an image file to a Base64-encoded string. + + :param image_path: The path to the image file (e.g. 'image_file.png') + :return: A Base64-encoded string representing the image. + :raises FileNotFoundError: If the provided file path does not exist. + :raises OSError: If there's an error reading the file. 
+ """ + if not os.path.isfile(image_path): + raise FileNotFoundError(f"File not found at: {image_path}") + + try: + with open(image_path, "rb") as image_file: + file_data = image_file.read() + return base64.b64encode(file_data).decode("utf-8") + except Exception as exc: + raise OSError(f"Error reading file '{image_path}'") from exc + + +async def main(): + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as assistants_client: + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + input_message = "Hello, what is in the image ?" + image_base64 = image_to_base64("../image_file.png") + img_url = f"data:image/png;base64,{image_base64}" + url_param = MessageImageUrlParam(url=img_url, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageUrlBlock(image_url=url_param), + ] + message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await 
assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + print(f"Messages: {messages}") + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py new file mode 100644 index 000000000000..979861978d7f --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py @@ -0,0 +1,94 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations using image file input for the + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_image_input_file.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" +import asyncio +import os, time +from typing import List +from azure.ai.assistants.aio import AssistantsClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.assistants.models import ( + MessageTextContent, + MessageInputContentBlock, + MessageImageFileParam, + MessageInputTextBlock, + MessageInputImageFileBlock, +) + + +async def main(): + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as assistants_client: + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + image_file = await assistants_client.upload_file_and_poll(file_path="../image_file.png", purpose="assistants") + print(f"Uploaded file, file ID: {image_file.id}") + + input_message = "Hello, what is in the image ?" 
+ file_param = MessageImageFileParam(file_id=image_file.id, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageFileBlock(image_file=file_param), + ] + message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + print(f"Messages: {messages}") + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py new file mode 100644 index 000000000000..25e1a3e5dd62 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py @@ -0,0 +1,93 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations using image url input for the + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_image_input_url.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio +import os, time +from typing import List +from azure.ai.assistants.aio import AssistantsClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.assistants.models import ( + MessageTextContent, + MessageInputContentBlock, + MessageImageUrlParam, + MessageInputTextBlock, + MessageInputImageUrlBlock, +) + + +async def main(): + async with DefaultAzureCredential() as creds: + async with AssistantsClient.from_connection_string( + credential=creds, + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as assistants_client: + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + input_message = "Hello, what is in the image ?" 
+ url_param = MessageImageUrlParam(url=image_url, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageUrlBlock(image_url=url_param), + ] + message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. 
+ for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + print(f"Messages: {messages}") + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py index c6bf104b05fb..d17575f2d6fa 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py @@ -123,7 +123,9 @@ async def main() -> None: print(f"Created message, message ID {message.id}") async with await assistants_client.create_stream( - thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler(functions, assistants_client) + thread_id=thread.id, + assistant_id=assistant.id, + event_handler=MyEventHandler(functions, assistants_client), ) as stream: await stream.until_done() diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py index e1e5c27e47ee..dc6b5d0fd2c8 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py index 981002f22710..c7fef5c65435 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py @@ -42,9 +42,7 @@ async def main(): asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) - vector_store = await assistants_client.create_vector_store_and_poll( - file_ids=[], name="sample_vector_store" - ) + vector_store = await assistants_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") print(f"Created vector store, vector store ID: {vector_store.id}") # Add the file to the vector store or you can supply file ids in the vector store creation diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py index 2c1eabc34e87..40b606917baa 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py @@ -39,9 +39,7 @@ async def main() -> None: print(f"Uploaded file, file ID: {file.id}") # Create a vector store with no file and wait for it to be processed - vector_store = await assistants_client.create_vector_store_and_poll( - file_ids=[], name="sample_vector_store" - ) + vector_store = await assistants_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") print(f"Created vector store, vector store ID: {vector_store.id}") # Add 
the file to the vector store or you can supply file ids in the vector store creation diff --git a/sdk/ai/azure-ai-assistants/samples/image_file.png b/sdk/ai/azure-ai-assistants/samples/image_file.png new file mode 100644 index 0000000000000000000000000000000000000000..50ae6c65367af30a10642fc910cef97bfe765796 GIT binary patch literal 183951 zcmdS>hdbB*`#z3qX-cJ1Dnv=jDmzq0JJ}>7D?(&%Dv=o#6`^c06WL@$QTCoi_TF3H z^Y;9_kKb{;{)C_7c)ngoJjVTgzg@TMI?wYuubYRo)Mct|blWH>D5$Ppk&vaJ*qlQ_ zL3w%$C4OUlV2~C6Cukw5Y$0cI&%#>MOqb%Oro}@e6APpJT8vh@X6E-zjL&lLaGX8O zsBdBM&|HX<^TGe`1so=3dYnH)C93ctTOVGzZB9W!Uqt>}XIyP5O0kZD;;O_&d7I#o z4r}?n{mXn4hU*5mux*>;@Z7YI_eowU9dF|S-e!BPt2M{E4$x?OUGn+#ZI8Y8@i{vC z@kh#EpFdj9^hEY`_BccCPv4eOj*-;U=8lz|#!jj|hKcDn{gsbJ| z|9;ZFXFrVp=X03OoV#H~^FJT-qDAxnPcJFqhT1wNCN_Z|KYnc4y7i*6@`3K|Zoii= z*B?4`NZ{PL2(53Pv1&PdBO@a>)NZns1o7NwQpvZSj`lDRlbsyvI5*N&z`$?Z{5(6pT$YG5vw*7s0_hT*cPe+uLX6$a!)Eu`uAX6jLq7LV0c6XdD-^u+nd|k zB2=@?Vr3#I+dDdLs{Il>#l*D!=+UD#%X93V1sGv$%rfhh{$h0d zyUrOor2zLUW+eRBx@&7|AI=QO92PX&xVE^5x6^tTO9y{p&`t`@F6O+?b=b5>=Yw8tF znr*AlTb}qMepuLMSAS(lam5=>^NxHm*KPVW;hVCpMm_CjhxT2d@^Jqyzb*AskeSPq zcl;(=?U|-Rc}<<2^gDO%WL5aM*P!9!an<;P%!@*IzfuT0F3G1Em$>hK%cUunU#?($ z_eJ8J4C95tnD;XroUE){il0%_Cf3EgFuHe-rSkoGU*~_Tv{?HKDk>^T<8jhq>xD%` zY7%arU07NQzY%yQD?f%y)%U#P(%ye-%bZdnLat588ipN>4GsH7+cL_^7@WQ7cn$vE zcjnZ{FS*R%7+LoPKMx;1yx_6txb)|HH|rDc&>lHw^)x;{zI1+Oq=hC~J+EA@W2PbE zGWE(rkFQEbORDbn*X(y#RQkupo^RN^^J|gV^rM(12Pro$!^YEii=48k%>?7Us!;Y; zmh)7NYRds7Tt8TA3HR$c z-u*(8q!9V1Ap)lBD_*mU%gg&qwdv{T{GJrPE%Y#3E{%97_y-1RkmBk)>!f(>~d$OlYz+zyR)AHzl;qu;jO)xk(_=cKfM5f&= z+r7%*t*&C4^{Y>xKK(t?^0_GW*|W{9Umh6NI|iv-bo|mP?7VikV0G>ghem!v({fR9 z@nqHcf4}i57qKU~O}i!1j5}Jdq#t9d?ZWku9jR$M@w-rRPW9&PmoH!X`TIY`sxb=) z9N4_`@YirjuN_W_R^y!_0TTQaB&vH#ea>Dx(~JE%F+DAwZqTqp{R>M~s7Udx^Q%uk zd|=G$q@|~SJ~8ny7>|QuV!YrVUBZbp?xeD=Tw9mp*I)l$nCubKc=fHMw=ZR5@s$FAvm2D5vUd#f^F4Ps8;ezSh_K$nHIO(Blxl@oyJuQLc+V z+Z1hV1om^N#y0U9gXYh`fWF@nGW*@Ge8+*NU|mY`4x&gckXB_26~TRVX%gN zDXK}Tf-E_|l&@XeHaFJcg*~f1+5HXq^-Em5j`TLy_1pKK-X*edNJ 
zOCi`0-<$H0?O2C?)FT%%g^X}~uTp#!a?V=o$IIg?_V<72oDmQdJj%?x;p)|^1`O-Z z+fHq)i@qryBNf7B(kUEKk9?$o&6*Lbq>ZIne5CXwHMxO9J=YyE-=9TmEG3xSYkuZl zH^l`I{LnPNKM)Poo; z@jiHV7cFg!=^@-O5@7m!SFy0K_`+x8L3ZZa>LcdWcOJcU=FFLgL-i+B3!IK@r{$2q zHEuWRTB@zTu<`qnJ=@TaF`5%nzhmAz?YSuz`LnQa-QeKhm8(}}Y}s*ZIaZ@Z-@Ir) z*-UEHe^jVooNZ0f++a61vK58O#b0j+3fNno`+L@J*uZP7jYNt|ImW`mBqE~LV^zAw z&gN~yE+G;96%asXH}ji{$;JNnUVRC_8q@B}S&X$G6crVHXldyh5U{Vdu5LFCjSDWl z2eFxHH5xP^azZNjD}FJ#JYHZfYQn@k)R}i~s3|!)Rh{DvGaK8s4C9U>WJk$lNl8h~ za{rUfnWow=g6@nqCf?byeLKzT>j7-82*z-W!CICCg_^tNRf=KKcd_=^H_yrqu@5Ac z=SKew4{yJ9>lPCJmf}Ovk&%o%JbO_!1RR%mjJxvL?J|$h(HYES)9jWhe8;WrR@tVW zYqOh{Q+@C2kdP2=rxlx!uJOUaz+pZV<7b~ge=c7tDUp!4bg8JTi@}%S{QkVTrCCFV zwZ;Bs+;o0rdR$zbn6Iv-cVOl@^S&MQ!Zwc(bqAtWjF5&Fy1h8%U*?cHgsl5}bkqmY zSX^7X=Yk^B7)p4i^*H@ldzQ;*y($8ZG%Q)&hbQq5ty1rUY(GBjIezWK^*0;?)nOYx zDkXCqbKA3L&%{W}*Aa9TXxn5@BR@RM7!Xk$pskFDFb75yoGCOJH_n3F6U z<=NQ8%e!k`YPZnQqZhS(b@lZ0@`lV7CNyjGHwLgN6akP}Iu&GRR``;Q^QXv_ZMdVO zqbA)@$zq^-9bWMyFHhuAg0kJrz;3BvfyW3%MtqV@RlHI%mq9)2X^s5D>gwu?Np~zP zc&tWSU5|>p{mnV89VloXeJ#B0$B(Uf_VbtC@jZ}^xI`s0a_rbK#$(4e&X08zBWic= z+Qs*)LhOq z>)qB@8FI?Nb#Q##7q7aD?H+7+rF-h^tpoR)BCpca2tOEV&$XrbeDBwun>TkMYBz1) zPhM4;r1coPcNaUp|4#qr66dwWnB4~Q7JvIc|4PPp#U0CjU}Q9b%3f4eb<3tX`e$h= zC6c#TqDnf~!`~MG1@_`SC!|7!Z|`bgDQ`fWN}8|Ajx7Yf;FvksH}~ruf4ELYUS3|| zvmc|a>8B7JF_!O0GA7%%$Umy_TDDE z;giFnS%iPc18jrvV{xvoo1$;Nb9=mDvlz>(I3tt$_qSSGTN9ur{%q%4AxpNgj+_%e z6W`I6=F?N}*irH`@T|g~4iWpglKMEghRW%&fdTiW+2IJM)ujU>c051@ZjQ@yTdDpb z=}e4fbZMd9={Ki@J-^J>m~A<%+Aj(WREo?zIZ(r_sivjHw4Yt&X+yly_F=%MkJCCGC{7 zv=T%bx7CPpdc>R{0QA*YEc&&PTd^(3;{3*C7Y!p#ll_%)zm7#7C85g_xQuyeW>7or zMdVZu?k9YDD+M6M@Ba7BZeCtqt@8pxLPc$DhdMhu5AYcsKuUE%CD1~}+$60HR7!F0 zw7a{f-_3V?2<+|QmL%ZuH0xQi8-R0U>^GPX)*d%%P5W`z@;K8f@BP}1hxm*xAW$TS zIaM~NN}u1doA$@5x%Mpc<`hjA{g8+VX2k^M$6IMw^lwGv1RmnX0e!f)b*$0va5js(XAzYg4DXHip`Dzlcc4S*l7E_=9J@Kdbp}F~43*L2q zSM64?`xpA&naD?*2A!Mb%&rKIi|f z$VF{lKzu6LInq*=Q%QKzaz0hj}lRa=oXA8j7v$fUbLz!+85|RJ0QAr6Kn@s)A+l+?F%>k#BiW(Xel2cQ=VpjaL zv17vjn0Ung&dSRA15B@U`~BzMyez&br9(|FH>W;E 
zq2HM+_~k*H;@`f66HH9gePd%g&!0bElkM%jBVaJxVjmI!vXy6}BeKUpZIqXO$Wa<= zvaNwbC5N$!nkiEOA?Kad(^PY9CLO3uN4mSU>Q81)nTf2Sv@x@=TuQ5~s^Yv=w)o@3 z`@4U?zX-Q9>C7zzrb)8Cz4Xw`tSIv8EBQu^e213U^N;! z#gBv)e`ep;CJkpFApSu$)Ym#fb!xIt+zS00NAol)Dyn?E8->xQ^#U`embSJ6or(8h z!9CwR_f^Q*?%%&((jc3gDZI!VtUH`Z3or~2V0Yp>Axnc$&V-Kkb}q{y`I_wddikBZ zZ^&;!MV0Jbr!GUMg;MPGdUmizyfv`^x9=#dLNJ}kH#ZH7F)u?!fZL@XxZ<4dFtci zqpzkp+>)5|_zH5dOuB)Bjh&rmBW+Y?2XJ*6Dyif!!pSY)ak($+XLA11HQl`~c;|0~ zzTKOK0%zx%nXBNpuEsN8^{PVSex{F(jy?!)-L?CL=(hO)=H0so0dp?tihgvXIyA(q z@mdJHqOjoK+Nq!oE5N-Zyl2MPL*;-upFDX&YA|8Ug#UqWgNIHjBng3lNnE+I1>p zfq@$Q*#GqHJtS_BM|NZ<8;y4rl%ZwF^+A zNEC?41vxn!^p#S;^NF6Wt}Bp;`x+lJGxM~(qM6wla_QaQwv$Uoh=97vCr$5Lf5E{) z7=Iy!FEdj>`OcjPe0;3(C#IqLxDD5@R|q$p>`$?mRgf2LL%alJ4Y6URHH+O28ZqqO zzm(B+^V+o{a$iS|_+*%LZ7UEVL>{zzy4{|cpRiIa!tQzd!eh!2DkY zE2GkqlHVY|(yV(~Wx_Y22tGpsJaCM!7{m;9h@c)UHvXLQ`Lm0c*Y@()>|P%g;yvBy zcxlHw^TaP)peQOTVti8}lWSw@e^QRp=MX=~OXjB~_}t2nb2f`p$7SW@{`=!8rJX)> zyl3=%8=tuU2>;&EaRiGVE*J}NJT2cCCjLyKqUGw?P(yqKHmc3U@5d9Zg)W-`uAT~y zUJe9}Bt#6%{g!fuK?55&J>816Y?e&0;L5KdbCG=QvYj$6n<(CjI@;y$SyEI~To{T^ z<^l-SRJ(ME(rTpnG14+iRc~J@64<^|iX{N0u9hZU`Mj-bQ(|fxc5`xaQrvbvKR@0@ z`{0~$d#143t^IuE#U^)@mH(dG501+PcDk}MT~(9~7)x{}0D*@1(*X7)UrhH`(Ke2@ zXVJK9rsi8P|Bx#?f9^qfSHy^6BOoz($X9OMkN~#$Gc@FhhaAfz@jV&QyK&Q|Kd27w zUS8q`25g59AN~#L08fF|v;PG62En!J`3@e<&B~EcQH9^WZAwZ?nwXk8(>fpUzMn-x zp6fqhXZ>?}k&kB6?a3PFUHm&TOf+h!9B|v@x3i_usQKZ&Bm2itdzEvn*moWlcoZ3V zEMWe&vhoL+h)d8{o;*AoL<=I-obPxJDbx*@tlysGusAhP7eh^IVqLq!g~kn$*jn2w zMC@k1LZR3~k?H(Tv@uDI5XX221_Z0ZyAda7X-MAZRdcMUsOg2JZ8fBjN{cr$I6Ok6 zhne=2W;M=2Ymv7Vc>Q2*q$RvZ`t@|CS?x@RLG5R~Te)_#*OWyc{-z+Y2t^CEar2!o zMuqr_hWreuID`ZPn!JkHd=ZhUoMC)qC%v#cwp)4|GMU!QKuuBboMB_a?wgyr>}K@6 zkDOB+cck6ed!MrYNDm&gq{t;Ye&f^Lm1O^}fOQc%gBVrlj31321-&GXV5~DQzIICF z-RI9|RWgiyY9cNZ=}pB~@=n6~>gM6&{gXO}&ffDL;5YnFNsM2y4z5E4r!__ywWMAe zT_^kYd|Phj!#_{TC!RVk&HOM=6)hk{9=ikNN2ABVWo~Y6Vsf$&LIM_mDx{)uZrOCS zm6y=}ix)2%blnLP+jJv{=jwx77hJ_2uP)DP1Jcmhk2EA}Fc9i7)R-7Arpnt(n9gyUh2!h8B}*$qxAXJ 
zlhRUFCypH(P%9;LDlb&`OknI~Yt$pjAI9P*#!8W*2r8BQPXlHm$mGvaG(8%Tc(*b*OkB*L?*fMec)eZ!ZW-iYQ7W>(w6O4ETJW1@ zK9*j?u3&Lf=Y>Ddh-w2uy-tT$I@7er?P5N)oZWPv&oz@dfVnWU-tt9`9PmWtY>PO3 zp-61yp^y>EN1b{0+JG!Y>FxmCsLX&0;tWG{Qxcai_uEzJ>Z53YVKYO6O#T$+e}hFF zDjb(lbBe<%OH53R_P@1N0Yvl@7ngMP_c>Wvg-uPgd3Lj6_)jAF;Ihg9){g8;4%N&L zll)~MwN!LGU;U1XS4ZDGl-S#=lR!3l%Flupy4}0~1Jt^ZWz!6sDj9V%0KcMi|Kby7 zIr@<=4{+V3K(Y(ttD&L@3=GV)9NvG)ht4(X`kTJE&E4RqHStQEBkI%0y zYydIYCvvRZ%W}A}u&RohmP57L&fKqVs<(n$znU>YqktaN;@7O>{rmSbZ6=urcL3m) z(QURf-%U&V40~F$@X02+S5y}oZ}vXfw4KXw$s!=^jk!4jVaEY33T)t-w*!=^bgVZ* zg!WDM{i<8EKXuywpXkh>=4a=BPCMv$wt$7p*%wD&m6zY^M$PaX^pX@1x>uhrDE7R@ zLWCn#W4DJ?F8h59Uut1{|Nb;PJ3`cVeez{7vGvhXA)cTFzp*-WH2fD7WhX#e4hY+D z?LV!|D7@p>cI+Y~D9!E?4?3C-q_RXpV4>Ht&4vn?GWGoojIh*ig+SdY-GGnEP?QVb&Jl38H&Vmx87jvIq z2+T0&e{0GGPI;BfB!AYc&Ar3oBLb;ZN1MXZ3j)0gVUzx=Jwx| zYlyl|D&JO6L@`%Q;hLcVfV;Lf|aw3jLZS#_VN|Q>?LraPj<5g zGG^CUUWM6AcB|NPvYns#{sSG0%F0T*JAWAfuaTJRf_9zq-a|t}q#&89JB;x$AIzv9 z8v(8taTctEPm7|Ucp+Odv9Vq2+o1?Iu8##z52;PKF?K!14JU-5;lBAN(b`4#c z50>s89))NS$?q9&85tdI2tA9RPlioN4#!0xe_EqBNv2goT0wiuAfpS-3P|TvjHlv+ z0c`@zU%9xrv_31x(_R?M8J__46>gYCMI=`w>5({M9+*x41VtBll!2(tA?5tN|F;Pa z9M}Tpm*~7aH1y`hi^q_3Gyz7l#(L1BAn>9z@`a1b231wnyAK{DYV>Y@4D1xg^E4$T zBlFRy~`N48$uY&BKY-^0VFRP+W)I?(8oXgZ3=Tep~@p;Ruiv97juVs*aD zP{evKWwxsCsrUAe66iDB!jn$-4s(h_L6^DzNU1tcJzeJcKjh1}`df?R1JFiXldQAz z>%RpawU6h2&?+C}I24W+RQO^aK5*O3p6u3i!+2Hk1ht&~>(;Hi)MX9XhNwm$OtFrg z_i(S1!Q&9ikRak6DZ_t&W^(J6@AawU)Nf;u=Bm?8%Uj*7O&BR^v^&+aM1%u^K{qEU5shc;y0eV`}cg{Xz zaQd2KJx-z1A&i|=SXOp9la&hc0y;LIpzPF$he4hCpqQvK(3WA6wA_s4=QC<%R8fkP zJx;PTve#wo2iMngsAmH9bEm%;wJ-@&+>nwg#gmu>UmfQC+k>6^@bCBC6t`(x!Ay$r zrS5nLM2SV*KwOqPD0?~vg)~gkYrlSL+DU`@*vBX&UdF7rAJgf+n}Lj!3JVB1ETjw+>JnxOcI#t#rjhGC$_33HZd80CYltrV-tece8n^kd z@8`h;mGo`g+}uUWz`35GBKCbtcUg}fUDxF>?NeUtiN~n@_0c;0_Dt@C#KbUZ)?IXT zo1fmqPO#=+~rFV8zgyMo+rV14m=*7o_qSPd`3{>3Tl8xHJ@uC8GZkLC7?5i z2>POq+2y~?QMb7)2Bb-k4T%745&)+KXc8&N*frH5XiQyj-L{WQt7J=nU9YzS1rTM9 zzK$EJbNmtdbSMN_^q+;mw9C+^pwQv%6eM@e+q?O$;Qj2t%J}fPmcAJ@z5>)E-9*p# 
zvY^>;_TJB3zfNt{SuLev5Iqc0R1CXj9YwN6K`HuApEQdpWz2xFt}Ex-aOW?Noy(hZ zT%6Kv&#|Vy&{*B<dyafMzIZ{`v$9 zOLwWyA@ZaK3a9YRq~Dq-*K}GX?MfQ8p`qc-Xd5k16%m9b>N1a=IMIWmNs23nTDE`x zrAM3okjA8G_r|^3HW3hvD|0}eidBf;fBN+44_N%ehrJ)PeerOkmb!ku7`11IC|l{% ztu#-oY#Tl*guA^#WHhxh-fWvy{(>ai4SAR1QTI2mp1ND-CD9C#iMqBAy(Asn7iu9X z?gK$*MMdd>7We&1|9JJ{w@CTF!Tmz-^6PlO;iWR4txwJO-FZYP)~G4Z-Z~g6mvW_4 z`+WoDsAS>@xb~AVe zr~Y1@!3{BfyuY`+D+`)FGVu<@{A|O8XVeS^c_y_b=%WF*?4we1!sd3EYtg%8zcVa; zWYF7;?u&x3KCF?#ot4h3T>$y%n%KuR|D392qwaX_)Izzl!`0=U#28pobNy545O40 zK`vwBYPQH`LS2!1=Yy9~QYL4cG~$K#5_iC*=Z8=JTl%?`L4}%`)%*m^aY$7lfE1b&oje zk<#J~AW{-0>HH4>;4dI(YI1GO6PIK0Wo+`Xk9m6O!wbi!PLLj&B># zInhk&Mo#m5*>N+7hx2>HJUT#WKBcP^c87QUB1YFUt62`niZgG)&+)I+Vm>E6PCrt4=t_uzayn^ri2hc zr-eV812)rxij;nRyn%GOK<6bF^432>O$i5N(C^4juUxK_lIKT5z-vFd>|%%&EDgYj zH7T0nBq)oD!-x(YDsso>JO24zvo`W7n|MTaPL4K!eXL?4-P(M?zpcaL=mh%t?Xt14 z8BlE+$r5EheR=|Fe(Vlj^l?0hv}fFr4Z%<(e`V_WpF$W$=-|olLBHa@WKr%(Wv%C~f)6O2qE3F&F;B`901Q3K zr@as)HA#o3`orT^yOx%gF!emUSfylo%AJSZ-@bk8aBEuYbPLr{HpK*Y(2_Ww)*qo| zW`O_T-r=!j%Fxw_p9r3yg9)P8Ci~V3ND1H2nWUD}m(!#@#~U0Ia{}KRe&$vR*f@+U zPtiQ@LCP&copEhKr_AGiUG)7U3K1W)WZBRXCAqvt2vTT3*eml_)Ak!vU+#hm!Pb@z z$Hjn04z_DI)}`pm2~y5b6S(2Qaq-v4wzxkt+(Zrhn4!Ol^q->jZ5s;6dT@-3KmanR z8xUT?!JiGrXSaDKHT9Iw595#A1Nw9@6UCwx8msx9Lr(GUH-xzSSWaR>g1lQM-#$w! 
ziTYoHk?-dtnLlq_=Zae`X_K|D!kQ!j71 zopSeL{&0{6*q`FT{h}z4m!BUd`R4b?h&R|)1RnOp%*@l67`^;M`w8l;FDwDHsR+xy=Xc2 z^H$nH5+g0EIJuai-2Ve~-CTVV68AXts3VQs=$RBdT$hmI>-^LMopd@8VS9 zcUYKEy-Z02ASdvJSE8ru%5YtzGa4|9LRFXLEok015d7Qw`E$iZNPMH7unvhCYE?ka zg$lKI_G#t!?_UX;H5c*94qt2ecbd1p?6#`*?jwR=*vxl4UUeE zlW3xPHj3CxT$p(zbwuv{w72Eh4=>~_FT1-6$X>V{`FHa2d=3i_cN`XwR+fTI256_= z_|Tp`OmteQ*i}#hhBC9WN$wFc(Wu74rQlA}Bjo<_*v;q%JX>1~>FV6u-ri1r9Ux&X zbtZ5J?k!#aU0NCsOj}ZhEt~eel6|JU6rU$+GQV)QKrclC#;Vh58I}5d<JhA1oxy>B!g z&W+s8i(c7~O80`bOK)YWg42MeqntD|0qH&L%RUT3OojZ2ywY#_Bc5#Fh^v(Wkw6nGG zq{1!HC~H_;rKpDPoo83w+mxbNEKva?(CHM%{x{HYMgOgshkSX^loT>R$%bs|rb*p%d_1y&e z4^Fp7aKge=yQSxyNq$ZI5Aj=9SL*hE1tvAoVvTr@98ef6oA*;WuoBrqn<~ z{2TQ8Y~Cg%11cLHSpC#Yz# z!Co*N##vJjknJQ|d`7oi|A?+wR@h7KJ=UOXG>%oET?>dCfXMW8UL?1$=>a+Tcm6XJ zq9XugqltEXrp4gP!l4L{d=!NyakqRWC8cQ5`5};!ze8H~!;OhVj}=`WIlJTFnT@wX zg?HyA+fMgEveC*PHrWC>O5gA+;EzZ^P1BB!E0G^qPh_v_DG4+yUf27AYNofR#|314 zYXzVvcKc7IK({k6i~M}Yf2+TW8-g|B;u}#FP&B(?Gpx_zu15SxI)4U0zl8E_nPf;b z*!lG!Qv0cjL+b#4cLuR6pT`oN=m*88@e4 zIwP`ETS`KwuRgsw4PiDQS|1RT7!{k^_v_to{n=x8AdeA*ur*Pjr>6&=n@7~53m0_n z^a`4xgTXv>Fv$y0UmI1wXhT%6IrR|fTsImnuY6QV|ERx(+x(#2AqwoRXmf{q)bHL+ z-x~qQr|WM}%s&XjSC0Cp>X=&$(DD`fN2x~fod$<2;ddrIJNUn3mmlWC3$U;>oNU<% z)b2R*@y(QYF$l%LEJZFJs&bZD)CAWtYzO>kH067zrMS*zA=n%Bv!F1jTy~5D>|#?&I=#e!epGC&`I+;?f&rQJE&y6lHYjZb zQX1v;1d_i1b0Lp#!r7I4#y^D*-M}(h4Tr}_%7o6=Hb*EO(uRJdL49yZcx$Y>#-Mrf zPMz6}4bQL*L|13zPzM_CPJiitW47T12zkE{)m75hC{AVgKXdAW!_U&_q!XXGld=LT zS_hr^j`4NN_+KyhNoW8+idPn= z&GW3KouZ?JJ>58jtjA6ZnY8CS9zvf{sbvg$O}LCR+SuAJ2S?3_*+xbLo;=H;!O%qL z4+G7gA_mT8Sq$!h)ShI1aOppd>C|DRV-gbndP8pNl_uNosFm00eecJO52P0$0T4U! 
z%!PBTtXCeS-Vx*;9UhKv%72uwSq44%Juf$4^InvdeJPbU85|NaOP{a$4ozxe_Jy2E z8lPrnsjZuZ1sT{#c!D6_GnGpku2ydGw>LUDFIy2U%_>jdJ$z_8oGF(82BvuDm7NZY zIw{CENMK10Sq>7Oz(p85JxOT8CmPXKq53cLC}$aAk~ z+Y^EF_aoZ3-|4DS0yRQ^Bm5HA(#*)*)YKlTlWz|J%E_dj)4N5h*7@#Lbl0^9Ga^dZ zGj!PUV#R4QGZq^+Zyug~nEIj8wp3{T?>BfQB`R#QjMt^QCH|B;z5aI7)pCz-*qR_- zTdB7vtL3;6$+JFI=H6M$XDkr=o8nFV%#y9donkhj7k@qZ=}XbFJAT|Jk0ad5;ANWT zaV$ggOM#C4K3$P;4Csi*o~hC+?Ns!_2mY;0`})mz`1+Q?Tw@l(BBp^rEq;0Y`b6Ka zLvZ?1qO?6NC@8pQVqgg^;u(Oi7WSasi9bH3BI>}?GP2cv;sttg`T1x1OdRG?ev~)S zM@P3`hyqSCt6sfeqx! zl#zjf*e3t8E&f#*L1VZ5s}_O0*?6cF&193D!SQCEXFwxyccLYBPoSz0Cx%%^V=)O= z@juJF%uGyqv4)@C`T5y5<2FS)`4A7M~tZAmoVQbPhEtR8I$A7bsEe>1&K z0Xw^Q`$Q@N z{n!U=_iblB>F{O9Nu0{jsrE1SARBdwh=|B6$K}OyHF^Suj?Ux$RsBoC5{gdA`NeNq z+uHht#y@2{0z%#P_p^m7H0g)2T=0Joc6)E=AgZe^-Fo#!6E#~YWky4b$FO>MI58tD z`4X;d-d{<^D&Urx$&NmCdRv{f$}XrPF->#jA}Z-eOiOOQqK}hf8NO(VQ~-^aW0#X)ImEDV?DhS*-bp9>3(K`r7LRGurtDJ zm7??GrPx`ejBHA#RyVFHc*LHw+zcXpx}t0}0Rq)hb3PyyRvmX65)lPdMNmmDjAmmn zAxS;Y6IJa{q=Pu7X^3a6CRO)3#qFwaLXF`OB>9fa`Vd;#`6ejHlX%^sZ7bP7aGoDi zlcBzM`Er?Xh)aO;W?tSMvh;yWoF21JJ~-UebYISd*B+j;_1LOavw0)|h8I01gH>l^ zj4Oj+CvX{K*1PrD2x>*+;_8y9pZq+%rKP)%&%TZW6kj3B%yzyJf}cX@*M_)?c8^x# z2Z75er?scz>q%`xqC8gjA{i>0-louaq+yP~cI)GmvZ-ur)ZHK1=Fh=oU^ zKor)Nw=tVNmrCQO`c-gQ!W}aw&qr37{RXrop%V0ql1aXC4Y>ESzl+tWxd|%_Y~({tw$A zyfei%YS3hOIJuw{?Zk+0G~>W>NZ-6k zN&VEJ;@0OGAGB+Oj|e=x(5uSyrh@p=vGE8v*0p;ET)Uo>Vq1d{+@zsLlgWPo|`d2B2z#qF&ra71XKPEmIF3h9>nUxcl)I@{9F`w;)mnwlCi zezCT;Y!X0UhxVeFwDc~R+~1F{_VwGf!@g+Id3GjG=!4x_L%;eF3dLk~A4HaWBOHCi zQA5N;;&cHA-`c zIFr%7tPP+UxKtX*0zpUdesBhWkXXQo;yoD3;1wbuyp%Sd&9VT{Fy?mLqob>5TT2cnxq4ht z_`@*n@9v%^Yi+uOVXFV+%kr%h52ns&1Wo;l%J!1g7L};8w8>gY3xZ&O#A$6&&*fOm z&)%>H>Mu`bOB-i}zIj_wwY)N*@>YHq7lFrIC$;SK+PW@;+Mf+yjJ_1yCNoEE2|%6U_ccAsv;!;_q} z{I}*PGnJD9iRS>o7U?8}0bQ~sGXSIbTl~70V9#&af7i0#-tkb~zP-Kng5n+V19rc( zy#>cTzArR?CZaST0u@>%S8m;+Z9GYR(_f>!7lPsr{F-+_bgIaYdaM7*`2+lENMYKj z_~Dk2&-B~Auw%&a2{dCetw9hoJe=ji*U&rJJ6rowsCV(*6x|`gf6v`nozXLvhJQQP zVNq6j{&(R9nnw2*_$ zA>AM0D_Vqn?K;&Kqb4a$_D( 
z*ly+u&t7XdQ876ZVMfo@cb1)rDUOWJe*Z~U28%#L6-W!>hH4S=tfG$P_;E>jK8pHe z4UOK5X?*B~v;Fwq&N_cVnqM_8+Y<5hG%+zzeff+l7S-dmVZ%pPSP+JV|K{XRf7)+Z z&i-<`bvpG!9}ORExwE$0lIkzb?~dB9%JteXvnP|f417W9I1%kJWg-R~18dgI*uD=d zi_oWoEH-Y@=C)dy44!xPec^fX;#7_;eG8?cc#VE9Joo;Y;cHX#gBO{!`)C{8X zS0Mu1?)miTlXAL2KsncMd5gDaSiC--l#Biiv;gPvuEtf+!v$Ea)7RMZ0L+05&)PU5 zOMgyH{jcRy@NX4|Aka@O3ADd{UHp^Qw;Qr27>8A^v>;RN zcs-&>dv<8<{uWgCZ;(fJ@7;SW;m#+mP2gmAQ{7~cdUqU!(-w2EMC>lC;Cqdf`O#rN zzMQm^_5G&Icg}_;0!mVzJl6*C(sZPmJ8az&lowOD4)W}VXeqIc4hTGCbXx4CtyxM* z;R?{nm`XPX2qvBGi2Cat)$Qd}lH0b!XHk>e+0$P{S{(Twas*x+_y8r%_Klbd_b5GB5InEnul?}rE9w*D9x%=wtUE||V5%LG;^)_(a zVhD;8(BwWkZ~|#725t}Vd|kBX72Ft!?BI>8DsMv4YWm>RK>wA?#xa$~+HDSPI?p~y zsav%?VIb5K3p+|&UB2q{LR%Y(a zA#IE<$@j109KoZCtKHBlZTkA<%kVSu4%krBJie)X*p`6)l1u>0VCdyK2^`>z}z4kgW% z@z~lJ6Kw+e1wW^yNq~sWuwlYedDP`ud*>@Tw2&3Pm|9u6y?b{kJUslVB|4r4{f4I? zpV~4lC6T!|C2DuHdm@CloYjNT2V>NG#Kgs2E#aiJ`^AivyEm~o_Dc=du6h6d$&z<4 zfTLH9v>L7?Nc_I+O|de?cP)98WEYKK@4(2nUX9B;lJwp?OnJ!IduC^5e#Jlr@fu`9 zofuwSWTkG29aELxOB#~c;<2h(sg>hbrKA)OZoAX*@HH0H*2adU)gm)^&9@p(JEK46 z3Rx@+ISkU_VF*zta;*^0pAS_ASSTrL*S4d-u@!)JuiBlYI~hCSo39ZNdu^r#x?@Ee zfKzGzuVo(wpHt8AgjrA-z5}aSz&Kmdp-;cJ;|9fnjnnQq4Km z$thz{jxwEAg-}LbC#PbDBCb|B14Z}J=nr-1YZ$tvgA7gE_@FEQ2=4jnh=%H?iDxM{{nT#5s_!vJTMMsC$smX?wj^m+&)sM&#yPyDpU3yl`&j*(7L^_PFAX|ft9e5D=6d~_!H`tYBenLY| z_rP@+#U=Vva&NdstvV%BgA-H%ze6a9yEv)$36xcZx<>d`Od)Q>P;8rzhA<0v1WVp@WUnP~rpt7<1t1bE7J zGv0Bbq>`vW6>1t@#Z#!7o=}zyfLwCTY#c(0T>u77PumTwk9AUt*7;aK1*UhQG1qbf z<&Is6_LJS4(0(>bar}EDvM}Dj5-p?(NuMCNy%eI8Sg!~ky13zX)6r-;^>Z<| zL>DEF-_HhLbeL7XiKKvGS~e9uP0hy$#@?(OYCgot7wqTv2=t|GU|E6a1PB}3wReF$ z_g@aopF49VRr=>*k}qP;>oqZy-7S3L5U>b`3fh2TX_bf#eV^Z|YHA8&M_?%WP%EC1 z;)REQTL~#Lz;Yj({4U;V;KZ-sQr?Q3yK zB{2;D`@5L^OXXTxTErNWo_x|dp9pyvwyMt?^eY;-ueI)9YWD+#i7E=C`UkTyRPu^7%iw28h?(ysnuf7FsvKJg$q-kux3Sr4BT^5P6;wW;5B2vO z;Lo&bADmi~e}B-_WFz}TAR%EaNC{a-Ca~2A{@YDU_A!_Xa=P@z9DSR?`x?9C#t*zq zPENjdW*YBYT)O!Wof%2<6B~#**;~l6R`0zdC3II!K_MYJGy~7Hu6bWn)4qAL;4%sU 
zq(lFhL05|!G(&6rZ-CETI+%LCph*l?i(L)y&Zs9>?m$Wvu$@AT%jEkqho2?~ne5w* z?s1{}Zf1#LC^`SR54e)D$tf2sH{O0QlRK+b;x0_}H~>CF^k;`G3hCkNfID{hPhzm6`?$Sj8>D;fGib!@wt)pl4%LbhOsW!sM1MTfUoTA=`h0aQt!7WFFjTuZVjEHUiv77DKoF zu;)-Zyc%uo>8}*2stn36_Egp?n-IE&2Q2lx6_>5Rp z^b$(cpecipv#VslHkMZocm5_K0Mysl3pZ(_q0biKkp^|Js`p?k0#n}33bQaWy`4(5 z7$5iw*+z1hR5Fl#3|ZGdGaDa+=&wnl=ZkaiMzzJMUKtE%I|Q_3nd{X|;el*(bUCg| z>_4D&62s&`gsDRjUgW3+GVNxGqI`BqeiH_i7G~;hNe=TFw<|L;Kisr&<6TULuu-Yc zPJ-aA0HgjlMq1mvOBZGwj3d#!2KRD@eVw^#xhWdg1gey%-vPEqwC~C zD>O9e7@ApFShUtw7R8c{dpB?JpG}zox!RL>4y^#l9Er1&f8-~W3Y{c~+2nCHn8y$d-qi+O;Px)P63^SHV!|^Suua4rQ3n5-h4hstl^US^G z`X-p1GF%tq8=DaW1`RjeO#Ye;4tgQxfuBLV*GGpzn7?8dK7&R#qyMX0l$=$Hy;q_M{Q`(Ad#bLZoy zacBT+fm<+FQ%H;kXAWoaA?uMzAmrjxD&qqK%CRpGA)wx#vra0X$Op{%2FrA$+=h<- z8)A*R7z)n}Mnkv5>n^T%J05ozcf(}@0vYZ1H0e@(#`QY9htt1&+5fuNEbH2Mbx{)H zYXM_TlEY+t8@>f?1eFlgW_mEx|AXo@iI~Fv3dH9lbi&~azEHkipO=Fp>B7qnT%G_- zZ%cEdUhrJ*M(SydU)KaM$7S3u5Kv`{fTrt`Bh_*_5^fS=owmDq3iR&@kj@{Jnuegg z^k_E^WL<{A_>wPuVy&M7O_nM^fmJLINBNIkO=2MHyL_al&>^@HEFfbj-i#sK)9Seg zAe`+EFaN>#!ss&e?43HkK%?&DC zeEVTk`-AA0t5lf4iA&}*U?bQ{&|3|1oW*GCJ{U{HNgtN~I${~VkWKU=c29xB|3Eq` z5)VYJ>K_!a1gUU^uSXNzucB;n?g*L}#G!_nM%U00)Dg5O_W+A|;}9l_+lfzTXlQ2b z-0R^S!(k*_scXA?dbrUYPJ7U{pNvNwy!R1ndT;Sz8>VqcEa3)REip)03ikk93>$b1 z>bF3!6~hw`6?ODP6m6wmz4nH~hx9A#j{Q9>biJJ14TMd?0E(b&HU5FY<$#a{YY@K) zM~M-h0-1y}BKh<25Tx_^%0a_xI8zAcj|8$7iePXUnU(-UH`M%rj4Bw*fc1Hs*Sx0m z;*E-)ZoLBMB|3i03D;4~kX1fmN`f2@k)NNB7Q{}N{*MapfCXGqU!N5Z)O2a)etWj% z_5Np4gnZ+wBt^cX>FEy6QVb6z@r1s5Me``3s||SJE=l&x;h3t3k)GXoHVs^i49H!% za)nIyxkDe)BKGO(59<;V68r)KJs`P{bmY(y_mkrq7&uN-cwJt+9Gpx$gFHZ_F>)#p zF%d%;r%1Icahz>XBJaoca>bL0z&ve{?UgWT5>a3WMUZq%aTbHH%>>0ZS21mI7j)hZ zpLaS8D1I6Ht0v#k9x`iTaq$MYG>S~_z=w=!(}$DYl16P`Zq^^Dgkp`);Ga4nn+6J_ z!oyn*!H;-<(~Mg7z*o2Lxa^~Z(v6$AM^gBRQ%47|cQ^ATA_DGe6hU_q2dV6e{w-sy zLpmcaB@(D(lhDSuZ4DnO3E|%JmH0RzIa(UuO(c^^#Ki?=rw~UUz!dNpN;$E&WBIoi z+<_8+hSy0l=0_aR{ocVWw?Y%Fo~z0k;JgK7TNkt^)crF_5{Jq45wZ3HXT!7@#MNX& zqD;iz4Xw0pzk+&=mPu$$YAv}eg3H`UWGHDK%>7}$a)I&mQ> 
zTVpCntU+IlmME-^^+>bda7MY{54+QchgTrAGhttl(JhioVKj){>MXGVRL zlB<%3dyX`#kn;bV$^Aws4(1q%Jr5m%OK`ipHevXB5~8MR=EHTUU&KsES_MAKTqWs# zSP4?SWdxxO5brb@A;ax$tJ?PJ<;#b?<@<2h#8q87@JfT&8yOcRBx%S2eUjcdV}-0K zWUBZyv3yJfaw)yT$sdQ09C_jA_aWVyxZH_s?o;eO+)d1tBwtyhzULo;Do@Jjk>c z@Zo>C@Xz=-HOM?t72X_QUx%f@SO*z-nr(d#v4?mIYI4cER58djTAmor{CvY|!&i77 za46O>I;Jx?ED1v>bkA=C$ry)g&uVb=;diwXMMg)!$U z2FID*cy6E9$0g^JM~3>pwcrRTSLS`*t@IDDg?RN-bKnaR#Gy_O*v`ZnNQ{U<#!P&C z`-y3@ujkbd9UN{afXav~iJTRVdLRmq{dICI2dUb>3>U#UR>qzGZSb4T2?`BOn#{$d zPW-${Qr1ay88H~@3!I3<7Jma;k-&tn8B(_2+qbb__yu3g9#N*;HK(`zo1jfx&t`fEnU_vT)pogUE2nTjZ$zS{>Oz zf#iwFg5BVY+B0_j^+=CKUHKh}uJ2(a%VM;Z4BcDzf`0{Te3GrlT*n5D%67MtGz(fq2R3e$ucU zr?=_cyVng8PDir^6YmR?Weor64#%?}C;Wf3eFt35Yxw@lF++rmsAvx&w2M%PjI^|* zQyHbCsX?KXNGS~|QluU2DYH^(&`@a@?M1Z5`@f#oq0DoBzt8Xge$MAu_4a+g-{-lX z`@XL0zV7{MaC>pi%a9^2XI>%@WdgFAPV0NIDIXjd(80T7nPgN|C@LzVLG@10mKYc@ z@O`?#L_7@8d*F&s8Ij?GGYvu2+a?Wk7Mj$p?_XvW{M(Vk<`A4AXuF+O07XTZ*w)_Q3gmr~#Q7v7mD4__V2YtJOZ3X%CJ4GT&mu(=L?D`M_v;q5L!_J7G#P>5b5|WJeDbb6 z`#WTrMf)p;4Z|PKhTU<6j~U`#>H7M(`1r?&mZSkzGhg_rSkcnb7_B8pnv}0f2x{~u zs&Cqarn^hvLJ#*J(c-1a9+six?s&WxfD>ZZTdFY0 zvXCGIdzzykQqOg@U0mGUhVtfUDIkf9gjMG{wP1;B-G`XbIE>B9WMmIufDfF2prdsX zP_anOKHj3)#sWkINmw?N=1j>Y=FBtoMl5?;UQR_7%JV@+#G~whJ^WxMr|lLI5kZpB zU~|kH@!})X;@rXZbM(xvXdY=EmVbyYBN9i!Ny;ix8m=Jveym!l0nL*6_D*_;4w2=Q z_ZO_@3{K&vYubKy z={jL)UU`z`!LzC!Ff?g9C#O7g6uAxE6R)zy@YpA{LXJ2YAfv>lO`A+U)Nh0(fl8^@ z9xv4C!8mAe^;Fz=<5PhXwmPxU0xM zZxF7?eT7eXdxs@x5;66J!>6$p-Xc+gJF8qw)*&KCK(oz91SA8yez{bBwKxu-!8&AX z8$)Kbh@bab^Z);q-4kI*L&(00-R=LpgPqLD;ZcmNC^ zvDL_)l#Bu%%#S-(@!_K>TV-T@|CPT z+9jo979u%pQK<9WA!4NwgIV#JWPK?dN)MTwO5MvjVP$jMMM#7AFl@ z0}+2hNcWZr2+-p;ZYHj>IK9z4v&Q(-M%NogC@_es3hkyDFc?G@2CGd6)N7>Z1VOSW zYQlD8&k*=Cd@%@mgz|@T`xR+4+?SwWRp@FAZpIQL!t}dHbaY9q2{yt_n6qe+3l3t4 zgK-}oLl%J(s<1C{Vv)5!P6py20^!6~fHUOdAafHH6(w{R>F^=Zu5|S(Uf$_A?&mSt zb8>UfcziV-O%vFM!D3g>Q{H8BTo%TuhA>WQca-Q(fLS;=IUmAkhxj@nHH=V+b|r$c ztNjFL18;W*@*hxV6LXmaDxW{!DXJ1YoD29B>zSZ^@YP_4X%WNllhbk>W`22((SQpklSegXDe%$Bu*EprC4qDWtlrk6M^}&qaQ!+x9_)Ms 
zD=br*gH%$TpWhq)&mQHXMhr_v$-AM)!8(ve<+N#<|4HG_e1XBx*h5ATB018!NHVDhRr2~lI= zO7yiko*y&s4&HQ;D9+gU_QIC=^Qj_4X+m|Eq#^h-MQq3S{qxeBL^JqSxNHg~FX@95 zHGMw~w<5uK304AH5eZfp+(Uo_;4gxaa$mnuMrJ@yzm<+SyDYAPa*#B^1tDEQbCeAB z*q%`N#UMo3ar%M#O|k70AZQNmK#8ppAVJU>`}W=4xeVX^<)xsL4O)hQfl&g!{*nvMO-6HN?uuHJX%)zmQEkaZxK%k9o`G0+5xD+qH zcZl}ZyhL7roccJ7y8DNQ-XSiWfQuC96_0>`i%79iA`u-bBAG0>Nn()#ZPCM0WjN4% zeM#lcr*nE#>b>miUwrxX#Z+g(abHFL^8c#``@J<5lehU3^J|#>5_2pVhzLPMIR2Ge z0C#3CvZ1l<6;;o2kkX7&-_W4{)_>08+3x~wIUL`$_kk!k?|}d>rPbwfbc>oeCSNr3 zoR}?kbuCGqzkB`49DZqbfQzdnG+2tNr*oUfzFlqt-u=wIZ|^>Tb>XvsXaC%}mXE!? zTlDleU^RZct$d}Ts`L1(b}5+o>s!fajW%AFix<~f2)SHz9lLV8w;bQ1wy#%CessZN zl;BG5;ZTcS)x6TOWo!7d#c0>`&nFH)5amU_@1IY+=FtC{%e^jbPb|I$E6eamX+85V zxAWW2ez=czxBvOsdWCubr*vAkdwf1&Ys(JQ5nv_~9G~#W&EMbqbC3OgWy9~90YuXA z+&TX=CYdcyzdXsWpNsLL1$mNRe{>@W2z=cnWG`HJsMTQ3V#a@20sm(M1IS`^ak=;$ zuTte@YsAMcimbk$p2ZsUBdxaJ+-8I3ZHC2*i8m^c$NbW=<=^eOwU2cQel^iRc0WXe z$OGlIX0e6*9Weemd(vf#f4v{%N1J7frJoPW^~!GujhT44MDW{!pxwC1x`E?&h?s8C zSbU{j$^Uvd!k0+|x{*+J{Fg<7V>&`cvMMc`Q3{a*7>@RD$YA_&Hn)H!y#Kk znD}x3{P^Jt@W6PEf<{M?KMThxWL1k`2kPSLN_Y+6`t@($PMke^HcIv&xW)5mBt?~X z16C@AaY1J59+(+urkTq%b+CewL6ut%@{jO)5O~6-kp9@QW27}4nZyf2s&skNU3B0O z6MkT3F7EEmkg|a%EQaCO=hm_rNO@nTYqBYt$hl0U$oQDAJHFH9A~(A9D7c2dt6b+S zX86ORkhkO~mb*Y;Zucf4LxGJrA%IYzk;a(SM_zEiOknbY1)(h*Sw}0^pq*h7a+Kg6 zWUY^31Af=G&lnCEOF()#goGHMCy3%~WScnzEpJ1g39&-MpLJ1S5Jnu#mhoNP?_=2u zMz0!mlFI;EkO9Q>roP?*C?9EZ0`3Y`?^Y;D3CxOczd|EvMMXDCSt8WYOopTDm0dkU zD539}x9rtH$~z0HwzD9jiCyU2xf3Y_A}0M^AYdNnZ68uS1=lf*qo6|-K%pSRB7_EE zqd@eaF9HYBX_Sbl?dG7Ef=PG!t{%?uE01l>e6DnFGWY_>u1Uul z6r>Hy1cis&lM}Y2-L~jlGWV(=@~u}@RX?8jKSf7p!6N4XGeh{_EO>RwomqlwI14wD zpqJT3`fC;s_I0^K5e{BxR0nO?d(izY9XEI zr(!u?hVV1)Ic}pP6?N3(?CdRuR|&jeb1J{V^mT^y+6o&|UyQvL)7e@!DIj;mQ(|9% zTPs!3cMCpOU~@^$fJFl($R4oWfbDLg(=3(CFJ9`@@VmgePe5bt)mUWD6bI~s^M;%BB**1*KjycB zmC?@Fv?w6oU`0b1&4|i^Lcx}P0y5n6q&=>C{lNQqiLt3EDF>iLAVVBr46x1zVs!SI zBJ|DWA+HMy>9H3dA$CyoI77X?YCv%_jK&)6`y7zxyK2>T%Xy3{m!2J)fYe@A164g` 
zzsKH^>_487>ICls4PBD96T1!Yfj!1}2xWRdj#{xp$Y=v8C=9_|(>9D}h# zQ;pkzZD7aT1bjw3N`U^%0QFBid;y0MoDqB_Wm%+i53xf;Lmh$5Zbt#DE{TKOeOJ%( z)Iih4d2KV5Lj^Fes02=&(bWVz)(-?P=McqVd-!lzRT`27N^YydjT$~)Zcr64>s(BM zC0B&7PFG_iSArhh^y#NH1yn0TK~$O?KR$s1EHy%E3Qi|%q)ixX!NDQZA#5$h)%S`T z>Gis5!tqOLJr;5?b4cM$(F;^h7AhV%#^AVOCTazJ5Y}KqB<0Y|B4iNfm@K^>Q&WHGTU=J9*iUwr>R+y8PlKDtK70iLrkD z1}=-^#+^Az^Ru{F6h|+~X$4#x5)6N=%cq&6$0KiP_@=RO3R?9j4x4UD1Ii@;D-Hql zJHR7(`d44Wd)$^dD9M$cV;U@WqQYMGa_~SPT}_ zNmF1e)COd#6wv@CDReFu@Xfc_ZrRjs9UbGF3a3Pm_VPquOJn_3q)TU>9bF@yYEk>v zD>q<~P4mLwM=pN|x?Jg>i24flU)*GF7f=Bbi<%MZuIHO!+!SC9LRnKDfZS0^;#CNC zJS+xJ^*gLLfg`-3wkEBDSNGq4$JtEFIyItxXRuL)OF`$d)COS%1s}7~9l^ol3h$pH z{tQLc2LT}4Zh{Pc)Q$N5Nh|c)wQC=!Xms$d!0-q#2`g4OJ3BY2heFeZ80XkPSo5P^ zW`!SJT&4xK`kP5u=wr&lxYyztUNr+()_xpDys-221s^?JhvTS2SZp3STc0NuV4=RD zVrdYR?S8&Ik|h(9yH$Pj7=;C0unbA@x307$J>uvM)wa=R9kFL3I(Sn@e?W$hQcJZ*Q2l(8ZWd~Dej9=i{~QU50=&7B7<>~QXC-q zA{4N0D)NJ;$Y&=|fR55Ee>y<35DUT?`9uX-Y^I@x76^zFIadq~3>S3yCamJSCqa%2T6gP3>%)+1F6oztF8Umb6zF1D zkmqj(&ASJk5u{SU;skO+@esPxk7$|bGY+andaO57-m(c6sVj#!NJ%k6SRd?oZw)|g zT>C6Ug)_Hr>&G-W1N>gGLjGMFa40gX2>pmriYK!MTlvAv6QMF`%0e20=GUM&Eqvg+ zXujL0)-Y-Tv*-l~Zch7YDAby^V~BNKY%fa)U}iUDf%N6hfY z?cl{5m))HsWQ7hhhJbI^cJ%9Ou`}P+B?Z_5f z+!6ygW84lx_1(a}Sz%%|^ z?06SI5KlD%YXA%3CYT2Dbloxd>&No_qSQAo;mZZ&{%_}j{sZr=EG(hHduh;K(Eyo$ zMQ)x≫gTEde-k|6EP{8jj}ceyw}sfmHAL@W_pNN~cesMiuZQKK#=$^?!zI{yYfv z5~2oqe*cp?36~R=yCXS4nB0Ow=vaP4`F_kYL@$alFj^zyyqmX8h=AeS1$;LP_+I+~ z1OBNVq+#Nz+0GAz))x>=JOaxKvlRXQ!}onxUjN5=vyo?BtKqD{wq-z#&)yL2 zM$mJA9I1>Bv3r9D?JV>eo<)uTyWkhEl)ZQ^lvn77s z#IG4A+SCvBa>jrFklql$06K;#g!l%h`T!Lk5@lHZ;S?f(7vW`bciw@RdNdRg9DLX0 z&88^L)Vc6Ae+a#U4v?_s&EL~a!J7($o^?W&6b6Qh4@kfW+X;bI1O)6495|^ z^j=6N3H`&0CcbZq^6P+IWLvVcX7~NF}66nQ~p~0@M+tTA!?}#m2X3 zsh92>zomvWpct6|W)1%vdLH$_vj*ntGQBP>m|Lrq{v1N2kAN8xW5M{7sfb7_)~Yj~ z4?Aose`-dKUQgCaNF$K?VZHnt2u?w zkh{&O#RDmZ0fo#PftmY!2-_W}&St5HpQkJW3Su`= zv;x!#&k%t}2l^2l@c98`&4|d8QIbjFka@(p`~~f>19E;-XOV)!IF|Is6O!x-0tX{# zV;DR-z_6fhLJJg;NFXfy5;qvZ86nj&+@hTJv4VVqn}f*}r`Xg_zo5 
zxDu)wh$Bw_QWRQ|96TCtecp7V*8*o z=v8qUdobIIhcS*cdJ}p2kNoP7b+^iaXPuC2!t)H3$@1OLIv+fczB!Ye6s*%NIlp9y zR#qba^DHsvsAmn1TMD+7ye4cFMQEw@?6o9s;zOkXQUAYT0xAa{`!%ToTc$j~#XS*P z&Ss|TeVOC{ivu+U{`H#Tn=40f?2>{Fa$=Ypi2R__aLIjg@kP8XoU)m;KlSxh4BG4h zzl37(2~boFY@3BV0^L(g5&q-i4SuT^deO5Tb8+ugr6w}cO(x>4MT`9Im;!No)70c! zRQ3I%yqvnWYIP&%ozJ^p+GtU+Z|Tu+dKz%dy@((9lY@_Vq|)!e%QdbOHlt?|N&&W# zxwVFi@exp-l8J7Fe}lD1V&>EHE~fLEnh%m2q6J9s-zpFJ(A>(7Di^qkQZTLrs3Vzt zHu>HTz9&Bt{(^>|Ms(%1CiDIMg{dB&8k*`~xn9${?pW;6rlOQdOR2_g>@&j0j z5^R-BF^1e7vQ?tX{ebC|K|nKYDh7bkD zDgOC5#KK=>Y%FA=+lLR*sMHBpN-KIzzmF<2A0m}Rd&x#FuL&feVnN;PO&s1MP*F_U z_JCf7pIiyV;EVW%EJ@g|&BYY3=`goG?c~(EtD68%XmNML>F!bvTNHMv5D|(=N3S5p zH4!i3ra=LXq?x4I9)2^2M{uN|2Xif+&k|J7*)WnrunJSWN*(EAg%SQGNEuzqk(?pz zop@*Wjcl|B5N~j$*g`Rn?=C;Ae(}1>S!nTM+a1feg=C%pMHKGaSpy9qM zE^tj}(6z>n!viOf;PH~&R}zLTU`;5F!gKaxTA7$X5{ zmdqYriV;6}hN&5yN^9ZV8eG5=%x5I*n$9)Y-|C?I7nl728O{f&L3P2GGv#ye%>*Mt!yua8ywM>c z;ZJ&uHBPA($pRd>Bd`bN;kc)fXy8N|PmeBy;}BZVz&%c&pr_d#8Ggh{Gqk=xTHRDz zTg$ikdQ&O(HXMm46e12sG>fP)Mwg49m>Cy^jHxGlCTFaf|Jp;BJWODaO$YdWI@oUS z4wxmoBO95%`3^*y%#fLv>{GS)F7)h(?elzPM_mMj%GGbNnJZoO;$^ zilXY@&>&qP$;cf!hc^fcDs%`#^*?WP0~lQ#&u$Igm{&@C417?e`5Rs(XE-DWLO#4+ z&t%TG1av)b0fGaO#Z-^7zcK*!GUZQ-yTn-yjLgY#y@Jkr5MdSlt7(@2nHBJ;}l=W3Y@)RMoIO8yhUpT@27|Mt)UeePJW!Dh+S2burfm(p_kF6+2&M`*znn zVX?l3j*eMyXo=Q`^T(d%0Wxd^4_LEp<9Ej(p|8H9gthdqq1sr6mNjXW*=<-H4eG{V zVw|V|`?hrmLw%rFh%Jrj1;ot;aJ50*?(g6Es7mN-YiL<8U5FIU*v@Uk z_pyPN+*9#K=|b=f5}dFph(Q)54465Zt8T@Opw-fm3L*2O@^aydlkb;$N+Ke{8HZt( zFS@!3)f?8O5L56y*V|Sa9&#_H>YS`!4jl3W9tU%3?z6^lnWJV40 zX`4)KpwWnHTOtBqp^vBpw$0JiWR?J#S`Cqi@_yT)-slhZ=mQGuRYUia_Zm~KbTgmu z-Kpq8VDPMgnW}nQgyvKu{ez0Cp#o_2BS>g;HChV4u;rk9 z-eta!>q|Gh0B$sDP|W^(LrweKs*_Cr-t#cj=oHCHMYN62eId>w+x5$efe zrte+y#8DSf)Z}`RnI~F6Q2^xny`7TMl{{g*C0vizP?oJ*r)2b~7LY8WB*g)R9M-6uU&n2)LL*MCLK^9D zKoxNEf8}vo)hu@Jbl7pG)yl*8h#O>S6iUwfeWF}h%9Yu3a`d>}w;BlA>ZI~4op&Ik zi+Arf=VHOQ7)i$TyZa{DPU;1}=*e@=k4jGjF3BM*ff4gm=TH-t+cm z#GSfGFsHn{TkH{$4gZk5YUc+aqnE_sdvqwkzTz?56=)nTx+>9XplWSjxT>V8O8w5s 
zgBuYP&vqT-;(n)7rQ^fV4V5lf?0I}M$k0-7Fk7?qKUt?7Td zT0zI__Xnn6l{=q3yLn@KZ4gifV)NII8Mx9ahUd_W{1C=t2{qVy#2JS|<29R$Z))c6 z+O-SWl8|Eq2u=bBJTk@cpJvr1uwsLCx6GaEuP|YDPGiL=Xd^OQ^8Ef-Ky#j6$bAs$ zWI+y?uKWbC(nEy62^7+Nh0NRZiB zk?(G2DPqGE522=ByhAajm!7Fge2Do-y2$fkwfG3hk<Ok7h!}c ze@}9hVQS^4TYt6MN16IWYC325$SLhZK^-2beJzY%#gw(n4bebUD z@w&V*55W$7?F>E$gS<9UP4_S!y17(WoQ-W$e-^~jd6LtStD0C^F7UyCXtR}y0=~pC z;E`#e&Wo}#p+n5((UECuzRUewE|T?LxmmeQTqY?s_v(pLh;+?~$iyE%Vryep0onjh zgP4ZNlP6E7#;V1KRvDRc`2akQRK5BOy7;?ryI2Dkj}%#uK1odwQOmT_x@N#7GlI7=&gkZ-s^$tX#WnnY7=# z>hf}x(#L_{Cws35@`Qmy50Ugk?DG)k>W74OF@JVM-A+E@BPV*X?A?%Qh!{}%N1zdL!I{o;nd%Ehhw@t!6nuH6h z*w+ZfCaQu76jFoWge^11bd1epXdA@?XDg;H99dxo{*qW1&{8sNXw`Hrcsj1e1T38a zWYml{#{4tTnugNHQ6H6{+YmS3OsHlq8y_(uNrg9VH5_FA3&KdjRNU1h?>zx~8RAQT z?oQ&Ffj^$ysIoZ(oM8waC>hOHE?$#mPeyD%MVjSg0g2saY)a)M3k!><81LxVKpp9_ zhc~_hZ5T^IpajjAqWn3CaYL;`1%Atisc5-lPq+jczz5JXT0(NH5kP>Gl(^=~xHRe6jVr2Bg- zBfsmsop@qvuF7|reyP`V!VJz6-hmhvQ4s6tV6`lbNK6E{zz*OE^A;cU#OQEp^GS$M zA0uiWfQ@3(YnYtbfBIEG;` zvKX>~IrHZ`*4H0+lZbqTOg&_3^XmYv+ut5vMB1VVp7{eeEo+$zxa%E&E|9@Ri7|Qq zq}+YfoZ3KVRfXCSR;qk1-Gogp9Mcx*ACWG(n{qcz>2_z(8M-sU<&24Zhi{gZ-|&^>v6o6*Q`a$f&cW15no;oQ_AWFW(MUoS~gIl ztD5QeW@T=G?qO_X^(Rfe$`xwT|kQorZs-8?+5kwed>NQZqoA?f+V zda;gMz~f2pmvSv@T0L3Kd3vsxymXmU^#WYj)J6tG>9|2ychDT~a+tvtZMXBQO;`mE~cn8VQGQk|8fx&&1*J)L;vD zLqh{EJ}~$76TyWNU6Pm^sR>8PBj%Yt`YorYvJ}HqG=_{Lfg-vXyn(WcN+|RIKxKEp z!TM!t({)=J``Kt*e}N(?7M;c0wCz4RB^lR501LoJmIM05gcJt81HoZoVeI}3O?KEl zW06~z&QQyDW6q<=ypI-(ef@}ZarE;%%={c^pZ?~G!_Pah#U>l7<`APnVq*tOqmJBR z{LD@UksC`;!TTC?HPSm`*|y>DN~HQ;&nvc`@b&Rimd#;%E%5D66p918Z2Xe3KhH5e zZ28yNA2;nPt_A#H{83iKEPc|0<52V(`%*+N{pMM0iC-f9>QnZe6s?K%DQNvp-a%Qq zl{_ZeXE+n}^$AW8+sZRTJw6>FYQoi=xD`eis=mx@>>??f6pQ_9l8o}>R#YZ2M`=xT z;NZQP%YI%WgP|7P`uzzcRU_klbM*g8-xID%gLL zX(oU>TCujT=&RWvng~J0;~e>ILs2M>XM$=gYZm$Fha-lFHwBojLzINOhAN<%muT7z zLrIXT#WHqR>qSjo3#HE{vn|P9Niu4KXte^dyhdR9xUY(PcfWF_T-In$Y#BO)y?uOI zGy7`I(wB~Xi9`OG-oj2@i|oaR4kOC@Z?L~#eVZ~(Q1*EAYk>(Snpp7FoRmxKzxk|A 
z^6_Z+RPjvE&7t?@pD@S3_3hjm@_i^!|M-1RbPG5K>dKfI7}ju(TP;iH&JBErdvFA8 zr*SKGFpHa;;A)q1g|FT!=OkwAJ3*9h?OH`p9ptyJzga_;nj5k+ZrzfB+t6K70?t;*=BTW!6S}=a$SB2(jMr3@yTP3) z@94Sn#$C6gN03lDn6nl6ahDD+sZJWn#{@8cxX__bbRv|t-(Jf~C-n$6DqpK`(^2yw z+942-&E*=$l#$#sN6h^8b(A^V&y=@c$C(+Dvt`Sc2&4)Ib@wx2qs1IWH}0!k@l~D3 zHSStYrj(s{=S92u(Axd!=1;~IQ0k{>uiNb+-M?eoHWRWhf43K=CC?c9ag_RPCGAt@ z^zNk-=DmHui|m5D+}u4|ww(LZhQPOsy@lga|C+HsIe2XTQ|zFBZant2(z)l&(i`jo0QrJmxtRhnOtl-{zeiNxh7-`O))FdsdX;fEP_%Y(W@RS41<1xH^Oi6>(EdrahR=6Vc;>>= zgMDNG8s=MTK>ZwNB;l7n?p+)i^6B?hh>3`(HU(N;w_AUM8c&7|l`8HA4jMgwab^Gg zA?%oDne)?vK@sG?`^W?Lb-0X~qz8EPmgdS_?L2v29M*G>ShCycA3C(-?V0&+ zetYUOL@`6hD(4Aj^f@>n#xC%Y5{?d_m0lc-jEs{FeaHUAVKrmI!M>*pwO3RpB?1#W zFz(Y+=7eG|vS6Mz{O4kjGd#b#mV00N)<5NA|PpM42#%$#y0hk!w4L|oh&E%OrDw70JO zrI7C>qL0T%U&tUPug{*{yr;89gT;L-FE6iT)##$%C3dn?47cL+*_C(@-j({IK8y6U={S8ojjH2TkjkOxevtf!#uBe zdcHN~bTIpxYSAltSp1WRU?)w}$2v%cBC^DtcdCQ5>#9oj^9My9Y3$#BokL(7L+u~F zYZZEx_N9+|WR9u7h*M)%`(x0VmtbXCaR_ic9}mwS1%=B9tHi$sJ0L}Cfy3%I7mOP% zr#rmE^83>fQIL4Bk670B)!ObRPBy4M4ejdC5)XJ_Hw2?EP_lMxa-uwP5?TM0L@IdG zrXjBp{Y^+wE525Ploo|^0PZ`tT%1vQIdW=i0?&*T5FFBvGRQC5-o$bRjlhxsJxgvir zwnFWxAtPWiJAL=tgu|6H!VS4zqCo2VxjO^JmC-2{gT10(odmJ-x0gdDIXt0XvLC<= zE93Old=b_GOQ2gwatpIYW9>)y>l4Q(i#xF6Q5%ME@CpejgPsrzHiC#E3L`$%l8lC$ z#(k+DKN4v0@OcP2tz~UtNwKeu(S~t)#kgZ~&Kb6|i6PjJa(m6qKPwn%r)o7w?t;-+ z323|oaD&)Qy9hvlnv;>0H5QuztTrJ@-z)*Dh613Qp?Juh~w5_(l&8CN-i2;aKfe$Wfe zRBcPwg44^yR8s>ioRCIMjM(kO31wplIaL{!r%kCz zuL{GBMQyJ_@^T2DVgO%m^91P^6&aJZo#cyq6&05xx&!eO5e-F;hMT`(_Ql0yN7`p1 zEp4sK$|BJn*bg|N?Vbq`BDyUS+(aF$$YRP^Vv8FBYe7ex$gAXBU0wG=B3%aQJbeAN z$V6?_$JlE^e?OUqvtcW)(nu-u+nS}!;Xyo{&9rVC8Purdx| zVO)Rw?GuMmG<2M1=M`Noes$dp3-Iba!H;nD};Gow9k3!RVU@;=O@Ct{3LkVK_FS5Ypm*cQIY}hh%?e4Cw?%#SlLj3b#|fT zCwZzV*oNX>(tF(oQK2V&rs8l^d|`yuutkMdllCaa8yhlvd3)bRPitw~&dp|+y|D}Z zQM1Ztjctt6b4w2etB}0q?hd0pWme{@ zN=lQAvWRJ)eJTj-S(mynxXhj;yKr+C%7%Y+Zq{qec9~Llrxx2Y&@_)X(Rf^jy34Hd zYJCqHw{N4Oz5@=V1z=AJu$=@nvUi~W29ZV)ux%O2Z;O@}Pu;qijDYTrHq6^Cc~7C5 
zO!LFZ`vS416pc`vTwJZltIMt)x~}#vu-YPgu$Bxk$b9$_*_ikn$9b4lEqQ)N-URuvT#MD<>n;*C>dgzCPGojs&n7gAkC zWGyJE5cdq3%@T61U>bnGUCHzJZX1-2ZMwYm-fbcKasVt!^L=^bw(}Jz=*b>&6T}m- zKP!4WsL_kU!JDN&)m!Ae3WAF#otXjiJ2}ZKZ(hA%nMA88nu9NpB!?J~)r?T_$MJQOHQ>sGDnFI3) zgUavqgjdza8ttk-!O+C7bAceuSG?z@V;^-3@4GX??lmqIO9EK3K`02X$nIgimwjooxN`_tWwdi z|L|~X+i>STr9crsf{y|!Hbh?Uc?S)>WCJy12f7 z?|^&ux(N)0ze~C%WX2CcE##}OHrf-hv*mG?!~)}W>1fPaXV-6HA0mMa6}^SC_l0P9 z5{pcv#d4{s&MeD`$8Y+U_;keIvhpRiTP@0tBThwrJXuzB>0sgGHiIZMkos4C3oMjN z`(%Q+6O1%0YF6wYsJSaJL8(d-AN!L42!*mC-&M9% z?L}~760#~go+vegcqWWC3=e15@V+h%7<>QvD-N}lBgLC%GnZNA#G@CZC40%S&sn8X zP~6}MEtcaBe4jgt6X~6{01r=?g*>R<1dMW&dx30*N$6NZ&0Ftk)))|LgE;1_&{no($rX`zmGQ90zG*(pYQT~DO;NZ!4ZK3A^v`?lGvJAv3a{Jaxb z{W{S*Iq$+P7h2+P3NZ}v1vcbdHYG_cO*^I_Zx`4=~HoU>5l z3Dw?YKCHatxrG#0Cfr&`RmO1yZKbmcQ<2hBO-X#rrTKhh^y`F?yNay}kSgxq=YNww{GB4CqsN>n>_9DF1p6t^KM4-CVX@v)& zj?~#c6Q^6^r6mZ3Y;5!+&7=xLTV<&PnVd9|pEdCOm;v)k9CQX&QE*p|tQCtqRflFw z)S=}PEGCN9UWXB7EYoK`P(WyOb#t>Izx@poxW>Q6IXo<^7-{KnS1e|0mclRE0=yAR znH~COj+Le$<&^sH;ID|VFc0(QntE)WjnTTa5MtAEd$b1wB=eFw)N|0*t&M(lUk;%o zFJw?AN5r4IcI;(#jQi1|~Mx-LEP zu8{`TOBq@~xoCMH>_VbYH#7GHAV0W75Vtmcb3S=n^KSk$AH8 z`^#JBGct0@si&ETpXy7m3x^c7$`91_5w!1*_CRD;K6L-%l4BVP+&ApvIF7*Kh6z)m zuwYe;3h;l-E#N_7C+FNb4Fgh61e#6EIeY8TQsl!a_zYnv9P|$!qX=j`~;$6+FyFIS34hM4}Z=w>bV$?z1C@ z$D-z!kVvl*hQ_0Or$~)b7m0FFxaPfBJ6;?k?^vnD1RjK}H5}M-6=>l)ieXZVA(@^HKI;-aK?sh!nVt4gzoWH*DhAic^)Ma3sBE{-Ylq|x)v2uT)qNtWG# zb?BUEcDEl6*>Z7FN!SRX%s^`t*^kDfaBS$Y=|N1aSR?|89;qK)-QACw6u#Ugv6I#A z!)erz>wyX=W6KQU_XZbtpq%87%?L)65{^(AyQ*oo9O^tZD$+xCCZco$&$$VZW-;=Z zD3ITVc$igL_E|{uLJ9eWW1<+uwGgOy^n@^%48z{H*t#k3&g>1tdyr2l8XL#p+RA|V z1cLM^tCj~?H(njY{euj4#Yr$eRTa7@3M)~(!+=@=tl*613}x7`?85W=wBd#19CDSYQw={imrS@e zZ|oyZB=HNC`czZ#vA?7k=vPWwEfr|4Nl(W827{ddv2b$!^dq~Y3#`&7IbQuJ<%N*+ z^*e$#G8ixX`Um{_;Bs)~tYz&0XnB(oa{nWc(=3rbq5i~`Jy*Epx;e7bHK*4Hw6`CX z+Y58-bSDb^@A$LhheXDH^0N1nJ{NYPp>2|BnmSk4Z`f5H$ ztLfh-jQtBmY2Jq%zn9J9KyT5NK~|Q>Fw@oJw-h(V;)8t%l$Tm1C?=k((ykIt zz1*gOyO}gY#=EF1bKwm`aONg+z5fegDg8_ 
zsBM6A6d~LN4fEiD?D6w7_KcI0hDrbK9s)q!pKKhsA|Fso;H?cKExR*p$I(|NrDrS+ zd(hIvA&0Y8*v*)-(Obzc1V%hRj-{e7jp=@5XECbcH{aR zMWinL{bgArxZMFf1|NyJKhrkJuT3v-@UpXShUM?82`0{!cXmZ&DU& zsB_45%jTqO${k)rW716KqlD$)MV(nDPT~IDEnqvFiUGlEq#6zh4Yk8R z9+>8_v(?o}HDTvODN3?lOStMc%^zbde^e<%S86FJ+$0ztlvK7#W0TFfBB%8NbK!_QPh9*@J_&os_awDb1-ehxY94_0M7 zV`%WwWVSPzkC6{eu^o%V(0O4oj>2y4E2LcL+b;6Q0y4XrGdHl3>4{ETTd(S{u1hN5fuQH zK3dMSSLag>5LLr(6Xlp99lFAn@!a^C-bJ72nIkrYB4;I}jSZrN`(c*+IKnqb`%GC@ zKtpgP&j)J8OfuPZ@L(thuas0x!*)v3KT;yi8;|D)y!=l05?u+|%ZmK&SXHegBZ+DN9#mMpV=Nu+wy)&=qo$=bF zceu=NFX`NyKV8SSf21sTKKNb2Ba?;$P&7J2E`fZ+3#2Oh(?_IzrePmuQc!B5b!?o> z;TM+$<;S;|gU92*n#A1yN=va{w?%gq2{{QTKK|YD>fUBg&<+})-U%F?iT^IXi*4(V^Y?>Rw1pM)!8Y z0~nM?RWel2r7XXHOWG4aSu=?)NS!!9hdlk`GAwD2`5U+g(}q(S;DFTH`u4}F;y=K( zF98jJU(+VoKib)}&YJl`5p|yXdNC*{{_3R2lYJ-`n+ev7)?M>zqvRIwFF?(eLJUS> z-rjR(!-yOzfwJ{Qg@r1XmPzZW!)8PZh@&VDFviO;F=&lrKzr_nj5fMAHRs*soOdOt z7J?uTYy-B2&g4ucA@2IneoraS{+xA=$p9+lG(Ffd{t5O!@2XTpLkMyaSmDkf47I~< zK<#d|y$`3dn%~0VDO`5g9Fq3czz#!8M zfcRr_*X^Z*gv#o8M#27E2@G6Wozw$Oa3m5O)IoMc zF=h2@O-T+McAd&_jS+H8*U)J&YFpc-;e@~-i89;fV^cAba`$w>nD z#?O!dHiHI+DB| z)te+inkJ?vbOCFr!H~bvEmjQyDBjaM?++PM4}lg8*uG;&TT~Ab2dJrCgO+@m0@Tk-A}#-I=f;RH^d^(lzV{;MhMO-;!Wc| z2KS^6%h$;0cKWT}2vLkD2lPD{B?gZ&!o0ej+M4KvX*ZE@>k~F;zwNs}0*CP+tLV4Z9{H;003a#LvV(}JeBe?SVolBQ9A=xwtA{w` z(0p5hA-8Rk0^cGYWm*Cb{yNBqv8!|AR-mt>C9z_^qPn{JUCS<)wMCLI>)H#SJqx!e zP}M%!pnV_tH=OBIo9xg7Mw0O!Y`blq_u)yVeys)$y@~WJk~}TN5>LS?M%lDwbf&ct zPFU^*DFY1SCHVPw!2BgF3Cxy?aC$|w#vL@fPN~zNPkFjfi<;hg+GNkblUd6Ge!KlA zM^}zu)ok;G$N`HqL7kO>#USE-NP73Bp2x~8pv92rJ`@}3X=^%Wxk^1D?4j5aGj5x* z5d>mB{Pa5krrQ$*mHUVa0Ct(B5{Gc7EdW#U@bl+qNj*1S8k_O4r)lxDsR3ud9p-Ij zerL?SQ^W>k#!IYPwHpFed+Ml8Nwd!_{M;em-I5=lFx7QI91WSuC^SatBBD)Lww>C& zCnQN~;wGqEm#$@Drv=2p{Lyso(zKD1LhJsN!P?2=fREFJ8^t5#41XwbZnG@R5E9dG zPNiSEPqzROE+U$UgdSSYW;N?y>TF+*MK+~L(yq3u5&pNI1fQpZpxa{fSIY96xgm~h zUn=G=@p)Xu{PR&bqe{16w|EN&&^pX7hf>SD^n0SKVv%CcKKSzN?;1+uN7(1Z%o7O4 zJUL=3G&+U=b`m1Prc5HKP0`}@8#9*uhWL=ujYlt%a09tS&!zB=80-3NPkcIQNs4!q 
z4vUzh$(Ap=7n76piYB|hYPA26q4(+gYK1~aGlJ%2afkug*v?ogT0bCd6=*+6wLb}~ zQP=b5!v&40qtwya{pdT{QpCDsN!u|?)xgc30HX{q8np)`mRYPwRAII{q2-nc(;osF z<6;J_w2;GTy|_8g#3;*XKFpiZ+#u)7GF7|hc(*sSPQ%=tSe@!idLb?uy~BVJa4JKx zXSc)nkj8vtsAZ|mOz`*sioJ`P6b~Le7_ED$*KaS~sD>A?08Aa8E_+>_$`H;I;|(1! zryL}^u2DSEg4s)Mtk5lArdy(-#zy0|qj@h-LBwe;mZ=9j2QhbuJSn99HDH+$=Msl$)Y>7}64 zyQ1*>y4!g_F!xp1I?w18I(pkgoQvCjmv==I*J$7T)*K}yDnL(}hGsPA(&gNkmt%eu z*nMB8y8Fs2?qADp2k7yS6+xgWTB)e!S@p~O31GN!+{R*WxwOR_P6!#UOym1NUd9&-G}!l z8YqYKdLd*F58KJT0Nx&s)8I9=23wYBzxykiif+V$2*v)@vZ}dFzSq+{e`Rtr(xb56 zk;w}+gFw-5^So}?Ek~HuS%qTYRY%^;w)fz$=%`W#RAO|tTB-F5zKCQfq-gz6RP8;< z?2J=1h|%%g{)QSxZ>#M=r{38LcPdX2y{|NSP`lw+%Y-*BL=+Dl8HQvrga3NA#|)(lcsfCe{xV%%^(_TneWK5*mxXr zND4o#Vtp1^q@1II&EhX?q~7!@B=FOR-_m2KS#Z=xm!nA1K$xvSL*iJGua#^Mt#%ZVotIg@s?X&Os|s^@3gu@u3@Bc)_p1DW6@TZ@`^B_yX{82U__xtbpVu4 zr3BdsxgIa*zIW>Dy~wc%u7nsij+**aVe4OGENL^m59gv`X_&%b7IkQY++N6=*1^bP zq*XP`ptKh}9PUIM9s`FQVz<#<-V7MOQBAjR-yZEQ*9Du_tjbJ`u$t2L;e=#a=c7)j z$fC_u3(QiO)t@~mk2LVL@m})qz)~B=tYCA@m%g8ce~+v zi}h6hMuguOt6^lnBl*Z)V^O)AHI^>Y_RWJE{oY@ZbSfaFQH2QY<2RAq##eBegB1Cs z-r&0-?djvwI8MU)bA5<azvtl!d>v>c#5`7B!XpXhXO60n2j`wWPMy^k1dP7 zop7_Ga&qU%G+F8(M2UObQov2|vb`;KH^%V?xtnaJ5X@!c%WWc14Cy9HlplKTuiO=& zoYe(-2P%U$=GiH{f0C*HbaelTwnKx_UMvw#IiN>2uvTbWDgi=?alwM^o;bW?A;JnP zKh7Osaz-HY^Iq!Ulk@Z1?5G29k{5#ShumJs6G3-{iTBxmdM+~L3+1r>=4&^a$u;bQr1vS7lOB@Q2|C^LIAdiItYux zwv%=JZX&f|M8D~o0MA&rrDEihMhYLq|6yKr1wh-a%~p=HS=VP2ijAR!52kOlJ!Hk> zCvIVom}7P)G?XxIr4aeP3hThM>V0iZBCk!(5@?Ucpy4uUMIlk4LMO#32dd{r0_HP$Az>UyG8w(bvuh1wS|?CvLg+v?8B`JF`6xNpz#{ff+pHiPerT6 zZ$SoM>@eXkqOHQNJxuB+ zkf@M2rqz~SHaVg4BqXKCzvY~nl1ZuDldgNbKjS8wxVSn}O2?SYUiIgwLLsX5K@H7; z#5D{|H`98%6oN9ye7c0%kn}B(F~Mko-kqA7$|oS80y7|UJb^ZU!xG+>LG`Jz_9D(C80{t%`HDq}- z;t4=M)V<+fKE|}2Xwt)VD*$@LtmHSAV#_wU*!dp^lu9TzqvI3`*xZ?f>L0JjNNnQ<+dYMLV9FzVlGBx9Hd^#u>qH!cb2XdyRwS-!2N##2nVd}YVy%Y+<}ZY7QL&@N!7+P>KQ20wxT%*`7BK&vHr$x z!c>frP(LF#lR;c&4L7VJieQ*)hmoNsNk)dqW7OmH&X0a@-Vg@-LL7P_3?mk?=zOgP zG!*MHdj7(N+%3g{Zy?pLx~J<*R2|6v)re#P5c0?nG!CBT4<|4Df2@58Sj}ns|B0~^ 
zgT_{3oGhV4(W;CgN0zjRv}m$bN@>%ceXTfJNGnN;%9heU2GbX#MotpuQ)oL#pFJO*i2SLOJJD$(- zANCs0u?0}wMvXMaF*no3i@=ANIdtF6 z_-veUmqS&rP^U?X9v>-l_X`*7)Xa&fMh6`oncQPb@=Bh_@n4n?V~iock(GvYxw|SA z#{?-5I7iZWq?mK0@c;u*$cMSO!NY(c@LXJ<7R)aI1|A7O{=LQd{x@?T^A;Z@405l$ zU0T$MVtKF$7z=|y;odwh&SuPk1Fvf%*W!%cW!kTDWw;)z!DC_S{a+#F|Ct#7w$n?| zPX`Wt=%Or(Jxi>Z(9CTxE0No&aQ-i*WBNQl-!9}y*8ecper1mP`O}6h7^YF|EDj>(z~ zjwv#!qk2X$Mljv!=;>_0{_5w{)2`q(WZEgH9nUcN9tp+K4oJwU3bxpv?toA*+W|Jj zu|_+5*bZx06`gpw9f(^OPu1swB8Ey$V#%m%H7ckcgF_}ju~vKs1Sl(1#K$>C;Jn4ipvfzu%ji)si>6SbNd{O(SU721tf(nf-q}}-ZGw_hP9f$QU>gP zZ_h0M;>rHb8i61|1qvObZZ03RbIRVJSK1fQ_N`*ClPY)rn#qF;NY2iR%&}ko3^o7y z2_6nAno;U`F?J*8;z~)q`su|*`I3lHTZT0r4ONkK@%kGqbr`!=ZP2>NVWe0rv!Bv6 zIMGK7NUITD42BajBO_=p_?P}ud*O8Ij&XrvivUAX2o6)Wd&!N1w9KERr@J$OSyBbS z|5-owO~HS|d2xJ=U4o?B)FGvw-}O3wyR&RoxKM<^!KGpUtUkWEP?f$#sA7loc9k2Rpwzvt%jf zT7PAX`|fwTbm| zB$F-A^(k*+z0GG)U8LjKzTFplp&|Tlzt0$5Bx|yHV}Lc+(q5p3Qb(tX%3q0mE!8@5 z=0M`(GF48~2~v6Amt5ce?iikAZa*^Piqzhn*2b@+jo{tQrTh~-1WZ=@d>Kv@1E50k z3w(}Z`1VKxg4>2B{$TW6Mo92~r7mSt_!VA4`0+y%&9@uOyj^f^Zl`7$ja87GZ$vD9xgvoM-4jAdhV3`rj+YABqBN@$Z!~EEdqxE=>eu1YF453dLAOEZ79( zWUZ!#h*abczmohci6^#COh1LZaqwyuUx(+F$QbHM65+N37-p?(cr_$M8T>g`QI``Q z_bVJ*AP6Hk^=el>^MH>EyBB{}4t-&*rN0<4|PCLP)$ZI zB3O`pe|CBW;Vk+a7Y690wrNucqS3H970R)2KaC4vg+S!sV6ihV#R2GBT8UbyWTQEHD;QV)r|Z1|45x&gke9pOuG)g(}bEeq@5(c`0FRhe(<9Mde{Q*2su`J1xA>^ zPtU^f&LQ*v;UQv${)r;9O)FuPP(st@@>1fVb@4;qg zhuHQ*2h)@xmw}(l&BMb(=(9x)Mm!ZPjv9V{xQpZKc$H6;vIOo z1Uin?8aWteh*3k3{A_^qB*`fT6RGdLX!`VJ(EE;D<$$?HS6@^n0rqTpJi~1^(Bs}$ zD7eL~YBzz2cm`E_5+%724} z)DU@>6uBdTYv9b!r5+XE2+)&=Ehnh4pR_EF3#Zr#LSVF!OaK6ON8ytlZ3G9yEx7@$ zeRyj-7+isZimZLrp;x-nfWE!J*Vfce2j)#|G}ZUnfyr&aobSAlsS>J!7feG>FED6% z!zllS+fQKSg}F#UDkF}aQrqg)c0v#c%mKy(D9C2NwD`1A+2gV%f^S42LHX&?yy6!l zMTD)Q9fw!7;@4#BP5^|;C>r1o#yrG#%7>4l#{<*ax$mZ7o-pnD*@4!69G20%79%J~ zz`u$ObV(dfr)DZ{Wfi#F4^eJYmPD?+6rUY>#ZR+t0xw$8y@wS07cL`pCd6eNXdRlR z@Jp|g_QpT8SWDWm{WXaq4?zK) zi%Xb*>GItMXZ}(;$*G<5YdrF6xK;Vzsf+AR*&q@U46tSC-TnC0w{GRiyHycJ)w>t_ 
z%oP5TC|&v9MhrZ{3wVG|s!h2GPeJcqJVZwJPO-G);4Z{N4QmhC3NnBVrA!?#Ms3&% z8|G7(bZkLH`ttPX&qiunpS-RAJOSGS6KL#P16oYk?PL9KLsBx#2Vf5UU*S9 zSCq&YarkBZ9a>+J*O}B95R;J68R7`o>rg9rqQC+rUbodF1Lho{NBbg5@C_=1FPLN_ z2PYf=be@zV=$c-$ckEoE72Br3Oh+{tIgWTGf=BPM+VlF>ljOpe)}BN{YI2{L2ZVOv z?5++A?@9wUNuXDLNZoJqHU$9Wd?CGF@}q4cfvT49Xl^u;6*R#z%zJSbB$I>_fT}RC zk8OvRcwkq z*RfQyaCh-6uW`UH1Z3C8(@F!xIH11w?!(m7nj2{%IS|lC5o3>y{luwLYWU(5RYlHL z%~(@BkJg*q0Fx3*hOJXuKa#W`Tije+jMYY|rwS}hed9mKFe3#6My_NK+ns8U7DHmG z()#sY79me`+C`2(Fj7)dI%`qs+L4fuFj*h{th=|jaMkn9*@i|hk$OEB5h85&^XKRI zXKLuL0Y2C(_ZtS+L{D9gmH;qLc~d+7c5}_SOLD>P2YO$Woiv%brz_BQ+zHte^CQ~L z=bSLQd@*);Zz9BLvu|9yl`?UU{SD-W^g8vtc546q8`E4Luh?tX9tNx^0!S2-^-*XL zf+ke;*GzMrlc5ok9TMErVk$ZF$sK>yl`Bo68`gF-%yD;v%C4hroJi_;SNU14RWDtu zh_%a4I+R9K;&FZH+xlVS*U{fz>2RHM`L1=LZi9fx%lvB|_p8qNbV;e}_+R)`q8cKo zYG!sFoQGkpC>M)ZewH~}{m&x{1PdLDk+ zq-VHqw%Wc#YE@%XQwu=i#T{br`=Be>@OC%Pi1+;fbw~|Vwx^k-BSR0u`)R44ZS|Z) z)WmFF-%S8eH@rLZmh80HOzU+^Fy8c1&$8MRb0a?_>vob)9ekyFT_?E9aNvjRW?%+2 zNT}FlngJbeH4YmuCZSjeV>0|6K?$SZUzaK*vN)n#9=#p*>)SG-@fZfW)|me?IlqFQ zNaYyBbIlj9O>M^Mf%(Y4uZ2sT?8S{uMm_opDk^MZpuo^?Xn%qkaW0eT z8ap4QFNC{Db@7_{XXc0Ou+Ec-GKx=2KmudW4r~2}Y?ECXDG0p`INz^BFFl5!A8is` zkg6)EH>vF+f*){e)p7H>GcA^|Xo|_ro$Kf8By3_gle%YO4hna}n)dc~7u(z2xFp?a z(Jp)P!aRDtcsZ1#?G$g&j1RrWuVVLV+bKw5Uu179>+L-x_1TU1Ex|=1;I5CULIQDT ztsy_iD<0v?X5V*G=+y(b%{*LN__-MO}Z1cGmzW29im%*oMZp-X@1F_O9s|5KTrjl%{}KBHX70K>O{ghW_p0GDkq`6vL<1h(Cq_qePzoVpM6oeL9o zsCk>>M@oGT_3N&|mL6eQ2qxmKu1>6}UbL~{BJ9zDqK{y%*>ma7sjcl5}@-j*&nC z=1CDw%2tE2oBU}RHYg+=sG?_&N6rZH%<`luh!JJ|hbVcxZI4XM=om~{0XmkJycFb$ zbld3uGiV*^@lKQ{XDvcD#O;1{V{6?DbqIYh_0pdzLYyNJFT@~jMroHt$`$gB5~`u^ zCKr6{Se%nAV)5HV5wo4zL1p#3NQ(WhCQFx7cVv9M3hDq~bJt0WR^kWUndl#?C8Tg}eWFfXonN2dCl(STo zb0r-2!&^-M2-JmTTkATGVZ!_Bscu(Z?IU=ZXM`ce)K| zkK(NA!Z2}p1(MjHRmG6WtJkAar`oN7zCz=mB`q2tB3@lJuha)Yc#GZ~C3QGaM zfkfYeQdwkV9u;&ny^-r|fZijiyLAg0^a~`3RL*b!21_Qz*7_oxBwt$Th1(wgnm0UlPGIHpoN);syY>#g zPnuRr3d;Mb5E;@v-?L|Dq}}MM@xVfa+DfgxeUyClv)grs=91enDwLC4Nh}N~s66o1 
z6HiMS<@_LibIIQ_GH98{P6+xLQ0j2X#vy9R=(phy`nNvDbP;qYnEvsLkw*Um?z~_E zZU{P5!fn3%Q8@QCaj2$TUt1h&MoCdP&w`*3tqD52P6gZi!Rtm0{uh)jJNx{#speT&j}fpd&-PR2eFfp3OvoZbpgc&*7Q zkNivrs1m%X!F>15cQzMFHs%Pw&CWK#o)1Il;N(Pv_EtnoB*7CNCg1gD^g4GQ20Q4d zrrqxVlqIixp^}cb7yJ=WGk_gDPY^YD8O;CM6i?M-x%S?h{NGVP4{N>NI zk%PL#-ox&Cnh6ynb}OB*tjfq-)KjE~?-Nw$Cdod({N*PYjQeVLFBFly2@N%7r2rh% zXm|-wF&tPF+6=G(`$f2~Z18~M+G0d2F{|8F*BRt~dTQoTz_kXSpLR~Z8IR`xT)^VR zw>P}QG`Ow;9%npOPyQ3HF(?5(iqt8(76}}z{h!g5-?qj)PN$@}zRfxgtpyrgW%a$8 z9GlviGl3w|xom3R0-Nt&>Ze116oI(?;CqNQ$xkjsMAzWdEuNPlHhQyZb^l?_I3$_s zU4)A(EG%r8yD9AklZv5mYvjQ8T<7{6<{*5)%N#f0x-_TD6mE8+4j$U<&h<)ISa-Cc z*PBo|hwHT=Dc8XS-u*}o4=2P08YP0le^|o(Bnk6d?vL~An`$C@0S3m7L_OP>RVzeSKw!Ij+qi^+WB zd<|B-AlQOvA;4*l|8Zq6eoHA3;W?&t>~ybIjW1CMF2ggqX45ufLLuFdY1KHI#A~%tWS@&7OP#+yHdo@7T{_;T@77hI;%W zfr~IcY*FC0`SO>bdh}#c9Ib~-LzurnWS|&A(4Ir*9EtV35)M;!NU9j&gT{zFy+XwnKAs^XOrk`)TESNhh#qqlfu{;uUi9; zPF0}_lCW0)rHQYt0iPol#fND?B}VuJg-^#sUtNOZh(qE+#g7vHrz9>#gyUi&{F*Yi z&`}``amI`pc^I67XeB};UR$H~q7z|v!A7X{rzvrnoRLQPqBe5(pKuQ~QaWPf?ITvG zFoan6Kbfao`7Mk*k4!Lo@f- za}u+WT=8?iA6i)C6>T0jbIo%yfkH(W0TUKxM`@rRCJKSj9+dW!j1Ttc7I1D-WKwn; zvkEyni3kGGGA>3x)aeC1U)NRvt|H;?3cS5iHY&78gaxhjk<99y*aW2op6{rXmrn=B zFjpul8UY5NlFjH;4VHk;zBhwd*`PdYl*<`^Y(~D-SnJc%5;*a@F$=!LxPDs=LXG1Y zRA3UnmXv(Zc-RoKMcwv`myyCy0#fe+QAm(3M8(mrcM$sFi{R3U$0&#$ryW_Pxng3! 
z^Zv!Pp1g9|T_~)T!8cIcBV4&0LFo+QUD;jVb6~dDF^bJCG3&1XDxFBFQ87VYOHB0P2I{V!hevt12srChZXcOE~IQD4_O;_EacK5I35cfryy zsd;I`x3L%_Pm_x5>7G~jID(`dJ65w1Q$+S(-X95)HiIi7RT`2ar={J%0t0s%MzN6~ zJ~8JFG15(!NtKVM9|ayT z{Dp{{CQ<^B;pPGIzxSp|ku=yK7MpBtNLLZeD+C{xi}D)}u>2FmV?*46y(o_m`!^4c zhND$hl&yd>^deHUcJ*|%la!B_UorsqkHVN?EXV`w>b5WAzY)1VitPK&xl52_5Uuu$MKQpTsxyjHvoND2+(8GP%FofCB)^9C+!A_T z3EN^y7ZXkTbTRo?-8p^wVRDdC3;<<&;}mf5HZM-FnJi;tTi@!-fK=Nz)0s5Ad zItpK7KxJj+Ru9uZ*MSDReC0|w25qlizdoPlc!12T2zl=NWCZ(~i)h7Odo4Pu5Jb2w;;F*A9Wsy}3%~^?tQ?}O zL5Nqvmw45jmQoR3))V{*dhupxanmD#ATmuqo%vLUz$=c&=DNl+G08HpIkRqUDT{jL(Ftl1P7Mgw4AVPFT5-XBdi zsvoUru`9k--lQ2$6Nz7gA`{^pXGl^9iAU7_WUjk8)Ca63Ql#z~K7*C?(SAW7mrkGl5556I(fR4(4_d z98lw9j=r3}GWL(lNTPU#uvzOuGes9Od@7R8ch!JD3`a9=0S4U=dH+2h@g=yMoM|DL z(qU4id@#Ooy|KSU>-yR4NKWle%h$HnPPC2!HMtIZW?PgKRzI9srlof$TL)@bei9v` zR&+ZIT|#F=wX2(3K0sRJq-F=|hmz^oX*pMr(t+tv*2vf_Y5j;D1Kg2gV^n?{Z~baO zm^t?k1J{RcHXBY)LwX0UOtfp1 zQeO8)@F*~%Bj&6??AatfzRLh?J4&`EAPg3a5$YF;?tT>h z?@hMME|m2>{Z_1q=HoAK2?iVOFkH0ye10^{hOh@<(+B|aX87jBBdI~;U4eSyNS4-*Yl*T(RWrk{;#6VJoIS@_f;C2QD@h$RU|?pk0JyULpmq zit>}Ly}eC)!agQiRGOSZ`U!;e>MDBcBJ`0l!ked~8Fc8PxBNl;MNhmmrzyH|B?aEk zv)eUx?a3K;OFmNlCI8v7!c1iQ#Jp~s+1)n&Y3le%lS1MDrt@w07YnE=FqgcGD@Gt> z`N*?=zV<4dqJ97Vl@)aR&G<;$* z=)RBilq3a7L=P2gkRLXBD4$N07yg>#9tYKm|GeLOHw*@OFFKp6R4c4gFIFeqYy6sG zH8H(F#|$}bn4!S7NX;~*5Y^V7IB{D!n~3zgQ;WK4{>i%5KTJYwT~|ZCW9{`+3v4~= zok^&2&}W=sl87L#^uUP8KIU&99=Gjo$roZx(6%^(;b^Z*7khlpK%%T}I+?BKxrs%U ze2ziVu(4iI?mIT5?J=qS1_Xa`d%e1#oz4{h%q!T1I`fUDl!U6z>c$Q)VYq8|#HhUfI+d}1{PP{w z4h`2$(q$qatVLkzc%VhlTs?(`6>T8xz}6SgY<7CZBetmK95`SjP)F<_aHN%32z#1+ zK2xMdzBjXxAz1CZo=utF2!hm%;nMM6f$Y?!8lnumnJ5xPwAe^}c{}(*5hj#>6vgnw ze=bw&XhG66Mj%~UF-L7-Kfz6{?G!S^V1BQ5uaq@HxzMLRX>E=CF(;~*h0B! 
zFN|xv%`vy_HDt$$O&g0Ee#Wx~pjXjs*w=q)hX#?=^qKc%aPkKDbc)g4#3-8|VuLnq zpK=ddc=ixMd+6oK_Jlk*NET4XiJa*E8VcG%X|=_84V(ou5z7EYa8KsUHRs@S`UgiFOuB7- zyf$3Ew>`6`gB5SdEN6QQAefMVZIJEqhLJ-G zJ(b`AE zz_O-x3y3Ug$cBi9o`h}W2DP#2?H24Hrzl9qng}xp;yQ{N9IsFp!)}J$v`Vn|)6tBj zox34}+lBcZn#qodPF(;m!Xc}SDwY~IZd^-8P1N5EEvdOkGx|@G_F?ZLO#RG4zNktA zw^#YlX(Ov4D14_NspqzIvZ#p23ji!y@0uPoT#ZDk>}3R;Y;luqJk}R8P0?boElqwm z)wXgv!G$hiO_-Ij(Zb>u&Yd9Wgl%N~CR@8GWg{}C1$*?|g*V;^w*4F98oju!zvaDa z06U9_uypg%C{$!#bETw7Qj6eT&JYR|^77ryXCZp0Pe-^BK{1p7ErJ0%w(HVvG~XRK zx)K{vK30AJMir6N8Tw3BNIz&U0G^C0D2U`PY7W70m4@VA9YKfYGl)kSL20ZIWFxDG zPy}&AsE(Sn@J}QJDRyvm>kOV>;i^GoG29#Imr~oJv1z5gUy6FsEw;VzG(Lu!-4g3* zxK5h%-;g%!=bXVcX=Q)us+)E=@GKoGKJKgTP6~UfXztYdYA4-S2oZ2#PYqefGx6eo zRit}j;7PzN%Bi217S_7C+=RC`;ri|Z2uf`M@{;L}-`vGi1Io#A6i^s%lx!rZSn+dC zo*x2DZMo)4brIlX5bg&c@`PlcIA@|Lua}cT1hYLa3(MPn$MZaq`e>zau|@5?ds`c40R>IhKFLFqFgp zdlJ=(U(9#%CQRjmX17jDEq>(?i`9;ClKnd=SmoS*kSs%g;V#jgPmjBmdMHFg?ze~8tVB9M7F_dN$ zoe4muL01RF7gXN5d2tRT1i_#{DGjFcHA<-?VEW$^G^XkoUsjp`E(N+Lwax_WlRf1W zQV~~&Z+gqVeeY(^M|-^bzZT;_e(lnlt2db}n0tv->TSZoK|#{E1|(l0CkBKWpOP~X z=10#n1vHO7fLgxi%eiqF7E#pmA9-x@BSisBjz_^4vSYz+&@bb5-m-01BnMCFi znx#cka-kbz2Z6JpSh9PFbC+V z@~cR3hQV^iTY%|M&xPGzn)14C8&hfo<-g!a!8hx_K&6s=lhSET?6h=j1GR@K%{m<= zz1_M%STOyQ#qQHDOm+OE2T#YeO_#YWSg!gATnm5IRsh9gN=c<(D%QCJslLO4DxuJ0fkB zM8HsrZl^5OFMp<`b7am~&Ux4b!hmHMRXz`#ylBTMIDzNV@GNk3LwC&i@&gR#F+{w< zS{V2`4uc9YMe z#r&x{#aZU*g+Dnr;^oUtKfe)J1E^*XkPmJqf?CG*h8+3uzNwvQrg!4QkE6fdSn)7Q zZ~gxiP|#T4IFxk;C>y}GqhC?>U+PfEJs#N9hd3_ixp0nhQX*kfL#^|UF^p&{hdS0r^Ha2L)2>6j@PsX*i8(9Rl?cpBqp4s4QuR~a$Gw8cUb z!4-b}twLinqOSjL5kRtFD?wSF1CInHG*)FR^$Oax2~1T#-{aI}{>Q2%!vdM78Nyq~ zjQAsH?j{D`hIzjXUD2&+^{c7I7(PC`<>Q~PIAh7T>GHt?gML+34q6^^V%plFhnNaO zgYug?XC$8&V(xc8zh-7dcbrI+(Nmj@!krcGi=W)dZ1Ry2k-UwqsPV(>kx)fCMVv70 zFU=Wwn>-}qA)yP0uyoRqK;*3sIs$XXZrhy zYaHc~REO&whm4lM;8L|RsjJSiNBKT<@^4(W_9)-!ifaQJ`8O)~J@8e3wXjd^HRzXL zD$z8Ewtbo#8T$twrRVT>m6twdeC%o4XXU}+pxt~)HWlg0LIOjbXC%0M+UZ~Es|vQB;b zqId@f2O$+-zFX#-nr8<-(-aC-qA*b 
z>p?pRt2X?N$t&OrJJA_X!sHiO19p_IB*!2kN7myHazUVCeGDixHC%GnD#A7 zzy#uop2Lybf+M!-$P|<3AK#_c6(oQ~mo|HQ=r&sUvKG{8y9#5?)F+8JPyT({%@6mz z#Y@IaP#hq+9;h&L&b)c)kIpbMmdhRGQ^s=0HQzKyLV(uR=g(}t@$$3!l`fMFKmghT z6TIcr>j>K42q^~a;a;=7XkU31vt+HR%A3YvXmT`^m6g?xj0iX~^s(&x<7W**UV(-c z`gm}>#FQ4S*}h-HmuvC)de8za-0U%5-vk~12x{+%4+17LD@Rl^lP7!0PE0-?=w%(ZB-Fg8C_GO$ycuZ$AnSW9uEehE2W5$#| z9mA8$n8TwN$;)FJhsy9@R*zP8?b5wv$Nr9*!Et=qq@u1a0u-tds++FmTwmg)y3I{H zx@{ngknKAsM_e@47>>Cl3_&CrdQ@VoH+q_|*{f42${9CVJrc%QG3tV!-l~HJD2&hm zG$#}d850wOFr5(8q!jT0&`Z=h9Y8@2sAUSBtwhxBQzUY3+H?-zEEi8KrTdf@&ngwZ zXE2O?ZrK6vx({z2uw#dh`{_zp%z@|7L60}sizrnMvgYpF7u(g<1%G}ZT;P;RjTAC> z18>UiCps0D%u!QiV}+MTz5vV70;sVPq~V-KTO4)RLYtB9MTm3`W?NpSkzeVZ-KbD# ztPV)*-)y>(C5CaArz!Oq&$A*7b-Zcm59WU=3*y{ryan^$)~`f-Z%2(N&8%r%X$=Oe z1@$RSeD^qdRPlP8r|n#pX2QPfn0XmNK$V}LNMZ|DXxoUe8zSWxI}6B9=)nS1mP2R^AK0_G zmUg2@kG2HguN`NRMLu6lgrp}Qer(%v4~;PT*l@%YLm9QYp#r5}%#t__+u!7ieGwr(tXeue3J#I zPSx2azp)kM>`Os%%N)^jk2A)D5ob~Lms65ORrbmHCTIa}^)_gaeXwA>fPnVfO##xY zn<6fWmq^n|7lws#_{fns>5+Fse{woqeN_~)VFoK3>H%T#;>8)a1;c?NP=-lerl`ee zlMAQ<3j_=5n;eqBv#Rg&hjM&_l~wDCR7NEG{gjJ1TsY+mT_cTkJcXlvxa*(asqUZ* zT(LqbvLKvsV6QngSKIri#GqTQdyt{9!#nwB*)&h`PaMUfcc0dl3CjG zNgr~Q8UU!O3JMCL`Y55HAC)90Bc3r^jQJ4VCu(4AUHN=E<%N*Dk$whh4Zec+$PDLv z__;Zbxca)g1uGWEXBO?WX)Op?Hqh>-3`P{M(a*s79`NbYrx7OlEthLH*!ZP6iTY_Pbcke35!AV8Q{gkXvazGl6OQ+$~sUo_%x}ZTR7L!am@P!8E z=BnA)+0_6cpc{aP&h%bp75<)2U_<+%qesJ0qQxOiJ@>|TCA!GPmD#-mEn2v+)-7#eczjS@M$u8VeH-o_zAc*Om|3E@8P0gZ`z92=2d5+2 zs)571iE1ylt>R!LX_H#JEYW-DJ zT|;L&uAd-1bBkP5y~JC4_04UDLZZNopkV%5ma5< z#wE{k4c(inrs5ahDVF!J;kbTSp_qqzMZM!1p*QEk^2MZ9V>#f_+_KHUBuCuC+na*i z3}Ap4aur*v%63O!$`ZuvT=dtQc*K=o>g_pafl4$<8W6HyPl;xN=cF`8ng>hiAkZkqEGSR1us>L5bt4`- z48Og#0`|;2z^O(~NEoo9@dB+CXu@U#_@~MY&;&aOQkV^BrV=YBUO@gQx`+_u!XifL z$)K3+lbS|MiKz<;UmnuB`{vx;i-b(|^k2KzZ5$k;vsKx}N ztX=uzHe>rU6Zf^)|M|v_{VTcM4oC2rwhI!SB@6eU+MAa%uNKhJE5RW{hJ>R8T~QpR z5#@);P8RReC$X{-cy@B0oemqj5RT-hJzakl42*Oy=PRuq!?^P9;*7s*bZT+F>OIaM z4Iijr26(JBwL(;?`0D0GHv7gq8lzHxnlHJs2MUqxPK972$Hqv-Z0<$?u!oU9vY*MR 
z?wBRk-y)P^Qfc^kNVMm|GuR_5eyg(41Wl$Pr0d=7u#StyWg>+1rJywg`wxvtpZ&%G z&d8R=*4CuTiFeiSvq#UK*X~UHDFwHXl9c}WL|=0{9EZSHJv#cAdX$1BF(>&Q)lY=C z;5@!AT4cN(s*PxQ-jO47)it!$cVEBVEFP25 z*FkgQdm8{P4r&@&L_VDn10^jTwlpIA@ z{xb&Nt-VY>8^^N8zoK1P51ff;R~S)6cOI=&!{_tGV5)WV5y}GMY`z%{Bqnru@`A0- zV91#)c$ASMtaiL0KU`xyr%BHw2RB&`SKNN`>{%Z98=<=^&_&x&Au_QBQ&Vf*k^yH@ zrH^uE{*7nY#V%o?yy~$BjyZhTusfxW{YA&!EFWo3n3^qqElD#Gk27)Qb>rQm8rV9} zH7A97cA$!q~3VBfID?ASig4)OUY>Ru6lpxd%TrU?W)if5( z0BXh}4}*pnYZS5K@f5P$*@{y#k7(qq-p&21Kj6L(RNL5lTQJ>oIX2MNnKAzU{)hk! z1yYJw2^CyT;7A%+D7cxI2=v1dOY%Y#DcdI=!T)o9l=c`5(DC6j@pz2x1{>E2@l%=k zpiD`L3Sv99G>OXp_43_nnX*l%2pV8zW8nwVL-5iHgWs~{dp^1?eDn!ydRkhV5)ok- zQv{Y(dH0K0&WL%mcSzfUFZ1WBtFkk?U<1%<2unY}R zeB~$j2q}&Wwv20;!*R5{0QN!{jij{J6*RvzWE}41aNx%Rz}YZiLE53?)|#AL=vXe& zLjw}E8kGX!S@oHlz%Y^a03EI{h9%P%$A`~F3@UUBTDQz}*tqiK@+1!y^d*$4Ma>vD<+&7CW#>&-vqNRZ4oc9}5pjJe*N3|W4A>oW z@59HBW@gNg+12fjHe{w9wXOHViQ;DKXqMa580Z%-`cE7%ea^Fbh+k{q&G$q(j&4-~ z8`t5ZN4@OiA$nHF0Zh*m@G*4UoAGoUzVzVmi08Fd{Of}6Wl5eN96Pv$-*u(j5Uqo@ zyo#6n=!z>;$?_*YKp*vTN)cKJOiXf64YOt^iu_zh<0c}-}O?fssQwh{M zRGD04ucGe$V)$dwZqd;t@fue*={a?Q&X&yEGErp$6xx3Zn=F`rE}m`xt5HF zp~dGVsZYtn@l2*F`e(i$7O&?YTqrVnarF#Sd+!q=Y9$43WD-=cfB$|qhX~!~-~T8u z+R8udum-p`@_{lko3L2xPCMZ7*#eMh;ab&HH$m&iOZztgMt$0wXZ0YTSF!Kkzt106 zf_u!$20hCkr`!Lg&YfoaZWdrQQz=vTy!(Aw*(&yf9Vh#jFr&~5pOTsnCa!IIU}tn) z6i!OsBJR}5?tRj>MNT6hTSxp(iLM{xzLLRh+UA1ttVwcSg7=_-aNdrWpZ zz}j{Bah66uukFu2*lC4o?HSfvf-JS)yss+Aoq~U3Wn(aL*=YwZE{^j#X?~Ne+=yAH z)+4|Gs;jWv9+X=l_}Z)4vT|~kNRHExUdedz_WOwVk6}07Jyu7L9yP)vi)#%^mxxT@ zCmRDDz{9Rteq`hZR4}+RSh@EfJP_a)bM_XIoWJ3o!EsKLjPtt&EyK^DVOnKERfq{w z0pozZ3$cIABo(Ki8k~P=;*O9elAOH-|5X_58(?&(Xh=l0=sp*$5Ibr8&RTKcfp9Q# z!m1FB15CIZW5_xn5`v<9MzlfBN@Pk5=jHXpc1kIg@PQ&4VHZkDX~(Wu<`eO?EERM{ zJ2HGiPoEyM^AMMC#&pp-1AxVR8d@~SxeA_|d@OggHBoK?GQq&ofC2_RG`l@GMsH$k zH`TW^Yr}_6xPRT82D^K!4~Y*w`e>VrTCH50qvPZ?MpGZpSnfItY44qBpioIb>W?x7_B4%Ku20e zmhszZjkGjce--cEy`wtr5-{n{t?&VF!qWeSTV>2``*2`vizi%NZzp?Vn(Ak;b=tIm zus?XVip@GWnFHo+8Z_u{5c+#q_x5M??B``%gs<8m)dtwn^Oh;>xG`YiS#6?YaM+R! 
z*m9z?>V`hZoFL$@m>En2R(^-}Y@@<^0}dea*IyTbgmd2D$Ez0>>;4O9@{gH;ck7;W z<)SfS1-RuljF@*-a9!Sgbt9pRgG=L~roLSIhxEoG{J3F)yYm8iw6$?d{sqpS#~q+- zDuWN2yAQzb@O4U@3qj}I(b0ld|KXyvmj$KGnpKpi$5;m$`b8*ee!lrm;tsTTF5tFI zFgPFN$5(7$95Q@NZ*veZ!0Xsqf4-5?uaeO5-g7K`Im1T;)CjFV#UJ)8DGT5C|87Ri zgLL|7E*wYLu<-WTNZ!ELGcwb5uDM`~JHkiI3I~)$I-3PCa^JQjoKNA0Lyta%D~C?F zc7O|D`D&$I=gvR)msPTyJKL_*N>*C{Qz*BrRtVw#RsAeP{T|L2xCbo!7Y>D&zruw> z;RPJ{R5#%BBUspX)jTzvx!(jZ$=}y{S>%J^+%Hc%$9-z?f>+7ir}qRyhKEgmrbX!2 zXTB4#Ev8T6^dYutG(BRG@k=~wM5i<+)NYC_ariieLsa>S{qsw4J0Tf`{|HcbE_ma| z2_y*nr7bBT7VriuTcsyPcwD^$N8arm{ndM&do?N`MP^d&OU^?0LW7DI64U(Q%%@VrZewyc+Btq*gshB zi{Q<_xy>$P%gf4$Hp(IjA6Kp#`dLH|tDs3DaXcpBt;Yb2NZ$Sf&Ur;v?Mg8W7|y?@ z_n;Z5;RG8w$jI=AOn<;0uTnN0NFd7%?)?|_>Z$GN8~(JnPv~fC*t5rC%8@{Tzy^1n zc?bo?uoWf?a7XS8H`$8-xndV`N%DSslx{L!?5~GI0UfPkBg#PB__a70(OcWjo;}-w z?#>997~0OflHML`6nE>;azzfG6dUsM6}gyaWXO{cFqY!+dTk(*4e*uOOw0qJcw*q> z+s_#P@y8!D_=(6MZ1Ni+O(4%5CN*U6O&_>@@4}GlT#x=|ZG!M)OAg0-4Er20;~T(z zZ%W~}!U&0L@~*C~A}=a$e{5=^X-{>TJ5jx$THJY*tF5;ZkLP$5^U>+lWn%cD^}RYf z%-?Zxa^lPc02W-D`^ov~(8p^w;&kSKC4CRUSD#?js#VuYI>v>7_Mk60okfF2ka8r% zmrIQ2DHyHvxaS<#li(a? 
z@v$oY+M%2)y)m*YfViZI2LmQ-O-1M!Uq>OlpNL%6HN9Ydfh%NTT2^gJua}>niBs*n zcdEf+6(-j8pBh)KUQLs*L}H%-$%I*tIJJjR=!P%I=LDH(oa;!#B2bRyKg3)v`yR1Z@BG38N*I@Cv{tC#I@y zYV7^%X=9_tY6ZOn6a|e>=JX^6wZGValaxuFmd*Q$@dY!5rm1b#;puhi9daU3l!c0H zF7z0(P3CG1YqX9RFb|2(Jta45?LC7>$p^7wr6=V=sdDx1zNoM%+`J%EZS62(<+%bIGX3-<|DmO@f@B20j6F-fknj(DfrAl%RyFCe> zDR6dC@~ka>8eW<@`+PRruiq{?Gc#dcUPhWot4+QAnv4#=-8MSfhBj}^a^KnsKM-r* z;XCK!O91XZEpl})&8}+Z?F}8?D|e?|44`D#%bjr?EIjm343|d#Gg= zb)Q@P_L%e0==lBor0LTn#zcm%0fJn|n(WqYEQ*-C&ZIF1K+gY+=$h~`B zAqF|N?4Sonb5KI*?(Tg4T;cFxK9if)4Rf|{=S#k@EGPJ0#rWs?pb?}f-&^tE!S64k2IvU0xJ^78T$#+p$n{AWE3?=&aN zR@e!5pR>0g-n$O~Nu*fnw##3xX;u29cfF}+RrFn-td7Vuh8N6F6ao@vIR6;^SCCj$H zK2?qFy(w+_q+kQ@NZtXFOOWJG(j7KUAf)t$2?964@RR+4UDnVLMes$s{{--p5OqrI zdaeLN)t%DY{YA8^M1Aj)RXmEFHvmd#nRdb`1lLGjPXY*&buL^nlo=~< z4VX?&y}ysDX>Y2~HSI*M0mR8`Ro-;R!O~ut`>FqOlxopg1rf(_hRG0YKv- zTPq+m_q=lDO3pj3GINTw`?Z?l#CQlj0uV7Rj4>xPnHZ&RF!DHF#jubdKvW@4f0nRM zo*eSpC#)Fhz80I|kB_##7PRoZo_BCn^)zJ%WH1vSiKD@t2{sAaC3!mX0BSTNVL*7% zB2{)-RaGdc^=eS{^V%x@T(M#fqTluO^iVQbR&uckA?>@YE}bKlEUe-k5LYM_1q03T zmNdH<2e>LQGk~j73s{JQ^^rI-PYKjcg1Eu~SDpfd*7?bXf^j9~ZlZ0A3V0jPD{rfn z_b7?8)rl9uD<&Z}Zjf{eQ<2zUmT3=7_J=7{XIyfYd9<=*r%_34}EuR+>L| zD}a7dD>7+|n>tnN-9hi8-z~xVo)oRi#CXP87+h|{uMo+KHPh!CKR#A^-8v+lQW$WBVJ$R0s5ABJgO8Jp3JiD>0 zFky({aKdkJMfnlE^0?{42h#~FAlyWO&Urn4UW;B&cv8idQjyO$iV->H*I%bUSWuh! 
z?(JL73vSCopm{{^bVbOSzr`eYQ=iWG!!xISLDNg^M-&c1Z+=ZSvYp4-d%rCrqY4ij zcz0PXb6M#Kc`Qodz5ICs^b+wNx5*OPpecbGDGP;EgjCVrqJO>D@LR40XpFsPNNJCM_SI&YCdoXD99$eBb=V6s-}eK61tu&-#1AI1n+NI#zub65MT+fdK7&HWqN z^VN@X8qx_jx86G--0p+G7U6dO049C3j-3;a1R`HVvfJD_8>ZdB)l08{Tvl(CQegFc z4Tj+nbY)WAi$*?p4hN@Cj}&9;cAq`_^FE*d3=t-RKaS{0w$LkZ8+Upgbu z2?4`~BtI8@u8pnA@He(#BN8eX>WB)OQUU?U@ANAKWH!p#HisOz z#1L_{b@g_oONOOY+6whSpyE`UKaqr`jsu&bE!9?Ps8!silXAYduTM`q<^fK0>^zHAYI=ik?#&-tcqC zkxLx54@dM|P}#XNOA);SoPQ?ioRCgPX794<5BpD&<%O~rQA$AIK!v~rp9QDk*C0hD z9Nk_9-XxP_pEJw=3Go7_bFVpuZr_UTcVGc#Oxpbo%t^ldeh-vVrsJf1u7x=N53 z`yr;P_@rw)?zx0v?rh*MyTEwBpS6we&*5t6&*R#pV$xwWeSYejpaJ52A&%6cKGVAcH)TfO{ zF>C<3zYgT=Y3~+Pu>2DMF+Jy01$#9K$~)ShpFD$|3w0^gqoAQ^4N#SlzF`0Ab`X=G z@FH-6x`$(6In7Dpn>;xTiExxnz3#-A5ZTZB!2p~=g|9ieJa~3ZEbz#og| zh#Od4(%gb6Y#_6cY78y8a^!U$9v<@8fO1C9-w-r(|5JQhKPI{DRKdKYN!$kG8VAgS z$wDfYwZ$&y2c-H(Eme9_G>OKxP_vKwl&q!S5Z|zr^0vYEJVykY&-m%eHZ*rfZ{qf( z(Y1GWzr$~VM!fn^B_ow~ddZRjb5WJvji6*Z$W!C9jP<7=Mt7;=kwKEV*d&>u_uD*4 zw?moe8G2$ouD9*lOt88iKYl#i#qHE7Xu4k~+Ql54F_8Z#WG_@SOj*49wBAI%q!@b; zUJyD^Kc4!0VOu{ z&vn<%F%)l>8$b_tIK>~8^kYZo>c^f}P+#y(-ZRA-+To**evzC^G%&so?mv?S*(Cj) z*ST|*`01Te@VH-2IUfvOnsF3BV*i&$m9H3VV2HI--ZPN$5iyBm)&MPiAWc!Im!B*c2^w8;&L07dpAXdGs8vjjZg-wy z3JDoJb)Qo{S;n$j4WrbaE;zNfB| z8Mojgfx9E7JaKd`2Dn1_sD}>@;cfP$H$1${KCgZy(L^;|0Fo}5)85@IrSUnvhg6WnB6(&NHuY%vWTeUYj1?>$3fcKJ=+2ldOguP>RxA?1d4Gh4h6jI>838Jr!BxtnI+=p=Ce*?qSXlJ6PJ~GKH!Tc>(9FJG4sMGMm8aQY-^H=F`o| zotJPfUwk}TEg%lEO89QQpsW&MLB>uhD@v`UL1&K|WpR>IelJEB8b(zBI;M-arL1I_ za#7#*U>FA-E`0#1_6@unKYq+2h#DJ(E@sZGxL(&*S8(O;y=tN%@PL$DSxoFS#R!A*xU`Yo#=e@*9J}zLEet_bKZk} z$AM;tp=3Kf9u8H5@P(Fht_4x0Lt#UJlI;e;=IQC_Cruh^YQk~nDJmum--Ykbp^_;G z6={aZ6=hZH#R8HBk}QnS1L0#MO;2ItsD@T14^2Xvy?;Iq_bRBfZj0U=8at5FB`B_q zF|SxhBkO11R_nbp>r7^^Tn;2+7z)`-*}>t=m3Mz78weda-ttaKY&Ls!Q6yS28a;z) z@M4D#9a7Ufq)<-x+-J|SwP<=3T-cw*xsc^_z&zm&CqHI-Ve6d>;R20+hLn-wfV|2N zm%gBB+Q2MHJDHCUUTdQReIRiZbShD7VD*Ci!)5k;2%z*C97QzW1VbsVCszPs01y*O 
zvO?(ZM3TGqPFOiyjhC|*Uz(W)Rs&yLVrK?F<=mp}d~}-xu9PQ@LLm%;Bmp*#sa8V3gZgW~z_q#`&<2_w4BP;{=$ zLh-wLa8n7f;t=*g!O@;}T_gmg0pCaTb|G2Um3Or1F~~tVnjZ_)nRo?GNN99LSv0bK z#`1yuM>t0yPr!~!47?1wZ!UEEwa!$OWiN1gpkT-LP*M4T!Px!~d**(qSQ*YwC&E&T zUwj#fq&NO}M>$P+Q{)Z$?KR!z{4O23NPO()oI}PDTMR7%q$P3=)G7#`QN)r~UVT>= zmWVFCNjP?4Ue}SPoB|6SBRGo?Ob#bQ*mRyI{$91Y)7?}Wf z*`u0-5ff9+E9khU3Xtl9t*A{`y>;bfl=g&_L$9FyA$}0UaB$!HwM?aE-cYh&w9118 zAc8-4g^m1Z$Djsfv9HWBk63df8An+d-F$I6JP8s?Ldk`jWm&cf=(f@0C? zGrLX)=rmYL*lO`s#`Nk;)Thi1nHz3?-ezQ|YRN_rix3MmpEZ8WJSaHW5K<~^lxS)q zd54&K;~`~H6-fj&r{P8+{|TrblLZl2i6ZL4IuBdK6IPIz$Fai58W9}r^G=JdFTCP#lyqYo(8ebu6EdiYG;CduhwnZUOePT z9bv+o1*nK#al+VBk%e#skxxB$cH8vO*@su-qiSeddR)5yi|}F2!r9;WBr`J~$5jAH ze4#+XlMu-}41*yQ6}>u&p+E;{nC zk!NMn2AYmTPxh$3NaK}V$E%)C9ms^Tq?Bvvl>s+=JSyNe>wx$Z{pIGWiTj`K-67Wt z5fj41{a2?BRr~F8MFp9uT5WV}tR%_@XJ-bABQFDaWTTL~20d!Gbs9yb!h|j@`By*} zzjmgv{_^F^DS30pz>ELK+L=K0+;?mLFY{cHOc_$7$QT(*LK!P%D)U?#6(yAWE)t?h zWC}$C;YI@@63LLEgi4W=$SgzT{p{T5ocBHNTJKtCt@mE*tmi!UL;e51-}k%swXc2c zYtQ%>W5&&s`o`+XmqBVNPS<-kY1(o4-q8bN)y9;1QQUu~-|Bj$bx^3++OgF7J?St7z^vbic zK9LP$u)m!c*jmS^=7wyrF^p|k2K^>(r|`M=X*|Dp{rYvld*89PC+Ca^E+xBhWjPDv z@ADm4U9stHasH~O+)yu1PeVPsh%0}pM5VZSu%S-36Z+pAiM1F7azV~B#l#U@)Bogf zEg!Te!(}Xg(4QRCjc>tdgT#_^Wy6Zl&aIqU+Wk(>-}_>i)Ge*I7?4ceP}{KX zzlRm$vKvR7MX$|v%F1BgADwye=KJGf1_;wCj{mGc!<8t=gd@1 zeC*6MoLt%1`YJeo?>~iTvyQ$|SNwUBL+ks7&!s}2`1r)>JH>qLy;;2{JaG3=R=D&b zIP`6j*1c+g(2Oh^z0N@-Ja&-_a+d2}>SOU>ki#{-KTsA_X+!!bY9;CcQ5EK~@kly6 zccy>!tQ*_n&plH!d_C)$u}5D2hoerb)~i;p<%GFe8;T#5jvi{D?mZJmw8ye0uMO7Fg8*`IImHsjy-TeO5^ zD{50J=N%ABG`7wS)`}SW1}KK#<6U$v)+c5SlN|~0gl-{0*JJP|0!=KYnKkD^`x<%iQw&kk5q6d$3DA$naA))VI6K zbfUPGcSn4hb5M~p8nBVFe;`f}+{}e00#t*kzoZ!ZcM*x6tyjM$Bbfu~{!axnNe1K| zb6G#eo|Qt~t?D{)m|2uTx9!lP`WQ=**VFa9SO@}NdR6{lc;zTxD;7*TaZ8KnCblhz zwd)0%qT4w^p*D3xUBi~Qou*mBnGisZlGR8l-3Ja#yfvRCoBLX~Q52bgfZX}e<{?rj zN=SY*fN4Nkt;bNrls731;fS|cES$)L`E_-5q0-2tGJpW7mz}(P@zf1<4EHV#mkAE7 zSC9gh;P`R*PJ9h<&Ozkm;DdkIqE!`VbF;zSx*OG^b)ZBox*VH*X%-6rEf~YndNnWi 
z1KjQ;6zun)qT*=?ih($QIX2K@otrH)7~;W0gm34RbnvR9kcDKX&~HzH_fe^-sf`S# z_{OgI`OQ^EFoeu|pMPfc z-%0ntX)Lf3+6_=cS)-b`n-z!Do@BqkeH$LL?Mv(l!7DbSl8pU@7-S8I46zG2#eNw< zrj#x5&6>IdfY=;RPm%JV67)_zPGLqxR(eC~j4J<*rO_90H^k_2E99nS=-k>RSEi3t zI5|U%iZuMspa&EhE@PrXfEd|1SeD<(^S!fwSRCT7I8nm1Hn3PBL&;jgILH#7Q(qbD zf79&-e|N!xNO8~kBd9ioGG)j=7uF#fG5FM_Z;YqQ(2kUxT%4{kCu0)XFn)Yi$_xZ$ zGH1xU&X_yx33a3PM(q)n6|8xjvrn<+s~5EI2%W@Hzm}X`kM8I=6|Ej z0Zu7jrZ8$EJ_RN7CMTVxJb7H5bS7uQY}^TC)?)cEq(O|vQK4DRnzdi-^Te$VUfo8e zuZ2?$&4w;@7<>-XFFUQ1vY#;m)e<%ngl*4cW>ioGXIa*M0J^yGp^FB_9%oJmStm|l zs_(j-U39xGh&ZEn6Y(ryPx+}6*1b`h$;Nr%9Z5r<@t(AihbR-vVg@3H{TQh&!E>As z7OcfufY*4!-;;?0reWko|#ld?ncfgbU7Ohr!s+(eaXj1{JyHx(|U^8 zmDMxnk1Pmi>HQnu&l!v?bBs_4eWv`9G33F^Uv3t~Jly;%S} z52j3Zipm%kb8$n4z@PBj6P*fV_GsVmH8W)C1@EV3+-m?G9?>&G??nz@^FuaZNZNS) zSG}>PXsuZl<_YOmXf%}r0%HnS*jKdRJU!^hphhiRlHdWJEcW+XzrI|yP8?5sT3&wA z*KLoX>+?~GgI2uRg~|W2OUAwaxUe*8-aPEjpaZqA^Y34p<^;sHedhU*+4r?IvY1Y( z5)z4QW9PSObS!VS{(kqn`b35|&&-A^?ZK>{EMoE+q14QUL6%pH!aoo%47bi&evL`Fn( zl6gFh!S36L%E@k6$XMIf+=Ci_QyCbeG79tlDKi%s%9cTp*VNvTQBmf)b|%qU3EGMJ z$uC0YO}*vq0LmK^_d5fh2 z#LFp!e*>2yV=BKj+VN`@Nml^8@6+`Phpp1=Y^6El%1+-_RBtbC+$H4f=zg;g6_eZ{I3l=Oe z)BQQ*v{u5Uk!e@1xaZhRRrCJMUW#m1p?DBnOY1f?+{WBU<)vD`OZ_bg@GPwzqgLR0KT9%-)K>+mjmZ-qX;XRe=HMssN@+sOhQ z=U4S{a8Olx`;u131H5XaL&@qROL>*n-<+>7c0+$i7QALdUk}6lPm=DYF7+BccFV4u zz4S!b7b9jXQ!ukU$H?1EPwQ0w$5SF-WyiGG;*>oNd1g&);q}C_)B|qvW!-iKR?lu- zmPaVIwrhSeVil)DVY|%7_Q@1|Lm|lAabl?YzAJ>AH`VG{N=mJjlL1l?L*p;~{76cg z5sB^w+Vrtc*irVwvK9<_@hDE~$H^AkUq>e_OZ@$r5%&+jm~P_G3f0DxY4^&Djs|UQ zAGD7SLeZUi{PDzQ)}Nn-CbXGalC0j&B8PJ-b3~}A=e?7~!LXB+iw0V3r!g$`qW~1y zTR;Yu4&fipSLPd~vF7<Y$#Yvnz)T^a ziF-eVZ&dbU8)FYk2pIB83=~p)v$`(wki<5KvC!Nk*W%38?;O*5Q!p6HCRpxyxGbS* z-tp~=CF=qkHfuJ8D+{0sOj-FhJu2##hP^{Plt|v7T1qyNTu$cf_C571ZYuje<)(wM z-ju2W_TstUWx||tUyd`(D7>S7P}^@E;dI3<13E4JX&#MmE=dxUelg@yld>?!bxGa+fjTf7!KWZ>uEJ3TW_|GJAbH}gC7zJ z$||kb$htF!2DvOf&$x_+i}D(7Ai9(huVu(KLP&97%l4z)jMi9s;M8UP#vpIaIT^&2 zg}F8wsutP=K8TtQRgTkq!O=zsWQz9U?`rSxg6ITN#X*@0 zrw2gA+wua$Wk}-nRSwtgI}aeKV(_JSVa 
zZ|;qjiT|*WIE+&8SBc>9Jetksw4C$rF2(`u@)n0F;#E6zhzE_sP+X@hnD~I@*gy>i zaHZ(DElo$^^iJIUhLg3q)y95{)|Ed$vd1Z;W)b!7#rrK6Ejpt0N_78Zf==L34x-h& zsXrN)r;L~Or%lT~MMF^z$UL+=y=U24_NT8^d1(kegBdTr6P`TqR*{{zD?G}4<5MbU znERhj!jpEcPYSI-j$^1+m1DqIxA#RO%>BQKP! z>6jZ<|7o;5VsII}yx52%k}4x_-bW!}4WCYbW+Hy790X`~pN%)u1b0JCZKu^nufOr3 z`nt}{F^ALJ7F=IdfuTea@*;MmiRHvAdJKtKPiOt{NjXw-GwddBjQuRKxmKu?741{8 zV}Enn=+5kz+R@!*4-}#6HSkF=xHu)%lqD4xk7*YyitZua4-05XW}ldCG-3bH=^VQp zu`UKS5l_mA&9uM?#rW9waDBswv@9Rto=s?#TF{bj#{194)LwSjp)DErqp7*$Q1s#5csaS;w>XtR5B;($-Jp? z-IrYv5PVUxzn(cV|dzV(f1xn+v$fec`=Tu5%H-P3jLLs>FAZT z?dxGA5yPu?|nI49|TJlbE9TN+VAY<_xJ<$Fby&(Jh zku~j0fiF}c=Lc)0K3k@@nGFyLSF2{v%J{!~$Yy3W#&GPjU@DXee?3NJs#gj-{3WBy z)*XhG`Go}>xf3CtwZ_oNj(EE2bx&yk%EqbBaAUf?dSz87E}@qww~tFC*{*V9UDzEl ziDrGoDz+l{Y`6Bj=mxsiAKKRVTtz~jBup+H>$C-;i}yW-Dc#KdORpVR{qctC^?1AF z`)g0UMgC)LlVO~U*xA?iymkjy0% zHx-#njqPn8@l&)tbbnrnj{y;-;sh4qQ<%rs`A|OYLH{q*^WcnUPq-qwo7AHPgUT8N zTvj%&i80M|f&u9mo^H>Dopzl=~Xbj2>>MVkXF+oo>hU&ByjmooUL8?Ugjk zIJkMU6Myw=+p=Y(v|Z&JELiZO?$;8uFap_NigPHwXFXpNvM!cq-Ctx)%zx;9JO?I| z1&(5+d({=)N21Y1KI`7LT`bzHUmEvFQFKIA?Ro@*|-zi zsD(}=5(gO&7tzYjV|FSmpwx@J&Yv?k^zimfZKzC%)0XTsro@r4?TuxAG=}~U* zA1k>dlW9J+T>9hj&T~C`5k0<+su#P5hckhNYeyN>(lW=x{qQXBinERN-+8erLfqWM zNDdzdqGA-g!^?oY@v$e@=>~7#K7aP}!Uqo~qrBD+YM@g9AQoeTHH@9J=3=|`XXks6 zIx6))x@tcs(N#plB1Q+hC@$I_Pl)wg;AAPI`1y*N0l~G1}G#O$RM!Qv?xVR z+uIC-ULhfVel&lp80LhyUv6Qa@Q*rxNPr-@(MGk4BEmi4Wk&MpBxiWCs?EmDw+0rj zDdkf?1btsKua}gMrPx{M0$Csrwp<|(Vy|shmh-Bx`UdTC3WmJ9a2R$={l+JDDNey* zfc@aXtJkc7JBax|qyRpRp@Z&Hp4+Jov1mm>QF&cl)EalGwYT)=rIFg*DfRK2$e|q7 z^VmACx>;S{*fdhRy&oua37$3K%&=5bKFeTSlv6v7;h2tVtfiMs)Sl{Zsie@q+J6E{ zF0gwjjDv)&Rm&VW;E3Nm_W$U%4n~*_UL1 zf{;1nc9uN zfl(A8AwT4lTdn+SkdMb`Etf+?#$@+!iWqz|Gt&xZ5*hRo>s2gAn0ixESl|DD_3fkg z@?GUy$uVV7Qn@r9k{Aw?YzYTpCVH;8v|0*lWe$uYb`YW-gh5D9u0!CQyIO&X>ml4Q zmypmab2)xTa%Xu@052R5nN~}op5XI)t^5?EoabI0+4GrdEE+yAzFA|1g}IC-0_DCC zE?%>pk+pweTt}JliHkos77!Ve?UrP0vk7S@&{`NIKIC=jbOd8H!rwfPSKuo#7GP?F zpc_M%ePM^MoKCTgSi5#DT0hr$=mk#71aeM3PgLBdct4bFp^x4?rA!mx_U6qS&9C8z 
zb1sQu=U1uGZ%bNTv3-{*QoR6mTIN#I$Uc!@)XwrX+_9G|7gYbbt3j zB}Jx4{`#M`JZwj*rY^DKea4Nfwf6{P7a>|e?Vff?+pZ0?kEF0(dTUo7GXA9D-xnlb z>wu6)s7W|z(G2haG6#)N3N=O6?`!{-wrzruent6B#vMnAUz_)WdKl~A+<=3Gm~4v8 zDD#*Zg@u=NP3kFj%%O*${&wNA@FQ?v&82cId7{n>bkWG$_>{Z=yBGelfqkSXK-&#; z%0>drjqF}SX4f&ye_MbVWn#WtP+-u-HBo_{f!zlumDPkK1P@t zAU7V&VDc8XRkEkaszC8UhP5jL?>;2#ye!|Q*bv7@5phY)j6KjBOsuW;w7j>b<{`WJ zT{Jb>qS{Z47fE|=qdk2`BJHs_ybLA`EIpM~j>+L&{zxMx1~rS`^*^Lwy!q53y8VJA z;WZIc&0eX)Ah)aL_iBaqLt07?$2yt2{z`aRPAMk&)^C2%z>OBU2`*M-n|FUTSocv% z4H=n1^L=h`%haVxwQCzJ%xpRAThZUL{28UR7`y89?tM4sV6A2kXy^-FMlh`@XCXO+ zH)5TeNwL_E%Fc95tK3C!Y-<9`~K%GXHp)I4H>)d`x$VRc?d}wXuV(a z9eboK2;@&Lcr8y3%cvXsgjVQhIwz0!bS`mpubrbo*TEdY!|1IYjtlFgn{uRE*RJ;e zQsm;eB;J((DVsLB7Eb6F277&s1S`$dXjIg{Mz%7H9~a(tVW5CyAufdjGRU<%%-6=S z@aXs|Tr^fJiZ=EyaJ$)4hL0-x&0ZH?22=`ZNkb15?EnM3`W+vD=0;Dp>@J-ac!_eKRX-l9;p=8YVc3)1OR6VHeo{g5~iqZyrzP5ar4p zqd5ioO%|v;I;j2h7j_%53C6w@SNKiG^jGv*KlkpN(Pt$}iA@cITG-peBu7bndiOa&ZvR@YRhi(A^-CfS?rad?x0V{l{3$|MvZR_hG{d z)@r{@ma%*KN*sf#k0qs_zJ3kUqJ3d#FfpQU^{;n6uPF<79;kFh+NGwU;g~gB;ak-j zesfK-srfnJwmcQN&ZtjLJFkQk6MhC_{2t7umbq}dvd)Ea-XT| zR?hbn*U!d}avVZSp4ZoX-b$ci+ zQeWG=GfTH?b?@EVf{JA$@=m+MEQ1%gM}xHN&X9N64Xu z_vHJ|KR>g*#dg8?kKBIAeE`7TJlhdNCphu?W9l1TgLOJ47CfkQvDN@fOxdk}my4B) z>ZX%e&#jkcux>!B=+-sLbNETr63F|;A8u#x@c&h2^Ef@k*0976PtY%9Q{J6{^V17~ zUFv7X^Ta|jJ^cF=-GK)>LZA?zPxTA8{kU$70L+r+*sWIUY){VIjCyo(0jtJJ+UEVM zo~K&6^ros->|u~}D~BL0Ev+T|p^_)fZ>uPxJ1o0B)Yfrv^4e@IVOXE8`8t#SojsRB z%$E$XoEG(Db{}L&50T@X`J=cN;0KbCj^6MZ>YjWpp}5B00ajAB)dL@Ls0m0Ek&Irb zdlmL>A|F#bXeMmE^bA|Cm(N<4=l+1fJ&E+d2FU{Fqy{@YX6qDt`|?qfA|VZz!m3FD zDuNR5z~R6{b#%L86ahiTuTg7TNcqex$ZWCf7wP*Z?>4o4s~R_N-plQSsznQ#3gJ~h z5K{+pp@5`pmI)1E0of%XX#= zy=v>sN0+RJsNCj-2NiJ&Zq()6x{bxC0iAtTr!^gDXfQgWuhhj7XxU#OUri=OmZD=$F|3)wAdR?PGerjQj*>t{OYMjj0d=NCJ!qe?`A;hoW9PPBMS85+NILzP@u=2a z!}ck;%*RaPF1!@AsB8i5Td3aDWeiC52Y^f_KtMdPmWM}Eu&<1{xxHJrZnATRzM_S( zvG1Z=yDZPvs$Ie!Kox!qv2;Qlx%vG zScc}!4r0O8!%X4ZNVIrg_`f*xV&dbEwZlve)gp_8O`rcoyD&w1Iyz;Cish})KNkyq z3VY66?nq{he)_I!*>1D`_zk 
zUKab@Ua__KQ@gIRhC4RXBE{y#(mi90QmmCtaYHV`_Gk|U6mAe4m85$7-*TpNKc_862+Z( z^63XPiN5e3MuotMwbkDj3wgV+gEt)(5w6Qyr^VTyZ62j{8(CkWzbW31Lh@qQF3%0q z3;n_RI57&urSjvBv-M^eP2Wq6H0vqsGK7fZQI6AHJNrjSctOZ#HaagMmdT6(r-T8i zGz1u^Yg)*SbEI<;5{V*A*5*nF9h8*=J?*{xYVXaYDRfX$(db9e4H>mRZ&Bb5MkOjs z_OB~{Nzj{0VFOHG^1JEe4)S?`W@5oWm&dR(!yWu1&04wiN0#bRx&xTA0cs~?;4bd~ zhf7p26juu_jafsG_)L3mN74l&MMODl%n*^H|N4$IUnI*Hm!N168`qfIZ=o5btqnwx z3`|nfCDuO;y9YxO5}>{os~}qEYkf z*gZx*YlAD8Ib_>})(v-*VEIxIV;xzCI`_>_n|}Ws6VlkmHQ^5a}JvK@vqp)@LpxV zqI$==ifMVA2#=dR6LpZd(sKNfzqVIHRzHKx7lfG@JE}6o$>i%n>|l8;nQ_s&9}3B9MBa z>Q=Xy&uvbUcBbHQ9g3(4K2FSqVlvU&oX&BwH?-HKSOpq#9a2n+9gA`PY2T(*k&7fA ze$7ExV;%Ityj(uJPxbesSl?%9=IyurMC+C0*7081`aF?j3BcIDKOW4|cMHDghqoVA z(p^Lwn5vb2fXoM`+?&}77X5zc)LGIPsVF#~3+@RzMI&z*lok(B$2Gb76BCiM#d=6q zn_^`lH4lxs6R~VFDrKZO_4ujH%EvV67*nsFl>24A(4ihHjAXV#j6dnO9f1OSMWR{m z7HOQ0cr;tQp45?p0Ew4yqDneAgdgEiAZA7+QZqOvVs~MSg%yxDFQr-eyD)0AC6I5D z#)<9L>A^~&rzssKU}O)B>+Uwfe)`EPtz%CE>dZopUQ*Y_E4_^K4B$#emc@j6=p5B{O2PC%a^DE!QHfp1A=}-fGkjG(hV4=OA zVIe}(*btdGq!kOw8MR5h*F)w3$7)b!eSO8)+%_$-0)h873%%M{p%YJ0DQiB^XGBZi z^;^dxW6q2rY05QUFJzaw1O{LJ)^Xp6$GX7-QvLTis;O83H((PT3%8_!YgB6LZYqqA_urAVQkqy&lqO^O8v z3|Y#Y)3PB{%!8Sv;)sBJ^>diPuPXl3>&TF=DVar@!>ln#s-=j2DfAmu3*nK)PC}vE zBa_ye>AqqPlyb|~t)rO-Vj?G)HcHZ65zBF%%tF8%HlmM0Rhv5My7WY(0{Qiw$n@@* za;>8vzq%6~vvp+ijBtrh4lsjd!iN%G?oUUb>MNcPD^!HlGxK@$XoZ1~Z`4lbG+i9? z*!TBuC44{=(ri;hmordZucW`!WTeDDdRHuu=+wy=t~{4|JZHPT6|FWz6}&xt z=Bsb`l^V&dZ(|vY{8j(eCH5$JA3lGKqEQ>>4K{7DUwwtnVGeV7{Q0;46khr!!>7EN zcp}?UfRJd_=^1kEh-vzAAVr7`uexK;9&t}a93$r}et1j8$ulIfS7Z$?qDcHi7AJ>k zWxnf%HG5=Q3K;PudHGR9hEW?jtTLnGl#oVWR$5(9S$F-%)UulfIMp&|3wZ3^Z`hNL z3lSp(8D+Eui$-2qjiHlg@^x8U8nUY*(Egdp?PXAIfB>IgNq^{yXF8wH(AK zt@GuBuC)}I&UkX8ynH|&*EW|#KZ`urIOx>AT}vo*0INlBlwlbAI6Ul8)gwm6r}=6H zbrYT0_$GaJfuN$EhD7Ko7!t2;n-;w-YQeL}+x#_foR=1Wi$})m;cL6CcJck2*}T+? 
zZ8rb{ldp3l%|jq#H8k2<=o4>`-}N@0a9KPQ?{haL@;ab*sF|9YbRZrf%?#$nTs)Q- zCqLe4a)BiuOiCuav7=$q1q5a7i*R4;lsk`H|0pcv{39Qp=BlD~_0PPHDa=8YORo(!_OIh4Y4#VWQT(;|T7Tw2)NLPL0Vt7oGgKwRkMK9Y_F`6(o_o%4lz zY!M4$WJ)vUxt=n8OEcXI6Y45bJ$TL$*UV^3i(W?br}T=WoFwY;NotxWxIbI@t0<|V zZtWTK{Oe%G$HS~YcptB2(~|~|h^_c?u_9QJjKRq2?wtjFoBXZ)!lFVNsKT-V&JAcS z$@X`7RoGcqO<74uWERDY(j>eaNTN>gq-!0Fj4Il_OfFI6yzEhCXne5!_38 z%snM={iv+4mGWo#4ehWp>2`kVTC&h~L{`EZBSoqKfHhi0sB0 zL5Ke+>w@XKjpYvGbU!i(i4FuAeb+NDupU*Iur7fjdjjI#18`}(&hDNqZZJ8NhKqgH zBGddgM$#wN-`phWBtzvbmzM9#+cEriN@fHr?K3Nfl>FT7TdKcq5Ag(0q`aVPA%xTM#N@ei{Y zK+}jQ@KDV5-=1Q8g}wq$_ZDC}vkt@Lc3gq3qYRxVlQd$?wv zQ9>(ic(v(=9y5(D{6Cza=$vUB0{OSr;O2^BF$9npObUr7#nE#? zqcsD3OONsw?Ktf_Lu5Ar{j=0aTg}leFT88%z>DL&HKOA|H?m`6oPKiT)shTG zHtTII8J-R6$rV+7`(cvShj7CN9+Wd}FJ4qgypCqsc$T@GM$M!Huf zzwKk>{B$Vw7|Z}kG+me%8iDm`7D(jd0<22D?!z9NO0^-=%$w(^354zm_95aW_8KS| zht*vsRHMCITRFgfQmM+`%Dlysq!{8u=uWMz!Z34z`}VcJzDz|F>VUGE$wnVG_;Au? z)4-qMrWS|m=yoH$aJpC#fUYk=gsrGVCIOW1Bd3)S5&9ESR3oflj$6jxC7Cev=_fUw*Of08-hb&K z@_Kh@`Rt03*gDc!lWvyu?D!^8g5dbmRxDak8{KTjOcZ;c5pL)h7}!WEm$o;&$ZYp~ z5YichuDm}O|F-L*A4;Ey6U4tD5KgJl(+|O_ktIgr{x6wAgd325wxUaC4FChmmy*5KKu?#1Z zuw#6FS4*E?_BuVUq}CQ86zj{J59DaHXfbkFeT6z!Q>n+%^yXK4(hX5x=6*mA!M>+aDKYBk^&+_MmOjYZH#DD0jvG##O5xt&lD#~5x(yaW7^gE0y3_qx$*u_xvME+b zN|8J?8yioB@7N`TbNOmO5aCiNxkDl;0_|jCSJc7jtR<6bOLkVo1<7zV5b8`})~?X7 zqKQ;@pJ|rY%o_t?g$5&#afK#vO+?OYr=e-lSv)*=_D8vtVg-XY%UzMZioAM&s6a@T za%-q^9`#y~Lc;!nv&wm0_lU8N?hM=S-MBGR>Ej=ER1{qkS2joys>jiAq7#y@gRBcg zStsH*c${vGN8b?zGC5AoA$-S*tnqoT1@f`d&&FVclA1_R_8GdGNGAh8SRn@I4=-;1 zE|>Fp&u-*0DGFtE7>_fT?;P9LJz^J+Po58!siD?}U1E`Th_qA4Bq>-#^aH_cZD9d9 zDitaElvp?x{h$s^JV|qv0-92$Ka(X@qq0LpK~qjN`{(z7ukYVm01Mfvw{Z5`$NeY3 z^ekmz=E93}+9`^L3R?nKP}BB_ntg1QlO9%g`qE$LSWVXMqok&$qGB22y+Zc9!5>XV zT*q+EK4lCzI>a-qrCVI1FvOJokkpRL;73nOTUcHQEvtp$i-Nm~rF-tO-;iNT0ebz< zJ!?hW)-s1t@A2{6(bC_oGzWjp{IMM6r_5ZV&?1Do&Dd-_9q^pB_KP>A(ODc%WFd;o zB1l_8bpF&@;UYDpDlpf*uw?$z3tsU}+PMUfO_vhyP#;?QU-S8NdmzcQ<)zsc$Dx7k 
zr1uS)KBpPi-CXWr#p%Qe52Og;gvbJ)DhC$jNZecMUfbBf^3vKhf;FUF%WC1+d8JeD z4685eL1p{|Qt;rtEp27#35oUr4@_i9L>(ky%;f9{TzCuf52$zX5&GloF+}C|$2%_S z65@o!PF`MW&&v=s0Q#F>Yp@pLUwf3}4v5D77)kP~|2?IJrOzxqo4!oG?PxyA%@Bk_ zHKXTxp+YMSw~>8#p{yV=oHVLN6_`!^sG4}bJT^|kn1tUjEm1jh)J}x7uC;hKW7af7 zL<$1}a@)%OoegU`Lg%}f`1#KNNgbsBB}G;ripvXXxMLi&C%@k|!CSv)V*Vi?b}h4J zd6bQRBZW?FB#LYg`BITL^^egoSbJV@$qyPBhoO6&uj-!Tf7Y@bEpHUv;Kn zZ4>|}baS&m_3aEOC230#_mH$Y+VNjCU>~fB5|X{%d^Na}k#X|(2esC%%bu6{1|p5S z$JgFuccHV|zE_Oc?6=xeAfQqUAU33zS;S^IBDAy8xEGOXj;4 zzkYHyPB>*`+T>^P;=xR)yT?*Hh4GhrESZzpS$wW_2HMU)7kOTN>dT;OLaB})KYpP} z?Y)6DlD&&-y!m%#*bs|HMI5Ip?$-QER95nDq13t06E~|y@0A9Y)w9jHB95`$m`dn^ zFp6)`)MXc)X&~t=euDnTA1p^&=yQSfiJD)po-wB%h=VT*z}vN^p6+>b_LI$TKZ&Fo z2$M(+TzoZJ@zv$6;|$;M)D>AsLPV(gz$mSE6ZB?xULB~d(6QcqBw@?H6TbW?%D!2m zrsh$U_?9s?Dk(&UuPUo-!5A~>1WMDd*I*}_W~1KL_Jmv=(vz8LMt21p!9uUo{%Sd2 zmcEE^M#Q2}T~!?|B?rnrR@70iMZPL01(=gHT%YC>q0xHu=z8kC4nE1ab&H|yg%?Bp zIDc!}&+$NA8dClBof3h_)zwu!B`G@chXpvpA6zK%8Gvs{ zyWA+pmDK}`J-%YSDW^n8N4%9-yYvkG!qJGXhigWU69+=z$r}=;2uZUzKh-GN81be6 z5GPYfaIN!xUmi$@wd0!4!Q-CS%hbiTkZZB0G0k!5*YHY3Zr zMtSA6YS)Vft=L7>N<2fp-{d5VqeU++EHiVK5ab;wuOLbD*Iv}-s58PNvO^h;p_}AD z^AEG}F>3AWS_~pT2;zVZI*j6bq73xWdL1r|`N#Kj;ydLRf-tqz_RnVPPTGqQ`ZcNC zCgd5I-joD2p5zek@>m)4j0scrA46aCc z&kS!WDG`?DAbyE$qON1(6}GV*PKUH+IBkqtPpzw>6g2ndm*qB<`|~itfykmvP5}(+ zX>=)SQd)YEx(knunrRrsl^;JZaB673o(Nw~=4Q0CEoB>o=8DTd@6hLz*h&~f?kqz* zY&y1S-N#}${|WuQ4-tRGiO1#MLes+p7x?`CC4J?YYMP3OCQ{E@ihUD|6E?~oNGcYb z`uaL2?kIe+gGkm{UeZ{zPNp-bRk}2wh6wzW#skdV>B`@H!4M-{uq;vrqY7gQ$`1Fg zb!f&ZgAsHe;$bH2+^Y!4OQ8Z`Oem(*3!eTNK36%Y6soGC^6dSZ0m$o|Q@-qEp7ibe zHMN62(xH$M4O#`eei`IB2QvdExqe zdvU_QCHmU;(x7_YaMZmiw}g*HW!Jc^hTbqVD3_|a$x>|`2Rj|3uCNE+DLO%Rx%R^$#)NB*2Ez` zc{D6;DCrUsMz%e}NkH_&a`sV2ga4cJ8rU;qezQVqrKYz{!b9DTIGk>f}J+Yof z-Dd}XzX==B(Qbci>GOMQN>xx``Kn9xE;cvo)o5N_ z+w3`)F}>57H?>aslI+&wZslGz-Q2yaX$ym{rw*RdZmVX{%GCJHyiIkj>x`&-esm0d zkS1m+?>b)WXB0djwMoCC>SHA{((=8Vc|UBsVaV4(3w!MBG2QBUC1plah?s```t{4d z;5E5v!;P6^R;(C+f~_fynf69&+xMD%(YpP1p9IFo)DTgH2bx(&GG;KJ+pla=1nv|e 
z!o}rMm+dw3>0r{pEX0pA-&wRDZWE|DU8;Y2?b@|#$I8{3Q70cwFm*o5AZmV!x4L4- zlP=rMS_IB$mZ;@(ZBUDCy5Vp>oqG3H=J%M5AMZE5>)#tUHtf-}CnjNf?iEkeKy(k? z$5WLZT8);^l@74ys#VXiaPbOi*g4|FiLErciWxXImRz|bB--WUyp`}1r%*+zCUyjg z>(9E-VDjV#L-jf++brn2y9vMVDsM|!Sy^Eo(z1oB=OzC$p_?`}rg~^xu=6=Tt}G`` z-B0=C$&;xFSntxYfXpo>t6rxb?Ch5cVG~FwbtqC(Ryz@Rq_(n3_mRU-pXF*G5U4Fe z@Q}597*q_+v0mlt&)72mK^cDYs((?F85C}L_LuwH{5I5i8pN(?&29wn&yM*E7SvH! zS5HbxLiknx{pZhqVPWe0olfSexJ8^qd5gPKb867*ehpjn=I`C9?0S6e{P}<5{P_%x zR}uvN#(KM-o?qFVqor&Ty~bPp(8mSy=KY0)RvN9n`}Da?>NC*T7(I=BRmvt2EXJza z;VX*?hwi8Qj|8nwQBGi8e0fIj06Uwj;scvLZ{X6NtI4`dlvIW96{de_Oqc=}e z+OgRxJz?C~u~+z0+xF~f+_}!1ii(?^dj|g~xa8B3NcjHC7n@}&-DZE>ZMFxJK}jK2 zYegT3GYSQqPElFeu2N|zKq%L@ySRF{t<|aAm#9Y ziKeEbw%u63I8{BWylvs(f&77}C$1&hu^H$nU7NU$vX zdwqCb7w#gEDtr!qpf`HUX=@M3ry`!1nVVmxMxE>I+~#(UfcpltO7~BO{*bo%gbcqxr-N14Z5*0Ai6g% zE6|}b8K@&UcB(bgyF^Goqw6_ zVLNLJ#DS@lh95PCum|+H>)iCWc(N%xZTfe+Kc&Zt7MA|=IA@ote>7$&=Q`-Wlb`Xt zd740EUjpfeMT(Tr1%2DlBAd%_O>0MU>w9(T)KMrlZ{B>8PZX6HefQqI%P7TNFj(f3 z=>6!dsd<~xn`UU38y4lbZ6sHXw6<=E+`NA0I!xyHQ!HJ*dbNlWI3qLjFJ+}`*RHkh zzeL%&!(@MzmM#4#-JcN|zBlSJc<|t|g7X81|9n4^Z?5CvSX@-(3;Fd7b}fK`0f!Zu z3nf0$`4JbilT4^`5a6N;1{tP;>a%NTak3kMF3lYMgtQ%;e65gBZN zL2gDWABBY%D+D`lzcGi5hD%Hu0h!|KnXh)N1%uMFigf#Nr~FT!?uU28W$(?_&!GF7 z5c+B>#_u26MlcWd`X_l5>2tFAW{1IQ-=7>gdbF4-hFpj?vt;jHGq0MXSyEOO04|%y zdkAc|7{T{tPWVZ-a;>{<_5(PQn; z4`ZJ8?s|MNpO)Ue&}7yX4%yKM%QgfA^uLY^^as!MK(&*nPIdpd8X&fsQqaZ4C6#;n z0rKxkPL3La4(pZHhpeh#9X_=jLk7D`ZMM0iQHRRPN=|=wkHUpZ$60rvpbF%~*$=K4 zoS9*CletR;YCpq4gAPiYZ{n!P!LWbEJPB7$BL`J-lvhyW&b^5fN_qPSdo+Ua=Lv8D zO1ABccK571!BlMqLz=GC%;Xqd{fYDE&Fjr9!7>JA(_RHwck9y!jn3v&IysVv%|jZ^ z96WJtkAVZ#hpc>yt7n(@Tn)F0fh&*%zA>A~;P=5->v)A8$ z{CEY0gY}Lb0RfHX&z-xDqHG>DOoIjuQepVhO$OSGB~y&%2)F#&q~tdb4tmun0;;!e zg7>dgk%Yq%wXo=`hNt%g=nn1Dr3)>tdYoD@{@NBA>d*W7NtjJq{k<=!=FaZ5)(6c) z{7AuqNCLH=$z|oy(o|}8G9h98sZ)J$BvfwKZW9Hlr?+>P9zB{UT&gM^$M5c2mx{3M zYBkeQycK0cc7V=^Y0WIBlSQDvzzc*1`|;+crs=`Xsm*adj@=urZipOOk+kFX3ys4W|Jn3!8?17y?yw*AN$Sryx+bzKz9q*;2;M9 
zoA$vtVuArdJ$^jRd2r$<&z{Y3H$7zap|WzcnOOrr<-lRDjgU&ME^X7UU0P;lBf|YP zc!nle^W54uM4e-mI1E-&QHfDBF8DkE z9J=oBziX3rcMLAPe?LU;wuS%YNy{~3t+~X1qpF#UwUePoc}wg!e^s>}?5;zfZL`Ra z3_DxikfmN7)Tfl`XHe%V6e1uz)UnlhbKB|Drp@82?=p1NM5eZF@7~R5S6U_a@N#$W zPbR5P9<-)Nf)Cd@<7r}QS|U&bJeG0i&Ye#e0K?B+Ie!NaM*bHD;=Z|j|G3aB zqz>(HdTDBstHB|GKZmvdY-$=gOYR;6i0`?7L$fX+nhd;-HUVvn$GQr-+Hnt8o0eC; zxY0tZYgbJJ1GOVZj+{Jm#*{9BfySn2W58q9>Dzwir0_E}Ex$T%po)#i$J=topn%;pM5Ea!`pI@?KZPLSaKLQSjU0eA)Tp;4`uhd@UNl`+XR;0+-G{!H>IbuVygD|!^zq$*)7VJ*k!%etk=kDlZ)PMhG%IDnm??JiImbLlDX=tqP(Z+A8 zrKP)f^{A#E-M7vgY-u^`cQYNojMF_%$6oAs@o_H|mC#<2m91E`Gz4}#5T37yB30pw z1H*i7`B=5l0cX~QhN{ucnmcEXMQr8A6J}`~dyBV<^Bt9#`-D!*v-UPk z+VrghNGO9aKGH=c#*SLxS!wA8T5rWbiu|Cco7;PE(R1o#rT0GSvdodCk3;>G1w45y(+vHFgS0C{)@0KT ze4$>hRiu-m%uv0u+!Gx!z_ISQ&8%*w!_u^mPr|lEcl!9c%^(K{ht2H=sq#di9%AZk zSBaU(Du;QUu0yeZ%vWX!(lrDwo0j!5SEcU|gbrW*+=+p~13Upu=yK0a6s;L`|4i;Bhen;1@pUaa z^^fQ2*_w`5b{Cj_oSaG(MHg-Dx*=Lzj}Z^<1q3pEu|yaY*xd%rn{U{+ zZ{K}KX1^r$jxsWR5;$tfs&*_sYybv2@VpeROL7m1c-_8z8hiKcdsbGK5o#E1aylX5 z8q)8-aMt6Fy&UwAxrZJz&$SfJfC{qlGE56SmOZ_&+u!NFJ#ltd--V5M!7Z~Gh;6_G zhPbtUy5O@tBt%(NRf&oE_JiE&DBgejFkg}qt**Nd9_W4jX=>`c{m`Pbv2NG&f{&;f zoCsY$E9Rf#Tf`#e%)zItJW#%0k&gjCG4y6@*E(I7p5|(#6&7~n_oRZNLZ2)jK6r3j z)EJlft#h26v!)lAn$GA%3Z$+1=#r1o{^4t4J!cKSFVz-Z^3-U{!@U{YYTc&IQeEBP zC&CEh{#Agfh|GO?;C{-z`}IqMLh&d4x60G+ z)JXwK898=rBR}P7NiLn~0Y+A*3egagl04(->lTfylS4Xm)7_RZ?-ZH7&i8*zs7YOS zZz@o13l6R|aNt0}u!^vDnibu)-vAEvkf+qOYgfsBybRiM3U^7!&O4&|?cth{X9*q3CqDI2%s(a9l#)VQ_0Pm&j;Iq2*GS<}WNg3(KI8%ep zCvg)Xm+KW&GF1nGsOqPT&24G`($eEGnBh;9|WF)`f%k4OGli$9egvrV-X zGP?NB+Ktw)zLZ=?##m8FBe_pNpg`Q6ax*R&B?Y8%FVZ=teuE6<4$ z!OPdL*XKrf1>G>p8)s`rsg|=OKdBGWtY>fDZ033HO!~z|ILUUm)~#Dhf{tx&78wa8 z|MGh>=*7Q--lp6tZ3-Bxv%2BUjCsfDdiKj^jJR&NbBpddNZ;p_1ca9>{FRTl+r4_9CZrD?i=nH(DMPTs5v9|cRy$C!@@@V-m$90M5^ZTct8mH7E3{u!z+y&a zPBYb}*`SkNW!YdIaYAWpX!@RCG$B-C+HT@_o6ficet)16ka?Sr{vN8Km}Xj3hclPw z4YX-RI=c!C^@nkri?q;?RvH0ZMa7pI_c?fjC#GXl6%f#XG76esty>q}KKh)PP@E3 z&^&Sq0tG{_k6Iji7rr%_Tq$EnTr424_3+h(B7%z9WlDWy+2o?KNhe)h-NIA<=tL8Q 
z5m*!oH3sDH+pwWN^UJfB?SW?*zt>O&Tr1TV1lo*d=3Gx5Boph79$mb4aXf9}U8>KeTnFrsma{730GZ1hOB|U>$cu0BZX_Z^|i)$=3 z8GjgLQ3?xgxR5$))~u1L_fg^HsGl1H zvYpw#!We67KV=tp_p9{X2u%^MrkU!>{-g9_>K0CbZu|t8e@lERX&!Xy*|RBKdPsyi z@5^VH&YsLx$MiJ8<#ha0c&!$VFlAHfgS^{pW&;*d&dRaF6qC zbv7#_BNg1and)Kr`j$(N^G3S#>(>Gsx62|bcx7m6uhOw&b7ZLBe*AdO{~B02`}f#I z4i56t$Yv}2df6p$XL)< zAUja>uid-bh=-NDcTnc{J6nQ+u7S2r;i7)Q^MjIn0PxG4Wy>~$^V<(!-I`cu)5Hy_ z^8n^#4K4io2fi*Ob}UVXZKg9Y2Pm_v-{OfIcMiD#xjg*$&*ezs#w@J*q#QGn0Is5; zal1`{b&N<$vfd0&=0(rtBD%P_{RI*^@*HVZ!@;7BcGhe_~O&_4ojnBw@f z_w~DX&Aq*=hWfuntS)3<)YDIUqKCpTL=XeXeZs*Vt+^m{ued*H_wEJzJH?&>rT1q2 z@8>se?!y|jj`U+a-!i{`GBNQw_4c;?`-7N_t%uz7GFnVJVq~6E3~C*- zO$@5)wEbZVlR5M**k&-OS;ygTKU5UVt`IFSUca>$( z^IQRN>I2oQYGRlBX8;OClwDKpBFoFlBB)lmK~pz}zq@;|Ha5RoxLOW3-1z45WpsQVonP6D8|4oJrl#FhcaW6ACg+!zHe*zR zMjb{7o5gV9RRT?*+eyKc)d(g;V>S|l@5*pmvJE#Sa?8c|np}IEj@iLpkkG*^E znUcQs2N2|3>F+>{Dk7|_>Wm&WDrRsiN^avti@MUJtc#3s?_dwztRAZ0OV1@I55l%j zN!}R~Ym+T0?b0c_`;cc?L;DQ*!&OM;{u&#fx^Z;V_iqmmXG*y_>Boc|Lw`_K`prRz z^m(OKv$vfbzn}q^Zw^~Wh!wwz`;NbU{(O(m86ZIz^9Ok3xGrA2*zTNrLs+TnG>2_8 z_L(iM;gVmzeH(<&Amlnn<~d3Rn{yRhquMQ?s(bPrhy>M*2L&E)xFrf=+?g@Vt`T-sCd>?b}=I-Me>eN?51hSn;j<@a=Y1LGISjx<*+s z|KOo|nJW@;`0(E}%ML?Gg$J2B_i5K|k80CHJGD0c9Uo)$816=F8qx>I^>58Y=&1@} z3P~27DK1PJ^qBrn9RZT<$-TzJQt6n|$eTy%C|}*ibW}}6c81P$C#pEQRAYbx8}nAu z?}x}MdwjZ^Op2`cGmXXKp$`wGe%1k_`tN<}j`$wdhrK8)!03qjFW1`ikScnfmSV&5 zP12n(2uO;8KprMwsaF@V*U451Hd_cm=0`6f-g;cd4d;s=7PYl81zb=LNEO6OjG{H# zE_20-6)r1QT;uCo93I_t!c{E@jP&5YhOZnq!?1e;c{=S3T{@8@ik?5e;<(=IFq}bl z^^k8~6Q2nE4Tr|~y5I93hxzW3v=|s<@_HuKt~?fgVS}bk{prOb)o59oxa_ei!TQeW zz!z}IV*L$^!lcOfJp(5EeIn%P3Y-2KzVuj0u1CN9ZQGU)CnXL|A)CFM&Aw1B^zo@) zEj4UvTsw~MqG=aA`w06XZ=rK%30J#@tkg%>v4w)*%KiIRX*V_oCQ^MWPB^Iw!33GM zfuy;9vP#S>8Y~QCZlOySHKmv5nlH@`1;%r?;6v+?`qrT`xJ(%gpB;dBcrV6(L3occ z_tPt|XU~BCQPEkm*6O!VJI;Uw#K(2$W&;iXr*4GnhwgPWx7?>&$Gw+X8i^eWNn1rl zbE2N)o|xSce)&$~Oi^B_{cTR1iM)TyR68B6#rj|hB01guM`w$$k5_6dU%f> zKYqsjiJpGCt598Vp%cbvS|uZK{9K+huFvaY80rlOL!ilAfD`F%3DjiLL*~mpd-rZD 
z=mnT6Wtb;p9EuAw_N|Zp2VgDg5`jvPmoC@zmXg5qUwr@mC944VTyw*i18asjxG4uY z|NP2H4_qmtSAlQR=vWtF&*r{IFcnrJFHPItU$ytK^kayM(wm_FX?3=j z5ItMECFjna!#2KGQF(dzVr!N$XydgYcy~SmInVDOwJ|yDv-YQFO)Jdj_pr}jz8HKP z8_=j*VQrEA_i&#ah01>m@T);*v;k6*GoC;5T%=*?mqXW?cM=t=LL|T+MRW7)>-gA{?tw=B0cPGX zp-q5A!c}bAq-U82y=RxQO3kFf1r%ZSW6dJlhG<`sZ%XTy_^px<=X8oh|~n zhd^aRVLSFHlZo53HddXsifYxN0~lWRP99pfg4XpnPVGTtQB(|`3^?@B4qnBBZAwNT z16=IQA>fqFYZNU3Y&wTm|bv>{z-|NJ>rC z>RgOT4LNuhfVKrci%#9T)yHe?3Ua|vr}NC3QXIhGQw7BJ(YAcND?EH2&RoPnUCSZ7 zW9r+3I2OqfK&KLM#LskL>GS7*QG|CgG;B@Wr`o^guB91@$VW*b61}WXC!EcL@uVth zRG&K0n5y6PZT=t;M}P$_9sU<%Zys0k`tJX)q9jwL%u^|}$UKuFp(0ePiG++r8KVp- zL?uI!d2FBonaWHeQO3xWp(4t#QKn4&o>%tgob!ErzrXMAxBob2@6X<9t@r!B@9Vl= z)9qyB7#1GBf)kwBM}xL))vk6~eufE~UzyI!IrRqCx?Y=WbAE$0FqjSj7!R!Yxj1>; z1$>#`C#Jk4oXse*5^Oup>J?#=NNoQ6{%Ln`bT=UZ1t&dj>*Q1q&)qs=859bxFy!#J zcaI3QxegFY?c%~@s!KjA>N`=kj$6X4Qn)B(9Ub*VxCq&nhbLy%ZtpoFY6L$>@p943 z?c25~n-N@AulK_kgawK~(6|Is!-zKo(M{mb2uj5k)W3DXJ>TcyVMB^F`ihM+b+niT zolrHb(6QNF@v5G_+34eb}&fK)Iw^^)y%0kdkNDIEiFf@x4ojTyf0;Ga&tGzS}1kUvM{;@!6C{$24gAG=b|2KFn z?4oUkuElr`POlMGs)aM_9bWCgYeT_Xn>8gov|rPaqc7&>rdU?#VKeA&)5I70&dI9N zpaGMS@JGtEC2!xxaNR{Q%ndDY9jJy{txW8jfh4F3h{@MQevnbwvku_NiyCKAUP%8u4HbKDnXsO;HH-GLC2h+#+FfbbjNa5L(9?`)*tYv zqA>#t9mwEm)7r4H40BGYf`NIt2m0mW2F29vsk@Lq7|#_L&+7(vuY zAJGS)%P6D;51Ys%l>wTeWVI}M)+XAQjl0jB>7&>~pXFR^ww763YJ?nrtzpLFIY#NxRA>d#@_QQl=b*Zv>@&B<*LHHFg`G`)`l9U$_?za zM=*jk*s02jI~DC7qXN2$X!c&*8`MUP5G&8}^>v)h;3(s-@E8qtIMX-Ef5pl~i5kem z$XOx|{Waj4At8Yr(dpd3KKs4L^y#bVqeX%q@0{H1Uv(mtylS;-mr&b~f}sBKO~2JK zF)iixm;&d4s-7~Q*^_NK7)FCpy4Us<$9Jt+Q)`8euo3mzySWtm`}p{TEwf^HJ@T80 z>f)tK%Lfn0@v7h6ZItV{v!25rzu|#G2zNvJAk$htJ{-Fyi~juT1p|*3Vob3%tXSo7 zFOg_%JKLSIMqe|I!G3w#y)Ff$0 zRIM8KK6e+j>Hht#1?xE2Q4R+HJ`X@Nme92aPFg|e5(-GLGD|*bSyTv=CfX7T0VSt) zV4nT@uQh7bB1^~BbpmM>?dIJbW8G?lX{-9PlEI1Ii!IXc-K$qC#h95lL@S6S!U2)S zQZ*ETNTQA+{H+AR_&(8dau5?*taC++$XR#s;>Dgz|MaCP+sl;2q#r}@K>62CtrFw^ zd48b|d>u8qMf>)(2mA?IOw(}Bva3;C-=%+=SZK|Adw)48R)V?^FP5=$gm0qoD5SZ+q}TjRJXYvu1VdYi>S)do7yTg=W12 
zjGwh&K_VhY-ujh4kcfa4!JVq>q7> zs6-cyt<#~mX1~2}mN9icXOLhV0#pHLb?n}K2#P>}4_z2w0FsP1O?d)W@Jhq^@4x6g zFm?wkYdkFvI#koo+lqXp!x9#d(MA+M;4e@t?bsS79;mFofA{+ZTovb40hCtBWrLRH zEF`T}$6`Qa?Be=cN+(XX8+Cz~)Qff|1G&maNC}Huy?7W>ZSW$c-?&i=7_?313d^`I zTqiHDNtBR)unf{bGFFoj`4zl)QH6DfH=*u;Kk5rWWN3b>a%g^jpSSrzzG?!(*aG0B zi?m3*8TEEtHFY;4M0Jz-l)0CWGje1dfy@{t)_>?bapFXF@Lq5a(H#_&l!UUvjKKe$ z54l~WSCxlW>#Vv_5u92+)1|{N4E}Z5XSFaeW>8#ec=wocy}|dNKcPEnFm4q7+;Myp zvMdPeaC#rviFAR^r@bdlGG?%Eu_6#{Mk65YHES!)0WtAKM~$tOcl)!)n)R>v)rF32 zC6CDTOT#~OViRyux^-Wx3}+A%NV@p`K>>$Fi^2pst#>`Ugh`Vpi#AnGXXvyLjT-et z#_E6Q7|J>ssc}RYwP~Y;!YxB;Qo&2h$uUVut3_R#nwkpnTSpQKTrvXVkAD30$*ynv z1GmSnr)cEM*>||Pk$2tZ&DY@uP*Z?;ih}ePv$miKtWxJmdHt6Bka1gaakT4ekj)`*7&Ci@S2NkHclXy3 zw*k75jLxBSWLn5K5Ee&7MG=c5|D7DU9MjtZ8d?T-=9fHiVaCPQprx&yPBj&`jX>%& zL(3_QD>vvFd=SMqjeEq79W`5g$XAUWJC^RM1~M!m;u)K}8f=KaheUq0`F~sF}NgE zQ&SV&O$J+f7JISVK4ax}ikb&}m0#)0>X@=Rahr!6fB@R7){gj7ewZncdBNrQG~H%> z{S<~(iLSRRyr5xOBucQHg@esLT`TGz8lqgq7$@u7uDBUrgOsBBXz$P5?oQ~rR7G$C zrb4F!E^0%E4kZ=uM3W75O+U~~e*E@r6`?>Lhcy+{@O(C6&xN1ch``T=daV2uF^1SX>sAsfY5+CD69_Rq(`aE znP|h<|MM%zzpnFP!-HRWo)|8GfB`=@pIm~rd=^4IOKvL^{Z@mxVG zq>Tcnpn7kgI%go)DIt$szh#S7zGJ}|nB6OMe#wBjQZ29;=Y@U16=f(ew8Tt4+8b`SjZt}Ka zHqUNmW!Xd>2~m;op+{L}6RUP#d-dv-V$bKJw=K8jTv#e%)#l%c<7cGeEpf8V=D%9} z`h0*ng4$(Za?zCiqJ~IOD{F90XX?Xp@}VL!i^6&*?*~lTAc|@ zR2=E|W-W}aXt^Sdv#S#qKge~NosOO1-)~zxMAho-u01ElGS%zUt-G8;xiRrKfFsNU z$i2XhzV2uJ)bevv{5_eS)D?-U30s4YKWEmuO&cZ*3m_c7dC1->UtU&8VHb*uX3hv3 z)f@|D$zccj4Eq}F`;;61u%j)sK;#qRmPQ}tA|^8)&>fx{4c@8C@AnVix{RSgt|UMa zpV$aEdmVryB)kmOLY4IGQ1Nq=$m!YUv^=>gOCv=Z%dj?5S&*KW{_cO=z3KKvgd>YS zJ>6*BqH^DnV6ulHMdn-^ZfTj-XYu!rLeUCWD1_6oV-BaifkHtct17;upzAd0s*Z1l zv9fOYx7+-b2`B>n*V2U05yxCz#LY$RLHt(GJYzF+X%-QxGC~hVe+MQx)WgGw_E_j$ zcBBZ_X|PlW+yf6`cX;b zp%#~vc=qh2t7|$D!!APeKzuZ!bl}|z0+Err8fqHpA&OsIRj$RY4o-pl6=4k}z{F=e z@I4eg-5L3=-C|qXHwPS}9M|Uv$epKe6g6H1OKbk6muUR{)#9ut|4}u@u&VT$JJ!cf zqcLeW{O*{fzo@`v7!jM?Kb-uEw1`ZVCqEu7ig1cNjPLkYXLDmG13}h)N~4VD;(sSN 
zNQZyS09Mr>`~eJ>!Yqn3wjMl01De3DJGSAcJF@VL*Y$1PYH$m#fLwgp+Eb^#R^NPl z4TupNg>>ma!0ws|=FqFOtGI1xavNrgZnuDw{^Yz8J;`yJmj|KK5`X6Grz2FZy?XYP z!V57>tPilQ_T|afabhe2U=1J@QU+a?hKbLuqMe~3U1wqk6uch=mxYA|zJ=&{dF)+L zJ)-~S)~Sminz>sx#{a_Fl7>Wzk81a>_0_la61C7`FenMJB~6$A+H&lrLSaCEMe67e%CdQ+$V3;t;$(PaEYaXGdw-d}%Zhzna;MUeiT* zy=-hico1iy%#YhNm*L?(36e7zvMpejkvzF@>35FzQj#sfpe(e4XdmJ`7~A!du@&xv z6Kem%V?V!4Zz46C{3VA^AsfXEBuWF$a%l!w&4QrgyKMXHR_U^9bYZfJr9y%0#N(1~ z6;n6iI%{B7Ne*U!Zizk(!&OiW&yTEkFg zrIRBs_)00{;la!_Q*9}jpT zgx_S_>gP?pmVEc;=aS_jsu@+y(Tk4HKus{Sz1KFMGqA{~Wx9-ZG`a-qr^*JlKb_&l zpoccp!yP1BGNz!H`p=l+;USC=7>3;Y)M?^N^ut#q3l_K{cE`(?lVsO}5aP)=TDbDj z+i|x@7O6we!-|Lj%GnlX6FWy_860iZkg(uKx0rs9b<>o-=W}BZ6QjHex9qADHGV~wX zbL-;+u+~NwD*p80`(&H6jMyhe#=qY`W!fFFlQIRE$)!4|CoEM%2(YLkalS4`Vz}=w zdEAu%x&ub$3I3MG$`p<1VmWyk-~9w#U}OyYGWdBCt|H} zyG&;LcUg1%A~bp@q$ey-azfBh?z_#&JXhFdz{noA=G@|3g#V`&dF+bYwR6Xg!~EPi z{!JOvcAz$*q}A%l-ltBLHto@|;{cwBFxz|ica5y9RDWFF?F$QBBz`^A_cavL@=^~@ zS+1iSVI$OE@sm?sBv6Zj|EjTh#XgXXiI`P{5AJ1RtUM=@M{q<%X}y!RYSz5X$Cmd+ z(*vx27w&crLtQn>-1Y4}>hK+x;dsWr7s_^H16yzpOW`Mq^6I`1GBaC`-1`&qJOj%g zlTbCq01ynYSarN6_!ubpf*3+DIXth?!YEp=gipr#9nQ{>IdWa|{#Td?DaAE0+Y)M` z2Nbw%EykOfm6fY2yY&f z#-j+dLdM0Vx>wdjyzQa$lgvsA_Ntq_quppBb(0s5zV^L%{`s+6Kz`FR9` zg~4bcMcC9&PR1#Tm1eV~VQN{K?j zjEK*;&F8aJr>YtnrkeTOW!Q@c?#prFSo4+Nl2<+HR}fs(uH4}K+lIiqnVbdUoRoZ2 z=5PMemgL5pwGE9D2Z);6uqqM;)k}ZcAutBDk6vl+cEffd2906XlRG?@hFO;Iq~e>ir;;1awQ*lklnt;d{K9Ru9bw0)TGxPf~WxF z2|ut+b2*@uoA@4zHuIB0QH5r_;4|z2=^ni6IlT+NBA;ns@)|2udw5;plv^s5iMQv< zM*t$Y2&*U}drX9OYY|;^=+&zsbEo*2P_l=CT@7lr;Uc^WowX?L?!bMX!$uh1XP;c@3eK9dr6qJv4rDrjMs`+(G8nZRXw)8_n=dB0AtB2leOEHIMm+n8% zc~MUN)~JVpM|#Ot0wPiVX|C(J9mrM25h#`e_8+{_*`H+y6Dkf7+8O-$s?L&|QqluO zr`d141U>m0rXtFrYJ4Y9MyB!^Pfi}Od>^b65+1H{=xt=Qa3@Y;7>25bRVjU+Bz7io zH!r3T5JIe4Y6>(?E0!-`u8cuFeP@hB4WrX*^Yx4ev2KO4p=iV2JNKLDHg4R78ET>T z-pQW>kuh8mXl%`j9Du9u4x639wmyn_t6{?i&6v@SUTPNlMoJ)PD|l`?>MAk|?tIQ} zkwp(w<CP=XBOdC)R;-<$d-c04kE>VXRlkvQOyCW(uLt<}v}gJj zISzS6roa;%9#t{@GxDLp9y6nl;flfvR&T;BF*<^wxxvMPBCgoGcf$F(5~#rcmT`;p 
zI`<>LTS;gEGkW6b!nw)`_wL*o$iOmT$`m=bL?F&-OhYh;Frvb~KrIOF`!4*f)o1>@ zHebHE8u{9NSu(rc?F6IBL-9DGR79uX%gZ!;M9OJwTos0x5nSjs-qo0Wau4}RKnQV2 z@GTj7UZ1}j`$ZJG{H9Cvv;YWkZ;A$++RBV_MRY{GQETxwuSz>*0$SLa9$YXT|o*!tG)tNt;FjVugdVrsYm8SOOONo zj6Qrrk;qaqCDlJD_$?n_*maVV(_~Rq4&iuDMTyps>nO(fU#J;vk?`VA&kv^@2B(Qj zZ5qKxJvi%GfGF2l2+J`h?LpEKKRQLpx!bL2MErFyBsB%FNd{-fxWca_Uk%0$sYuJs zb&PuyVv|LF`6pnXYs!+j{NJ!8Rmo$#LX1WVoDAi!B{8=N+ch=J_Fs+oB8EY5EM`7U z#V^QXuoq1d2MjX5y?eBr-(lKsbG2HHRQc~dJYZ6*Lu0RFvC;o_^N8MrSE(r==0CwM zZHhmLpj7;ck&(-o>XTZ6X%o4sx#;UVJGXH}Db}I?vVa2!n=y6Lq!kD|_E6tN7$cVv zBc0PG-vQ)bl=mQYPW?J8i`QZXxF^z`2{PIpm!QjL#1}NeJ@H_CBcm@a=NI>?B#i76 z!k&snnm$sa(uFshZdXIWPOAbS6?YSl)m zhagXrv(>rX$oA0BO`ERa;~65pO-piZ&3+!-zNroR^~zG#r_Uu8n|PkU9O0_8;A=q< z3Ya3p)T~7Z!(EDD@8@!Mgk@%3S8tuOkujDrM3j1IX?rZar4``<2{HRX$vQe%L`Od2 z4y3|Q1XE)2{-O5`uSyV4zW{(QgBeZj?6#0gyum7T3ARI9Ld z!*;tiG4!6|5VSMl3E@{xE1|9S-`znO`aP-+_V0GO0%-+*5Y z056^FMpMB|o7z@uVWg~qfL}4n|IUi|&Jnk|7aTniTMJHzNAc*OP3a$Y_@Nx}Ab~70 zU^c5@TIZX6Ka$BiXRPcXI5qMX@wKBriaSjOxKlCJ$EP;PKHId-q5VY<9-wxCGZjrJ zqH5O}4LHyLYVexIX%uqmXS!`E1g;H&ft!MafyBcx%R5Rp$VXj(ZKn% z0FH^xX)onwV}-l-d`JFTQt_(u%ig^!PUPR`M3{hyI@vpB$eP<22}P!gTBF^3hhPD+1?!i> zMQP;)b3g2I#@Uy>pjtB&_mz%`uqh+!&JGQOgb3!S-*kEj0G#8Nl~(6CAgdsoLc>-^ zyaRH*kIUHPHhe_rY{HKg2D!WsDg@})cXL#Iqt5c&l1E&j~}Wh=V`M>=v4 zac0_mHb2k#m$h10F29ChMppeeE#4Z*=GBclXuud2%uc z+)bn>qOYI3n;AlaPgu}TPp4?x>m{rLfRc(#)6@6X7#Ei|&H7_XyX|pe@fdB#)9<4VWiniT=+FJzcx^v`&{=xF;_}>fP^y;@i8m`v7BElNmxA-<0X>aiFCafrx^bwBh2o zLp%*Qw^l-^DhBk2H&u_lL5&b+OJRa#5H?P=haK6r zu)1Z2+!Euqy@F{e%;LmRVx5D44y2%?(1bEz`b@=tM7wJmTzNKw+x61hqBJl6`Rj}M z*ihwB1R7p?crxjdUWuRfeFw8yit&7(0ivKKL z35xE2$lgv9R~9}5{9Pxg7^H$=7?J_;E(T#h1Fll_AKXfLBrRU7{nxy zOHXJKzLpr2M4Bq??_X)4Bw#Rd8zIjTY2crk26d=-GoGF{U{2h&*wr$FH++c?^NGtO zpep;^IzrQth7O72kYh;|^!^MR92UGTL~xPr=6Dq<82ase5i$0OYHH`P&i<|Z1DL)QVvR#N}25!#^s@wX=e;xOse6wiQwP}f*{4(14S)o zIwm)DHBK#oH5B^8He9fFFVOR z6YMnv9%s(HO_oK?roHt6z#0uFsZN9Z-Rf)=f&%Od-#gPRvb5DI6~#&C*YkZpJ0X4| zghI#DZ0qoq>@jq`rO#SAx`?DSj`Y(&(YQ39K5W=9P-X`#RW=8}Tf`>t&SAK!VZ%m^ 
zgsX?P4SP+Vm%;7=@LAZ@Jm2w*9oMM5IY##4QuKrpfL%MWJ7X_shZpc?WE6^%nrhmc8$4W7i?&J z=;=VqC@W&)fFxHU$ZvsA)M%Y zz<`D%MtV4y8~O=YR7?8(@slRSmiSqkl)ih^W!{HpO$GJkaF;MP4t<5<$&)Af1qD3J z=x7A{)Xq$VPjIQ^2a97=NV!529o=)?BzhvRTJ`@KHer+#4l3Mk^l>dd;b7@@mu>#K zdFfYH_IR$j;j*^oCzmwwG_Tb1fz78gL(L*oLoxWn#g-Mz-%~d^kB`o>8vOyOo=9{5 zd3J6!(4+@-fY!>=a`HxcJ%nl=6as+^5OtL96x|4)T>^##d5%)LZCS0J(+}mIsM)xZCB5oXfX_ZuoZMx&6Bxf}gPWk1}iDJ1% z%tOY4Avn*iPKH&5um=G;nr}Or1+nNmIZt#QtC_U&*$5W@!3Av!;I9Ph5}Z_#RV(Kh zVht2u5=hv-VQ3a{!TzKY%{}iLRQhevgj;7$pDrY#Sh<_!KeLy!^Mk(q&zw0^f8l|4 z?jLB$v!2uXNeG@{Rb%UAp!@<9PzLvcRfhe1jUo!(WFvc>eyRdxDj~eI1Rgk zN(56fkOYP+Tqn2FuM;6EC;Z`~N9}iP#%D*jF7*{_d~BiS7$$BJkK1OnR&M%rWdP4Z zt{L6(`vD*KxX;r@GEyv@7pI)*qC*qKJ-g#^Lihn4?J_!naDd({=HxXoZvg(N2NpKU z1iA$65dlY6gWd6#Jnl8HXhy)C1D>gW+kV1GG$OQ9mohTq+`8yRIZ`0Sj1H8_lr7C0 z&7w_;!GCnutfQI7poRr3x6pz{PebFy_c5E3qz%9X%hb;0u!4F+3ygda{=mCjqqS14 zvL=a6j#pC+uVt-TLfF=9((~}c`FJp7h!+v9VO8Pgc(74}Jfb7pPD}1u-g&o4M$ro- zA+7l^OMZO5NQM) zN06HcK_dEhh}4W0Abb@;91iKvo;BkKt^#1t+@Kfrghl+{;RoYxmHpVkL41!S#9G|3 zg(Z-sLsZGO9B<&%nWFvVd&&o)QZqBgf3AGx-gs{1w~tT&F9E_HgM9zEH)hfSR0+VYcJzg z_UC}p(1}T|4Gpqr{k{GTJqZ1JZgJh@TVTjmPx%_F7-wTRnpm9e?Z#L~O0X?T>=?Qg z6qOsd+3Q8w8dQ${s~K4p1Ca38sERcMfxyO70sOvy+#8KltSbWwAc&EX0g9sd!cY9< zC?$7SP~VUEJnfy4a+A)+_1!-0Y(5dF?Y6Z8RdPw{sG6rW0|A$)ak^AQcyo^4 zV{Lff=1e@yLjVsh(WxS3WvS`btGAN>II(pLy0yvDwq^=;`EUHdMHVjwQ0RLF;_X-g zOGkcXsF5<}@BgzSkge72Mo;K=t0+TYmZ#$okv=6;wI; zO=BO+ZS@*8(pIpah46GO-gm-+!&}Z?@IQ9prs9Q@81B7>ql+6%%ApWh==(3wW7J7a zJ$!i71f@}Kr9HAMofRNiAx{OD@yw}A&qR~(S2>MU<2udzsr1hcvk7oGh3;N*q?|C0 zlStSau?4j-;`MLX{r`g?I7?L8a74%qC4RE$DyxH3QL$4C$qJ`%8Hit=s^|Lu|02hz zU;>QTWgscwr^D`bH!@PEeIs5A{d#5JZ28%Sj--Wkq{oo)*4L89*c^M%$j*PmwMjy& zit^$BLo|OxL|(?$VM|4MhI*J*@(!X9k-5l4bMxj!dt95^eYUT9;yc0gnRO&m63WQG z-=$%t>o^wnvrH6$5Pyr2ciD`axj!yWvfr>~Ybqo{&#a}L>+C6l_L39_|q zfjLB5_i#Q)f>^*f+jZ1Mt-9(s&-`=u#{C2OA{mB}93+NhbO<&lzLb>-Vo?n~Wib_* zC_mMFKw1d_B`9a?jEF%2Zjr{K3!ylY>HIQlSZuDiD?sutbN8GC9nhwZr*ZZFI_)4% zM8*v@`a?qe^0E+4UZg%A;qLCf=#hCi5?C5Qv?SkS(9qTui(T-hA%m7K;V&`|T$YCH 
zbx+V|%Pif$CiG1^cdns0icA$nKDZj-jO1jZ9ozKb7u-5p^E7Oc5X)5*qJ{&^EvVlu zu_Mog`c7XZ_Tmh!MUzQkOYGGY1IT2~E9BCv=#dCv52dN7m-mXXLA*{Mfs{md!y#n4 zD%LWtrL`Uho`{g8Tw45psFXPtOmB(>9&)bhZ~GAKB2+MVi9|q1@5CUIC;8@$zTlX4?AMwG?hYZr%a))s3F)EiN_=_qfbVMb z`S<57jyB?c+WK_ytUHDy7w!VHKQ@5hCn`OJITl)K3l=O0Z`3VO^o?$?T8N!yku3Ia z1)-^nokKr&g;8kl{{50oh`?GU)NFiBAv=i(7=VFcS6z#jeLj4N+)INXQPZ5)n}-dF z&Z#_ff@Ri~taFN>!7YPtnOs`)*aa2Dw{x-D1*i?gTEJ1baa)Gj?uK7p&22Y-Ln>D? zD;*ZG36P2i7$7JY1CZlJ^nys=Hou-)-r`(gn?=n&zB$&fbLXR~@(|U8zim)EYU1g6 zo+O%34J0@T4nY`ER$gK!Q@ck3P4iJQIrk-=iq2$i`I1DH>>*xLY1x*Lcs+kasIYKB z4B5op#@sGL#oQ75AnR4~ZHu<7kDn)X#HJ-}4$bOiPSn*qzHm>6t?XJ^EStB-XitPX zLrSOk_{O|16DCZZ?U9%JlakspwUDf`26|SMMp|k#p~JUztqyFW2;_-OFP*GokVSu6 z;<2kU`4Ou@Nkx%K-sYh!qJ>sRKYOjQ#H_7PzWczQheNv(?=oL>!AMxvp{piW3nc;5 z%?fbI7F?cEi$MOHY)A#6nEx-y1irOSGz8A#UMR_tq0hYpM0ZG(Cb1U6L)NZ6^5kXl)6n#2^c#0usgbq*`S|Wc&I9EDB^Gxs z0(Vb;_B!!0M9OGBam3cG)e-OcsE-8~z1JteUS;%UyX**(CYJ)b5^1 zjKC7UD*Tk(L?U0*(lR(Px&+O@8_^`zOG(K2SXSoH*B#0oi)L8gahS>giUv}2#L>)a z7iJgUG}&hAO%U?44T!{91DhpLKzPs)kO6iLqD}lakB9TQfZr+ZY4EED4>H#q4noE& zNb0`>Pg+t#7VH-TBtV@|-`JkFG(zx9C~3rFnq)%|=Oc;3t*_M8QxC0%$U=FegJ?Z&atrPc_}pEFWYNNbHH#=kQhQ{Gv{bd= z?39oh4zU(1t~wV7ENNv07!=fn!`e&c;CyO0j#d(OjyrK^iR=^_SMr7_ZbKt}4*C}% zpd15hHG$u+MDsAjz~%{LA^I28cr9@Wv$&;^#zEnFM{|6AZ0Z&+qohGvAqnzd@R@9GP%f^=(iN=Bj-#Vw%w@ABwy(W_YIhm%?Ofaz<#I>}ST(I~Y zvH>&Ei%Svw+l?Bb_ee;sj0_oi1HDTod>qf1Wp%Y|F@%`GI&ObeO*=B?S$h&LE4{jq z`U;sH;=+uQ0m_<*%u3wCPtQ2pz0(XN$VS#MIlm0JANgpQ34% zY6ux78NeLZ69^A4YQ8O`*z6wh+Hp1R14&ZSd*WOV4Zvv3ABYPDJyIU#PMo;@Z==NR zE`N?D3Jxzyj@%mZZS7qy1entsYiu3vRG1oRobb6$zlg*~4SM`sZ8!aeKVbm{ z4`4E+T-QQYu(-TvGZ#`c#5yjnHvDdFel+YlZ9Rdwc1KXb>A(@a?H&j1(@1%-$1!I}B_bKHWbkr{#5>y7 zg#0znL+6m^d2B_ThKX|>+l*$UiU>udS*7Ku|F{(&F;byq7y#0$QUXcxV}|nn(L#i< z#!S~fJvFx)>(+uqn7!{!tS$Ka{0|>?&wSM*B2_G8z&SvY!do)>n?4om&K1!Y8CGSM zuU4ndy1tpKFwXFTBq0SvMLA-`#?5GS3XkZhgKlY%#);2SITJT*4>@1S10YS#zR7MF zNOD@ZU?E%COy_e7^IqTW{L!kqq z)S42$R84NiEBp>F9Zpdc?>*%0)PDo!5^r-D{7v1{Bml-X)6jl9D;9JSF^U*H1n`26 
zXvwnXW5$zJA!!oSzd^|4n%o7G5}gX}K+y_fH(*ZaB&arRd<->}uCqOMH_1$c8*n={ zM?)Mr0M#C@uTlR?uA~1CbH9&R7&^l#(jImF{HEC3{@y!p#~V~Bqc3f}{Aaf}hZ?XX zSo`-+@M<>O1W!*w=kT!#ZXj1h_3wY7Z^l2=uZuTt))q#Dsl&19_IS%m#O6kNx;h5j z9Y<_tnnMhTkdVafd-L?8?en-Ki3505rJqE!K9o|^+*D?Uhuqe3{`Do)T!Ve^Tk!$1 z<}6syA-7ZX30qiIMl2baxEm-4R|}vW`-|P2J;cN&ST@htJ!_t2BFa1l zeVIr8D@hLk7oMQzvuDp-PR%h9RtPDfe%=)vj$H9x9S**R+r|7ZCO1R7mM}Mq!%eOs znKET1+arb@_P23RdGPEX9`~#(>yD-~)K_b%B<;jy@j^v*WcZs88h_vbCD&9;Kt}*n*K-f?D_`1gb29q?q1Y z&+ZgX@~}rimB8U3ux0*H&Me$jTV4-2tYh^27L(!1RjaCzThL86%F0{9);zG^H1rMP30caxIBJy13(Ls2q?7~EP=8n&B*`*<+_9Vizpha+&iHguT-X0 zmArW)spskZv?JUK!_4l%Aq5neGq0;r6u{6$MniF|$hO3h0w(3u($OmR2lb-U7%~mk$w72(-w3}O4<)Jy6v~!P2WbQvX0CW*PkiFq841b^dri{D_SZXriKIXJ z8)x*U`p8)0o0Snx%(7`o{nG%<*?8P+(T^>>Li$9g`%fkZdZKZ8gS*}XZ>7r-4-FAA z;soAPygj4z+iCuDLQ^E2yB_tmt6Y&o$lLDdEn_bPED;)2pk7Kh#@}#oYd=3fPBy)? zq+1_>4Px*VH5xHeCFXd!EXSat8Zd)IBVyRB{@?x+`V*X?;WXiG5BZ{MBsPuEvr6~L zSw5tQYwSLs)%H;+ZoIU%9H_Fh1B`S$yE{r$GTmCWR6gU_mshT3sKb=S_tRha&)Z|C zedo>Nu-1gu=y@#(N)RLzA(IU6TjT4{)?fT=gTOZHD&q=7eYjyQ7S z8~@!WcBp~hKmUzO;8itkrNo6Y1+jz}#d{)x-nD2|u>bF7FXdw(9(tAcb`jwQq9;^9 zLG8J^wb1g-l}f)}+jxab&g{NKwbxC^M)ydzYXkPGa@Pjq>LLL^fhiY<37AC}3$_fQ zrXkL`+qZA`UEUpPLSoCQcQHtVxA~b`PB!|hT17JZ()IL(o`l)hO;kn~!pAB6Qf^%4 zXxDBxuS)WeyUEB`6D6I@ zij0w)VZm~Gk(&MI+l2}u1AkW zvIJxIjkM%5_+y^3utfjX`w^EKA83-RKenQ0P@jLYN5^mwFs-ch@3)!^8 z?tPUUZH~_M*iiYUWutc=cj7w>k|0>^F0Q8ipTN1dYWCHsUSmd&uIImNamVHgj=4QJ zCvboexFKKxL5!2@VE$2df-I735LB~*#|qXBI80^{!G39oFo2jXX?{M@ybh8?tBBBz z=}9xuV^%&7q^^a0C*B+u1j)hdJug1zJ#_sV7L^BdCrG=zJg9&*%mEVg;5B@NYCf5V zlBq{_iee8z13I2H%57g@Q#SPt0CkY(Ft|^2SUYSSm3c1*Z#&k~7RNXkp+wL%IcYii zJjC(_FfXK4mJbKvCr=io+b$k zn`&hjya6+1gX6I^QKZRd9&!Hi!LwKGBzCu&qV}H_2j`QV2iEQ=X9A#&S5IBL5=33X zH}fM0@2cOZQKBD3V|v`r*9s@1QyLL-ppfUu!Z(|LSkG?oZ_EN&)~_*zQ+ z=7j!SaR>J=sXcPZ2X~V#qckM7F^ll}O{%Kj*LTEdp~=mVz)1NMeol~po=#FKpxMFC zu(q?9)D(avLZ8`Fe&DNEDDCDs1UtjLid2xg8N90ueFyfiF`a&n+4>=>bl?FLEQ!^T zq{q#9!A0ESQ-SES88-B;S|ytayw#@JNMbxqmWVt$aQVXEoL?e 
zoGjvR9^UE_@wjqPU|ilG7;s7>;31Q^-8if0&PTb*gpG2tekVt50`w$~Nim+bj3APM zd_w(9d>i1LZ~7iPy}IrA{jst6oV0v<8hNj5hLDUThyAr#cPm++(}XXR>J~MG?T--y z6~>sCyEcnVlMESJ3Jp$_Im4|~$B8abDE`I2qCR`#1EtCtaZ~vnF2~KfaYIA#tY(bO zWT1Bh56_!RtR>b(lE|Z6A*G~}^!59w;9(7BofH%ns$(MnTbKZj zmB#cc)IVXi<#cE897TjiCA13!CrdP=|uO(Vx(CL-u;G>Oou4(|>}u?NpFAc6?CY}MJH**N$?p@L^^P9Kl4%jQuC`%S zreukMYreFW%GH*e!cvp@=B7@25~OT#1Nf%voXb0ioX8i^v)p(k6W`DkOw zTR)|ucJ$H51gOY(km0+)PM9m479RNJ8MEU3unuPI;$jb1W-miRl1KsipCGk!LB3Km z)>c4J`q~0oWs&hq9YT*?ldDnD25LN;>VKEzA?6u%zZAmDCetx1pVh%?cB_Us&^{c%%V~S$^3XYFnryxC; zlQ;55mAVGv^|a7RF*t*xk(;SpFIXmmqjZ<7Lq@h`98|a($=L!hu!=MmixT7SzquX& zMF?U!x_!aO_EM`PfNXPd>w`uYx;qc?Jh6p7s9S>-RD<~A&(7{E@U7uE#O(?<>>aO! zqVSRls~@LLYX9V&OzO+gtjSdg2xY{3XLX50Yb^=71~wD~jfv8rODx1z*QRJ3Y-q#z z1ACY#*u*Mj#tmIvi#9$D=Ju=3dEwdCKVtg>GgZS%4c%i7UvM7KDQ@k&q?h7-FJ4z5 zdjLf0?l5yTERLqqb(bCEPB!l5!^Ytj9ndpJ5Iohz$B(xq)uJ!53(lP0Xkp{Z(c&?s zL2*{;ws{EdafvIJo96`oKjIanq-eTLUZR_@dhv=++gq+F%H+p{WZebi=-QNtG9>=G zpY5QTRRra==+d>T%nsP^6CP*hHKZ5l%$Nc)!L4$RcRPh-?K~TQqp&;Q>F-~rVkAtw zjZ-?9rC`5i6b*ajW!HOXUA?S^s^+vSy_2Wx-Fq~*^APOMy5ycc$yKPlXOACZm%)wJ zAVRdsmqgqE08EdUkBQFd^m~XL-TL*baU%xExHxlMk9Y+Cm3kW_v75Y}e;X+`0$HW} zsSP5-Uy0vWTsED=<{ZmF50?l%0mQ>40cyNIml;Qe4)lSTka{B^ErC3eJZ**}}t zZqiecpKE)B%9fsyp}{YcAew){KOiYdfP_V(-2_%cUSkG%Zh`6Ye8%~#P;^QnfFq&}Gva(ruLs9IDMP8@!ENB%^{oUn}F zQe8;YBDtpW#YF4%0!mc?6h>PH`zQn}tL%yBX&n6#CmBUA`$gL;H@ypeCtVT26A3?y zlPg;0I8_F(TN&^qD=)GW8z7%U7b1uS+&AW~Uj-?85f5Ny3pRCg|Au{I9A(c&Lq{&)!cSw+{c*;7`Y3L00Je zZJ$tU^EV_U9*vGSTM-C{1f-3vKWegM^D$&hGt5#6!IM-WMt-UcP-cf zn1DKpG;ljL1x*4DF|i2X0Y4J^lVOxXqy|zKDc&f!d?!qVfrcW z9IZFsDx5P~o_RsfnwWE+Pq zsu?mOL?h!8VGVpgLfe3f-wRHjIkS1LS9GNGv;3v3UmB5aVm?IWins?M5Xh|D$!p?s zVOD_!KL*7hr7+RZFk)d}BIlWLo!&g5VD4d&dyRncraEwanmQiHVY%+l4$%lJhYyaG zn{StM_Dig2;#XR1(Sl(SIPiEppVp}K;4?8tkr^15x28cSbs84YMj(T#UaeX+k|pfN zb_&f}!8uj=5rLQ_0k2|JQiJu~xw5kV@6UB%qtq0XVhgkrr8~cY=}lxFu=(| zv3e_RFmbbS|1AG0Y+~%ZJO(nZtZ!(cHe?OBO2q*iE3+!=86r31orZtj*!uX8W&iLOoFcE-KQB#2CE<6$Ugl 
zssr&ydo8_ryCYF-!-#(_-dj4uGLd+vE^;+FGq}x(hz?@|$oWwOf+wHmdZGwD@t7(A zrLLEq1^edS@NZXwb!6s{5lLO03smF7D^I}pau=7C)d1E%U7$mBDtR@Mjp^pLqi|RQ z&bNrv$Pe!mkzp4YTPAZ;Ol^f+X<;&Kll~%Z6tnx>zPS0v#Qbx7os?Itlu3dqP3eE? z%$XO+lx0t&GeqSC-#WYiY&zV|6X83WmgG94$L2JuW8I|NO&X@ZckD zGnTbcdJK$RU}?EcXJPBE5s^ApEmo*<<_0YNnC)g%-Sqk-z0!b#Q7Jcf9Q$^&%k&95 zdhI^;?dxnGodG!n_4$&o)o|iH^p&|t+MItQQ{M1?Br}nTHKfr!x~3)#o1Zjfntw2; zj=6F6(T&@-B~IPtKLq9S0m*vhARO)un3Mt?W&ZQsF%(Q2@$XCuzXNRN`}W|8-X%>H zy%upj0v&4y*G+L?vTq}XHaqoCqF=?1J3H-)nFQw!985~#UW;Ju$U_I#FHA`~;XuPT zAoVv7%FLR>Y2JX|Y5g7^Ydj#Lp1!{E#Bju4+ltN`J$Uiz)jmK4Wyg-A=mKinXeq8H zlWOG!{;jR8ox{;(P``d_GPKHn{7B?@pw{HSY8V!bVj9rIX6c{u{o$SbpY^CaYXvLr z;o9c@FLlp^moviVO}UB`)QPs}SoX=lmS;a*Y215Oejq5Enf2(SlR`NyjW`}14~}+( z3FhLK2Y_?l8tLH%HJLdt?%Leub9kp8XxQa2hJEs64q8b5#uvh(-R4%9n3>taxN?QV z7iVi{?WZ__M$w?{&~2)i6$KKj?Ai0-*N^j7k_7#5EpzVyP^NKrztI}srw&aHw~Moh zYSPr;YLBp|im>D1MGmO@N^|?asGfTEU4IS5tOKm_Vy;#n#Rf_B?(Xv>Ug%V0Ckr9O zbD5jehikn&fK};{`}Y1z1NK=Id_h9q1{RjF@o2kYR~q!d;=#1uQshTp2skHd0a6NNYGVb1WpoHAz@y7#|PsfU6V`FprRhMu)B;0_!hcV4wVHxqF zq$EMysl0_;v;xNwvjB^a5T*a7;D7jkBk1Sm&GAwUSP3mu%p-NC89;#*>Dl%+r@a$y zsR-jGEUIn&mYn9ajzLr0)O_K+RP$k>dM2MQ*B#ljWxw@pE1v>%p(7X5+}z!Nck&-T zY-DWb$ypD()XN{m=#tAwn1abvF8`N(qeI%oH@>XlUJ(3J-m8jc@yWkvuDFxrp)7Hf3tP!u838uR+$&7^)O})Myif@3_i1VQMTC$t4O~4 zqzALrhQ4`n>KJ3o0rbj5UzG5X*>#)wOX+kV9wS8mLk}V_mYcUXIrTo~= z?C<$obKec*iJbeKGpzU0KWF7GTl}W=U7T7ZOAZ(zB#)em1M-8abw$|3=bn<+n(sdI z+?P#6k9NpDDn1KxqCT9?9$H0XGCy^z`0LX?UY_dz>}WgxMT@2`YT8DL zuWn;xWHiF{CVA|L&JQ%w!cG2^ow3zbq|FALNamz;uhZ}Q9?ZUzT{p3Q;~TkMYGy^%#ZXp&#$j%{myy zfw0%UR_$RnS^fr%8Y#1nqBt_Q-Pzlb48QEz&+jvmKU~{m#>4#LEd$PbqyLY5`Qk;) z1NTK{BbQ{ZX*@M=t!2tLd~_K&Wy2@21G~01WQS zeFJ2eIUaH=-}V(aY+J9n``}9sv40v)oX3^EuNS%wPHV{A9EZG}H3gSDmhN9<~P{IEWKbjNGt5i^4S zdE&(*lp}*~ypK_{@%4!T3=0Xl`fd36@Bse)Yq*Bu>J{%3PsI$-wpFVkz)Sb|$N85g z{`2t2Ab^I>R~}C8yQc;IbQ|QFsYqmYdT*y2c_Y`mQ8&pJ?D^F9!B~wgI#=DVT(UX! zRQcAcVVzz@`#UHUf#yfTdj%TZ;;<6(o?tBFvE|V$T|6!coC4F)L`u~|x;5cUtFf*N zr_-vX#?1^$S59SaVvso&PcQGvwI5b4F^4Lz0k? 
z`hsAw%k3uykjvt_wvN8j%=6{@cKH=n-`Q`;E?zNAz8O<$; zNRnfye{Vsq9tD&nqJ>A)mcSah#)T|_8`S+bA89w2MOK#Ef1-6U%ks=*^>^>-iXX6W z3;>nb5s8E}$PJSjW!v>_dGlo6mF5TA;Z#9SkkgPYrqQxQ< z+>sY9T;Ku4u*s4Cxh^c;yQZcl}9tHWIW-&*wIu?kw0D<2qX}WT`1J>!RMB3QzLykoc8+)OKWF|qJq$(NTBY-$9p1aiDPmoZ zfbdOfm(5YO;U3fgt#FEBLls3};VYi=dSeFXBL8Irm?dQ4$L-tqhMl$l;Tnt8ldr2i z*+|1W51jSwk<4ay=iV~kpV@5DaQ=h-9BI}KySOozm*!S5h%pfaD)gO@j{K0jmB3rQ zA08eEw{!>4>rmcQmj0W(6UiVG`x$NDa)?E7-LsmBX6b7VjniZo5S_Zu&CGv$P@IKb z19P%`m;%-t%LiK1NJEx$IF^F@N%_M zxq+4E6=@91GY-7}y>E@DIZ?L?WnpO6Y|z459wwKQ6@EORQ0(!8-`;JUz*yYGJT8ZN4_$fWuQ*O9@^ob;wLj}74An8rt=ja29$;#?{r8qUuXYP2L{42;@ zTq9*n`%N3w+wbEZV2oIzdEoV(O3TVN&^$r!>w}-lp9Np1hlh`T!#$7~e73##&=C3} z=DGIzgBv&Mn}51oH*C$CDC4KIGn;Zr$aLuL8WRI`j<@6g=I6~mABqupS_wbAzUn* zI<n8FY1;QPBw_i+0k8nH61q(jU?RT@Vr?d9K>$Nd9w!G3n-YmiN z5yp%=H}3DFH&1VTW^{BgK0{n=(e~B)fI&?K?p;qRhrPLBu~ecB@|!8ka|# zd`Iw!;4o8Yt=s+lJvVL{$zvt_Ylp2U=8U?}8r1pErUk{f2l8zF-@Gj^N;!B}9G_O}nrHc|C+^vvy{4tPayGUs z$#Tzh?jJtSoay5}I;32dS;mzs(v;f`A0FOvqPoA+zTpjjGsa2J;&bkcQ(pN2X2~)) zK=f|@rITb)>YuioG-+{%gXp)YIWrtCKcA~V1=N~K6xFDQ&wsRLmu=^xzj!yij_X#jPIwI>U4Y-l3 zQ|4@BABQ-9vl_&ZGyG&%gxVQXyXX~?U~T^S_8_)yE;S;B%R!!UB5&QKGa3s%l06x7 zezQJUG#lsrKYYCjT#kAB{(sp=wi&Vw*`|oHRH$TW%vf$~wvv=3*;<5TlE}=Mg*3S( zJ0X!MqFallnIREHNm40_wAsrN`oGUR&-eFy|F8f5cfVfGGiKC%U-xx=KIi8=kK;Ix z)1`kgq3WLf%MG*VHJz;61jr-UWHfy+LDA96qH5yf;-rSYMGRjBwMFv&S9)K=VA*<{ zVoRv&becE617jrn4gmkk)vKwgspH8jwsZUdA4bB-9)s{o3v5wr&$IWULU6<&YR)IL z`})1pYZlgTw1kXX%7XxtZ-@Tmx&K4UZwHP=12mO8PeWj(m7t6V>NEfli*of4lwX^##z0as*hvBS!k5qM=!3< zTmHMG^Qe(e&bhRIzPVyCg#<^eVmJQw+ftH2pIPsh!z2yCbu+-k?dZmlTR0;W!ydTn zo+Oz(2Ciw}Fa2YBHlRT^P1RXgdn`)EF>yU7v)XH-iIuaxW$E?7x;fVjH_g6qyot>{ z`lj!%^WQTA(WJf#Fm+AQ7V}qkRuk4W!}`CTW!+F`s*Oz$JLCS@h;hLwxodmVUo(hq z$q<*`hK4EM6M*8ElO^W{V#(y{`;}EM5XEn;yxl^Il#MMU0Bs6lVaUywrRB8ly-U^8 z+u{g24XZ=E55HT*@Zs9`>FcGiXYEWTi(BozKuN0Iak}eEVx8H(^m~aj)MRv9-D?9} zmf@yU>`xL!abm+;OvRLAdq~KW3Jd=qhH`2lgx;Y~Yu=djp|1lfmw9>9Tu?CrC6=jq(Ldo}8bX8G`zvq*Zd%5A@F54n* 
zj_K+bMpi_IN;dR!K#O7(xwwF2x#Joq-7J%IjmFg~FdS|%Dp~HLIHF&V@k_S-K|PQ< zO>R%4wrx{(aC7?f=rIyqc)DFJ@2PrJ+eESXioWbQpoF)kO&)fHnh7M8FTb$Q(m8#L zR*r|WsUp=oj)byN$Mz{^b>D9~0z(Y$_4_hD2E9s-2LBoGxC;Z3tfB`ythA~BzRlNP zha!+L0XKCbe&xyG*IE~zFUsXH* z&cu#sTvaR<|q#AAz|l`Y9i-=tvc&o&i}6~+3X z3BxHbRNr?UH+F2w%ioschS<({W+;K<>B(~s<`<0DU4x+Hbl+Cz5Jw0cqj9YTbQJTI za#AG$Z#nkP=!t-?QE-QpMBiueV`yp0swRy;adev|A0x47 zIiabNvYD@0Mrx3W-OOcLqHy=kWjR;`xyO#dt zMl+5FOO1wQi)+XmV+YL%F`g7~A9N-mJlbS5EbuMg=NIg3mTy=qT~il7lo@T@xUm$q zPNu`M3y&+t`v1$4XG9cp3A1vhD2b({;uqgNYNpEI(7uBfKi@mo-Lg9OwDa!|*44P@ zx^~HE;Q!Cj6_2lfi-bP%w};{1pYEjikMuyV@4c%Yc^N5?Uh8_*t1%_L0O+n6DdWH1 zkaU)jCI$n>4yqKo0c=sSM@cd%K{)EOz-d^DXZ$l8#52n%)h+rxAQ#iL1i_?3L$)Ue z`Y}+Fgx6_S(K%N;83rIUed@%HY$hb0VrokJ}772j~2nhiD(ad+avhMoVeLq{^VO&%VBcH_QIj$jq^PMO7C_)xEsr%cc5l5DqJ zQs~03lII`q;NK&upkPFHs>Py@_Nn82odf7CezmWa;RGoYID%v;OV%wp9o6T7LpnOK zLch@a$g;~I``KIaxz(^YL6XuC2Sixfa_m=maRKROklm+;71qmh* zQo})1|NXG|eL)R$2QKt(V7MacZ9t$&vTbqN%`-!*?=|UY=ms1r^qNcuMPYddM(r^p z6>Jg5E zO!kMau5K83=qp8cQ)kYeP4_uWmmlbKMe^rS|6mmbgkz`{`UKc;3GYRQ1mLebi_|)s z(k~VvBM3|JlAKS}&YXRtk~`%4l<1170_@j+JMEirBZ~S|dgR#lk7t0_zaNw9K3Mp4sgcy=aDRbe~ zb))!aH6WHoPy?Wl5+w9?wvICq0EDk9_z^j@sgVx#a6o#xAg>?_6{ZV85of@n04^;C zz4h+cMp1N^(N(zGyJ1p<1SZdIsWX$GqT8J`ss4g!QuhzV+a|xcGi~w`_K=*3sD7Am zy3hRp2K5&(xB7rh4}g1LK*jRfwr${DQ;AiGBb|9ih%FI|h{iJ6zojP9`ntNY$jdP8LS8;ZQH#3DuCXa{&Kg z8SCFyb~W?G`|xs z1o1=%Bl$bg48SXaG_f50j82OdS)i9k_(Bg}?8Jl34&J5?F4CJ?KTy1=wS?YbSlGp^ zNo@*HBln6fu{-FAj&sln8$uC!7nPi&bws_;pAxqIX&Qg!uBYLRFBy>LmgAuxn?l}2 zl4>l?i8&&$c&=s2HyAiT~RA4 zqr$o7L#8w~EUQ}pOi4)2`Gz~a!i6xT4x?~Uu+qbcEu?|F8Oj7wDOzkm1cDM{*CyXi^Kply0P2MF6w8ebeCcKZ_IgiIix z^0XPrxCle9!xo3{wcxWArQ#7K2$2Es=zZDq%F4bfLh!jo#@*&nZ&neR%MgxCU$Z7* zzt5A7ei#P7UlUX$ps8TzXu~=>OOlj(X?5rTS!PF21Rw9wqaDC==a$x$Noamr8$fn=mQrS|Rm+8yN=!J%*S9SG@^=A8P94bqg> z0w=ZTY;h}RiodZoudv>5J?`b6Y}cflJ)nx<%%kL7M9#Bz3&qvO8Q@59v1_}I}!-k0>1zsRBiVUfdyq?i-NUGF7+Fq=rN^X8uOU!{Tw zQ&BZDHuL;WGG;oRWh|Oh0U%Z3?VWyln22{b&cdA!{YWa{v2zqDOetD|7I`w?w}dy70m*LqgBW#q+g4%B`e 
zGkI85Flg#QU^qm{?nJ#AP(3FdffR~aUu0-ffk~x@QsD9BV(SO6ACsxs|B$MXz2$y? z;LQUg=15Bu$Yt^K=l#EEU63tGfhi0M$yIKb`aZ>5$4rMASahkaV%=pgED8TTn>^Ib z=-z_2o?UHbcGz5MrYIKsE?rt5(x`>6X)?^i<#|O7l&JI=VRZRgINBi;bno!X2>4teXyOSPX9D{IQC zHtE@|KTrvuB*p|3(z-uRSCU%b@tg%7JBY%RAUi%I*62CXfO6Cq-lK&ed%(}SN zGWFr1TjM8B&K4@Q=KQ!_I|^${PJV@~V;I_49@K;lPZ}!4lYk!ufMJc6!(48{{=W`h zDh7~52WZ%>o0K%ps~RYVm+?F}PV4a!awZ`U!dyq}F{k8dHS~o)PhIHkMVA}C`gOj! z|H!{OMpF$h#Kb5R|2kxj6TtRQ?Msii*Vi$}{3IC-darB&F!2x5o$gln8;vfbh0+vG z6^LN=$_pHYWe+YS?qB;#1#tLpQ(@=)_c#z18BBbF3n}G_)lq#t7Y*Hsuwx~NeLXU+~fZl?{xNzn>McG>{-K02_FI~Dcs^-+?b~=#(!)|f9 znC^S~=FM5XF?7s>QrTBW11N(7hO-ZHmv^t%L>O?6rILycTtU)ASF91_UWYjZHh5^G?N8n4Z&t;Da z(6Gex^~Q+{bf)?8&;i7&?Ou-ye4&tD<&7ZQf>%@j^ixwsXAiX*@|g&)(u`eQ?&4j= zh`UVEv!QZ2s-qmk&!crOLklMBdHABD@Vd&%8iWzq)FB*LIyz5Bc-V^|&)!qZU zWfOO`(Vnwj9rbB`W8GCCMm88*qJ9f1UCSDDzlvdmEhu4B>7M=jJ#roTM!KYJ_~#Ij z$uxPfOi>sxZ{+@(^+Y3}u_3Mc*Yn>-m0BkG3it9XL%Jyd7tV6WfM+5nw+&FZj0(sn znK?c^LfxiK1M8R7q*5szwv{@Kqr^e7+Eu7oHplAuLy}HwbGNS;5J<^ov;I@n4#s+& zpj0}}B^*ak7VZrLz2@~zbFo=>4e=SAjE&AZ$*kE;4*sPZ&W~rGvS_1wy9M`|G0x?B zFk{L0Y*W{(&ZJ!vj^2(lH`VJRc?6Y_2rtETRhH9KkbTqG_r#vIoja4=L!jFPo!(IY z>c5ke!l5FWfC6A@(?7;+%gv=T{CMNlXmQTx8tsAY<$Dd~y;*GOAC z_#majD2+RSS?5;&QEnv+E-J5a3Ud zZ&E^VLe=j6r>V`(wR~~SNGB12LfX2f1a&{oz+4bb?@V$~fuqR@GSTMDI6S(AN)6ii zjPY~uo@1dw?!ZJGM&fgf14x1yr=o=STno~xOJ^RCHO2iYoWJOP5Lmkxn5!j0#S|9_e4FeJxL^^xA7>x()us~4Ifzza#?Og4hDigO;b?AU zAEL$(`gKUrNyYj!oX!Ygf^h3mM}2J3s(JJ2VkKRw{j>~pJ7?ia z2GUI?FJ2IJ@dupJ=3t?@y-q&mejX9Rs~4>m-5|;F;BAqt?m`b6Em=>`Wc{8!ml8c^ zC3r==5ie)U#osUJAL$Yo<@C9uXNV}`xpRCQ za=Z^99%P+Hxm6O5BaZIR0Gt_!7X(;mBm_H4V^sLC+rnB4hYv8Yru4{;H*k^T)g014 zl2_X%|1yrE-Frprq|>a%{ch310ny!ii{k;!QBQ=EkbTsX$G8;|0qXT^q%#S~mnBA< zkZB#1m=!&zAm_9adhZ?dQWfi$-vQ@b!sI}9o?TRpIVZLxe3gl zRuwI1zwE6Vtf(>|52DaRO-$|}L4%6u+I95pZEnfeh#9G`QloSM`(? 
zN0Bko)E*pQBFGoxRd+r={UmK)B(naL4u?UwT+tGEPtZ|P{z5OHW$4-IY)JPKv*Bkt zIS23GPo2QRilDk0V$>(7zwwP$p<^8`u8rqmQw2VYE7FQYBsrbIU31tYL0hmw*XRr3 zoRgx)6s@XkKEd-jCD6YZE*N@t3jC^ZvwZa(oOuDEld2OSxwG{$VJ_I|Y8QLC@cfwi z)hLOq0FI!OEUa{tZaYkMazk8l)%X}5YozET_tD^m0jZS&+nWi-Uu>6-b6b?``kq@0!E(WQ`zNR1d*O| zacDvA#`=x6kQ(A2e0+v_!Jbc34*i&xXoima#$eZK7V4U^h@-(fcRqc3%sLR2L0wH7 zl^ey0pmAQmzP9t#n>$wl(64l^SFedc(vKfKI)wESqVs~B{<`uA;8o^e$_-roEN{sUfM4?{kWB&cq0o-Vqhs=c=tje8 zPo9#-BSBiEYlc+nAjRhQlwyo5Dg2=A4ANi*2^mTLGrR34Q(T}H%NDvH8!5Rm2bCOy zEhp#{SCIIrqi!T$+)oyk8#rv}&=sA{4^L$DNsF)sozcoJ)E+L(r$mG?4w!2?*bTt1 z;;Dy8ar~3Lmo8mmRZ}=|niu{$UGYDUHLt(EKSjej7ePZ55pe;sGP? zrbug)5VFWImU5JA+O#Pq_D||3ZulUSy2Rr@o9wh%ECfNAUgO-MY=-zjk z062tvb0$i**e$IT!}H|2QKa*(=xA`y-ohV`g1+}QFt}2kGkoaKylcHTRA|3}OGT$g zj~{O@{g`FG?(m^QB$lP>ifSqV%gU@YZXKqslCM<(R7=MOI}_>O z)r-j;8Z~bGVDa>B1w@cTt$8bocDeR1=@ z^6`tbQ)r6VaW1}R`Z~8N3h4MqQLb`uUR!mr|KWWu_Sj~mW$l06G4eZQTGB?pDut5>)Xdw?G# zp-Ky^w?KOz`X&wwr93mJqYKMPOYdCzT-ATZF`Mqw_6YdHOasnSf@7nOo#LX9p30uf zaVnrOr1IUG%9y3CEHWv~_UYC0jAO|@E}ylI#D6J(ghX!rCFBl#vSJ;@v1{eEDhwd= z_bmXT-DC+}UWG&wGE4MJUJP!b^86uV`{S7Zd#>tU({x-0@{{K$&MC8HcqeM0f>pT- z@&Zx}A^bM5d#V4-_2hPUG#gFUm&HlNM|J0RyUA=Fi518^U>c|JaM;Bwv)&%?eb9hH zP6=uYVzmi(iG+v+*VN-ws@o*BZ*G{S--{Bijs)l=9WlD?+JzI4tx`=u9*)a!gy{>q zjWNy!jfe!kTFkqXnso6z3xQH9+92R}w~}{jwIAWxQ2P-CCs(jJS`chFY|I=Z=`!NJ z9Y#kvl7!N}dKvl;8QzP|y6um%3ln1QJ9JU}^O&!5k_ODZdJSxVxPEJ?cO0%~YGGsl z9t-H&l0BQByqVCGJCm=I?QDF}uYTM$$Y43rL{bl+NxT;c8xuKJJ^b#IJ`wi(;xAPoWjPQeyCx3UIXiSkHB_L4NL5sO39tuR0{rTYk(eP&JWNc z4#ZeddSb|grKIBQ$s!jYh9)fg#It&eXCPUdpmUUvL>Fn6+^5%+sEg9HrQQKlgNo-s z!3z~b8%Y@5=RD-jUQbVdo>0kz=&;_?!c$^SOc?)Y;SKATZ{COy%Zoz9vfseDP57)c zsTRbNCQWi(jFTR)`VIjh=dUS0DFo%2BH;f4#Ngj+16j=3#dJXmf^u%p8hYYP+s2o}NyCN--6xPB zU7Ut0S#l7wqJO!1v0h>0$477N%lf5@^*jc=8PEke`|Wcpd2gi}B*pq5?pnJ`tFlt_ zKfk`eC1puRMpZb#OR9!cUzUMSP1GrrjX`p4aMtsg7 z52O)z%`{oQ**r%ZAmX}Up`gS>*p6s9$&7Zr~4Q8h$nKwQ1#nu zg!&#tTHew54bs6fZextEN76279OB`lwi>^N!cJ&&E34K)C&_%JFry_ji!C)B(K(x1 
zE+FsnOuOdRTj$x^YEoA(oSnF}tKwh8?IK?UTAO{&QMiiVgSybj;g%~B+y@UHsH1cn zA$H|?4I~*LSEFCr%(>mhStz??UjdK`fJcq&>8&pxk~A>@*{bdup*+$aTzoJ{v3|G` zuTND*-*b5@^1YF~1$jN5*AwJV1Oz#nRcr%z`dEUwbl7o?t;+QA0>JxMQ zM^@5oGlkApIc#OR2ZMmFa!;eLkdK$sNT9T;yACr* zCua{ku{7W|$%wYtH(>5r^d{@&{WpJ3j=_t%`0-;*zF7_XAdvc2=+dBxuIcN0^_Bl;ZgAe@PI_74i4O{cZ)7!Y4L%ff*_PZ;-WOSipH-k%}Tb4qJ zf$>x&cK&NSS$NKk$_+nqL{LeoPB`)0OZ@!&2JtpyE^F79Vz@#U zScY&Y&>57d$RkQ3o=fxW(yTthl#wJ0x&F)4`ikjKK0c+f*-n`>z8N|PAe{=Oc3ZI2 zIRJJW5znSBes6AfzPfkB`iqtC036QwE=*!-N6!4*&BM<{J-8G{{riDxc!tK!-m2G^ z3)nq-ub!fK{5X0;7$6IIz3cuD59nVJOhSJNpd^hUN=h2D6GYysT?xOq*b96Qy0O3L zcqdvQXT`5L5@5n~*D%y+&kxi-Tlv8x`51?}w6;dVWS?21ea1@CjGZ|=htzwArFp2& zlpf30XNbm%IU}T_eZJ_!?S1HY*9BJxuN4adG`5-^tN-dUY#7a# z09Obb`nD^K4ppuZZU%amD=tfijOA(L@#M{>wW`a5Zssbku9x1~UQF zaaJE!2Xq;B|KTO=t+Y97B1Q@!YIP5l&;G1^-4D+}Fq?0R+>r)LildT9%)$2T%j8US z#__`E!7sz3dHU85zoyNBynHEoB2ns7P;cM8dv|1Ajy}YqJ8?cuyG*W!^o;~~*oQ&h zoKOG$`>%lteD(Aq90`@e@RF2`k2*V!ZK4z#17%3}p|m2d{dDu3@#ae@B{MG!4lpQRL0&}&zjsFQG4n`d6cJ8U$je0@sHR)A<#Uiq$v_Re} z6kAXl+*fIKwC1*H|OR#Yl8@KA> zW+eGgzHUN2#otj%6A4_x8d640AkbacoOH2||Bypt?m9a2>aT6jRmMSoBWL7D6HSvO zO3DIh;M4W#y7t8e`n$ZCJ|^iSxvDU}FhxhG9#il@phL$NX(h6h?kdfvaQPeQw%PTv z9w+5fG#h91IBu`@neynvoC^Ua@%LN3Mg%>T{o`TLm32@>3&%LM$E8o+@o@v@=s=@5 z37_bLj7Jz3A)~;dGrbSLquL#U7%*I8SMZ|;T3IrdaO@3q7d!F%RyIT9LJTCU1=6Sc z|Ml!D6x6PoY^OD)2JU%rQ|QwZ*$XJn!f2k1l6?#%BfoK!IAjSpqa>|*IkSuZo{^kl zXXz;N1M!eEI_$)bhTFq;Q)^0Fiei3{Lnb$nDFC-;zkU7oZF+SHJ(X}c zuQ_mCEA}%~xNs>s_7^+|H6uM=?P+_;8%W0lsIyHAy&=m+#rRhQrdjV#t6?2kNdUysCrd|0+c^eXY$q}^)QA%rR z9x|wtLn=wV1@=-2phe4crD?dyXyUa%(LCP@)1ItyZ($*%zXcjBQSKWHX>~CZp6 z>SKTKtT4rw>GWlDK8fYRF^|T0qdFDE%{Q&udrMh=-CW@3N34iev8?>osiOyxO zEH-Mv9B$l8Zr~!+Vu}(o-`+MV&2h5F!u161ysxAzJkM`S5rNxH{O1hmnjsAa{$3(B zDLln6+IK81dIPcjZ^rR){LIAx*IrL(lk`A3n1zz!V4;POq>AGM#M@hs?Ou3*Y`9oU z)y7Nxu=TAp7)e_ky>by`Acm08(uD#Nai%hxu3wnjiE8TMAzWSI0t(YL6{oLyF&<3i zb6`+?-FDzr8Ps*6;FC5R3dZ<{COAh)b2lAhlp6QD8?Z>Ksf+*ngEi#DbWwxhh!W${ z$5)%N`7l(W0FC!3pkXNz-4GP{9qwGS9^Beab%F22pffQTUL0Zfr`)`GQ+3V%d^7ZV 
z)UF09l#UzeP1xM2%SwWkricvh!Hb_{?XmzGXp8QEU6>5`(I8FRfd{;X%-l*+^SNC%#v&Mamhvyro+`;is2}zVr}B0F=M)vJ6QRDu8$r z52UUL9V*+m(|%^?S1Xr$!=`;h&AZKHG}|;a+q9TUVY7bk9zFhfa9dG)ULZ}$3FJ6g z3Dpod(tMLKXw9(q#oN?T6+eu?gA8hC$Q)-Oc*K#(-M&^dTQp01rxLe#*#v7(dvA_X z)l{-1n(d1gF6T7T9{A7xhu%p*H${&brp5&qHbQ{KIi4_$hz503 z@@N~3!H%fZ#5}GG6}h^&veNzB1R&M%(q;|baPOK*sWF856d{f z63IrFXpHXXyDy>~us%4#OLF$-ey-|hmSDu1r_1RWm2}!#2RVGjyF{GLNavp18_cQ( z_nbj3)~p+`SrKXYYzk0m`xO1roKIMD8JDh4-PJ(Zdl>#mnv8!sZQ(Huh;6l zhQ9n&;;87n=u_6r_qH|F$xH5_no6eq20C8aa$X)}V@H{SgwSdKJ+6A1bi?d3kVP1d z*hOEc{?;giP+DJ0WZeahlm1AS41&BHnQlj=^Pl3Mdk*2CZ68v>xOw5oCmEsi-$)0r zP>kaCB%V^R_WC&?J!_BRS-yilWqGgS#*(@Y13T$011{8UGJWlg49~f36Abx(s{|UFW29u#ktJMa ztP$oNz;#kqfduu42IV5L(D?l>{i2My2-*dBh3V- zdhkLkA$s>#rjsD-E6=9*c_p`?aj9b%*p~D*W0PfZC_3Pm8UCgG3cfPCrteA zg8*M_Us1m=!PcTt(AOx$?M8U%cIhHcGr0vTF$AE_8Dh7-V2cU;(q1zs@xh@M7_i2Q zu!pc&Mku|pvKE7znTY%|0ZR@c;J515op{m~FrYAe0Hr9Vh&F6eG+;;FO3ZY=t6dv) z6rM=1G8zB#a{%3Rqec5ZzdCFErNf60Lr`ri>JZ{`+JM!n%x%J&QHQOKjtTxBQLGZ9 zJ;D!z?tg&V)iwl@qTb?~lMRUaAD3WCmmnb^UCQ~TT->1Y~uWHwMzDW=HGKCbp zz5Dd(Q*fmWR590r|5leAT)y=+Ew;4$hW~7{79*<(01!5aiuFrn6E^FAz8fP1 z-D>_z!D*a2Rb(WzN=8yAH);!-tNVj2gR`iCLOBEa)HVNbwq^0)9#zbwD#5J@h=447 zDWGy%pp>1uzC^d(b!tj|m9(kQw<}E6dw%vyv4HV}SPBYJ&yu5us(P zWN7I*Te%>4>8?0o&>(bd4YUSl-ui1Dz9Rrgf0rl9 z@W9mNijrnRz>~O-AwmY=X#~TZR3rsyff5OLg$B*2jii{AqGR^g6oAW0 zDoql8lukMeVo*186b6Q+*Q;BDK1_#GK`9=_-AILiq#(R5T2^-E=vlKf-74B)`2s`0I66HUvRGQ>t^3baA*`sxc2r5x^v~q zDDo^@o+0v*5f~9%X-T7n^7(f;Q%-VV4nccQaff9j{X5AW5P0R$Ponskp!5)$^&k_u6DwDQ0wfj?Oc%tn5`9QlY~Kw|PJ!}cda1z?el0_3$M|(d{<_ua*@)v zi$riF+3ym@$O~Z3XYG5(vXPh04jSgmNCiY|_y#glR0)>Vc%u4u!J$-o`f&1=lg}-i zjs-JuTlpB`uqbei~gV&2Gqmbn2?K3ffF!e!RCsU5y^4xk0si&U#TkdiqZP^ghJyr_WV z=v@v{p^A$;IR;)8X?GZqJ4?VneNvZEJn0pqXLEUl;ijmcs2cLh=AqCNZ^Dl zzx*TPg}})CcGU(TIDJz-^z~b&z)KG zPNKIM{z!_#W_M*PH3|{lBhFsb1Z)|&&^ezN8W|?Sm;A)HyImzX!&U2c?CA3gS+61( zCO}9K`uMU{XzbNr3Y3;Bl!T!{2oJ zw%5?1A9t@LUaD#62{*z*f}f_rT7O`Gnwos&d+*`$D@Xia;({1 z&N;jN&83bH_+j&(wdCqv5As`enB=Tv()MXe{C=QJHnSMW2L2&$t84W3fp4__wHRYD 
zdj-g{!;KM<6VE*6!ZRXKcsYl<=$k08U%x&xXD%m%n--w)&vdy_M%EH5DSF9ggvLSo z2y1RjWlic^f(@|{-OxN7oFb4G4-Vmjk=dHXZu#@i+g!{Wnp+}qB9~l^&LO7@38YaE zCunW09cm$Nb??>itj+sZS2XsVX2o@gu{ni|(a@o@q0gk(2#7Eof&Cp#MPEAe!SnQc zr@FLU`7f@z>fpV5-KcoE>C3^jd;6h*kB_ljULZy7u4MSbgp>J{7>?G@iFkR^d1q)Uh7F+KW7Y7LyNg!XQHhEN3YAf1sx7EL z@m3y}coWE9ap0D`s8IaNS1yl_DI_smbMXU0c$J#EucECmREBp&A&5ev90vliK5~0& zt6xe+LeJ%jkWT{EN&^#Ua>A4Q903)l&P&xLD|RfSP0u5lqInE8Qkb?`sB#haiWU_W zw-)=vi^29G2F?MZs;fAYu1&mAGyGf7KMooAwQb#69sQY!G(q~@u50^GYaiuF z7OGedN)v!26aa}gSCn@npWzssBq$BjsP_g*4Pz%MP*ap+F?y{!nqD5ZcZb7rim3mP5O1Gd z#e|02z#tyJe%X$zE&E8RqNu0?ZOJb8>5)}ftAjt?8LJrnD1E9b>1^O6TAQyyS$4Xu z2919d{-cAYVR0^&Iz-kSC*+Oy)Bdcd_}oU3FNz$swvQT^>gMTm)`eJ(%Jw8cqFV(! z@%EZ7pUZ1wLphH|)-{-Bgk4*e3%a2-y8b{Zovx|ma0mbCs}cdS-_P}xS4U_E63d|J zaAkG;dpUJxmRLn#ZkT&tU@vA#97fAv$+Jn*^yL`*U1X4pu1M_QRoKwjxn=0sIO&Nj z4M7_1AtO1)Cd5oW7a1A(?ELFAx7-9=jOk&K#+C4qI!cnI6cgl<(@6znmoQ}`lw@$M z+I?eSWY_r=Cc-@*q3F~a?sWaO?xXxO1rX6kqC76abmj9kX#IMXj++uj1k%Emz5qq4 zso*4faHJm7IqZZZ6u}iW-wm&u9h{9k7cHi4e}Gd8XZodX&QdB~xehuYRv6HZW9uL_ zJ9cK^rvQ`ewNZS{Q@v#QG-OfH2?FH&B%#uDQ@(md9=2+bmij94ee-r-D|5{Si-y3L z!(3Wr!H%ZleoGL*izGrYc5o8u^Fd$vutJy+`ddJsq?Zg>Z=ThTm&(%Qvhj8YQiEQG zmSwr~6P-6mO8GarFv$?f*i4N(Zrx|s*RIQL9|5}i#KJ?{FF(GpK4}f;wD=(+FymKs z$1|N=D<}gGU+8e-bBSf?xQ8_dvgK{BT*(2vFgIPeq^sqMAJLiFYjRG_`vu<+_c_PJ zYN{v>HRe|R87F4mo}`kuV;^bAuSIDo`5k@KfKIW}EQu`T?n00S5kF}$D)y8d1}^qO zrURHBqAp(wL^MfDd09xK(AcNY`;|;7dYL`@obhwLFc@?LhHQ72bB|m>v>Mrn?w%pE}BTkMu0m z@%--5&w%qS`VCdOjS;9o`ci;()>aj8C*7ONEi+%phpP2qrg5$wV&@0K_6GjZ6)1?CMsNFKN>dk+PGLrK0E> z(Gd`Wq*eMoFhZWa`oyVI15RQTpDZn5Q!9|R^((4v(y`TWSq3SWAW~Fn=+OP8=h@+^RIIMXcNqUG?C$NE7QqJJ)UD3IOr)Woe z3zxxeaCfe!YzpUsRo7q6>m^?-zU0Z15L8ya-b9~8xzW&u>M+oOs||^hM_W9D z-6lOw#4##tz{IH8@k4Ju@uel{&3)Hsm3mxo;pFa$t_zu%NTT$m{cO@CEq3C^zRW^~ zhVR&c(zT>v?pLMfNlYDGYJn+KYBCnRkZ*|S@}<5qOFjk@@xH7ky%)1I8ZDxn$#Jq; z_wn7T=sA6%(K(7f@MP*vt2@lG@x8(WRyLg^y&`S-xO-O)bc04O8UhA{C@y>%jWOh{ z-)UVaoPU9Ooc~vyI_?fjSDd(o(uGaW(!k93 zM1 z<;Umu;vSNj3A!$#dVaw{<4S*R0xl^8*Mr7(ZtAF^mq|*kC+;5_AKC*vcRqM7SH3rm 
zea{W*I(#;)p{F-!Hap7-uP#XpNbwNvxFodG(>p0WNi5|H3s?0N&Zp|mv~Tfn1#;0b zH6;=IjRcPXT?!{^s}28J3lm@K>IhOn!{jt6h2N zS>7Lkfwo-O#Tu@txLxW_?S-)~^lO`G$ongAoW)ZhhIu}Qd;LCmYr9zOTN?nXu5)Q}ZyUVFJ zcVmR4k77%Ny2UA5g?9)r;mjAq8L$wi(0Ew`duAlx@K6b_E=CZfWmYC^zoghNPsP+@ zH$e3mWH0egJ^(wQ2*wOr(qjo++WUZXR~8Opoy#RR_t<@TTYJbr@$FGbLz37kljGdK z7Qy!r@J1OcEsUZ>+Z0QM41o#}wZO-ZC8{Yx)ZQ<1-3D>1LV#nW5@R0BTlv*1I6k?`ClK_I8yIWguLX1|96d zu3RgXYG;hF=fVa_B)~bK5mqWA`=5XDGl+&+EEA2j_99?amc0#>Za)Z0eJ$pMYn0SL zTF?q<)@Teuvd_lT5v*UD(8@xVZAw>mi?)iQ-J&uNb_+x7uTh;3F=3S(KH~w!EuqXn zdx*S_a{V})OrVrtkRH%`F^J|I-*<*Pr;lUFc!2=c-;9;94{UoinDw6e3=bu>TrV|& zxT$MS-Xhtf7VXbZ-C*`IUk~9vU+?9kE7E$!eK!M3Yw6-}tHsEg4# z2(55s7KjRMdkmU6NxS2OY;p`2D-Tvhh}JY#h59A1ncQM)x)3Fsn{jX?)Gpft*74t8 zdEAA%g3kx@cn^zhw{Rv15Jb3wyYeia5+Pf9CiP`3hqT7y-c(t9S+O}U#gZEMB7)Yz z+_VE0%SX~i7UXp#D4z=2xTY%-r(_Yf<-eS#je}7D!$NJ-0^6*ds}EQVPO zjT0&3Op&8YK}?C5n}(4Bs0n;{>Yv;r^}wHA%*}Pm6UPc|^$o>bmY;Zz@{j$_!T@jf zo>;)pBtvXL)#P=D>)#Y$O4klKl1Pu)vi($3!6+ZBDewDC4Auc%POn%EZWE94Cs^gFe`=R}fYY51B-fN|n z8?HTcRBDcQ_fGN6MU4P(RdkukCe%W!*o1eo;qbdd?=ZG-2mw{N)z_x9z> z(|%2xwQ7|OkMbNM(*L|R?_lnS&~noEg+BBMSmc6P`@TcAC2$>{ep_~oRL7Y2D+lkou@B`vQ&hDKdqIvhOZQ3~Rd08p>+V^r+SwC<& zkr3>FyPJ5_%8VJTL;v|cg1<5~vvnE~tN6D$|Kgu!yUi8_3O#_`vcMWwhm)<}roa>J z4P!G9B8lmgGk?p>^;+~0!b+l+6$HWpKbReqK7(bvJeHFddXPsyL=a&_WSz63A&k#W? 
zf~6>{1~uFmNwM8FmZNxaB>{tFyQxX1&^J(3Jf)&e{r@cuNym--f91`sU&@4+xVX64 z1>>)I)F(dp57S?Lr7P;{)!UoHX&Rd(M$ z8Yntxpp_zV-Dk1bDX>KrD@4>u!?2Rt6}9NblQVQy;ws6Y8Y!6EuJZ44URSPy*XSIgrv#n(Qe|rP zET#$fl&}b#)l1Qfs>3KGg%Fj+4mc)-g(Ocx|8WlDec+&#P6%*~ql~zDp+axE(nrXt0EQaOOkVu+_>V(5>3K$j6OdhDrNcx z_op`u7D|#sB5MJ>=ykCf_p|Vc=zzt7FJpU;x6oT&%#iz^8e6a%nUzS(=l4tpIZDkW zZAj3i+?xBgr9630U2(BAmDVW2j3D3yzwM-p1}e_UTS#52FdOQPa1}lKC5*?y+Rs@# zMd(8@w83|F24@&QwHwPHTB-FNeIEV9>1p|_vIT?BC_ToiQ8N(Up3bg%`QpVbVs12z zWuE|YC4&}>5FF^r)+yRN?g2ng7*rECg@Za0FQ*($Rduy3fT1{33h62)-fv{m3`*|o z1C{*RtJIlpXX}UlAz~zmB+{)tr5~Y*q!A@|;a8tCv&QeIW%&Z`7i$fYRD$BD}@JVSQ1 z<;Sx05(1cgwZMc#hF&y+NLtC}?UzDW2;DhP*v$~8APw5+p;~(T&_?R6aLFVRi{rW8 z(YWwaJ+<{@wwNt9Sikgoh_2nrRjXq4diCtNFejUM3xd6agNQP*bNkj@hha}zC}{MN ze*DJ`j;iU$-TnBz>^X7lU`*wi{P`@=Bqut3k|stFY2}1QF?Ja4t_;swi0!GAu32#Y z5)?Tmm!ktq8bKSX%NoC`{nGd()*T_AfQPxF2S4+vipPZHyUT0yddRV-DT1@24EwMf z$+@^1%C#`8xZ++YU;3U_`FR)PJec4791}&cG{h`W2pq;lO(;P)v)Jl0cqrB?2dq|4 z%Bc0Y&ZAUPM&J@B<3G$IYswx~lM%)kdymH&ZHGQ<@CKeQigMZX2|sDr5G_{U)R?GU zyZov?uS4~8E#`k!#ziBw+4hfYTYYJ<*_BpPQAH1 zP{5AN@)!7W*ys$DGS$;8p;D8}*6J{*ckgT77YPQM8M_2&BH#TH!bMZg3Y1YY5Trd+ zOx*k7@5U_GD}kqtQ&)J)5um#aE>zYYUqI_8Bh6cm}eF~rpy(-WyeG^L)2eV~lDoAxs9$YaiX9@yF;nUgHr1CsJWTxXlH&)WqhW*cEM<-H^8dN63A2$vj{gHaq-bC}%S9ro z5+AXZo!>$=WDs%tI|rP6`sG&(pNy{S-%P{Z^&qF*^$y!2=bzKJ4|!ZDuzu1_7hunp zwR-WNhT;tx)RoFZ`8;rV13af(tl7f$elnVeM#a|vMUXpnvEGLzroq%>3F!K5S&)eMpmk+Y|Jy~FQP+KsiUK#rGsWdHdVwO#*=`X5<#s78_Gvvl2lSzc~Ves z(k_8Z1wg?5JyUbzJ;m#w<&S<8OIgH?QiuRt?3F4?*IBy`M3Xyf*8M@*LVuE@!`Pu8 zU6gZ){y+8Q_Ga2o@Q*~r=1N_`n%E{%V?y9Hq7NnfuwjnW%B4ESj=g{c4r68GF# zH+8yZyg(tD_P!tCo}VDYFx0`7`==5Rzm#r#L{h9&=xn$-*uLHtP7As0Gyo0G01`NH zmJ_iDs(Gv)MU|$-f4uWp#l|&r$T!u1%a&IgH&Pau zl7S+l@0nW;wyG%tr6{p@kR>UMVlf(sO)v|p$MNF(B(obh{e}ph(IAk}{ z57w9MDyN&42AKDL1C&g_JtDvYjp_qjLden^a>sT+{C#r3 zBfJ6HLb4t!Joa@b=Y1uW;--)~6>`0_Ru>t1P()t#0+e`yQ#xb*U;VOir(=wB1k~S^ndV}9{^nBsb0(ujkW1@^JUAa8A4{`7 zT$tc@JE%&eawmyk$Zo2}5hh7&M^$7~!6ZudHs92RW}WSD7n0V>y@J5T6vsmkUc*7~ 
zB>#F1FvKi5v6FYr4*lXy%Liy(z*tMtHpV!C5Uyo4l#EPpldn#HT&$Tujdi{e?vQQj zFdktfOTy<_S4ItRlgFunOBbatmRm`**sb+Z(bPXjv!NL7NTDrc!?WW)^4E3ZrQ{tk ztQJ?CDt(@fnDR^7(`eE?$Iiu7ZxJSwJ=WyDeD}UaBG{^BO3nNZj zQ!9&#cA;wFWs@gCowMw8{?#M$?K=OdMdju1be#ite-&in3d)f*MOOb^l5u?%5)SOg z>E`%N_donuIY-)WAKWrP zxA#APY${6n`Sdrx9cuow|5sC6hkf-&(ygWwkBn`5MBjR(e^8f^*L`YJ_Eu^evuf`JN$hy}x1PK6N^JLLimnNKQfJ>`E}wN0^M{2_$PAv> zL8gR)DSKTVRQH|v`OpQ7b@b1_R@rf%m5)csb~~r;Uqp5Mm`lWzT`B=clN1U-~6)maw$~2`>tH_nu9?qK}|oY zdFoyUm;b)oNJ-lFXqTRHYCX88Xp9m4FN>@NXO=NNJn>6^7)b!&WiyKjno6SB0I8eo zW8Ui9Qxu1)N9(2Z<>l}D)jV@0(UwUYZnce+yghP4_I5r(_M?5hZTydi;w=U;GSbYY zXq=*Gg7C)WXkS8-ZQA??XkAUeFBfIqbY40?UP)n|En*We7$kw$008b9)KheBfnE=d zMjR^gGutiOK&TPIGK?T);KYu<9c1X8=Gb*@XyWT%?-;&hx27M`0z2>%Ia|=L%di}X z+B2(Ebd^vBr3?sgD>1EAtto&1^MX%DD}HD-EQ|n|dgaRFw9Wr{r!wu0v}Ni766@>N zu4%2dg*YeOcG%HKG4x-uJLhxps*9l`TDEMt=EdJlgpTHz`7KnSN?vil%aU;vrwRR1 zHi?|~n372UX#|j(sNA1E ztqJP>g@3hbspcG4Xz~w$or%S29 zE9vf6k}umVr4`;u4p@F?#?y=8ORf(Hn18IUflIMo@*^#cQNmUTILq77>c$@1MujQ^ z7a{l@b~I5^u0WRyPl;_u>cE@za~Czvy?a)83SORXyQ5Ca9?KL14kS^YO1m;;oaKFs zAwy^y8m%>*r?bFv$dLP?Q7o+o4XMy| zPB8KLVu!mFNfjn98+G$-5d)NG^`)YsHD6|7sIIRMne${q5^&Q~QYe{>JO0p*=$NH% zNe0UPHt!g)_XptozN!y}zqAtOg4KTN)G0SVv~%LpC#)TzJ{w7}l$JT6YwA8v(G<=t zudftNLRlodj^)oDU?a2`L}Kgm>h0Sdk&&*Nasi6jiz|QarTDAQ(P*f`5ne0GVyeID+2JyI#p;0kO*iYM1Drsv&e{U|=B28O~d{h%LbFo=m>u8=;1|h8c0$I(Cr^-Sou zIy;QFu=x1=K%@{8)hDrF0NCxBIS@i9VSeP72xwh8l3fuca~?yAq+5=R*S5-)y$XRd z&u5}VeMO_K_AwZ{=KUZ)KYztq!s*bOnhzhw2}?GodDfS^WVnTeeJ_^0?)re{h(8^u z&-+X-0D+3&6HO;hqg%l^T&JFL_r;4BLHd|=EwqwcE%hZe=BtYJRb?|&8GMD!2miBU zk9;Fx*ZcAHwyLSLc6dbA5}*5&uJ2BeA1v(AuJ8!n%dG^#lDp*t&YCDcgg6JGq{d$# z-bSbkUP<{|PIXFh{CnU;haE(z27JU0{ZD`(IcergGA_5CyPo{)18#;Z-h4uVDBbxT ze_Nj0YQ&fCb!vn>{aHxw7wirv5B7HWWL5)-5^tDVU(uaP&wg0I_Q5fY9{>`_D6k2c z;^8d)(pzAT>6Q_8RfZS(0kVcvQ-K@3y#M9B@BOyzWHZIz>NcbHa87U%5SsL?@4P<; zQ7U=iC0e&++RoWZEEl*Mo0}QP)3~cg(2LKC5e|$!?j|Asu+qHx6iX?RGWb zhwz1BA)bHUXPR2Zr^|XXa2%`@4*m#2D~!4*A(}L4B7-}n_YJr8W#t)lAEE>gkn{C9 
z4ZT{Nx@}v7%z-ZdPkCn=R%N-qeafuuRNI|XirTPHNE~tur%W?ll7$K=BF;GCgo-(! zSZQyX(-N2BkY?h*pa?RF1A7x{qEI5B;!q))GMEFP5b*zsilp-`9Oz=XIXv)oiuJeGl|F!V!6ItHUZLBQLxJ+z^RH%OM;s6Q>ke?*R$TpRCU7 zjb#ZFdvl3=6wmqf?ef%I(RA)qb)-(wnx9guCjYte79Y829ch3l)v|Hc2BSG^JIl6Z zOBC(3#Nor>Gm1{hq%Vh9pC{Y=fIdft((am>mk4+?LLR7Z^xgTas$bi}}qS$qI{9Jw&z1HG=@ zD>kDh(bzZNdWf#*5DQ|$!f}HJ4HDsF|8%j8{vN&i5C~Thur4 zM*ofE7`6~pl*~lR$%N8A*z1T z&pgGk*yH+k{>$T=QnwhK-XB#m zGlk?{lFS~uD8_r zyXrzj!uuWxlRMb#dAg4oZtO^vcO>GtWIm|$th|R@^`M%r(1YInrDyd))JjL~#y9+$ z-WlmW<^736TPJnm?e;W^M=VW)@dx(qeFCu4;Vk&yvK=6mGvFoC;=l6RE2}8}8<$-u zWHB|#dukJqJ-@tkAw-;aK`DT_WCVrId`4cRbq!r<#^>7D_!M%on|Pw{PlNp>(%P(g z)sgVN)79hp?eXC8q*OH3?5a!A@r_^cY^t9Hb{xkFL#o|kr1j4d$65kt;+@VVzcK^n z=)CHDKEJm06TfNu-NxzLbsB~{Tkpd7w$?|D6nU#b)6NkcM9k}^kKmR6Le-xhrjI63 z3`-T~tp$bGojkW%>!>|}T|POG6gz@X(yvc-9jwqapo4%}-HqCmW z=hjE4fa=abiuoxIb&NClE2)z~_DP#qi#vDis%Lw)Q+L?EnH-NF(9FZ<(h}A`m0*GtiO-7ZUgle|L-du%OB~_ z^}5oM@KN&$xG^ts;R`k{8_9R3EG=M!+}-G2n--3-2`E5|V&Av#&5#%{0kW6&ch*?{ zDzMG!>D%{*ueAyI%TlG*H<~Op+{L16yW;rMeh<~{h5c=3RPq9e^LyJ_@*EULz zpWLn{)7M$wZ%7X%IxU{{*^xBAm(BPODj>4SrH6M7LAC^t8Qwd4bXl1{^lJgUazotv zu84@d`Z)}Ox^($*adGPWV!!2Cgp_nhaE@m|7UOsBd{o)?(1d30t8Rj6wNVJstMDxX z;EQmeydar5=c-})svBYEw~96E3&blYvCIbjB$ z@ws^-9xtCLJbNv#Ukmw4dgu9MB9;tV5y>#A`ReaMdw>(jWy1!G=B<}YX+ulK@#C12 zOWcT1^ydfmoDEKFXC%fAl&gaS#^I-v(?NkTYIRdO}{p; zeEsdJRY(fz`j7*7;!%(pG7qPo$p89giG`S5O$Ug?25O(-r>;X=jC_gNx{N%dAy=>5 zW%!4k&pv+g#PQNR>xT!9wIuRnMbHE`O>&Qkii(2!?#i6|7H;^+4drwbRYHBHfjtYE zbTzrzY6LQ4_T^L`1t%3^qXEp7P_3SD4xEJ6r7QVGt~FtepN3vmxgs@*y1dkDH(PF4 zOz}sQAIg1&1TfI0Iv6cY+`rl;{~8$dWT)>4(^}nM%(k&l#>LF>EUG5&P2WDF!!&BW zCv~DhXscfK<(BIZ8L)>7i62oHWrST7`meL?(%vE4nHUtRH{;NZo(-LpnD#?EVUoM_P3MM7NGwrhbwO zr(U=5i2)N2TWhv{(f;u7v`BnhvOUGv!ASpAk! 
zB>epI#NblMW9;#QoZ?fgRK-mvKC{y!@mR`ipv^JeAH z5^sE?%v%6qxO4UYX%o<(irroPONH-Wu^ji+zfgCr<$SdMbNnL930?g|zpwxMhgF47 zFu_N_E1j%uM+Udn{fkxU0>VRMZ}Dn=Z*%7_9(W$H-eSUX-)3!$ui5l6)2AaDx<_y` z?SmleMW5qmsDv_;O6qiMYFaCK?c!x2VH5Ocl{px7Xa>tJ z;$NdXXlEqwr6Y(Bzn=q-frN7u>6<&93(a3SckbNpj@Cnxdob`@Jwr`(W!uswL(^=Y zx#NW2%%{+xG=LZYn!_*b{S@oD!U@;#9a8qJ&L#JmLsk~GYY}~6+gow--pM*eqlLb8 zJc*Q~cgqIWeBC|g*pzYY(5=$O3|s0qj*(hv%K>V38}5NYCXZ-Vy##F5D50}}6~^I! zCNYoUo*uQOqU@Pg0lmnykC9V*JHA@;`^l^I4MLFz^K{?qQic~#wC$XmC!DfQNzMkB z&G1uF(hpD9JpTflEMI_DyTQoG=^FExU}y%41?qu=&4`R01;?jvUjkClm=1kXCn&)= zrg!P6TVDecf3kEnreP_=nh2&DC)Ng1u#E;MCnt+v^ZH@VU`mt0G!SIQirRL3ykE|l2Ff*s$xLV#U)*Ep2tsT+ zWvX+?yaYS1k z<#njIuAN*O`#1Oo>_9bCxU)4TvL$A{S)DTJ&#$!1ET~=cKL0C`1hxMA4=ssq?Vz3u zXMA%ajYaBw`+1<5d$)c%uwK&_Y@PwPQz|A`^o5j@T+yoL9qgOqIXJ#)-N$2S5Il=X zD_COu>fZC8_T)+BFf0E9o1^7N6e=Q(cW5@<;&04OqzzrXvu2bi4LX=wVfg$;?W8fr~#PC0Hg zksd-43O(Cu=_e=&0lPwB|)0Hd2FC295jsjscov3>iY(nrv# z2CuaHG1WP9+)}`tNE&Mng9e>7*~?v`>NOC|Khf(Qys8B-LC|y^pH0M(MuhkGtm9g< zMgsOyG5mUX;`aSy+55=>klDGiBt}4xL;EY9J&*A-Ia5HeZV#&JSq%+xj%3qWs{Ty1 zvp$*m(%du>is3FU8^cp-e$wwPx_OCx+TkZnYeEJI%J`c!nUZI!`JM50md$B*5BoML zC|lNI^@N=BHNWW6OtE4K2&C9Yc+kj8WbpZa#6<}%Za?I2k%HwvcO*%s zz(1}?jV4f!lxJ#qKok|?ic))g=n>v0HE;a=@u$fltI z^{ILga6YFc-LH95mrL5*E$JGnB_YefrE_bp@cIvfzp=T}IH}VD4h?@AyHdZ&%@X3R zI=|Lez`rbz-3dr#zJ7jY;Oe0xzw1}0(}l1WzrNVd z!Au?^ia0ljoM&82c|wDvje$oul*j{*1%D9m(ws-npZb{tHj{SMkP{Y?qvaABwbzys zMO+IKS7o|Sy9-87`Lh5XJJ0$)B!yv!LjRT^7FIRxq;`OS0fhA_OVIo{==<0S&1lZH zeSXr!t>s?E%>;ZJAaWu3STq`2pRz?QR=<_r)r}HS_%KL%re-gr(!ClnvHH%~T<^e@ zEeeM11ShLMU#}-lOZzG>K1sb@Sa_HWD-RN=8v%FLur@V+H2d`lJpvML!gR4*8?FW% z;8zcOzx=DR9C@9!aIFcK0ofn2<-gkmq6Dbf4k;X%sDw|s+ez8$)r*?&{9v|M=qb@y zVYM!_FR3o-RC6@J59;&5OK+leAQcURf|z~$`SKgZ@~gQ-oAHZm)k%2%T15~2`M)B1 zu`iy#5(fM~`Jtt@;8W*5ShIF*Iwbe%neS|8<||1xE~_JUwrmHaiO@jA?Bipu=aL@6 z?~nDli$q>fxktt=0)y`fCM8ojHubigZvmdotfUHk$Q&cJ7G<7B(PEwCnQ?$UEJ*>0 zpdqQ#n%I8VH~ZF0Af4O~Xqa;KYJ6fv)m?FkgvF`5V?PY0ohwsm7D9IMxqI^DNzE4{ zOEuEoDiFZ@Pq2%Ker=QFHl;fEwxI{#6e_~Z@O+u6igv#G#sK-bFh 
z6z{ke8PHigqU3pgjFgX_w}u#K_A;|}7$SFQU&@(2g@VSR#CL!~?YY&Gai9B-ST}kV zHs~?urX05%KaA!4(}Xp;vv-;=aDHRL=OD(?<4g2SgzWH>Y2Ir9EUq;3Ef|qlmsZ&J zcE75nVBZi7BN%On&*DBL&LrqYq!>mv`?%#h z9AKorn+=c_$FMa``(#`OrNknJSIrd{Crn0@=rBnQ@|h&moJ?ahZnF|X174p8QsA%X z`h6+pL9S;K#`ExZ4Mr4Ku6UQj`U%EPYZ{VVjhP11kmDw(+;ADF>RKLb3tA|9T!DZn zi$Vx816Z$`)C6q`@aPB&`99`(R23sv=6IJo!t%E_sx2+%h9=O+%JR!EzodUx#O$GI zn9?s^pADugc*pdF51fG#zV@lp27hd@EQ7f3$*+g%A^iTxjZ&7=Eu#WB&-3(b*6}m1|G_1YPsnf z5&gC}jQAK7LylJ#GelZ5Ir%qFdI|uialW5lp~BUWYu#;fE6;;v*^H;g2`dyLqPk@y z+Kg&&rTQtN2Q7P6@-BzVq9!6(OzJu`J#(P`+oo_f4^tMf?T;|U+->|c|6Yi6_rn90 zZfpC6Qo?&%tHuOORPvdsv%7yz69LR&{e(a?J7V?FMzpjjq zthg?K3cO0QEi@U;&U;xKn8L%zGUgZTE{YQW99`r5WAV5OssGC)>!zbNfO*otPFXgx zOI)}7S2>Zts`L7z27fKZAV?FEI@Q-U!mLldu}40Yv8T(2>g%m68q3HzsN^xo3^&h7 zU_ag?F>~b*t!ry3`5z_GXXMrIXf{P&?v?E9gi&7<3R}>u$rFCC{&@ZrOK$4%u+^X` z9jCL}=Ckj@aJ(pgyu0$)jQk6GvH?ojL2iT!RkcLQJUWS{il&2OVGe-pU4qPTJ3l{+ z>R7^0vUOFJT8Z2DpV`p*2q;P(G}MlNma{Jo!m}rnRg`2)RtK2%(JAC~=OE5o}k1b>QeCEQF*fI+%mf zUhDgG4|$Y5gf%f>3rMJ$Pcm)J!c70X?|=boSR@ zeWjsyCT2Z40v3O8>%qU-CK-(S@;bH5WB8d`E%{PeumZ)+lg}VW6&~Ir5p00y2D3ig zDMDk*JuGJh^%bu@S2Xw0mk^TkcIy>9)~jF0cTc3cROU2CLdey&_g?H9Q^u5!OrE=@f*3DR?P?^{n51FUKmt|>a&Cln z(zzhpS!_}f+5Is)*Dd6mJc0hxByMMnf$Z~BR@Ehhj=o=|B5QD?f@L1V0r=K=Jd8|m zh^Xj!3l<3ZM>lD2gt>2AIiUpe1IM!feH*|0a=^-kO>`;p*|yA&-G@7gm=v+y%$%$* z*Kf858mXc~d}a&Yn!r4RZKz8jZXKhk^x}g8s(4Kof(>(o7TDX_k;R~n;thD8DOpkR zxHS?sc^GLHZ&$>nV$Yb1;d?cA^4+H+)@X4$BKF=HRWZgy1*cJqqrWJmaGXIhBI(`w zGVWZYpa|+LP60*jn86MtEO znBsn@*JdIcm~=JjdUNPC7Z>^!ZT_t9A$;Vs9xVaUh%bI{HR^!fWiR-3s6-wuJ3_9U z(&`o!Cg+WNmPa%8bae0DbE$`9hHFbb!`Nl9Y3LM*_PM2} z2pySh20QQkpbGr`ZvW$Wmj~?N(yiWyo@&u?__Y7?ly=^m9jnsIfQi=rTT0&E?~>z5 zVc|axIscFOfJ@u*@<%wl^Uqy82)jS};Dd>~=CEIvp1@U+0CTZuJAMJ{`|PcJxM9WZ zHzt=peLBkAp(i5f|CG1&cA$4tge89qvu=R{u6k{m5mp+Ir z>*C`j;10RB1CO4&PM0KQFmH>tWK8z0(i^Gm^VP}^3vmI% z(!Q;o+E3)Y_A3U_r3;$lsrz&8HH#Mw`VcpsT1-}^yWT36+cCvkTcg=O@bP2L`MW(w6lXT) z4Wv>Taz3sjV{vF57ca+@i@PC;KmFNv$|o!&Tn(u-5TBv3h#TQ7=hEb<2>I# 
zCapudCJGaq2cO^O@SRiAGj~>Lv~39%86nN*XgD0cKcA_AAv-Q0aFXsPy_$DH76XOy zG8LOBL-b~#je8uPTAnbeAZ-uJ=-F6h*11>U@o51c_rEj1&O@AEXj5AZNTY6#y7*=f zHo#_77;21dZu=R$40G@W?egnQC>we8J1W+6anHBj`hM`)wEBzeJf?iobRQeg98m^s zBqeqhyv|2s9`)J`$eVj*yZ5y&TUVO{A9TJEgfG(37qchFr2N%8a(M?2g42BkOR6WF z)n{kKk?^`lr!@cePBON*zRU8y{AbX2{>2_V@u6f{a&X0QZqqxBJ^+IU-E%O~ue^jo z`FOW=izYG>&iqCuWs*1-Y^K*5FWHFz=;F5b1~7#>JY#oww*rkh0W-Uu-6n5Q#`Xt( znGa^n{@@5DNV2aj3_Ho~`o!7gUK!D=N09Sk=kxQsFLckmIWp5P{>Dg%Vx{>0 za@2cv8@vBR|9hSuk3GB_j70L|J1Ym;O{+djBsVneQ?bBNdG(s3N92#b*8z_-G(DvG zgO5OSC^@~Gkcl0l@#O-hB)a#Mjy*V@PFTmiY_pXp%n{!SSXPk1&^s8MvesMvimuo@z zrmXMI$G-c=DWZA%55Y$6qam&!cbi6*ukJbr)d7edqbnwN9-1Cf(((M#2Y9x4Ke)@3 zshg2KVgujnvP3z+HFF@s*-?d%eu@^S?nz7GL+8{v&IuKYP=2Old!+lSs)$vi`&QCs z8eK|h+VuUH_NPKK!#;8O=gd}5b;$wOfgklBF8T5dkl)$)QOl3pdpGzz>g8L{e!5ne z>7D8P0}CMZTJEB8xE;z-D7T3E20naj@3WD9-51THDIn)LQMA=E1KQKfBs-&Qo$Mz2 zGr;5taeV72F9JdaIY$^MH@pack}P==yZJN6EP6@N45JD!4M+(H2S|=MdN(ftdlhbkm1`KbvNIt zj4a#nZ073q9@!*;r_arB=pb#ZTY=_Wq|D1q8)$-P?|4>~{oJrH=V{sJ#fL{aFvKK> zexCn*qL@Y}8jNJ#OP=L!bw1Hy1*9gd#vhe(#2yV9msuXp1k(}Y%Py2TUc^|`$BANe zclg*xmYibEi`Cg9Rkwti<}3;rO~S3)=Tn54>pgz+n0gj};DLm!i;KRcU<7LImQ@j) zLGITGUDcc&Ae{GzLs2JYL!pex3o0AuT)X31xJ8=bd+X!ifhp7);%@~GOUK(-s>PHd zLBNa#Y7>or%@7f*4A7~jady=%cDRuCCfO&|BpxJn*q(>rgc!UX(@!y>MU)cY;<1qA zEswe3oRdZFF&>u%>T@!M7p`Cq%SlQve76S0Jf?|xtnhT@auD$0l{u{|&&{Vl)eXlV zQWxEU*yPjw~T$| zOdIcXQpnnmNq)t9R)K`mfI1u)*?qsFcfGn!9i9+^We_ za&n0&86)2${YInpRB#9z5n;TRcftRAP0Iz#yAs(|=6b2AsirW`8GTXV)3r41BWlQL zond1?Wwt@nB3CRiA7?CQb~OwaNo|d9v}=n5Ypg;7N$dPst-*32PaL9F0DyKknkC8k zwL{KiILUgIyvp;DlPcd(w_yt_kBnBcFwv^HF&Ro~4Ya7-`+-E8l=mKYiS-VpAUsG; zFQvB0B6uaw7^d18^GsNKa*90PB%m_45r|MbT0)9kWXh0{R8|*& zsSwDoUh50>+vyCm(SN2`5*ynnp{~W~g{o=mh7B7Ik2|dq-Kv&NPpJMy;j!~wQJY+I z1G_q?U28xMX z@?5!~a*vZge`R12Y9OSLBUKaO7Q=<=J+Qjb2mx51@1}R9Js!>+crWto=$C#gjeWk^Ld`zvm+LNd5P|*Dssk|o!;M?Ch@lyMb% zIk#@PM+-lrn3m0r?D0ghkOMIu9ZWXWK;`>XWW1sIK~n4BO=Q#xM5L#zH#aRdmcRmt z#PEYPwvMuO|NX2Zf=qQ0X768>=KSjSH{9N4a*v5W{==fcCaDqD(PHG}UdRmj%0RSR 
z43_C#eTo9tg<7`tmF-oeO5W#O8c~Tfs4CJ- uT23K)^IqEYkJ>=yOYiNBp2(@Hb zflaKPkUf{>xWI7Jwnh^!5S07g)o-v$QXk9f9P{MqCcu^IY(ji@jPdZZxE5hi zKlPh#J#uV@tOKg@$1>L`wa2V$t`PE$erSO}11_zx_G$u$PzrCsJVDZZ`UesmSmmZJ zYyN&yET10m9}APH>QeN-jha*~ysG~vv5-#K^B?BF`v1ufQ!8IcxKP`FTk4rD`u4_w MpMG?>|M)Nd2Y2> None: messages = self._assistants_client.list_messages(thread_id=self._assistant_thread.id) text_message = messages.get_last_text_message_by_role(role=MessageRole.ASSISTANT) if text_message and text_message.text: - print(f"Assistant '{assistant.name}' completed task. " f"Outcome: {text_message.text.value}") + print( + f"Assistant '{assistant.name}' completed task. " f"Outcome: {text_message.text.value}" + ) if self._current_task_span is not None: self._add_task_completion_event(self._current_task_span, result=text_message.text.value) diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py index 0f8ee1d66f0b..98fff01843bb 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py @@ -18,11 +18,11 @@ def __init__(self, assistants_client: AssistantsClient): self.assistants_client = assistants_client def enable_azure_monitor_tracing(self): - application_insights_connection_string = os.environ.get('AI_APPINSIGHTS_CONNECTION_STRING') + application_insights_connection_string = os.environ.get("AI_APPINSIGHTS_CONNECTION_STRING") if not application_insights_connection_string: print("AI_APPINSIGHTS_CONNECTION_STRING environment variable was not set.") print("Please create AI_APPINSIGHTS_CONNECTION_STRING with the Application Insights,") - print("connection string. It should be enabled for this project.") + print("connection string. 
It should be enabled for this project.") print("Enable it via the 'Tracing' tab in your AI Foundry project page.") exit() configure_azure_monitor(connection_string=application_insights_connection_string) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py index e5fc84cba49e..cc4500c248c5 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py @@ -51,11 +51,7 @@ # Initialize assistant AI search tool and add the search index connection id ai_search = AzureAISearchTool( - index_connection_id=conn_id, - index_name="sample_index", - query_type=AzureAISearchQueryType.SIMPLE, - top_k=3, - filter="" + index_connection_id=conn_id, index_name="sample_index", query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter="" ) # Create assistant with AI search tool and process assistant run diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py index 46f0fc863398..d0387632b874 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py @@ -25,10 +25,7 @@ import os, time from azure.ai.assistants import AssistantsClient from azure.identity import DefaultAzureCredential -from azure.ai.assistants.models import ( - ListSortOrder, - MessageTextContent -) +from azure.ai.assistants.models import ListSortOrder, MessageTextContent # [START create_project_client] assistants_client = AssistantsClient.from_connection_string( @@ -83,4 +80,3 @@ print(f"{data_point.role}: {last_message_content.text.value}") # [END list_messages] - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py index 2272df5ef505..6a54c6d444ad 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py @@ -40,7 +40,7 @@ from azure.monitor.opentelemetry import configure_azure_monitor # Enable Azure Monitor tracing -application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] +application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] configure_azure_monitor(connection_string=application_insights_connection_string) # enable additional instrumentations diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py index ebf991cda895..62677e999a77 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py @@ -59,9 +59,7 @@ thread = assistants_client.create_thread() print(f"Created thread, thread ID: {thread.id}") - message = assistants_client.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID: {message.id}") run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py index dd974c97278f..6f897aae649e 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py +++ 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py @@ -87,9 +87,7 @@ def on_end(self, span: ReadableSpan): thread = assistants_client.create_thread() print(f"Created thread, thread ID: {thread.id}") - message = assistants_client.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID: {message.id}") run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py index 29c5909cb7b1..603fe0b38114 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py @@ -32,7 +32,7 @@ ) # [START create_assistant_with_fabric_tool] -conn_id = os.environ['FABRIC_CONNECTION_ID'] +conn_id = os.environ["FABRIC_CONNECTION_ID"] print(conn_id) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py index f8ac75ffe5e6..367c9be610de 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py @@ -39,7 +39,7 @@ ) # Enable Azure Monitor tracing -application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] +application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] configure_azure_monitor(connection_string=application_insights_connection_string) # enable additional instrumentations if needed diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py new file mode 100644 index 000000000000..5ca326c96938 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py @@ -0,0 +1,110 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations using image file input for the + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_image_input_base64.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os, time, base64 +from typing import List +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ( + MessageTextContent, + MessageInputContentBlock, + MessageImageUrlParam, + MessageInputTextBlock, + MessageInputImageUrlBlock, +) + + +def image_to_base64(image_path: str) -> str: + """ + Convert an image file to a Base64-encoded string. + + :param image_path: The path to the image file (e.g. 'image_file.png') + :return: A Base64-encoded string representing the image. + :raises FileNotFoundError: If the provided file path does not exist. + :raises OSError: If there's an error reading the file. 
+ """ + if not os.path.isfile(image_path): + raise FileNotFoundError(f"File not found at: {image_path}") + + try: + with open(image_path, "rb") as image_file: + file_data = image_file.read() + return base64.b64encode(file_data).decode("utf-8") + except Exception as exc: + raise OSError(f"Error reading file '{image_path}'") from exc + + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +with assistants_client: + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + input_message = "Hello, what is in the image ?" + image_base64 = image_to_base64("image_file.png") + img_url = f"data:image/png;base64,{image_base64}" + url_param = MessageImageUrlParam(url=img_url, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageUrlBlock(image_url=url_param), + ] + message = assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in 
the reverse order, + # we will iterate them and output only text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py new file mode 100644 index 000000000000..f0a80971bcff --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py @@ -0,0 +1,91 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations using image file input for the + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_image_input_file.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os, time +from typing import List +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ( + MessageTextContent, + MessageInputContentBlock, + MessageImageFileParam, + MessageInputTextBlock, + MessageInputImageFileBlock, +) + + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +with assistants_client: + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + image_file = assistants_client.upload_file_and_poll(file_path="image_file.png", purpose="assistants") + print(f"Uploaded file, file ID: {image_file.id}") + + input_message = "Hello, what is in the image ?" 
+ file_param = MessageImageFileParam(file_id=image_file.id, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageFileBlock(image_file=file_param), + ] + message = assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py new file mode 100644 index 000000000000..e685b049358e --- /dev/null +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py @@ -0,0 +1,90 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic assistant operations using image url input for the + the Azure Assistants service using a synchronous client. + +USAGE: + python sample_assistants_image_input_url.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os, time +from typing import List +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential +from azure.ai.assistants.models import ( + MessageTextContent, + MessageInputContentBlock, + MessageImageUrlParam, + MessageInputTextBlock, + MessageInputImageUrlBlock, +) + + +assistants_client = AssistantsClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +with assistants_client: + + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + input_message = "Hello, what is in the image ?" 
+ url_param = MessageImageUrlParam(url=image_url, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageUrlBlock(image_url=url_param), + ] + message = assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. 
+ for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py index 4a955be24415..55918213dca6 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py @@ -62,7 +62,6 @@ instructions="You are a helpful assistant", tools=openapi_tool.definitions, ) - # [END create_assistant_with_openapi] print(f"Created assistant, ID: {assistant.id}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py index 14d8622707ea..d0428e0383f9 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -36,7 +37,7 @@ import jsonref from azure.ai.assistants import AssistantsClient from azure.identity import DefaultAzureCredential -from azure.ai.assistants.models import OpenApiTool, OpenApiConnectionAuthDetails, OpenApiConnectionSecurityScheme +from azure.ai.assistants.models import OpenApiTool, OpenApiConnectionAuthDetails, OpenApiConnectionSecurityScheme assistants_client = AssistantsClient.from_connection_string( @@ -50,22 +51,21 @@ print(connection_id) -with open('./tripadvisor_openapi.json', 'r') as f: +with open("./tripadvisor_openapi.json", "r") as f: openapi_spec = jsonref.loads(f.read()) # Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) auth = OpenApiConnectionAuthDetails(security_scheme=OpenApiConnectionSecurityScheme(connection_id=connection_id)) # Initialize an Assistant OpenApi tool using the read in OpenAPI spec -openapi = OpenApiTool(name="get_weather", spec=openapi_spec, description="Retrieve weather information for a location", auth=auth) +openapi = OpenApiTool( + name="get_weather", spec=openapi_spec, description="Retrieve weather information for a location", auth=auth +) # Create an Assistant with OpenApi tool and process Assistant run with assistants_client: assistant = assistants_client.create_assistant( - model=model_name, - name="my-assistant", - instructions="You are a helpful assistant", - tools=openapi.definitions + model=model_name, name="my-assistant", instructions="You are a helpful assistant", tools=openapi.definitions ) print(f"Created assistant, ID: {assistant.id}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py index 22a76c64f3d0..bee37da98664 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py +++ 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py @@ -77,7 +77,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: # Enable Azure Monitor tracing -application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] +application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] configure_azure_monitor(connection_string=application_insights_connection_string) scenario = os.path.basename(__file__) @@ -97,9 +97,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: thread = assistants_client.create_thread() print(f"Created thread, thread ID {thread.id}") - message = assistants_client.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") with assistants_client.create_stream( diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py index c1fc673681ba..8ce8e8836fda 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py index 8b5db1b0b91d..ad1bb8214ec0 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py @@ -98,9 +98,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: thread = assistants_client.create_thread() print(f"Created thread, thread ID {thread.id}") - message = assistants_client.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") with assistants_client.create_stream( diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py index 1474ee93aeff..187300a775d4 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -44,7 +45,7 @@ ) with assistants_client: - bing_connection_id = os.environ['AZURE_BING_CONECTION_ID'] + bing_connection_id = os.environ["AZURE_BING_CONECTION_ID"] bing = BingGroundingTool(connection_id=bing_connection_id) print(f"Bing Connection ID: {bing_connection_id}") diff --git a/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py index 6b632064c12d..c221228a7041 100644 --- a/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py +++ b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression import io import json import unittest diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py index a6b41004ba18..09cf9eef56f4 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py @@ -55,7 +55,9 @@ async def test_create_vector_store_and_poll( ), ), patch( "azure.ai.assistants._operations.AssistantsClientOperationsMixin.get_vector_store", - wraps=get_mock_fn(assistant.get_vector_store, return_val=VectorStore({"id": "store_1", "status": "completed"})), + wraps=get_mock_fn( + assistant.get_vector_store, return_val=VectorStore({"id": "store_1", "status": "completed"}) + ), ): assistant.create_vector_store_and_poll(file_ids=file_ids, sleep_interval=0) diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py index b9e5e1b6b81d..eee0b4629d64 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression from typing import Iterator, List from unittest.mock import Mock, patch import pytest diff --git 
a/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py index 0a680bcabe33..432af973680c 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression from typing import AsyncIterator, List from unittest.mock import AsyncMock, patch import pytest diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py index 55b8a02b7f2c..f39229119349 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py @@ -33,7 +33,7 @@ AssistantEventHandler, AssistantStreamEvent, AssistantThread, - AzureAISearchTool, + AzureAISearchTool, AzureFunctionStorageQueue, AzureFunctionTool, CodeInterpreterTool, @@ -90,7 +90,7 @@ azure_ai_assistants_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", azure_ai_assistants_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", azure_ai_assistants_assistants_tests_search_index_name="sample_index", - azure_ai_assistants_assistants_tests_search_connection_name="search_connection_name" + azure_ai_assistants_assistants_tests_search_connection_name="search_connection_name", ) @@ -244,7 +244,9 @@ def _do_test_create_assistant(self, client, body, functions): assert assistant.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) else: - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + 
assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) assert assistant.name == "my-assistant" @@ -290,7 +292,9 @@ def _do_test_update_assistant(self, client, use_body, use_io): """helper function for updating assistant with different body inputs""" # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id # update assistant @@ -319,15 +323,20 @@ def test_assistant_list(self, **kwargs): list_length = client.assistants.list_assistants().data.__len__() # create assistant and check that it appears in the list - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert client.assistants.list_assistants().data.__len__() == list_length + 1 assert client.assistants.list_assistants().data[0].id == assistant.id # create second assistant and check that it appears in the list - assistant2 = client.assistants.create_assistant(model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant") + assistant2 = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant" + ) assert client.assistants.list_assistants().data.__len__() == list_length + 2 assert ( - client.assistants.list_assistants().data[0].id == assistant.id or client.assistants.list_assistants().data[1].id == assistant.id + client.assistants.list_assistants().data[0].id == assistant.id + or client.assistants.list_assistants().data[1].id == assistant.id ) # 
delete assistants and check list @@ -355,7 +364,9 @@ def test_create_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -430,7 +441,9 @@ def test_get_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -458,7 +471,9 @@ def test_update_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -550,7 +565,9 @@ def test_delete_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -622,7 +639,9 @@ def _do_test_create_message(self, client, body): if body: message = 
client.assistants.create_message(thread_id=thread.id, body=body) else: - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -635,7 +654,9 @@ def test_create_multiple_messages(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -645,7 +666,9 @@ def test_create_multiple_messages(self, **kwargs): print("Created thread, thread ID", thread.id) # create messages - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) message2 = client.assistants.create_message( @@ -672,7 +695,9 @@ def test_list_messages(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -687,7 +712,9 @@ def test_list_messages(self, **kwargs): assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = client.assistants.create_message(thread_id=thread.id, 
role="user", content="Hello, tell me a joke") + message1 = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message1.id print("Created message, message ID", message1.id) messages1 = client.assistants.list_messages(thread_id=thread.id) @@ -729,7 +756,9 @@ def test_get_message(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -739,7 +768,9 @@ def test_get_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -823,7 +854,9 @@ def test_create_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -872,7 +905,9 @@ def _do_test_create_run(self, client, use_body, use_io=False): """helper function for creating run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", 
name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -909,7 +944,9 @@ def test_get_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -942,7 +979,9 @@ def test_run_status(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -952,7 +991,9 @@ def test_run_status(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -994,7 +1035,9 @@ def test_update_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1059,7 +1102,9 @@ def test_update_run_with_iobytes(self, **kwargs): 
def _do_test_update_run(self, client, body): """helper function for updating run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1378,7 +1423,9 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): """helper function for creating thread and run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1442,7 +1489,9 @@ def test_list_run_step(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1501,7 +1550,9 @@ def test_get_run_step(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1572,7 +1623,9 @@ def test_create_stream(self, **kwargs): assert 
isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1611,7 +1664,9 @@ def test_create_stream_with_body(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1655,7 +1710,9 @@ def test_create_stream_with_iobytes(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1676,7 +1733,9 @@ def test_create_stream_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create stream - with client.assistants.create_stream(thread_id=thread.id, body=io.BytesIO(binary_body), stream=True) as stream: + with client.assistants.create_stream( + thread_id=thread.id, body=io.BytesIO(binary_body), stream=True + ) as stream: for event_type, event_data, _ in stream: assert ( isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) @@ -2606,7 +2665,9 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = 
ai_client.assistants.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS) + file = ai_client.assistants.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) assert file.id, "The file was not uploaded." file_id = file.id @@ -2668,7 +2729,9 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS) + file = ai_client.assistants.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) assert file.id, "The file was not uploaded." file_id = file.id @@ -2817,50 +2880,50 @@ def test_azure_ai_search_tool(self, **kwargs): with self.create_client(**kwargs) as client: assert isinstance(client, AssistantsClient) - # Create AzureAISearchTool - connection_name = kwargs.pop("azure_ai.assistants_assistants_tests_search_connection_name", "my-search-connection-name") + # Create AzureAISearchTool + connection_name = kwargs.pop( + "azure_ai.assistants_assistants_tests_search_connection_name", "my-search-connection-name" + ) connection = client.connections.get(connection_name=connection_name) conn_id = connection.id index_name = kwargs.pop("azure_ai.assistants_assistants_tests_search_index_name", "my-search-index") - + azure_search_tool = AzureAISearchTool( index_connection_id=conn_id, - index_name=index_name, + index_name=index_name, ) - + # Create assistant with the search tool assistant = client.assistants.create_assistant( model="gpt-4o", name="search-assistant", instructions="You are a helpful assistant that can search for information using Azure AI Search.", tools=azure_search_tool.definitions, - tool_resources=azure_search_tool.resources + tool_resources=azure_search_tool.resources, ) assert assistant.id print(f"Created assistant with ID: {assistant.id}") - + # Create thread thread = client.assistants.create_thread() 
assert thread.id print(f"Created thread with ID: {thread.id}") - + # Create message message = client.assistants.create_message( - thread_id=thread.id, - role="user", - content="Search for information about iPhone prices." + thread_id=thread.id, role="user", content="Search for information about iPhone prices." ) assert message.id print(f"Created message with ID: {message.id}") - + # Create and process run run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, run.last_error.message - + # List messages to verify tool was used messages = client.assistants.list_messages(thread_id=thread.id) assert len(messages.data) > 0 - + # Clean up client.assistants.delete_assistant(assistant.id) print("Deleted assistant") @@ -2919,7 +2982,9 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw if use_stream: run = None - with ai_client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id, include=include) as stream: + with ai_client.assistants.create_stream( + thread_id=thread.id, assistant_id=assistant.id, include=include + ) as stream: for event_type, event_data, _ in stream: if isinstance(event_data, ThreadRun): run = event_data @@ -2927,7 +2992,9 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw print("Stream completed.") break else: - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id, include=include) + run = ai_client.assistants.create_and_process_run( + thread_id=thread.id, assistant_id=assistant.id, include=include + ) assert run.status == RunStatus.COMPLETED assert run is not None steps = ai_client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) @@ -3035,7 +3102,9 @@ def test_assistants_with_json_schema(self, **kwargs): def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str: """Return file id if kwargs has file 
path.""" if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS) + file = ai_client.assistants.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) assert file.id, "The file was not uploaded." return file.id return None diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py index 3c125f7a078c..82bb0d5d1a49 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py @@ -216,7 +216,10 @@ async def test_create_assistant_with_tools(self, **kwargs): # create assistant with tools assistant = await client.assistants.create_assistant( - model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", tools=functions.definitions + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=functions.definitions, ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1550,7 +1553,9 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): print("Tool outputs:", tool_outputs) if tool_outputs: body = {"tool_outputs": tool_outputs} - await client.assistants.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, body=body) + await client.assistants.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, body=body + ) print("Current run status:", run.status) @@ -2371,7 +2376,9 @@ async def _test_file_search( if streaming: thread_run = None - async with await ai_client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + async with await ai_client.assistants.create_stream( + thread_id=thread.id, assistant_id=assistant.id + ) as stream: async for _, event_data, _ in stream: if isinstance(event_data, ThreadRun): thread_run = event_data diff --git 
a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py index de9bfa0b8763..14455ea69d5d 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py @@ -214,7 +214,9 @@ def _assert_tool_call(self, submit_tool_mock: AsyncMock, run_id: str, tool_set: else: submit_tool_mock.assert_not_called() - def _assert_toolset_dict(self, assistants_client: AssistantsClient, assistant_id: str, toolset: Optional[AsyncToolSet]): + def _assert_toolset_dict( + self, assistants_client: AssistantsClient, assistant_id: str, toolset: Optional[AsyncToolSet] + ): """Check that the tool set dictionary state is as expected.""" if toolset is None: assert assistant_id not in assistants_client._toolset diff --git a/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py b/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py index 3f7c8b343e18..38240159be7f 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py +++ b/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression import unittest import pytest from azure.ai.assistants import AssistantsClient @@ -9,7 +10,9 @@ class TestDeclarator(unittest.TestCase): @pytest.mark.asyncio @assert_same_http_requests - async def test_assert_errors(self, assistant: AssistantsClient, _: AsyncAssistantsOperations, assertion: OverloadAssertion): + async def test_assert_errors( + self, assistant: AssistantsClient, _: AsyncAssistantsOperations, assertion: OverloadAssertion + ): # This is a special test case tested verified the decorator assert name field presents in one call but not another model = "gpt-4-1106-preview" name = "first" diff --git a/sdk/ai/azure-ai-assistants/tsp-location.yaml b/sdk/ai/azure-ai-assistants/tsp-location.yaml index 2df370e20752..386b3a320a97 100644 --- 
a/sdk/ai/azure-ai-assistants/tsp-location.yaml +++ b/sdk/ai/azure-ai-assistants/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Assistants -commit: 02a554c61c069231f265000c3e94c09d42579ae0 +commit: 877eb552cfa93c270707e2404f63fc05683c6f87 repo: Azure/azure-rest-api-specs additionalDirectories: From aedeac25612b6b5aff3cbe33a7681d8fc0ffd8d6 Mon Sep 17 00:00:00 2001 From: Nikolay Rovinskiy <30440255+nick863@users.noreply.github.com> Date: Fri, 18 Apr 2025 12:42:23 -0700 Subject: [PATCH 7/7] Use 1DP endpoint (#40474) * Use 1DP endpoint * Add more config * Fix linters * Fix linters * Fix spelling * Add package to the build, so we can use private feed * Fix samples and readme * Fix readme-s and spelling * Make back compatible * Fixes * Fix linters and record the tests --------- Co-authored-by: Darren Cohen <39422044+dargilco@users.noreply.github.com> --- .vscode/cspell.json | 10 + eng/.docsettings.yml | 1 + pylintrc | 1 + sdk/ai/azure-ai-assistants/CHANGELOG.md | 4 +- sdk/ai/azure-ai-assistants/README.md | 1184 ++++++++++++++++- sdk/ai/azure-ai-assistants/assets.json | 6 + .../azure/ai/assistants/_client.py | 44 +- .../azure/ai/assistants/_configuration.py | 34 +- .../ai/assistants/_operations/_operations.py | 360 ----- .../azure/ai/assistants/_patch.py | 457 +++---- .../azure/ai/assistants/aio/_client.py | 42 +- .../azure/ai/assistants/aio/_configuration.py | 32 +- .../assistants/aio/_operations/_operations.py | 360 ----- .../ai/assistants/aio/_operations/_patch.py | 41 +- .../azure/ai/assistants/aio/_patch.py | 495 +++---- .../azure/ai/assistants/models/_patch.py | 100 +- .../azure/ai/assistants/telemetry/__init__.py | 3 +- .../telemetry/_ai_assistants_instrumentor.py | 60 +- .../assistants/telemetry/_trace_function.py | 2 +- .../azure_ai_assistants_tests.env | 19 +- .../azure-ai-assistants/dev_requirements.txt | 7 +- sdk/ai/azure-ai-assistants/pyrightconfig.json | 21 + ...tant-5szzLs73bsbQ2k75xUGKv8_image_file.png | Bin 162061 -> 0 bytes 
...tant-6Q824dJfHkRzsy46hPatQA_image_file.png | Bin 181757 -> 0 bytes ...tant-WhEPqpcbmXadRJmCzMUeTi_image_file.png | Bin 181757 -> 0 bytes ...sample_assistants_azure_functions_async.py | 6 +- .../sample_assistants_basics_async.py | 17 +- ...basics_async_with_azure_monitor_tracing.py | 7 +- ...tants_basics_async_with_console_tracing.py | 6 +- ...ample_assistants_code_interpreter_async.py | 6 +- ...tants_code_interpreter_attachment_async.py | 4 +- ...eter_attachment_enterprise_search_async.py | 8 +- .../sample_assistants_functions_async.py | 6 +- ...ple_assistants_image_input_base64_async.py | 103 +- ...ample_assistants_image_input_file_async.py | 35 +- ...sample_assistants_image_input_url_async.py | 27 +- .../sample_assistants_json_schema_async.py | 6 +- ...ample_assistants_run_with_toolset_async.py | 6 +- ...le_assistants_stream_eventhandler_async.py | 7 +- ...tream_eventhandler_with_functions_async.py | 7 +- ..._stream_eventhandler_with_toolset_async.py | 7 +- ...ample_assistants_stream_iteration_async.py | 7 +- ...m_with_base_override_eventhandler_async.py | 7 +- ...tore_batch_enterprise_file_search_async.py | 9 +- ...ts_vector_store_batch_file_search_async.py | 7 +- ...ctor_store_enterprise_file_search_async.py | 9 +- ...sistants_vector_store_file_search_async.py | 7 +- ...tants_with_file_search_attachment_async.py | 7 +- .../azure-ai-assistants/samples/fix_sample.sh | 20 - .../sample_assistants_assistant_team.py | 6 +- ...tants_assistant_team_custom_team_leader.py | 6 +- .../sample_assistants_multi_assistant_team.py | 6 +- .../sample_assistants_azure_ai_search.py | 4 +- .../sample_assistants_azure_functions.py | 9 +- .../samples/sample_assistants_basics.py | 9 +- ...tants_basics_with_azure_monitor_tracing.py | 7 +- ..._assistants_basics_with_console_tracing.py | 7 +- ..._with_console_tracing_custom_attributes.py | 7 +- .../sample_assistants_bing_grounding.py | 11 +- .../sample_assistants_code_interpreter.py | 8 +- ...nterpreter_attachment_enterprise_search.py 
| 10 +- ...ample_assistants_enterprise_file_search.py | 10 +- .../samples/sample_assistants_fabric.py | 6 +- .../samples/sample_assistants_file_search.py | 8 +- .../samples/sample_assistants_functions.py | 8 +- ...ts_functions_with_azure_monitor_tracing.py | 8 +- ...sistants_functions_with_console_tracing.py | 8 +- .../sample_assistants_image_input_base64.py | 7 +- .../sample_assistants_image_input_file.py | 7 +- .../sample_assistants_image_input_url.py | 7 +- .../samples/sample_assistants_json_schema.py | 7 +- .../samples/sample_assistants_logic_apps.py | 12 +- .../samples/sample_assistants_openapi.py | 7 +- ...mple_assistants_openapi_connection_auth.py | 9 +- .../sample_assistants_run_with_toolset.py | 7 +- .../samples/sample_assistants_sharepoint.py | 6 +- .../sample_assistants_stream_eventhandler.py | 7 +- ...eventhandler_with_azure_monitor_tracing.py | 7 +- ...stream_eventhandler_with_bing_grounding.py | 12 +- ...tream_eventhandler_with_console_tracing.py | 7 +- ...ants_stream_eventhandler_with_functions.py | 8 +- ...stants_stream_eventhandler_with_toolset.py | 8 +- .../sample_assistants_stream_iteration.py | 7 +- ...ts_stream_iteration_with_bing_grounding.py | 12 +- ...tants_stream_iteration_with_file_search.py | 8 +- ...ssistants_stream_iteration_with_toolset.py | 8 +- ..._stream_with_base_override_eventhandler.py | 8 +- ...ctor_store_batch_enterprise_file_search.py | 10 +- ...sistants_vector_store_batch_file_search.py | 8 +- ...ple_assistants_vector_store_file_search.py | 8 +- ...s_with_code_interpreter_file_attachment.py | 8 +- ...tants_with_enterprise_search_attachment.py | 8 +- ..._assistants_with_file_search_attachment.py | 8 +- ...ple_assistants_with_resources_in_thread.py | 8 +- sdk/ai/azure-ai-assistants/tests/README.md | 14 +- sdk/ai/azure-ai-assistants/tests/conftest.py | 24 +- .../tests/overload_assert_utils.py | 3 - .../tests/test_assistants_client.py | 637 +++++---- .../tests/test_assistants_client_async.py | 675 +++++----- 
.../tests/test_assistants_mock.py | 3 - .../tests/test_assistants_mock_async.py | 3 - sdk/ai/azure-ai-assistants/tsp-location.yaml | 2 +- sdk/ai/ci.yml | 2 + 103 files changed, 2665 insertions(+), 2669 deletions(-) create mode 100644 sdk/ai/azure-ai-assistants/assets.json create mode 100644 sdk/ai/azure-ai-assistants/pyrightconfig.json delete mode 100644 sdk/ai/azure-ai-assistants/samples/assistant-5szzLs73bsbQ2k75xUGKv8_image_file.png delete mode 100644 sdk/ai/azure-ai-assistants/samples/assistant-6Q824dJfHkRzsy46hPatQA_image_file.png delete mode 100644 sdk/ai/azure-ai-assistants/samples/assistant-WhEPqpcbmXadRJmCzMUeTi_image_file.png delete mode 100644 sdk/ai/azure-ai-assistants/samples/fix_sample.sh diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 404795164d2e..dc0c2667ff7e 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -39,6 +39,8 @@ "sdk/ai/azure-ai-projects/samples/agents/tripadvisor_openapi.json", "/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/data/**", "/sdk/ai/azure-ai-projects/samples/evaluations/data/**", + "sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv", + "sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json", "sdk/ai/azure-ai-resources/azure/ai/resources/_index/_langchain/vendor/**", "sdk/ai/azure-ai-resources/azure/ai/resources/_restclient/**", "sdk/cognitiveservices/azure-cognitiveservices-search-autosuggest/**", @@ -1360,6 +1362,14 @@ "azureopenai" ] }, + { + "filename": "sdk/ai/azure-ai-assistants/**", + "words": [ + "GENAI", + "fspath", + "wttr" + ] + }, { "filename": "sdk/ai/azure-ai-inference/**", "words": [ diff --git a/eng/.docsettings.yml b/eng/.docsettings.yml index 679c12c2d4a8..b783fb5e37ed 100644 --- a/eng/.docsettings.yml +++ b/eng/.docsettings.yml @@ -16,6 +16,7 @@ omitted_paths: - sdk/vision/azure-ai-vision-imageanalysis/tests/* - sdk/ai/azure-ai-inference/tests/* - sdk/ai/azure-ai-projects/tests/* + - sdk/ai/azure-ai-assistants/tests/* - 
sdk/storage/azure-storage-extensions/* language: python diff --git a/pylintrc b/pylintrc index e58b01fd5c1b..2621c1bd1041 100644 --- a/pylintrc +++ b/pylintrc @@ -8,6 +8,7 @@ ignore-paths= azure\\mixedreality\\remoterendering\\_api_version.py, azure/mixedreality/remoterendering/_api_version.py, (?:.*[/\\]|^)projects/(models/_models.py|_model_base.py|operations/_operations.py|aio/operations/_operations.py)$, + (?:.*[/\\]|^)assistants/(_models.py|_model_base.py|_operations/_operations.py|aio/_operations/_operations.py)$, # Exclude any path that contains the following directory names (?:.*[/\\]|^)(?:_vendor|_generated|_restclient|samples|examples|test|tests|doc|\.tox)(?:[/\\]|$) diff --git a/sdk/ai/azure-ai-assistants/CHANGELOG.md b/sdk/ai/azure-ai-assistants/CHANGELOG.md index 628743d283a9..40bc6bf7aa1c 100644 --- a/sdk/ai/azure-ai-assistants/CHANGELOG.md +++ b/sdk/ai/azure-ai-assistants/CHANGELOG.md @@ -1,5 +1,7 @@ # Release History -## 1.0.0b1 (1970-01-01) +## 1.0.0b1 (Unreleased) + +### Features Added - Initial version diff --git a/sdk/ai/azure-ai-assistants/README.md b/sdk/ai/azure-ai-assistants/README.md index 30d4deef34eb..69c1c39f58c4 100644 --- a/sdk/ai/azure-ai-assistants/README.md +++ b/sdk/ai/azure-ai-assistants/README.md @@ -1,20 +1,1184 @@ -# Azure Ai Assistants client library for Python - + +# Azure AI Assistants client library for Python +Use the AI Assistants client library (in preview) to: + +* **Enumerate connections** in your Azure AI Foundry project and get connection properties. +For example, get the inference endpoint URL and credentials associated with your Azure OpenAI connection. +* **Develop Assistants using the Azure AI Assistants Service**, leveraging an extensive ecosystem of models, tools, and capabilities from OpenAI, Microsoft, and other LLM providers. The Azure AI Assistants Service enables the building of Assistants for a wide range of generative AI use cases. The package is currently in preview. +* **Enable OpenTelemetry tracing**. 
+ +[Product documentation](https://aka.ms/azsdk/azure-ai-projects/product-doc) +| [Samples][samples] +| [API reference documentation](https://aka.ms/azsdk/azure-ai-projects/python/reference) +| [Package (PyPI)](https://aka.ms/azsdk/azure-ai-projects/python/package) +| [SDK source code](https://aka.ms/azsdk/azure-ai-projects/python/code) +| [AI Starter Template](https://aka.ms/azsdk/azure-ai-projects/python/ai-starter-template) + +## Reporting issues + +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-projects" in the title or content. + +## Table of contents + +- [Getting started](#getting-started) + - [Prerequisite](#prerequisite) + - [Install the package](#install-the-package) +- [Key concepts](#key-concepts) + - [Create and authenticate the client](#create-and-authenticate-the-client) +- [Examples](#examples) + - [Create an Assistant](#create-assistant) with: + - [File Search](#create-assistant-with-file-search) + - [Enterprise File Search](#create-assistant-with-enterprise-file-search) + - [Code interpreter](#create-assistant-with-code-interpreter) + - [Bing grounding](#create-assistant-with-bing-grounding) + - [Azure AI Search](#create-assistant-with-azure-ai-search) + - [Function call](#create-assistant-with-function-call) + - [Azure Function Call](#create-assistant-with-azure-function-call) + - [OpenAPI](#create-assistant-with-openapi) + - [Fabric data](#create-an-assistant-with-fabric) + - [Create thread](#create-thread) with + - [Tool resource](#create-thread-with-tool-resource) + - [Create message](#create-message) with: + - [File search attachment](#create-message-with-file-search-attachment) + - [Code interpreter attachment](#create-message-with-code-interpreter-attachment) + - [Execute Run, Run_and_Process, or Stream](#create-run-run_and_process-or-stream) + - [Retrieve message](#retrieve-message) + - 
[Retrieve file](#retrieve-file) + - [Tear down by deleting resource](#teardown) + - [Tracing](#tracing) + - [Installation](#installation) + - [How to enable tracing](#how-to-enable-tracing) + - [How to trace your own functions](#how-to-trace-your-own-functions) +- [Troubleshooting](#troubleshooting) + - [Logging](#logging) + - [Reporting issues](#reporting-issues) +- [Next steps](#next-steps) +- [Contributing](#contributing) + ## Getting started +### Prerequisite + +- Python 3.9 or later. +- An [Azure subscription][azure_sub]. +- A [project in Azure AI Foundry](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects). +- The project connection string. It can be found in your Azure AI Foundry project overview page, under "Project details". Below we will assume the environment variable `PROJECT_CONNECTION_STRING` was defined to hold this value. +- Entra ID is needed to authenticate the client. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need: + * An appropriate role assignment. see [Role-based access control in Azure AI Foundry portal](https://learn.microsoft.com/azure/ai-foundry/concepts/rbac-ai-foundry). Role assigned can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal. + * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed. + * You are logged into your Azure account by running `az login`. + * Note that if you have multiple Azure subscriptions, the subscription that contains your Azure AI Project resource must be your default subscription. Run `az account list --output table` to list all your subscription and see which one is the default. 
Run `az account set --subscription "Your Subscription ID or Name"` to change your default subscription. + ### Install the package ```bash -python -m pip install azure-ai-assistants +pip install azure-ai-assistants +``` + +## Key concepts + +### Create and authenticate the client + +To construct a synchronous client: + +```python +import os +from azure.ai.assistants import AssistantsClient +from azure.core.credentials import AzureKeyCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=AzureKeyCredential(os.environ["API_KEY"]) +) +``` + +To construct an asynchronous client, install the additional package [aiohttp](https://pypi.org/project/aiohttp/): + +```bash +pip install aiohttp +``` + +and update the code above to import `asyncio`, and import `AssistantsClient` from the `azure.ai.assistants.aio` namespace: + +```python +import os +import asyncio +from azure.ai.assistants.aio import AssistantsClient +from azure.core.credentials import AzureKeyCredential + +assistant_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=AzureKeyCredential(os.environ["API_KEY"]), +) +``` + +## Examples + +### Create Assistant + +Before creating an Assistant, you need to set up Azure resources to deploy your model. [Create a New Assistant Quickstart](https://learn.microsoft.com/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure) details selecting and deploying your Assistant Setup. + +Here is an example of how to create an Assistant: + + +```python +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", +) +``` + + + +To allow Assistants to access your resources or custom functions, you need tools. You can pass tools to `create_assistant` by either `toolset` or a combination of `tools` and `tool_resources`.
+ +Here is an example of `toolset`: + + +```python +functions = FunctionTool(user_functions) +code_interpreter = CodeInterpreterTool() + +toolset = ToolSet() +toolset.add(functions) +toolset.add(code_interpreter) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, +) +``` + + + +Also notice that if you use the asynchronous client, you should use `AsyncToolSet` instead. Additional information related to `AsyncFunctionTool` will be discussed in the later sections. + +Here is an example of using `tools` and `tool_resources`: + + +```python +file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + +# Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, +) +``` + + + +In the following sections, we show you sample code using either `toolset` or a combination of `tools` and `tool_resources`. + +### Create Assistant with File Search + +To perform file search by an Assistant, we first need to upload a file, create a vector store, and associate the file to the vector store.
Here is an example: + + + +```python +file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") +print(f"Uploaded file, file ID: {file.id}") + +vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# Create file search tool with resources followed by creating assistant +file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, +) +``` + + + +### Create Assistant with Enterprise File Search + +We can upload file to Azure as it is shown in the example, or use the existing Azure blob storage. In the code below we demonstrate how this can be achieved. First we upload file to azure and create `VectorStoreDataSource`, which then is used to create vector store. This vector store is then given to the `FileSearchTool` constructor. + + + +```python +# We will upload the local file to Azure and will use it for vector store creation. 
+asset_uri = os.environ["AZURE_BLOB_URI"] + +# Create a vector store with no file and wait for it to be processed +ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) +vector_store = assistants_client.create_vector_store_and_poll(data_sources=[ds], name="sample_vector_store") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# Create a file search tool +file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + +# Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, +) +``` + + + +We also can attach files to the existing vector store. In the code snippet below, we first create an empty vector store and add file to it. 
+ + + +```python +# Create a vector store with no file and wait for it to be processed +vector_store = assistants_client.create_vector_store_and_poll(data_sources=[], name="sample_vector_store") +print(f"Created vector store, vector store ID: {vector_store.id}") + +ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) +# Add the file to the vector store or you can supply data sources in the vector store creation +vector_store_file_batch = assistants_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] +) +print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + +# Create a file search tool +file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) +``` + + + +### Create Assistant with Code Interpreter + +Here is an example to upload a file and use it for code interpreter by an Assistant: + + + +```python +file = assistants_client.upload_file_and_poll( + file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS +) +print(f"Uploaded file, file ID: {file.id}") + +code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + +# Create assistant with code interpreter tool and tools_resources +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, +) +``` + + + +### Create Assistant with Bing Grounding + +To enable your Assistant to perform search through Bing search API, you use `BingGroundingTool` along with a connection. 
+ +Here is an example: + + + +```python +conn_id = os.environ["AZURE_BING_CONNECTION_ID"] + +print(conn_id) + +# Initialize assistant bing tool and add the connection id +bing = BingGroundingTool(connection_id=conn_id) + +# Create assistant with the bing tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=bing.definitions, + headers={"x-ms-enable-preview": "true"}, + ) +``` + + + +### Create Assistant with Azure AI Search + +Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. Ideal for knowledge base insights, information discovery, and automation. Creating an Assistant with Azure AI Search requires an existing Azure AI Search Index. For more information and setup guides, see [Azure AI Search Tool Guide](https://learn.microsoft.com/azure/ai-services/agents/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search). 
+ +Here is an example to integrate Azure AI Search: + + + +```python +conn_id = os.environ["AI_AZURE_AI_CONNECTION_ID"] + +print(conn_id) + +# Initialize assistant AI search tool and add the search index connection id +ai_search = AzureAISearchTool( + index_connection_id=conn_id, index_name="sample_index", query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter="" +) + +# Create assistant with AI search tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=ai_search.definitions, + tool_resources=ai_search.resources, + ) +``` + + + +If the assistant has found the relevant information in the index, the reference +and annotation will be provided in the message response. In the example above, we replace +the reference placeholder by the actual reference and url. Please note, that to +get sensible result, the index needs to have "embedding", "token", "category" and "title" fields. + + + +```python +# Fetch and log all messages +messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) +for message in messages.data: + if message.role == MessageRole.ASSISTANT and message.url_citation_annotations: + placeholder_annotations = { + annotation.text: f" [see {annotation.url_citation.title}] ({annotation.url_citation.url})" + for annotation in message.url_citation_annotations + } + for message_text in message.text_messages: + message_str = message_text.text.value + for k, v in placeholder_annotations.items(): + message_str = message_str.replace(k, v) + print(f"{message.role}: {message_str}") + else: + for message_text in message.text_messages: + print(f"{message.role}: {message_text.text.value}") +``` + + + +### Create Assistant with Function Call + +You can enhance your Assistants by defining callback functions as function tools. 
These can be provided to `create_assistant` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions:
+
+- `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within `create_and_run_process` or `streaming`. These functions will be invoked based on their definitions.
+- `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_assistant`, without the implementations. The `Run` or the stream's event handler will raise a `requires_action` status based on the function definitions. Your code must handle this status and call the appropriate functions.
+
+For more details about calling functions by code, refer to [`sample_assistants_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py).
+ +For more details about requirements and specification of functions, refer to [Function Tool Specifications](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/FunctionTool.md) + +Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/user_functions.py) in `toolset`: + + +```python +functions = FunctionTool(user_functions) +toolset = ToolSet() +toolset.add(functions) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, +) +``` + + + +For asynchronous functions, you must import `AIProjectClient` from `azure.ai.projects.aio` and use `AsyncFunctionTool`. Here is an example using [asynchronous user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py): + +```python +from azure.ai.projects.aio import AIProjectClient +``` + + + +```python +functions = AsyncFunctionTool(user_async_functions) + +toolset = AsyncToolSet() +toolset.add(functions) + +assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, +) +``` + + + +### Create Assistant With Azure Function Call + +The AI assistant leverages Azure Functions triggered asynchronously via Azure Storage Queues. To enable the assistant to perform Azure Function calls, you must set up the corresponding `AzureFunctionTool`, specifying input and output queues as well as parameter definitions. 
+ +Example Python snippet illustrating how you create an assistant utilizing the Azure Function Tool: + + + +```python +azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_service_endpoint, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_service_endpoint, + ), +) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="azure-function-assistant-foo", + instructions=f"You are a helpful support assistant. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. Always responds with \"Foo says\" and then the response from the tool.", + tools=azure_function_tool.definitions, +) +print(f"Created assistant, assistant ID: {assistant.id}") +``` + + + +--- + +**Limitations** + +Currently, the Azure Function integration for the AI Assistant has the following limitations: + +- Azure Functions integration is available **only for non-streaming scenarios**. +- Supported trigger for Azure Function is currently limited to **Queue triggers** only. + HTTP or other trigger types and streaming responses are not supported at this time. + +--- + +**Create and Deploy Azure Function** + +Before you can use the assistant with AzureFunctionTool, you need to create and deploy Azure Function. 
+
+Below is an example Python Azure Function responding to queue-triggered messages and placing responses on the output queue:
+
+```python
+import azure.functions as func
+import logging
+import json
+
+app = func.FunctionApp()
+
+@app.queue_trigger(arg_name="inputQueue",
+                   queue_name="input",
+                   connection="AzureWebJobsStorage")
+@app.queue_output(arg_name="outputQueue",
+                  queue_name="output",
+                  connection="AzureWebJobsStorage")
+def get_weather(inputQueue: func.QueueMessage, outputQueue: func.Out[str]):
+    try:
+        messagepayload = json.loads(inputQueue.get_body().decode("utf-8"))
+        location = messagepayload["location"]
+        weather_result = f"Weather is 82 degrees and sunny in {location}."
+
+        response_message = {
+            "Value": weather_result,
+            "CorrelationId": messagepayload["CorrelationId"]
+        }
+
+        outputQueue.set(json.dumps(response_message))
+
+        logging.info(f"Sent message to output queue with message {response_message}")
+    except Exception as e:
+        logging.error(f"Error processing message: {e}")
+        return
+```
+
+> **Important:** Both input and output payloads must contain the `CorrelationId`, which must match in request and response.
+
+---
+
+**Azure Function Project Creation and Deployment**
+
+To deploy your function to Azure properly, follow Microsoft's official documentation step by step:
+
+[Azure Functions Python Developer Guide](https://learn.microsoft.com/azure/azure-functions/create-first-function-cli-python?tabs=windows%2Cbash%2Cazure-cli%2Cbrowser)
+
+**Summary of required steps:**
+
+- Use the Azure CLI or Azure Portal to create an Azure Function App.
+- Enable System Managed Identity for your Azure Function App.
+- Assign appropriate permissions to your Azure Function App identity as outlined in the Role Assignments section below.
+- Create input and output queues in Azure Storage.
+- Deploy your Function code.
+
+---
+
+**Verification and Testing Azure Function**
+
+To ensure that your Azure Function deployment functions correctly:
+
+1.
Place the following style message manually into the input queue (`input`): + +{ + "location": "Seattle", + "CorrelationId": "42" +} + +Check the output queue (`output`) and validate the structured message response: + +{ + "Value": "The weather in Seattle is sunny and warm.", + "CorrelationId": "42" +} + +--- + +**Required Role Assignments (IAM Configuration)** + +Clearly assign the following Azure IAM roles to ensure correct permissions: + +1. **Azure Function App's identity:** + - Enable system managed identity through Azure Function App > Settings > Identity. + - Add permission to storage account: + - Go to **Storage Account > Access control (IAM)** and add role assignment: + - `Storage Queue Data Contributor` assigned to Azure Function managed identity + +2. **Azure AI Project Identity:** + +Ensure your Azure AI Project identity has the following storage account permissions: +- `Storage Account Contributor` +- `Storage Blob Data Contributor` +- `Storage File Data Privileged Contributor` +- `Storage Queue Data Contributor` +- `Storage Table Data Contributor` + +--- + +**Additional Important Configuration Notes** + +- The Azure Function configured above uses the `AzureWebJobsStorage` connection string for queue connectivity. You may alternatively use managed identity-based connections as described in the official Azure Functions Managed Identity documentation. +- Storage queues you specify (`input` & `output`) should already exist in the storage account before the Function deployment or invocation, created manually via Azure portal or CLI. +- When using Azure storage account connection strings, make sure the account has enabled storage account key access (`Storage Account > Settings > Configuration`). + +--- + +With the above steps complete, your Azure Function integration with your AI Assistant is ready for use. + + +### Create Assistant With Logic Apps + +Logic Apps allow HTTP requests to trigger actions. 
For more information, refer to the guide [Logic App Workflows for Function Calling](https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling). + +Your Logic App must be in the same resource group as your Azure AI Project, shown in the Azure Portal. Assistants SDK accesses Logic Apps through Workflow URLs, which are fetched and called as requests in functions. + +Below is an example of how to create an Azure Logic App utility tool and register a function with it. + + + +```python + +# Create the project client +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Extract subscription and resource group from the project scope +subscription_id = os.environ["SUBSCRIPTION_ID"] +resource_group = os.environ["resource_group_name"] + +# Logic App details +logic_app_name = "" +trigger_name = "" + +# Create and initialize AzureLogicAppTool utility +logic_app_tool = AzureLogicAppTool(subscription_id, resource_group) +logic_app_tool.register_logic_app(logic_app_name, trigger_name) +print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.") + +# Create the specialized "send_email_via_logic_app" function for your assistant tools +send_email_func = create_send_email_function(logic_app_tool, logic_app_name) + +# Prepare the function tools for the assistant +functions_to_use: Set = { + fetch_current_datetime, + send_email_func, # This references the AzureLogicAppTool instance via closure +} +``` + + + +After this the functions can be incorporated normally into code using `FunctionTool`. + + +### Create Assistant With OpenAPI + +OpenAPI specifications describe REST operations against a specific endpoint. Assistants SDK can read an OpenAPI spec, create a function from it, and call that function against the REST endpoint without additional client-side execution. 
+ +Here is an example creating an OpenAPI tool (using anonymous authentication): + + + +```python + +with open("./weather_openapi.json", "r") as f: + openapi_weather = jsonref.loads(f.read()) + +with open("./countries.json", "r") as f: + openapi_countries = jsonref.loads(f.read()) + +# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) +auth = OpenApiAnonymousAuthDetails() + +# Initialize assistant OpenApi tool using the read in OpenAPI spec +openapi_tool = OpenApiTool( + name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth +) +openapi_tool.add_definition( + name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth +) + +# Create assistant with OpenApi tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=openapi_tool.definitions, + ) +``` + + + +### Create an Assistant with Fabric + +To enable your Assistant to answer queries using Fabric data, use `FabricTool` along with a connection to the Fabric resource. + +Here is an example: + + + +```python +conn_id = os.environ["FABRIC_CONNECTION_ID"] + +print(conn_id) + +# Initialize an Assistant Fabric tool and add the connection id +fabric = FabricTool(connection_id=conn_id) + +# Create an Assistant with the Fabric tool and process an Assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=fabric.definitions, + headers={"x-ms-enable-preview": "true"}, + ) +``` + + + + +### Create Thread + +For each session or conversation, a thread is required. 
Here is an example: + + + +```python +thread = assistants_client.create_thread() +``` + + + +### Create Thread with Tool Resource + +In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector store and upload a file, enable an Assistant for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument. + + + +```python +file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") +print(f"Uploaded file, file ID: {file.id}") + +vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# Create file search tool with resources followed by creating assistant +file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, +) + +print(f"Created assistant, ID: {assistant.id}") + +# Create thread with file resources. +# If the assistant has multiple threads, only this thread can search this file. 
+thread = assistants_client.create_thread(tool_resources=file_search.resources)
+```
+
+
+### Create Message
+
+To create a message for the assistant to process, you pass `user` as `role` and a question as `content`:
+
+
+
+```python
+message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+```
+
+
+
+### Create Message with File Search Attachment
+
+To attach a file to a message for content searching, you use `MessageAttachment` and `FileSearchTool`:
+
+
+
+```python
+attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
+message = assistants_client.create_message(
+    thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]
+)
+```
+
+
+
+### Create Message with Code Interpreter Attachment
+
+To attach a file to a message for data analysis, use the `MessageAttachment` and `CodeInterpreterTool` classes. You must pass `CodeInterpreterTool` as `tools` or `toolset` in the `create_assistant` call, or the file attachment cannot be opened by the code interpreter.
+ +Here is an example to pass `CodeInterpreterTool` as tool: + + + +```python +# Notice that CodeInterpreter must be enabled in the assistant creation, +# otherwise the assistant will not be able to see the file attachment for code interpretation +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=CodeInterpreterTool().definitions, +) +print(f"Created assistant, assistant ID: {assistant.id}") + +thread = assistants_client.create_thread() +print(f"Created thread, thread ID: {thread.id}") + +# Create an attachment +attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) + +# Create a message +message = assistants_client.create_message( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + attachments=[attachment], +) +``` + + + +Azure blob storage can be used as a message attachment. In this case, use `VectorStoreDataSource` as a data source: + + + +```python +# We will upload the local file to Azure and will use it for vector store creation. +asset_uri = os.environ["AZURE_BLOB_URI"] +ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + +# Create a message with the attachment +attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions) +message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] +) +``` + + + +### Create Run, Run_and_Process, or Stream + +To process your message, you can use `create_run`, `create_and_process_run`, or `create_stream`. + +`create_run` requests the Assistant to process the message without polling for the result. 
If you are using `function tools` regardless as `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py). + +Here is an example of `create_run` and poll until the run is completed: + + + +```python +run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + +# Poll the run as long as run status is queued or in progress +while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = assistants_client.get_run(thread_id=thread.id, run_id=run.id) +``` + + + +To have the SDK poll on your behalf and call `function tools`, use the `create_and_process_run` method. Note that `function tools` will only be invoked if they are provided as `toolset` during the `create_assistant` call. + +Here is an example: + + + +```python +run = assistants_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) +``` + + + +With streaming, polling need not be considered. If `function tools` are provided as `toolset` during the `create_assistant` call, they will be invoked by the SDK. + +Here is an example of streaming: + + + +```python +with assistants_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. 
ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") +``` + + + +In the code above, because an `event_handler` object is not passed to the `create_stream` function, the SDK will instantiate `AssistantEventHandler` or `AsyncAssistantEventHandler` as the default event handler and produce an iterable object with `event_type` and `event_data`. `AssistantEventHandler` and `AsyncAssistantEventHandler` are overridable. Here is an example: + + + +```python +# With AssistantEventHandler[str], the return type for each event functions is optional string. +class MyEventHandler(AssistantEventHandler[str]): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: + return f"Text delta received: {delta.text}" + + def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: + return f"ThreadMessage created. ID: {message.id}, Status: {message.status}" + + def on_thread_run(self, run: "ThreadRun") -> Optional[str]: + return f"ThreadRun status: {run.status}" + + def on_run_step(self, step: "RunStep") -> Optional[str]: + return f"RunStep type: {step.type}, Status: {step.status}" + + def on_error(self, data: str) -> Optional[str]: + return f"An error occurred. Data: {data}" + + def on_done(self) -> Optional[str]: + return "Stream completed." 
+ + def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: + return f"Unhandled Event Type: {event_type}, Data: {event_data}" +``` + + + + + + +```python +with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() +) as stream: + for event_type, event_data, func_return in stream: + print(f"Received data.") + print(f"Streaming receive Event Type: {event_type}") + print(f"Event Data: {str(event_data)[:100]}...") + print(f"Event Function return: {func_return}\n") +``` + + + +As you can see, this SDK parses the events and produces various event types similar to OpenAI assistants. In your use case, you might not be interested in handling all these types and may decide to parse the events on your own. To achieve this, please refer to [override base event handler](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_with_base_override_eventhandler.py). + +``` +Note: Multiple streaming processes may be chained behind the scenes. + +When the SDK receives a `ThreadRun` event with the status `requires_action`, the next event will be `Done`, followed by termination. The SDK will submit the tool calls using the same event handler. The event handler will then chain the main stream with the tool stream. + +Consequently, when you iterate over the streaming using a for loop similar to the example above, the for loop will receive events from the main stream followed by events from the tool stream. +``` + + +### Retrieve Message + +To retrieve messages from assistants, use the following example: + + + +```python +messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + +# The messages are following in the reverse order, +# we will iterate them and output only text contents. 
+for data_point in messages.data: + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") +``` + + + +In addition, `messages` and `messages.data[]` offer helper properties such as `text_messages`, `image_contents`, `file_citation_annotations`, and `file_path_annotations` to quickly retrieve content from one message or all messages. + +### Retrieve File + +Files uploaded by Assistants cannot be retrieved back. If your use case need to access the file content uploaded by the Assistants, you are advised to keep an additional copy accessible by your application. However, files generated by Assistants are retrievable by `save_file` or `get_file_content`. + +Here is an example retrieving file ids from messages and save to the local drive: + + + +```python +messages = assistants_client.list_messages(thread_id=thread.id) +print(f"Messages: {messages}") + +for image_content in messages.image_contents: + file_id = image_content.image_file.file_id + print(f"Image File ID: {file_id}") + file_name = f"{file_id}_image_file.png" + assistants_client.save_file(file_id=file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + +for file_path_annotation in messages.file_path_annotations: + print(f"File Paths:") + print(f"Type: {file_path_annotation.type}") + print(f"Text: {file_path_annotation.text}") + print(f"File ID: {file_path_annotation.file_path.file_id}") + print(f"Start Index: {file_path_annotation.start_index}") + print(f"End Index: {file_path_annotation.end_index}") +``` + + + +Here is an example to use `get_file_content`: + +```python +from pathlib import Path + +async def save_file_content(client, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None): + # Determine the target directory + path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() + path.mkdir(parents=True, exist_ok=True) 
+ + # Retrieve the file content + file_content_stream = await client.get_file_content(file_id) + if not file_content_stream: + raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") + + # Collect all chunks asynchronously + chunks = [] + async for chunk in file_content_stream: + if isinstance(chunk, (bytes, bytearray)): + chunks.append(chunk) + else: + raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") + + target_file_path = path / file_name + + # Write the collected content to the file synchronously + with open(target_file_path, "wb") as file: + for chunk in chunks: + file.write(chunk) +``` + +### Teardown + +To remove resources after completing tasks, use the following functions: + + + +```python +# Delete the file when done +assistants_client.delete_vector_store(vector_store.id) +print("Deleted vector store") + +assistants_client.delete_file(file_id=file.id) +print("Deleted file") + +# Delete the assistant when done +assistants_client.delete_assistant(assistant.id) +print("Deleted assistant") +``` + + + +## Tracing + +You can add an Application Insights Azure resource to your Azure AI Foundry project. See the Tracing tab in your AI Foundry project. If one was enabled, you can get the Application Insights connection string, configure your Assistants, and observe the full execution path through Azure Monitor. Typically, you might want to start tracing before you create an Assistant. + +### Installation + +Make sure to install OpenTelemetry and the Azure SDK tracing plugin via + +```bash +pip install opentelemetry +pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry +``` + +You will also need an exporter to send telemetry to your observability backend. You can print traces to the console or use a local viewer such as [Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash). 
+ +To connect to Aspire Dashboard or another OpenTelemetry compatible backend, install OTLP exporter: + +```bash +pip install opentelemetry-exporter-otlp +``` + +### How to enable tracing + +Here is a code sample that shows how to enable Azure Monitor tracing: + + + +```python +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor + +# Enable Azure Monitor tracing +application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] +configure_azure_monitor(connection_string=application_insights_connection_string) + +# enable additional instrumentations +enable_telemetry() + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: +``` + + + +In addition, you might find helpful to see the tracing logs in console. You can achieve by the following code: + +```python +from azure.ai.assistants.telemetry import enable_telemetry + +enable_telemetry(destination=sys.stdout) +``` +### How to trace your own functions + +The decorator `trace_function` is provided for tracing your own function calls using OpenTelemetry. By default the function name is used as the name for the span. Alternatively you can provide the name for the span as a parameter to the decorator. + +This decorator handles various data types for function parameters and return values, and records them as attributes in the trace span. The supported data types include: +* Basic data types: str, int, float, bool +* Collections: list, dict, tuple, set + * Special handling for collections: + - If a collection (list, dict, tuple, set) contains nested collections, the entire collection is converted to a string before being recorded as an attribute. + - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes. + +Object types are omitted, and the corresponding parameter is not traced. 
+ +The parameters are recorded in attributes `code.function.parameter.` and the return value is recorder in attribute `code.function.return.value` + +## Troubleshooting + +### Logging + +The client uses the standard [Python logging library](https://docs.python.org/3/library/logging.html). The SDK logs HTTP request and response details, which may be useful in troubleshooting. To log to stdout, add the following: + +```python +import sys +import logging + +# Acquire the logger for this client library. Use 'azure' to affect both +# 'azure.core` and `azure.ai.inference' libraries. +logger = logging.getLogger("azure") + +# Set the desired logging level. logging.INFO or logging.DEBUG are good options. +logger.setLevel(logging.DEBUG) + +# Direct logging output to stdout: +handler = logging.StreamHandler(stream=sys.stdout) +# Or direct logging output to a file: +# handler = logging.FileHandler(filename="sample.log") +logger.addHandler(handler) + +# Optional: change the default logging format. Here we add a timestamp. +#formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") +#handler.setFormatter(formatter) +``` + +By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key or token), and the request and response payloads. To create logs without redaction, add `logging_enable = True` to the client constructor: + +```python +assistants_client = AIProjectClient.from_connection_string( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + logging_enable = True +) ``` -#### Prequisites +Note that the log level must be set to `logging.DEBUG` (see above code). Logs will be redacted with any other log level. + +Be sure to protect non redacted logs to avoid compromising security. 
+ +For more information, see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging) + +### Reporting issues + +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-assistants" in the title or content. + + +## Next steps -- Python 3.8 or later is required to use this package. -- You need an [Azure subscription][azure_sub] to use this package. -- An existing Azure Ai Assistants instance. +Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-projects/samples) folder, containing fully runnable Python code for synchronous and asynchronous clients. +Explore the [AI Starter Template](https://aka.ms/azsdk/azure-ai-projects/python/ai-starter-template). This template creates an Azure AI Foundry hub, project and connected resources including Azure OpenAI Service, AI Search and more. It also deploys a simple chat application to Azure Container Apps. ## Contributing @@ -34,10 +1198,14 @@ see the Code of Conduct FAQ or contact opencode@microsoft.com with any additional questions or comments. 
+[samples]: https://aka.ms/azsdk/azure-ai-projects/python/samples/ [code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ -[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[entra_id]: https://learn.microsoft.com/azure/ai-services/authentication?tabs=powershell#authenticate-with-microsoft-entra-id [azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials [azure_identity_pip]: https://pypi.org/project/azure-identity/ [default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential [pip]: https://pypi.org/project/pip/ [azure_sub]: https://azure.microsoft.com/free/ +[evaluators]: https://learn.microsoft.com/azure/ai-studio/how-to/develop/evaluate-sdk +[azure_ai_evaluation]: https://learn.microsoft.com/python/api/overview/azure/ai-evaluation-readme +[evaluator_library]: https://learn.microsoft.com/azure/ai-studio/how-to/evaluate-generative-ai-app#view-and-manage-the-evaluators-in-the-evaluator-library \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/assets.json b/sdk/ai/azure-ai-assistants/assets.json new file mode 100644 index 000000000000..823831a56f44 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "python", + "TagPrefix": "python/ai/azure-ai-assistants", + "Tag": "python/ai/azure-ai-assistants_a471817af2" +} diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py index 6bd70ebaba05..ef1341bc0ac4 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py @@ -1,4 +1,3 @@ -# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # 
-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -27,18 +26,9 @@ class AssistantsClient(AssistantsClientOperationsMixin): """AssistantsClient. - :param endpoint: The Azure AI Foundry project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``, where is the - Azure region where the project is deployed (e.g. westus) and is the GUID of - the Enterprise private link. Required. + :param endpoint: Project endpoint in the form of: + https://.services.ai.azure.com/api/projects/. Required. :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Foundry project name. Required. - :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -49,24 +39,9 @@ class AssistantsClient(AssistantsClientOperationsMixin): :paramtype api_version: str """ - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: Union[AzureKeyCredential, "TokenCredential"], - **kwargs: Any - ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" - self._config = AssistantsClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - **kwargs - ) + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = AssistantsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -111,15 +86,6 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: request_copy = deepcopy(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py index 
fcf3bd499866..b3aa33c5f408 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py @@ -23,18 +23,9 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: The Azure AI Foundry project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``, where is the - Azure region where the project is deployed (e.g. westus) and is the GUID of - the Enterprise private link. Required. + :param endpoint: Project endpoint in the form of: + https://.services.ai.azure.com/api/projects/. Required. :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Foundry project name. Required. - :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -45,35 +36,18 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib :paramtype api_version: str """ - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: Union[AzureKeyCredential, "TokenCredential"], - **kwargs: Any, - ) -> None: + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: api_version: str = kwargs.pop("api_version", "2025-05-15-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - if resource_group_name is None: - raise ValueError("Parameter 'resource_group_name' must not be None.") - if project_name is None: - raise ValueError("Parameter 'project_name' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint - self.subscription_id = subscription_id - self.resource_group_name = resource_group_name - self.project_name = project_name self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py index c962d3131cef..06cc62f53078 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py @@ -1400,15 
+1400,6 @@ def create_assistant( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1495,15 +1486,6 @@ def list_assistants( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1564,15 +1546,6 @@ def get_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assistant: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, 
**path_format_arguments) @@ -1821,15 +1794,6 @@ def update_assistant( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1890,15 +1854,6 @@ def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assistan ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2059,15 +2014,6 @@ def create_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -2128,15 +2074,6 @@ def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AssistantThread: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2300,15 +2237,6 @@ def update_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2369,15 +2297,6 @@ def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletion ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - 
"self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2565,15 +2484,6 @@ def create_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2668,15 +2578,6 @@ def list_messages( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2740,15 +2641,6 @@ def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - 
"projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2909,15 +2801,6 @@ def update_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3287,15 +3170,6 @@ def create_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3386,15 +3260,6 @@ def list_runs( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - 
"self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3458,15 +3323,6 @@ def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadR ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3627,15 +3483,6 @@ def update_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3808,15 +3655,6 @@ def submit_tool_outputs_to_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), 
- "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3880,15 +3718,6 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4212,15 +4041,6 @@ def create_thread_and_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4301,15 +4121,6 @@ def get_run_step( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", 
self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4411,15 +4222,6 @@ def list_run_steps( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4484,15 +4286,6 @@ def list_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4564,15 +4357,6 @@ def _upload_file(self, body: Union[_models._models.UploadFileRequest, JSON], **k ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - 
"self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4633,15 +4417,6 @@ def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4702,15 +4477,6 @@ def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4771,15 +4537,6 @@ def _get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": 
self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4863,15 +4620,6 @@ def list_vector_stores( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5053,15 +4801,6 @@ def create_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5122,15 +4861,6 @@ def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Vecto ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 
"str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5292,15 +5022,6 @@ def modify_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5362,15 +5083,6 @@ def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Ve ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5466,15 +5178,6 @@ def list_vector_store_files( ) path_format_arguments = { "endpoint": 
self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5632,15 +5335,6 @@ def create_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5704,15 +5398,6 @@ def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: An ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5781,15 +5466,6 @@ def 
delete_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5947,15 +5623,6 @@ def create_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6021,15 +5688,6 @@ def get_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6096,15 
+5754,6 @@ def cancel_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6204,15 +5853,6 @@ def list_vector_store_file_batch_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py index 9bd7d46ac371..d75f6dd1d754 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -12,8 +12,6 @@ import os import sys import time -import uuid -from os import PathLike from pathlib import Path from typing import ( IO, @@ -23,20 +21,18 @@ Iterator, List, Optional, - Self, - Tuple, Union, cast, overload, ) +from azure.core.credentials import TokenCredential, AzureKeyCredential from azure.core.tracing.decorator import distributed_trace from . 
import models as _models from ._vendor import FileType from .models._enums import FilePurpose, RunStatus from ._client import AssistantsClient as AssistantsClientGenerated -from azure.core.credentials import TokenCredential if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -45,8 +41,6 @@ if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports - from openai import AzureOpenAI - from . import _types JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object @@ -55,10 +49,28 @@ logger = logging.getLogger(__name__) -class AssistantsClient(AssistantsClientGenerated): - - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) +class AssistantsClient(AssistantsClientGenerated): # pylint: disable=client-accepts-api-version-keyword + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any) -> None: + # TODO: Remove this custom code when 1DP service will be available + if not endpoint: + raise ValueError("Connection string or 1DP endpoint is required") + parts = endpoint.split(";") + # Detect legacy endpoint and build it in old way. + if len(parts) == 4: + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + endpoint = ( + f"{endpoint}/agents/v1.0/subscriptions" + f"/{subscription_id}/resourceGroups/{resource_group_name}/providers" + f"/Microsoft.MachineLearningServices/workspaces/{project_name}" + ) + # Override the credential scope with the legacy one. + kwargs['credential_scopes'] = ["https://management.azure.com/.default"] + # End of legacy endpoints handling. + super().__init__(endpoint, credential, **kwargs) self._toolset: Dict[str, _models.ToolSet] = {} # pylint: disable=arguments-differ @@ -93,12 +105,12 @@ def create_assistant( # pylint: disable=arguments-differ :keyword instructions: The system instructions for the new assistant to use. Default value is None. 
:paramtype instructions: str :keyword tools: The collection of tools to enable for the new assistant. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -113,15 +125,15 @@ def create_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: Assistant. 
The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -157,7 +169,7 @@ def create_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet + :paramtype toolset: ~azure.ai.assistants.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -172,15 +184,15 @@ def create_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: Assistant. 
The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -196,7 +208,7 @@ def create_assistant( Default value is "application/json". :paramtype content_type: str :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -212,7 +224,7 @@ def create_assistant( Default value is "application/json". :paramtype content_type: str :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -336,12 +348,12 @@ def update_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
Default value is @@ -356,15 +368,15 @@ def update_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -404,7 +416,7 @@ def update_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet + :paramtype toolset: ~azure.ai.assistants.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -419,15 +431,15 @@ def update_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. 
Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -445,7 +457,7 @@ def update_assistant( Default value is "application/json". :paramtype content_type: str :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -463,7 +475,7 @@ def update_assistant( Default value is "application/json". :paramtype content_type: str :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -504,15 +516,15 @@ def update_assistant( :paramtype instructions: str :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. 
- :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet + :paramtype toolset: ~azure.ai.assistants.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -527,8 +539,8 @@ def update_assistant( :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -538,7 +550,7 @@ def update_assistant( None. :paramtype metadata: dict[str, str] :return: Assistant. 
The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ self._validate_tools_and_tool_resources(tools, tool_resources) @@ -624,7 +636,7 @@ def create_run( # pylint: disable=arguments-differ Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -640,10 +652,10 @@ def create_run( # pylint: disable=arguments-differ :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -672,17 +684,17 @@ def create_run( # pylint: disable=arguments-differ :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. 
- :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -692,7 +704,7 @@ def create_run( # pylint: disable=arguments-differ None. :paramtype metadata: dict[str, str] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -716,12 +728,12 @@ def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -745,12 +757,12 @@ def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -788,7 +800,7 @@ def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword assistant_id: The ID of the assistant that should run the thread. Required. :paramtype assistant_id: str :keyword model: The overridden model name that the assistant should use to run the thread. 
Default @@ -803,10 +815,10 @@ def create_run( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -835,17 +847,17 @@ def create_run( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -855,7 +867,7 @@ def create_run( None. :paramtype metadata: dict[str, str] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -930,7 +942,7 @@ def create_and_process_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword model: The overridden model name that the assistant should use to run the thread. Default value is None. :paramtype model: str @@ -943,10 +955,10 @@ def create_and_process_run( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources`). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet + :paramtype toolset: ~azure.ai.assistants.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -975,19 +987,19 @@ def create_and_process_run( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. :paramtype tool_choice: str or str or - ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. :paramtype response_format: str or str or - ~azure.ai.projects.models.AssistantsApiResponseFormatMode or - ~azure.ai.projects.models.AssistantsApiResponseFormat + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1000,7 +1012,7 @@ def create_and_process_run( Default value is 1. :paramtype sleep_interval: int :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ # Create and initiate the run with additional parameters @@ -1092,7 +1104,7 @@ def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword assistant_id: The ID of the assistant that should run the thread. Required. :paramtype assistant_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -1110,10 +1122,10 @@ def create_stream( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1142,17 +1154,17 @@ def create_stream( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. 
- :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1164,7 +1176,7 @@ def create_stream( :keyword event_handler: None :paramtype event_handler: None. _models.AssistantEventHandler will be applied as default. :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1203,7 +1215,7 @@ def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1219,10 +1231,10 @@ def create_stream( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1251,17 +1263,17 @@ def create_stream( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. 
Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1272,9 +1284,9 @@ def create_stream( :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1301,14 +1313,14 @@ def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword event_handler: None :paramtype event_handler: None. _models.AssistantEventHandler will be applied as default. :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1335,15 +1347,15 @@ def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1384,7 +1396,7 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword assistant_id: The ID of the assistant that should run the thread. Required. :paramtype assistant_id: str :keyword model: The overridden model name that the assistant should use to run the thread. Default @@ -1399,10 +1411,10 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1431,17 +1443,17 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. 
- :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1452,9 +1464,9 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1497,7 +1509,11 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] if not event_handler: event_handler = cast(_models.BaseAssistantEventHandlerT, _models.AssistantEventHandler()) - return _models.AssistantRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + return _models.AssistantRunStream( + response_iterator=response_iterator, + submit_tool_outputs=self._handle_submit_tool_outputs, + event_handler=event_handler, + ) # pylint: disable=arguments-differ @overload @@ -1520,15 +1536,15 @@ def submit_tool_outputs_to_run( # pylint: disable=arguments-differ :param run_id: Required. :type run_id: str :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1550,7 +1566,7 @@ def submit_tool_outputs_to_run( Default value is "application/json". :paramtype content_type: str :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1572,7 +1588,7 @@ def submit_tool_outputs_to_run( Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1597,9 +1613,9 @@ def submit_tool_outputs_to_run( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1648,7 +1664,7 @@ def submit_tool_outputs_to_stream( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.BaseAssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1675,12 +1691,12 @@ def submit_tool_outputs_to_stream( :param run_id: Required. :type run_id: str :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
Default value is "application/json". :paramtype content_type: str :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.BaseAssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1706,9 +1722,9 @@ def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.BaseAssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1761,53 +1777,6 @@ def _handle_submit_tool_outputs( event_handler=event_handler, ) - # pylint: disable=arguments-differ - @overload - def upload_file( # pylint: disable=arguments-differ - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def upload_file( # pylint: disable=arguments-differ - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace def upload_file( self, @@ -1822,17 +1791,19 @@ def upload_file( """ Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. Required if `file` and `purpose` are not provided. :type body: Optional[JSON] :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] + :paramtype file: Optional[FileType] :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. 
- :paramtype file_path: Optional[str] + :paramtype file_path: Optional[str] :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - :paramtype purpose: Union[str, _models.FilePurpose, None] - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :keyword filename: The name of the file. - :paramtype filename: Optional[str] + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :paramtype purpose: Union[str, _models.FilePurpose, None] + :keyword filename: The name of the file. + :paramtype filename: Optional[str] :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping :rtype: _models.OpenAIFile :raises FileNotFoundError: If the file_path is invalid. @@ -1880,7 +1851,7 @@ def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwarg is 1. :paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1897,17 +1868,17 @@ def upload_file_and_poll( """Uploads a file for use by other operations. :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType + :paramtype file: ~azure.ai.assistants._vendor.FileType :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose :keyword filename: Default value is None. :paramtype filename: str :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. :paramtype sleep_interval: float :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1921,12 +1892,12 @@ def upload_file_and_poll( :type file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. :paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1998,7 +1969,7 @@ def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2027,12 +1998,12 @@ def create_vector_store_and_poll( :keyword name: The name of the vector store. Default value is None. :paramtype name: str :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. 
Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -2042,7 +2013,7 @@ def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2061,7 +2032,7 @@ def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2093,12 +2064,12 @@ def create_vector_store_and_poll( :keyword name: The name of the vector store. Default value is None. :paramtype name: str :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. 
- :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -2108,7 +2079,7 @@ def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2165,7 +2136,7 @@ def create_vector_store_file_batch_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2188,18 +2159,18 @@ def create_vector_store_file_batch_and_poll( :keyword file_ids: List of file identifiers. Required. :paramtype file_ids: list[str] :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. 
- :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2226,7 +2197,7 @@ def create_vector_store_file_batch_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2255,14 +2226,14 @@ def create_vector_store_file_batch_and_poll( :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword content_type: Body parameter content-type. Defaults to "application/json". :paramtype content_type: str :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2316,7 +2287,9 @@ def get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: return cast(Iterator[bytes], response) @distributed_trace - def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: + def save_file( # pylint: disable=client-method-missing-kwargs + self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None + ) -> None: """ Synchronously saves file content retrieved using a file identifier to the specified local directory. @@ -2387,7 +2360,7 @@ def create_vector_store_file_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2413,15 +2386,15 @@ def create_vector_store_file_and_poll( :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2448,7 +2421,7 @@ def create_vector_store_file_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2476,15 +2449,15 @@ def create_vector_store_file_and_poll( :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2522,94 +2495,22 @@ def create_vector_store_file_and_poll( return vector_store_file - @classmethod - def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> Self: - """ - Create an asynchronous AIProjectClient from a connection string. - - :param str conn_str: The connection string, copied from your AI Foundry project. 
- :param TokenCredential credential: Credential used to authenticate requests to the service. - :return: An AssistantsClient instance. - :rtype: AssistantsClient - """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls( - endpoint, - subscription_id, - resource_group_name, - project_name, - credential, - **kwargs, - ) - - def upload_file_to_azure_blob(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: - """Upload a file to the Azure AI Foundry project. - This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The tuple, containing asset id and asset URI of uploaded file. - :rtype: Tuple[str, str] - """ - try: - from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.constants import AssetTypes # type: ignore - from azure.ai.ml.entities import Data # type: ignore - except ImportError as e: - raise ImportError( - "azure-ai-ml must be installed to use this function. 
Please install it using `pip install azure-ai-ml`" - ) from e - - data = Data( - path=str(file_path), - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - # We have to wrap async method get_token of - - ml_client = MLClient( - self._config.credential, - self._config.subscription_id, - self._config.resource_group_name, - self._config.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id, data_asset.path - @distributed_trace - def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: + def delete_assistant( # pylint: disable=delete-operation-wrong-return-type + self, assistant_id: str, **kwargs: Any + ) -> _models.AssistantDeletionStatus: """Deletes an assistant. :param assistant_id: Identifier of the assistant. Required. :type assistant_id: str :return: AssistantDeletionStatus. The AssistantDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AssistantDeletionStatus + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ if assistant_id in self._toolset: del self._toolset[assistant_id] return super().delete_assistant(assistant_id, **kwargs) - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config.subscription_id, - "resource_group_name": self._config.resource_group_name, - "project_name": self._config.project_name, - } - __all__: List[str] = ["AssistantsClient"] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py index fd0dcd3fbe4e..ff7afa16a25f 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py @@ -1,4 +1,3 @@ -# pylint: 
disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -27,18 +26,9 @@ class AssistantsClient(AssistantsClientOperationsMixin): """AssistantsClient. - :param endpoint: The Azure AI Foundry project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``, where is the - Azure region where the project is deployed (e.g. westus) and is the GUID of - the Enterprise private link. Required. + :param endpoint: Project endpoint in the form of: + https://.services.ai.azure.com/api/projects/. Required. :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Foundry project name. Required. - :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -50,23 +40,10 @@ class AssistantsClient(AssistantsClientOperationsMixin): """ def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: Union[AzureKeyCredential, "AsyncTokenCredential"], - **kwargs: Any + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" - self._config = AssistantsClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - **kwargs - ) + _endpoint = "{endpoint}" + self._config = AssistantsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -113,15 +90,6 @@ def send_request( request_copy = deepcopy(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py index d991c5beafcb..637f56b4a09d 100644 --- 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py @@ -23,18 +23,9 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: The Azure AI Foundry project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``, where is the - Azure region where the project is deployed (e.g. westus) and is the GUID of - the Enterprise private link. Required. + :param endpoint: Project endpoint in the form of: + https://.services.ai.azure.com/api/projects/. Required. :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Foundry project name. Required. - :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -46,34 +37,19 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib """ def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: Union[AzureKeyCredential, "AsyncTokenCredential"], - **kwargs: Any, + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: api_version: str = kwargs.pop("api_version", "2025-05-15-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - if resource_group_name is None: - raise ValueError("Parameter 'resource_group_name' must not be None.") - if project_name is None: - raise ValueError("Parameter 'project_name' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint - self.subscription_id = subscription_id - self.resource_group_name = resource_group_name - self.project_name = project_name self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py index 172b51ab51a9..a9696b0e89a2 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py @@ -296,15 +296,6 @@ async def 
create_assistant( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -391,15 +382,6 @@ async def list_assistants( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -460,15 +442,6 @@ async def get_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assis ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, 
**path_format_arguments) @@ -717,15 +690,6 @@ async def update_assistant( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -786,15 +750,6 @@ async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.As ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -955,15 +910,6 @@ async def create_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url 
= self._client.format_url(_request.url, **path_format_arguments) @@ -1024,15 +970,6 @@ async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AssistantTh ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1196,15 +1133,6 @@ async def update_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1265,15 +1193,6 @@ async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDe ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - 
"self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1461,15 +1380,6 @@ async def create_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1564,15 +1474,6 @@ async def list_messages( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1636,15 +1537,6 @@ async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _ ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - 
), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1805,15 +1697,6 @@ async def update_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2183,15 +2066,6 @@ async def create_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2282,15 +2156,6 @@ async def list_runs( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": 
self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2354,15 +2219,6 @@ async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.T ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2523,15 +2379,6 @@ async def update_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2704,15 +2551,6 @@ async def submit_tool_outputs_to_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", 
self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2776,15 +2614,6 @@ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3108,15 +2937,6 @@ async def create_thread_and_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3197,15 +3017,6 @@ async def get_run_step( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": 
self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3307,15 +3118,6 @@ async def list_run_steps( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3380,15 +3182,6 @@ async def list_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3462,15 +3255,6 @@ async def _upload_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": 
self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3531,15 +3315,6 @@ async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletion ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3600,15 +3375,6 @@ async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3669,15 +3435,6 @@ async def _get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[ ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - 
"subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3761,15 +3518,6 @@ async def list_vector_stores( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3951,15 +3699,6 @@ async def create_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4020,15 +3759,6 @@ async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models ) path_format_arguments = { "endpoint": 
self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4190,15 +3920,6 @@ async def modify_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4260,15 +3981,6 @@ async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _mod ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4364,15 +4076,6 @@ async def 
list_vector_store_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4530,15 +4233,6 @@ async def create_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4602,15 +4296,6 @@ async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwar ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -4679,15 +4364,6 @@ async def delete_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4845,15 +4521,6 @@ async def create_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4919,15 +4586,6 @@ async def get_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", 
skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4994,15 +4652,6 @@ async def cancel_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5102,15 +4751,6 @@ async def list_vector_store_file_batch_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py index 8e56156b502f..5a7115701b6b 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py @@ -6,52 +6,13 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import Any, List, Optional, TYPE_CHECKING - -import 
asyncio -import concurrent.futures -from azure.core.credentials import TokenCredential +from typing import List, TYPE_CHECKING if TYPE_CHECKING: from azure.core.credentials import AccessToken from azure.core.credentials_async import AsyncTokenCredential -class _SyncCredentialWrapper(TokenCredential): - """ - The class, synchronizing AsyncTokenCredential. - - :param async_credential: The async credential to be synchronized. - :type async_credential: ~azure.core.credentials_async.AsyncTokenCredential - """ - - def __init__(self, async_credential: "AsyncTokenCredential"): - self._async_credential = async_credential - - def get_token( - self, - *scopes: str, - claims: Optional[str] = None, - tenant_id: Optional[str] = None, - enable_cae: bool = False, - **kwargs: Any, - ) -> "AccessToken": - return ( - concurrent.futures.ThreadPoolExecutor() - .submit( - asyncio.run, - self._async_credential.get_token( - *scopes, - claims=claims, - tenant_id=tenant_id, - enable_cae=enable_cae, - **kwargs, - ), - ) - .result() - ) - - __all__: List[str] = [] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py index ccdad560d92f..a51ea8cfefaf 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py @@ -8,16 +8,14 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio +import asyncio # pylint: disable = do-not-import-asyncio import io import logging import os -import uuid import time -from os import PathLike from pathlib import Path -from .. import models as _models + from typing import ( IO, @@ -28,24 +26,22 @@ List, MutableMapping, Optional, - Self, - Tuple, Union, cast, overload, ) from azure.core.tracing.decorator_async import distributed_trace_async + +from .. 
import models as _models from .._vendor import FileType from ..models._enums import FilePurpose, RunStatus - from ._client import AssistantsClient as AssistantsClientGenerated -from ._operations._patch import _SyncCredentialWrapper if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from openai import AsyncAzureOpenAI + from .. import _types - from azure.core.credentials import AccessToken + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import AccessToken, AzureKeyCredential from azure.core.credentials_async import AsyncTokenCredential logger = logging.getLogger(__name__) @@ -54,10 +50,30 @@ _Unset: Any = object() -class AssistantsClient(AssistantsClientGenerated): +class AssistantsClient(AssistantsClientGenerated): # pylint: disable=client-accepts-api-version-keyword - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) + def __init__( + self, endpoint: str, credential: Union["AzureKeyCredential", "AsyncTokenCredential"], **kwargs: Any + ) -> None: + # TODO: Remove this custom code when 1DP service will be available + if not endpoint: + raise ValueError("Connection string or 1DP endpoint is required") + parts = endpoint.split(";") + # Detect legacy endpoint and build it in old way. + if len(parts) == 4: + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + endpoint = ( + f"{endpoint}/agents/v1.0/subscriptions" + f"/{subscription_id}/resourceGroups/{resource_group_name}/providers" + f"/Microsoft.MachineLearningServices/workspaces/{project_name}" + ) + # Override the credential scope with the legacy one. + kwargs['credential_scopes'] = ["https://management.azure.com/.default"] + # End of legacy endpoints handling. 
+ super().__init__(endpoint, credential, **kwargs) self._toolset: Dict[str, _models.AsyncToolSet] = {} # pylint: disable=arguments-differ @@ -92,12 +108,12 @@ async def create_assistant( # pylint: disable=arguments-differ :keyword instructions: The system instructions for the new assistant to use. Default value is None. :paramtype instructions: str :keyword tools: The collection of tools to enable for the new assistant. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -112,15 +128,15 @@ async def create_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -137,7 +153,7 @@ async def create_assistant( # pylint: disable=arguments-differ toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> _models.Assistant: @@ -156,7 +172,7 @@ async def create_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
Default value is @@ -171,15 +187,15 @@ async def create_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -195,7 +211,7 @@ async def create_assistant( Default value is "application/json". :paramtype content_type: str :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -211,7 +227,7 @@ async def create_assistant( Default value is "application/json". :paramtype content_type: str :return: assistant. 
The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -229,7 +245,7 @@ async def create_assistant( toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, content_type: str = "application/json", **kwargs: Any, @@ -259,7 +275,7 @@ async def create_assistant( :keyword top_p: Nucleus sampling parameter. :paramtype top_p: Optional[float] :keyword response_format: Response format for tool calls. - :paramtype response_format: Optional["_types.assistantsApiResponseFormatOption"] + :paramtype response_format: Optional["_types.AssistantsApiResponseFormatOption"] :keyword metadata: Key/value pairs for storing additional information. :paramtype metadata: Optional[Dict[str, str]] :keyword content_type: Content type of the body. @@ -310,7 +326,7 @@ async def update_assistant( # pylint: disable=arguments-differ tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> _models.Assistant: @@ -332,12 +348,12 @@ async def update_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. 
- :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -352,15 +368,15 @@ async def update_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: assistant. 
The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -378,7 +394,7 @@ async def update_assistant( # pylint: disable=arguments-differ toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> _models.Assistant: @@ -400,7 +416,7 @@ async def update_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -415,15 +431,15 @@ async def update_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -441,7 +457,7 @@ async def update_assistant( Default value is "application/json". :paramtype content_type: str :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -459,7 +475,7 @@ async def update_assistant( Default value is "application/json". :paramtype content_type: str :return: assistant. 
The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -478,7 +494,7 @@ async def update_assistant( toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, content_type: str = "application/json", metadata: Optional[Dict[str, str]] = None, **kwargs: Any, @@ -500,15 +516,15 @@ async def update_assistant( :paramtype instructions: str :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
Default value is @@ -523,8 +539,8 @@ async def update_assistant( :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -534,7 +550,7 @@ async def update_assistant( None. :paramtype metadata: dict[str, str] :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ self._validate_tools_and_tool_resources(tools, tool_resources) @@ -604,8 +620,8 @@ async def create_run( # pylint: disable=arguments-differ max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, @@ -620,7 +636,7 @@ async def create_run( # pylint: disable=arguments-differ Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. 
Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -636,10 +652,10 @@ async def create_run( # pylint: disable=arguments-differ :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -668,17 +684,17 @@ async def create_run( # pylint: disable=arguments-differ :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. 
- :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -688,7 +704,7 @@ async def create_run( # pylint: disable=arguments-differ None. :paramtype metadata: dict[str, str] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -712,12 +728,12 @@ async def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -741,12 +757,12 @@ async def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -768,8 +784,8 @@ async def create_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, @@ -786,7 +802,7 @@ async def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword model: The overridden model name that the assistant should use to run the thread. Default value is None. :paramtype model: str @@ -799,10 +815,10 @@ async def create_run( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -831,17 +847,17 @@ async def create_run( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. 
- :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -851,7 +867,7 @@ async def create_run( None. :paramtype metadata: dict[str, str] :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -909,8 +925,8 @@ async def create_and_process_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, sleep_interval: int = 1, @@ -926,7 +942,7 @@ async def create_and_process_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword model: The overridden model name that the assistant should use to run the thread. Default value is None. :paramtype model: str @@ -939,10 +955,10 @@ async def create_and_process_run( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources`). Default value is None. 
- :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -971,19 +987,19 @@ async def create_and_process_run( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. :paramtype tool_choice: str or str or - ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. :paramtype response_format: str or str or - ~azure.ai.projects.models.assistantsApiResponseFormatMode or - ~azure.ai.projects.models.assistantsApiResponseFormat + ~azure.ai.assistants.models.assistantsApiResponseFormatMode or + ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -996,7 +1012,7 @@ async def create_and_process_run( Default value is 1. 
:paramtype sleep_interval: int :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ # Create and initiate the run with additional parameters @@ -1073,8 +1089,8 @@ async def create_stream( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, event_handler: None = None, @@ -1090,7 +1106,7 @@ async def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1106,10 +1122,10 @@ async def create_stream( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. 
- :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1138,17 +1154,17 @@ async def create_stream( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1160,7 +1176,7 @@ async def create_stream( :keyword event_handler: None :paramtype event_handler: None. _models.AsyncassistantEventHandler will be applied as default. :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1182,8 +1198,8 @@ async def create_stream( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, event_handler: _models.BaseAsyncAssistantEventHandlerT, @@ -1199,7 +1215,7 @@ async def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1215,10 +1231,10 @@ async def create_stream( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1247,17 +1263,17 @@ async def create_stream( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. 
Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1267,9 +1283,9 @@ async def create_stream( None. :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1296,14 +1312,14 @@ async def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword event_handler: None :paramtype event_handler: None. _models.AsyncassistantEventHandler will be applied as default. :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.projects.models.AsyncAssistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1330,15 +1346,15 @@ async def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1360,8 +1376,8 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, event_handler: Optional[_models.BaseAsyncAssistantEventHandlerT] = None, @@ -1379,7 +1395,7 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword assistant_id: The ID of the assistant that should run the thread. Required. :paramtype assistant_id: str :keyword model: The overridden model name that the assistant should use to run the thread. Default @@ -1394,10 +1410,10 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. 
- :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1426,17 +1442,17 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1447,9 +1463,9 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1491,9 +1507,13 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) if not event_handler: - event_handler = cast(_models.BaseAssistantEventHandlerT, _models.AsyncAssistantEventHandler()) + event_handler = cast(_models.BaseAsyncAssistantEventHandlerT, _models.AsyncAssistantEventHandler()) - return _models.AsyncAssistantRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + return _models.AsyncAssistantRunStream( + response_iterator=response_iterator, + submit_tool_outputs=self._handle_submit_tool_outputs, + event_handler=event_handler, + ) # pylint: disable=arguments-differ @overload @@ -1515,12 +1535,12 @@ async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ :param run_id: 
Required. :type run_id: str :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1542,7 +1562,7 @@ async def submit_tool_outputs_to_run( Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1564,7 +1584,7 @@ async def submit_tool_outputs_to_run( Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1589,9 +1609,9 @@ async def submit_tool_outputs_to_run( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1636,7 +1656,7 @@ async def submit_tool_outputs_to_stream( :type body: JSON or IO[bytes] :keyword event_handler: The event handler to use for processing events during the run. 
Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1663,12 +1683,12 @@ async def submit_tool_outputs_to_stream( :param run_id: Required. :type run_id: str :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1694,9 +1714,9 @@ async def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOv :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword event_handler: The event handler to use for processing events during the run. 
- :paramtype event_handler: ~azure.ai.projects.models.AsyncAssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1746,53 +1766,6 @@ async def _handle_submit_tool_outputs( thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler ) - # pylint: disable=arguments-differ - @overload - async def upload_file( # pylint: disable=arguments-differ - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - async def upload_file( # pylint: disable=arguments-differ - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - @distributed_trace_async async def upload_file( self, @@ -1810,14 +1783,14 @@ async def upload_file( :param body: JSON. Required if `file` and `purpose` are not provided. :type body: Optional[JSON] :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] + :type file: Optional[FileType] :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] + :type file_path: Optional[str] :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :type purpose: Union[str, _models.FilePurpose, None] :keyword filename: The name of the file. - :paramtype filename: Optional[str] + :type filename: Optional[str] :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping :rtype: _models.OpenAIFile :raises FileNotFoundError: If the file_path is invalid. @@ -1863,7 +1836,7 @@ async def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, * is 1. :paramtype sleep_interval: float :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1880,17 +1853,17 @@ async def upload_file_and_poll( """Uploads a file for use by other operations. :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType + :paramtype file: ~azure.ai.assistants._vendor.FileType :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose :keyword filename: Default value is None. :paramtype filename: str :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. :paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1904,12 +1877,12 @@ async def upload_file_and_poll( :type file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. :paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1981,7 +1954,7 @@ async def create_vector_store_and_poll( is 1. 
:paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2010,12 +1983,12 @@ async def create_vector_store_and_poll( :keyword name: The name of the vector store. Default value is None. :paramtype name: str :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -2025,7 +1998,7 @@ async def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2044,7 +2017,7 @@ async def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2076,12 +2049,12 @@ async def create_vector_store_and_poll( :keyword name: The name of the vector store. Default value is None. :paramtype name: str :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -2091,7 +2064,7 @@ async def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2148,7 +2121,7 @@ async def create_vector_store_file_batch_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2171,18 +2144,18 @@ async def create_vector_store_file_batch_and_poll( :keyword file_ids: List of file identifiers. Required. :paramtype file_ids: list[str] :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2209,7 +2182,7 @@ async def create_vector_store_file_batch_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2238,14 +2211,14 @@ async def create_vector_store_file_batch_and_poll( :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword content_type: Body parameter content-type. Defaults to "application/json". :paramtype content_type: str :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2306,7 +2279,7 @@ async def create_vector_store_file_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2332,15 +2305,15 @@ async def create_vector_store_file_and_poll( :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str :keyword data_source: Azure asset ID. Default value is None. 
- :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2367,7 +2340,7 @@ async def create_vector_store_file_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2395,15 +2368,15 @@ async def create_vector_store_file_and_poll( :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. 
Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2457,7 +2430,9 @@ async def get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[b return cast(AsyncIterator[bytes], response) @distributed_trace_async - async def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: + async def save_file( # pylint: disable=client-method-missing-kwargs + self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None + ) -> None: """ Asynchronously saves file content retrieved using a file identifier to the specified local directory. @@ -2515,72 +2490,6 @@ def write_file(collected_chunks: list): logger.error("An error occurred in save_file: %s", e) raise - @classmethod - def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> Self: - """ - Create an asynchronous AIProjectClient from a connection string. - - :param str conn_str: The connection string, copied from your AI Foundry project. - :param AsyncTokenCredential credential: Credential used to authenticate requests to the service. - :return: An AssistantsClient instance. - :rtype: AssistantsClient - """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls( - endpoint, - subscription_id, - resource_group_name, - project_name, - credential, - **kwargs, - ) - - def upload_file_to_azure_blob(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: - """Upload a file to the Azure AI Foundry project. 
- This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The tuple, containing asset id and asset URI of uploaded file. - :rtype: Tuple[str, str] - """ - try: - from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.constants import AssetTypes # type: ignore - from azure.ai.ml.entities import Data # type: ignore - except ImportError as e: - raise ImportError( - "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" - ) from e - - data = Data( - path=str(file_path), - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - # We have to wrap async method get_token of - - ml_client = MLClient( - _SyncCredentialWrapper(self._config.credential), - self._config.subscription_id, - self._config.resource_group_name, - self._config.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id, data_asset.path - @distributed_trace_async async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: """Deletes an assistant. @@ -2588,21 +2497,13 @@ async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.As :param assistant_id: Identifier of the assistant. Required. :type assistant_id: str :return: AssistantDeletionStatus. 
The AssistantDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AssistantDeletionStatus + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ if assistant_id in self._toolset: del self._toolset[assistant_id] return await super().delete_assistant(assistant_id, **kwargs) - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config.subscription_id, - "resource_group_name": self._config.resource_group_name, - "project_name": self._config.project_name, - } - __all__: List[str] = ["AssistantsClient"] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py index 458875b29d44..84a1440612ed 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py @@ -7,14 +7,11 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio -import base64 -import datetime +import asyncio # pylint: disable = do-not-import-asyncio import inspect import itertools import json import logging -import math import re from abc import ABC, abstractmethod from typing import ( @@ -39,8 +36,6 @@ overload, ) -from azure.core.credentials import AccessToken, TokenCredential - from ._enums import AssistantStreamEvent, MessageRole, AzureAISearchQueryType from ._models import ( AISearchIndexResource, @@ -77,6 +72,7 @@ ToolDefinition, ToolResources, MessageDeltaTextContent, + VectorStoreDataSource, ) from ._models import MessageDeltaChunk as MessageDeltaChunkGenerated @@ -97,7 +93,7 @@ def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[st **Note:** Classes inherited from the model check that the parameters are present in the list of attributes and if they are 
not, the error is being raised. This check may not - be relevant for classes, not inherited from azure.ai.projects._model_base.Model. + be relevant for classes, not inherited from azure.ai.assistants._model_base.Model. :param Type model_class: The class of model to be used. :param parameters: The parsed dictionary with parameters. :type parameters: Union[str, Dict[str, Any]] @@ -199,87 +195,6 @@ def _parse_event(event_data_str: str) -> Tuple[str, StreamEventData]: return event_type, event_obj -# TODO: Look into adding an async version of this class -class SASTokenCredential(TokenCredential): - def __init__( - self, - *, - sas_token: str, - credential: TokenCredential, - subscription_id: str, - resource_group_name: str, - project_name: str, - connection_name: str, - ): - self._sas_token = sas_token - self._credential = credential - self._subscription_id = subscription_id - self._resource_group_name = resource_group_name - self._project_name = project_name - self._connection_name = connection_name - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential.__init__] Exit. 
Given token expires on %s.", self._expires_on) - - @classmethod - def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime: - payload = jwt_token.split(".")[1] - padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary - decoded_bytes = base64.urlsafe_b64decode(padded_payload) - decoded_str = decoded_bytes.decode("utf-8") - decoded_payload = json.loads(decoded_str) - expiration_date = decoded_payload.get("exp") - return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) - - def _refresh_token(self) -> None: - logger.debug("[SASTokenCredential._refresh_token] Enter") - from azure.ai.assistants import AssistantsClient - - project_client = AssistantsClient( - credential=self._credential, - # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. - # http://management.azure.com is hard coded in the SDK. - endpoint="not-needed", - subscription_id=self._subscription_id, - resource_group_name=self._resource_group_name, - project_name=self._project_name, - ) - - connection = project_client.connections.get(connection_name=self._connection_name, include_credentials=True) - - self._sas_token = "" - if connection is not None and connection.token_credential is not None: - sas_credential = cast(SASTokenCredential, connection.token_credential) - self._sas_token = sas_credential._sas_token # pylint: disable=protected-access - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on) - - def get_token( - self, - *scopes: str, - claims: Optional[str] = None, - tenant_id: Optional[str] = None, - enable_cae: bool = False, - **kwargs: Any, - ) -> AccessToken: - """Request an access token for `scopes`. - - :param str scopes: The type of access needed. 
- - :keyword str claims: Additional claims required in the token, such as those returned in a resource - provider's claims challenge following an authorization failure. - :keyword str tenant_id: Optional tenant to include in the token request. - :keyword bool enable_cae: Indicates whether to enable Continuous Access Evaluation (CAE) for the requested - token. Defaults to False. - - :rtype: AccessToken - :return: An AccessToken instance containing the token string and its expiration time in Unix time. - """ - logger.debug("SASTokenCredential.get_token] Enter") - if self._expires_on < datetime.datetime.now(datetime.timezone.utc): - self._refresh_token() - return AccessToken(self._sas_token, math.floor(self._expires_on.timestamp())) - - # Define type_map to translate Python type annotations to JSON Schema types type_map = { "str": "string", @@ -1130,7 +1045,7 @@ def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolRes Safely converts a dictionary into a ToolResources instance. :param resources: A dictionary of tool resources. Should be a mapping - accepted by ~azure.ai.projects.models.AzureAISearchResource + accepted by ~azure.ai.assistants.models.AzureAISearchResource :type resources: Dict[str, Any] :return: A ToolResources instance. :rtype: ToolResources @@ -1183,7 +1098,7 @@ def validate_tool_type(self, tool: Tool) -> None: if isinstance(tool, AsyncFunctionTool): raise ValueError( "AsyncFunctionTool is not supported in ToolSet. " - + "To use async functions, use AsyncToolSet and assistants operations in azure.ai.projects.aio." + + "To use async functions, use AsyncToolSet and assistants operations in azure.ai.assistants.aio." ) def execute_tool_calls(self, tool_calls: List[Any]) -> Any: @@ -1711,7 +1626,7 @@ def get_last_message_by_role(self, role: MessageRole) -> Optional[ThreadMessage] :type role: MessageRole :return: The last message from a sender in the specified role. 
- :rtype: ~azure.ai.projects.models.ThreadMessage + :rtype: ~azure.ai.assistants.models.ThreadMessage """ for msg in self.data: if msg.role == role: @@ -1725,7 +1640,7 @@ def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTe :type role: MessageRole :return: The last text message from a sender in the specified role. - :rtype: ~azure.ai.projects.models.MessageTextContent + :rtype: ~azure.ai.assistants.models.MessageTextContent """ for msg in self.data: if msg.role == role: @@ -1756,7 +1671,6 @@ def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTe "SharepointTool", "FabricTool", "AzureAISearchTool", - "SASTokenCredential", "Tool", "ToolSet", "BaseAsyncAssistantEventHandlerT", diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py index b9c4c0f6003a..4d638973f4c0 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py @@ -10,4 +10,5 @@ from ._utils import enable_telemetry from ._trace_function import trace_function -__all__ = ["AIAssistantsInstrumentor", "enable_telemetry" "trace_function"] + +__all__ = ["AIAssistantsInstrumentor", "enable_telemetry", "trace_function"] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py index 16fb9520212d..e0512a34ca6b 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py @@ -10,10 +10,9 @@ import logging import os from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast, TYPE_CHECKING from 
urllib.parse import urlparse -from azure.ai.assistants import _types from azure.ai.assistants.models import AssistantRunStream, AsyncAssistantRunStream, _models from azure.ai.assistants.models._enums import AssistantsApiResponseFormatMode, MessageRole, RunStepStatus from azure.ai.assistants.models import ( @@ -67,6 +66,9 @@ except ModuleNotFoundError: _tracing_library_available = False +if TYPE_CHECKING: + from .. import _types + __all__ = [ "AIAssistantsInstrumentor", @@ -80,7 +82,7 @@ class TraceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): # pylint: disable=C4747 """An enumeration class to represent different types of traces.""" - AssistantS = "Assistants" + ASSISTANTS = "Assistants" class AIAssistantsInstrumentor: @@ -1349,7 +1351,7 @@ def _trace_sync_function( function: Callable, *, _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AssistantS, + _trace_type=TraceType.ASSISTANTS, _name: Optional[str] = None, ) -> Callable: """ @@ -1360,7 +1362,7 @@ def _trace_sync_function( :param args_to_ignore: A list of argument names to be ignored in the trace. Defaults to None. :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AssistantS. + :param trace_type: The type of the trace. Defaults to TraceType.ASSISTANTS. :type trace_type: TraceType, optional :param name: The name of the trace, will set to func name if not provided. :type name: str, optional @@ -1417,7 +1419,7 @@ def _trace_async_function( function: Callable, *, _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AssistantS, + _trace_type=TraceType.ASSISTANTS, _name: Optional[str] = None, ) -> Callable: """ @@ -1428,7 +1430,7 @@ def _trace_async_function( :param args_to_ignore: A list of argument names to be ignored in the trace. Defaults to None. :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AssistantS. + :param trace_type: The type of the trace. 
Defaults to TraceType.ASSISTANTS. :type trace_type: TraceType, optional :param name: The name of the trace, will set to func name if not provided. :type name: str, optional @@ -1492,108 +1494,108 @@ def _inject_sync(self, f, _trace_type, _name): def _assistants_apis(self): sync_apis = ( - ("azure.ai.assistants", "AssistantsClient", "create_assistant", TraceType.AssistantS, "assistant_create"), - ("azure.ai.assistants", "AssistantsClient", "create_thread", TraceType.AssistantS, "thread_create"), - ("azure.ai.assistants", "AssistantsClient", "create_message", TraceType.AssistantS, "message_create"), - ("azure.ai.assistants", "AssistantsClient", "create_run", TraceType.AssistantS, "create_run"), + ("azure.ai.assistants", "AssistantsClient", "create_assistant", TraceType.ASSISTANTS, "assistant_create"), + ("azure.ai.assistants", "AssistantsClient", "create_thread", TraceType.ASSISTANTS, "thread_create"), + ("azure.ai.assistants", "AssistantsClient", "create_message", TraceType.ASSISTANTS, "message_create"), + ("azure.ai.assistants", "AssistantsClient", "create_run", TraceType.ASSISTANTS, "create_run"), ( "azure.ai.assistants", "AssistantsClient", "create_and_process_run", - TraceType.AssistantS, + TraceType.ASSISTANTS, "create_and_process_run", ), ( "azure.ai.assistants", "AssistantsClient", "submit_tool_outputs_to_run", - TraceType.AssistantS, + TraceType.ASSISTANTS, "submit_tool_outputs_to_run", ), ( "azure.ai.assistants", "AssistantsClient", "submit_tool_outputs_to_stream", - TraceType.AssistantS, + TraceType.ASSISTANTS, "submit_tool_outputs_to_stream", ), ( "azure.ai.assistants", "AssistantsClient", "_handle_submit_tool_outputs", - TraceType.AssistantS, + TraceType.ASSISTANTS, "_handle_submit_tool_outputs", ), - ("azure.ai.assistants", "AssistantsClient", "create_stream", TraceType.AssistantS, "create_stream"), - ("azure.ai.assistants", "AssistantsClient", "list_messages", TraceType.AssistantS, "list_messages"), - ("azure.ai.assistants.models", 
"AssistantRunStream", "__exit__", TraceType.AssistantS, "__exit__"), + ("azure.ai.assistants", "AssistantsClient", "create_stream", TraceType.ASSISTANTS, "create_stream"), + ("azure.ai.assistants", "AssistantsClient", "list_messages", TraceType.ASSISTANTS, "list_messages"), + ("azure.ai.assistants.models", "AssistantRunStream", "__exit__", TraceType.ASSISTANTS, "__exit__"), ) async_apis = ( ( "azure.ai.assistants.aio", "AssistantsClient", "create_assistant", - TraceType.AssistantS, + TraceType.ASSISTANTS, "assistant_create", ), ( "azure.ai.assistants.aio", "AssistantsClient", "create_thread", - TraceType.AssistantS, + TraceType.ASSISTANTS, "assistants_thread_create", ), ( "azure.ai.assistants.aio", "AssistantsClient", "create_message", - TraceType.AssistantS, + TraceType.ASSISTANTS, "assistants_thread_message", ), - ("azure.ai.assistants.aio", "AssistantsClient", "create_run", TraceType.AssistantS, "create_run"), + ("azure.ai.assistants.aio", "AssistantsClient", "create_run", TraceType.ASSISTANTS, "create_run"), ( "azure.ai.assistants.aio", "AssistantsClient", "create_and_process_run", - TraceType.AssistantS, + TraceType.ASSISTANTS, "create_and_process_run", ), ( "azure.ai.assistants.aio", "AssistantsClient", "submit_tool_outputs_to_run", - TraceType.AssistantS, + TraceType.ASSISTANTS, "submit_tool_outputs_to_run", ), ( "azure.ai.assistants.aio", "AssistantsClient", "submit_tool_outputs_to_stream", - TraceType.AssistantS, + TraceType.ASSISTANTS, "submit_tool_outputs_to_stream", ), ( "azure.ai.assistants.aio", "AssistantsClient", "_handle_submit_tool_outputs", - TraceType.AssistantS, + TraceType.ASSISTANTS, "_handle_submit_tool_outputs", ), ( "azure.ai.assistants.aio", "AssistantsClient", "create_stream", - TraceType.AssistantS, + TraceType.ASSISTANTS, "create_stream", ), ( "azure.ai.assistants.aio", "AssistantsClient", "list_messages", - TraceType.AssistantS, + TraceType.ASSISTANTS, "list_messages", ), - ("azure.ai.assistants.models", "AsyncAssistantRunStream", 
"__aexit__", TraceType.AssistantS, "__aexit__"), + ("azure.ai.assistants.models", "AsyncAssistantRunStream", "__aexit__", TraceType.ASSISTANTS, "__aexit__"), ) return sync_apis, async_apis diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py index 1890a6f1e88d..0ac5ea43c13f 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. # ------------------------------------ import functools -import asyncio +import asyncio # pylint: disable = do-not-import-asyncio from typing import Any, Callable, Optional, Dict try: diff --git a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env index faaf292ebf44..3c74e991b06b 100644 --- a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env +++ b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env @@ -5,21 +5,10 @@ # but do not commit these changes to the repository. # - -######################################################################################################################## -# Telemetry tests -# -# To run telemetry tests you need an AI Foundry project with a connected Application Insights resource. 
-# -AZURE_AI_ASSISTANTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_ASSISTANTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} - ######################################################################################################################## # Agents tests # -AZURE_AI_ASSISTANTS_AGENTS_TESTS_PROJECT_CONNECTION_STRING= -AZURE_AI_ASSISTANTS_AGENTS_TESTS_DATA_PATH= -AZURE_AI_ASSISTANTS_AGENTS_TESTS_STORAGE_QUEUE= -AZURE_AI_ASSISTANTS_AGENTS_TESTS_SEARCH_INDEX_NAME= -AZURE_AI_ASSISTANTS_AGENTS_TESTS_SEARCH_CONNECTION_NAME= - - +AZURE_AI_ASSISTANTS_TESTS_PROJECT_ENDPOINT= +AZURE_AI_ASSISTANTS_TESTS_DATA_PATH= +AZURE_AI_ASSISTANTS_TESTS_STORAGE_QUEUE= +AZURE_AI_ASSISTANTS_TESTS_SEARCH_INDEX_NAME= diff --git a/sdk/ai/azure-ai-assistants/dev_requirements.txt b/sdk/ai/azure-ai-assistants/dev_requirements.txt index 105486471444..b5272c25b382 100644 --- a/sdk/ai/azure-ai-assistants/dev_requirements.txt +++ b/sdk/ai/azure-ai-assistants/dev_requirements.txt @@ -1,3 +1,8 @@ -e ../../../tools/azure-sdk-tools ../../core/azure-core -aiohttp \ No newline at end of file +../../identity/azure-identity +../../core/azure-core-tracing-opentelemetry +aiohttp +opentelemetry-sdk +opentelemetry-exporter-otlp-proto-grpc +azure-ai-ml diff --git a/sdk/ai/azure-ai-assistants/pyrightconfig.json b/sdk/ai/azure-ai-assistants/pyrightconfig.json new file mode 100644 index 000000000000..3af0746bdada --- /dev/null +++ b/sdk/ai/azure-ai-assistants/pyrightconfig.json @@ -0,0 +1,21 @@ +{ + "reportTypeCommentUsage": true, + "reportMissingImports": false, + "pythonVersion": "3.11", + "exclude": [ + "**/downloaded", + "**/sample_assistants_vector_store_batch_enterprise_file_search_async.py", + "**/sample_assistants_with_file_search_attachment.py", + "**/sample_assistants_with_code_interpreter_file_attachment.py", + "**/sample_assistants_code_interpreter_attachment_enterprise_search.py", + "**/sample_assistants_with_file_search_attachment_async.py", + 
"**/sample_assistants_code_interpreter_attachment_enterprise_search_async.py", + "**/sample_assistants_code_interpreter_attachment_enterprise_search_async.py", + "**/sample_assistants_code_interpreter_attachment_async.py" + ], + "extraPaths": [ + "./../../core/azure-core", + "./../../identity/azure-identity", + "./../../monitor/azure-monitor-opentelemetry" + ] +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/assistant-5szzLs73bsbQ2k75xUGKv8_image_file.png b/sdk/ai/azure-ai-assistants/samples/assistant-5szzLs73bsbQ2k75xUGKv8_image_file.png deleted file mode 100644 index 928fec58517aee4c8bc063b37949f8400224d95d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 162061 zcmd43hdY<;{|Bt1L5r-E5>jOEQA*jXY?;}c>`f_38JSU}viB%tN5hQFgh+{u5X#o` zy6)d|Jje0<51x+WKJM;reLmN9Ug!CKzt;N-y`n0&kDP&=goI?Dg1odk3CXT15)#s+ z-8=CoYbw)j_?MWQjINu8lck%diHik^iiz8GdnY%08&f6^3l~=#Cr1HpVQv9VCTlmh z>#pKFJP!Zg4{$rVSn+iHsr`cw*>hd~sw)Y}u?FIwZH^z^B}ulCkSIt?X?i8ieDT!P zm{>nQ@7eErdN(--2l?zaN9W5!zU2j#r4jobe#zOD+D_;BYUgN8=V;xpn&sjk=Wsko zO?|aQCm?X1#aMMG>jTn@!-dmRTCSoKVwa;On#DRc_eSY*(%!oJKfmPf&{KPo{m&oW zy{q}oT!iv}eoY=_s|x*}pOT;8{j4JWKflE9G&?f?&#x{qWsgJu^FdMVCMA#m=Y#IL z?T`8LKc6F==9)0W_CFuQoc!khf0tyI_Ulk>;J;(bb0c!suk$v%dGmW~(>Ez| zRd%1-Oh2Xn`rqKp%*+s5`Tt#=N@-Tw`|lo|7Cm!@qA`N;O_{^sL3;Yrp9`iYrJ19~ zM*d9ozM(VryZPjtZ^OrrR~+0=GIQ)xS64qsMHM9CG|a@!z2C;hhF3@^I6t3@LpyKx z<;#~79zUk@6v`%D{QH;oY_02rDqrvIC7FvCw{NXZ93QInKhlwMUJn1MuAy-dZU!4W7?hF-vXZ`5l;Na1wD3(`&|0MDF9z1w(BP?uh^N8=#&zocv zwA99A?iXxrIi5)R-@AKv_uIE`nS_LB-oJleR9t*BJbd5KmoI9?rZV;CNYxbP|Nhcn zn(9?r&|O;(H~%fvdffOp^E3Rd+qZ8s2@BJvNcvy&@DRp2n^;%`FZ^gty%+PJp5 z-I&F1ZE4Sm6DM{bI6z!-S9iCGrR7dvUti4}%{|AC9pmNWW1qE+j<_Eh8fD;mij9q# zg=NS1`1tkf*Dw5Ni5s1r-Os|3f0;h_5Y?eWzgK>nRr@aQC%#Wnk-V*~?V6+GemOZg z&8#ciXwGQg&d#=R)``2DrKz@F7_H0s{@JBFMYg>hw}XQf%6^o*e0e)F^Hh#b;eIPC zt5|l8o%o)E_ut8@sJv^8JWhG|aHv(slM}40qzVcO(Pqcbdfl+;&KP}Y98jNk^~J}! 
zz`*1>c|5**SjHq#=j~ftTdzFlj+K;@P|w;~Ti;##GyUe%C%W3&+Ujj=T}&2-iZU`X zib_g?YHH4%J$Fv6+|khO=f`ax9v+YKukE@1cjoHGQupN`yP7Tz35g>|L>zCYq#Q4K z@#21!xcEeO=4H`<4bk>wi92@=93NlT-$l~b*GKH#N{?UngdGN6x3wuN37uqScF-@n zwk_iL`GPpt=IJXRUszqn5*55Q3l9&+(;&06v(wDiqj~b=iFJ3zKHPl4U!U25QVP4i zJW6|450Ceqsd6UQu3h=*k}jb;hMQ?1{=m#E)g^IreOb=hI&19r#_D1xJ~urwaC1#) zKco0Gk)<$i()koUa66?TIgpQ1LZ_KKRTrKP2{?tD(>^JglU zmP?O%%J0UvxsTyTM4mf;f6wu7d9maCcl>;Gc3J3~H zm)iDT$y8+&bQ(U2zZDV}7x#WzL0a=sw$NwP35SjOHwSj_-@o%g%ouH`^!&oY#d60l zA4@Ehda^a-CP_BzdnunOyL6B; zyA>s<*zyB|w2TaUX`r???d)*P{VLBn)5Y-*D+{eWouzg<`#wd#9Bl}u*u7^@)LF;D zGOn{{&)zGRP0M@vQZOw&eH6>=wY4FJ4H6M|MyCNA6kF>~X{o!jjv9;O(5ssjt`j+D zzena=ym;~YXaiZZ*?|KGUOEhFr%uFL5#O`9JW?89WccvWBl7d-&u^?wq{yqPYOlSz zoFX}jqyEZom7kM~i_gBFH;kTddwhI6Lmn4p)WYmg#jP7RNSe)fczDX}VqFd&K72Pk zT*ls>v!S7Z_#*p(mwQU>KE0p${6ekJ=v-;C((vwoO-=GBa8_&HjICG%;&RYt(?d&!{y%6(sI$y@9ekl-zkqB zi#VfGa1FPRrC-inyRp#f7gKl7+{)_v*ZN!i)!qX5p*V3iF0Oq#g+}jsb9L0R)ld2@ zeh{(l%S&vT(AFNFV5FqHY4@qXWc}~2EcJA9e1_6X+g?Sp32i#;iHcBBMQo>O6;mHej zQIhgXN^hRa++V0UD9p=y1Lq4B^~dl=;L7|qEYh2FrHAu%|2892huwyPDdM_Q!mAu-vH_Ev&vwF^=Pz!(iEIor0umYv)BoMDqLcS$(lJYd8Pu*IV;=6Xo32 z_Fh^VtGT6R%f>l&_C5OLj_>Mk?JBnW#El!b%W098R;1Xy`@!MfrkzHc)JKno;l!Fu z^<=Niyy)mrA8vif^J7aTCbX(IN5R;PN;v?cKExD>LtUdPx!y>yW_y4#h;X0rSKovFtT zq1pP<)U+EXO8)ZYj&hcplI>gX>`pMNi7)1`L1wBclHs_ZujL~F-+hLq8G34bJ$v%?bu4`W7oIPI8=03=$hT-| zXo3$5SYDfGPfq8MqQf5p(a0Zr$fZw6=t_VSdiJg~g+u6~I{`4>>E=dr zF)@({ShdF<{@`%!+KqTF{pS{~@z%IJ)LYi%(zm93#1~Oh-^QjJt^2oiNU-VM`}fR} zlC_%t@ptc@&ScR~7I%*w5RsHThR#WCe*}+(Ln|k(=XZ-+gI#Y<+)z|r1xr=RS??h1 zKo%C3OAF;QGxi&Q2c3$_%XyZT-c&3+cGSmK>FLq5AE{*=@mmZZXpI;4oHa<4E-x(= z!jj!0`s|l4hVw@c9Xdn_#P;>;HG>KlGGI&|b<=G1bVcljgtotVd7N+2V}5^ceC59` z+||`Z8i<|CXz$?AS?)Bly@T?|k^6fNu{F6F@bdCr$kopOSmA2rZ9dW9mo7|GVjxJakw>9;Q{>|uBmGKi?08+vGF^0)hqAC4CRYB z@cuw-^(()>pB(h_^V6K7FLs+$uQ!uf)((y`-W7%0OFHLE>%X?7=Cy&Y)`_o9Z>OQ9 z4LRrgcXye}Bf&joWo3$tg|qAFMMb>I3A|F4(j}pxd(gS>s3wa)Gpg~yirqn-(bHoX zuJylvpN{9-+t5Qn+3hb%N{W2`*b>)?hpDNwDeO43`}pw`s;b+Qi2_z*Y?WAsa9sN{ 
z{1fCG3E%`MHj3@Pd-v|w@9xw6Mjdz1P6S}nuoL!ajy_4O=X+mrhQo(V=D*g{^IjwQ zJNqTK)0L7kj@q=C-gTlqPQgt=Ow6P`No@Z2?40KFA*)9v)VVB=ueYo3C6_iBsg&~@E zL@AD9k$dj9y1>AGoPK2;@Uz=LQ6My~w{Z2d*t#O-{ES!s*9qTCl{V#0}ZLLl7)c<9a z@OTZpQBY7oazn4wHe2vVsauJDqVE|C1xtW3#0$ok69KZryCy1KCnB&g*f3dF zo&|MTWGNXK9L-Qo=7{-Opi_7(U}bcdX12P_IK76327W`Pm7`g2mEs>#r&* z^BHoqazjASm<0q5eJV6=G!EPhMiIA59gvnzf28+OO-_(V=l&UwJC?;oFC z0$dHQ%vF?@*0kQgf4^>}8@sss6l?zzkUG?Rn#v)ihunLj9}5c$(+gTZxOUuo+M7;h zxU&MNi(xKd1?xh73^0cgJ4G|$XMCpp=Ms88Q__f|VnH`|?8>PK5j%SHXb}#A+phtw zqH|naDwf_yY|d!sy-t;jBpsaU%fFv2;mJ``R#jNY<1y2JLcnJYmnDi@QMaHDLAk8m zTC+=JUlkA#Ai8yUa(U61`@zE^j!9pguU&h3O*^OSTj0O-<0v3!`uoZp^l=3;j?eWU z-MqOi3jNU2+h-9snIPol17|ou6{7lkdc7(ePAXW$yF{t zT6F6IYRsD-8sHIJxOg#0(IVS+W92tLqnFdrt7o0Av>|T0C_(M~{(iQ~GOQ}QcHZ5S zirtamRlwAwgA78pw@)g?XH~P>;L?6i7uJy%ydcUoHqynD*>Yj@CsvI1Qc$#ddWU>- znd~}Q{~b)|X~5=MF!$vc0n>w&lN5x=Jp{xMfz#BaOqbvO*6_}L#;=o;?A}RnalzOLx%nH_ z=tC!8^p$a>p1ywzjH}J3T}@4mT;gb+aosV~V%-H@d_KN4c$RZ=b~d8etdYD7kf9~) zfsP0^P1}4%-i|+_!!O==3sO32?@nreLOTWQYSxM{g5>=F%hLJ=Gx=zZ1%0? 
zx_>m4-{oIeelXAQ0qb>t7Tp6m;?lpZO|ro<$1nGmmXE` z_YaWV)a$@Y6SGSh1g&qN?a^cp6&O~1L+1?=dJWDL&QrIc{-g7ocRGsg=$kuUf2L@t zsHm>l*zAFj(Nkz#C;y{m5rPY$tzZZJ{{6eCsOUl>zr__uQLid0vi}%VNcykc-gV#@ z)7i5}0Zw`O`7Z*PcLMnPMG}QBSEukG1H-*nuSCH^-V6;LMTfuc>?{R(4uqW(B~C|I ztQ+>=!NF!cs9cR}nQdGdI5tGEOe} zI4UYCnB&aJlVkuY(z?3z#MJ}DfedhzzBdH>Iwt9N7i$1WRR8uZ>EFM9vviA@ZTs@B z{!F)95dNw*TJ5(=#bZ=`0b~$>7-GW?;t~oB_03pfp+L+NG7o+PkE*GqB?!WpxZ6(> zU*C1+hjkz}^=o%Y=6{usC6#*ko{ac9vIAxS5>HIP>clK`0X`m8PPj zqk~?*zHmDvgc(QH($X?ZGn}2Z;I@)FWU&=5yy5QIyfrw&QD0ce5)zb}Pf4!L+L?+Y03YXCZK-?8K5 zryr;sfN65KcJAw0^L-Ycz?qM(#`d!(AwE6?s!N>Q@3r-HiU7GYS~;wTE&74!>tFVx zP$h^uKOYeP3LL1wOVJ8k$p;=(%JYSpM=gg!V7w<+=d$HX4|8)?T*m2sT2LRY-^rOc|Z<;th&2w-*L(6rn*TG{Wm~Z4-yhie=dV2=l4lM z#~hWF=wlBc$S`HzYJ#-K3ptIsS1nmGVB@rx!O)0IXJ;pL7g>Azys@d};D?)4RaLTe`mr>i`KK;FA(YHLd-g!FyNJ{Fb#}I( z*iI7X1W?cCy%`4t**xjPX9-@{or|rU{sw-AUdB3fJf`{`943%>)T%FrzQ3W50a4EyHy$n)e+?!2t-IKv||m(L8wK&>q1 zxAaqu5L|H%MBRQKgrt7*#eUra+@{IW&rV!SVT(rkHZi+(s8RcHJ(sZNL;i#e!c)6) zu_R-YXeq7R{C>ZqOW-vLYMJQUIcc$T>|7&(%E2t68^aldqTAXJf}?Qhmwg5Oiy5Sl ztTSK6Qz!;6A|)eZKU#my&hCC+zW(?7Jhi9iGq~z>$(X$Md;jp4tau-JoM)vSdSHz5 zkYZrOqBxZCT6cEiTE8ec357uJG=)zhI#Rz05ozOh<8e{vZ_>5N$8)sv@1d?<`}+DO z$H{v%{#)RDzt`vLz5|g3*&K7NsHkA~Q*CK!Ym2>lz67^p4IL(`g)0xGjv$a=!5X65 z677c|rq>O{AhB z!SOG$>1L~_(;&HKWp#%THgGA5r)p^YJ2Epe2rUfkBcGgM^7<>igQzQz9Q9?fMy#m~{z-wk}`<(ZsI zMMJQw*^fmL^_piQG*<{E977!tTh93`y+-*mN=x&D_=`gm{8+?keEq7hkdO=n>yMCf zo9*{h2>NryK6-SI(68hokI6uOZ`g#;_jP*uKBS_iO-60$&Kfl^aq!~%J9h17;^0sm z7j<=Ar23~Xqsd_HaO@rWI!MT2=yBSwW{tq22x5%u{D#FRG(aG&d0feJJa&GK+dz7^ zAe&Jf)4zK48TN-QdlghVe!4210t2;*Brr#_k58rg%bik&q(vTdkFrC#0pEH9hC?A# zxrjX|GdEmQ4Ee_I(3cCUJncZ`&jSB#d2Or|w@kFyfymN>L310|COI{T7XqD{Ha$4e zU+XUs6cj{$mK$~tzKPH;im%mguL#|5c#w|n4l1Q7us1nAvi-wXKnq^qzxIhXPbjF+ zfY72%P|b+{sk$o|jlvYlfnuH0$>61lOL0&7|7|QDKs^jXjm#1JqOYk*EgN|(92zWy z-(9P0V(943GyQx^b0f!bbsB3&HAT^6WZ){zV_(pTw*14>)~#?kxwbrK{4@KCD;XIX zaTWZ4vt}K9^>aKPp9x?p7RigFX69V#}D{r4p85Jhdbw^<Z6I`#MS98| zW>nI*34CVP0bGMWaexGhdd$#6dS;{N%4P@9)qP>PZ|7e68z8pS^p7Ktoohfhn14rS 
zJhm>8r~{dgAfs(h&K-fku5Me|x3c18@ap=u^BeQG8gAPv97Mx}1VQlb)mje|6O)y# z^|@l}PUhq)$EIm?T0)-27LS`-UjUX^m4{ka7bi-^Z+*rt{RqiD&!B>TgxK}Z zgv(!CFp*paYD*AvWs?nO2*xv@SrH@76R0a+;FjOY>T0%MZ3Fr4-50hg(m=9x$o;g2Jd&K ze2_um5HPv^R+iQru&}iJhAkE30N8b9(Crh1Z2r`|MUM>;Q&0$-Sdt%Hte37 z&mhjjS0a?abf=y87-9SV8@RVt2VD_C!P{`{h8AUHWK2LQU%5}mM|a3ux5jf3^yg{R zMRc(D@7YG8oL7ZD>g(#h?(5SYy*mz=U;Ue2uxv4ZXL6Ha<>iVOU|vdIdx8B<^KZf|OO4P01see`1TeI3JMrxD4vWC@*7&lG^=p`G1msHQ*(!H(B_ zU>@N6g1|#|mpy*`IJf4Rx=)MhQ?I_S1~ooH_+IYv9TA`_4N(U>o}7CFU`l!Ps6p1^ z&Rx6AAnyh(_XAsj8@U8p!5F+*ShuCOkT_W5E25pJdqTb)=Y~Bj91q$A96C714s_+e zMZRHfV)AT$U<`!HSpMa#rI_hj~VS!^t=sfpt)k`G1f#Go-dYXL);zuy7{o{nKNgCQ#uLQ1lXdO za!H>ro9D^-KyEL4Sh@>M5|eM;;fq+vKClsY{*bVQ-k*FFs74`x;a_i7?YgM}rj8YoO^%;TsGV~etUP|>Nwx0^Pyfy$v z^Necr&Mdzr=mUg?A6xMZ^z={{WoMlNKq!Ht$zZspcl??8ETC{l&)a40Dz%}Rb?%PW zs1Ljj1A@W7q(QS>teEbFhrc7dm@fA#Msp)|jS#9&uI3rwuyBOapJM*=aX@f<+qP{a zE)4qc4LBE<6B6i;p7lBi9T)N*;h_))38D=fbAkUZSzDhDx^>GLnBHx6=&F7%8#9MC z0vEA1M+;zR>va&`w-i4sNFI252X{gojcs8dbU02K6B0~x&$Q=u4zGoxQ(s`hMoA(oBx4jI4 zy8-8eU?`vrUA%P3d8&sUcSVt%oh^F(>y5EL9FzP40v47mS1b)bWM!S^gkF20UBwDV zfyf-dc%p+!3y1t}SlC-QUZjobX=&F$f%rv2aKZK1=ABq%U0vM^oU|+uc&yr!>LH4g zrM?4!A_v5SYA>5%lb&+_wqu9dH{?MuUU?vfUxr_1l4Zr0tp3xJXIC6U##i8XFr!nvi0pIsst-_3u4I0Mg_vmm`WmfoH7g zx#R%|$J>+dtKaiX<#Xcnrm$MUuH3)|k#q`co=YL25 z5MDZj`kWrZ!TkuI4_X@)qCHu9r8(CWM{qGGL`CTdX%gG+Wb-7{2*Q{FEYkMQe~nWp zZ+VHZun2Ri*59|;HwB^}D#0}%tDuohU=LIW23Q+0iW3?k=xoa)fswe_J!K-cJ-fl9 z?>v0?a7-W}IXN7wxU#-Z2I>pe&O@8BSuo=F3?P%x+qFf97e+DZZ7ERC8+ zmiiC`{2-D7Kvkpf!;if3TP=}|*_08&gKM7Ck6}~cHE%iy7e%3VE;Tik*t?+8TG#zY zCnip<&g6nwM2?kKyZpHKb*%~>3O|d}qWZW9A087-1R=C`(kS`cIAp{l35qB|;EYK1 zY^{7hAk|<-_&mU|HI=JYc0xkaU=ac+4n zDnm4u=L}`oR35ReaPNVIhHZ3DP%FTQ2(%1WS1?URCna%S4R{JyFw3~EcHp7rX%6pX zh4jK->4m{0ABL7EROX=)(h`n#<`JHpq3OtK@Ls){X@-gDS*EV1QafI~dbPU@osR&S z{dIwL=7W$@d)5*=g}qsB^d7cCp~SNdil8P77jN$X<|MKP&F-?ZC8`=3%Iq$N_j7Or z_(y7>37sM|9^$w`SzriVMyRf3$XHxl+!R>Zx;KXg8MBikHvwT@18@^dS`pCE($avC z0@RlXXxL-E1*k5-k6Pp6g{JTB5xA1@S#YYGoxFiVK$;0Vn)u{oXN5zbE+QJ&c;Ng7 
zQ}<{cwg|!x$AKAlfeo5lSP;=Js8#iVuB?oQ@Z^iko2hWH6yo`>^mUy7$63*yrXPIc=Ok;YH7XarRK|!RGFwkujFcTRnR>Fse^aEQ<|CbeFWou7<4Cjqz zt}k%?mk7=bpyn`&v$^b3$US>eW@99}dU|_lh@uOKPB5V6C2xpBD<~;idJ?n9tdO{H z=U@w+S$u?Z-|*psisL2g8lOKO@sPJ4@q}+TIyH3<$~h5X35t?NKBWPVY)trqm#?o< zU;18haw@^+`29o`+l!1b;XZaf-+q{SFx6yn0el`ax>7Fggy_vym7W<)6)K$|U=^W! zXHS$oDH0qoi?<=}1__ZF{`I}H=`YF%0~=|JAcvN`&?Mg-b~ZMl2<9~%Wx?Xr`gw;j z2r2$&aPn*LMjjQT!j~K}co8biOq3WxJ?hIpLSRc#N4SqfMvH(XQ%}mdvE;Nz(NLzD z3s;kLhC9;GKbSrnR=8NeK)v8F5uFL96x7%iPTBJ0A6!V?qH8yqIvw!!uvJtnWsxFe zX`UX+P&v|;a^5(4LrFoQzS?{7>%>H6*WI?E_aBKM1AHI;K!8#iEO#t7&-t!8cSlFZ z)pqHpjzDx^gz4AxD_njQnY`JVIOmB1uW+f2Xf@Nu0fbdg$MYT#GH0p6^-u%Cdb*{y zamqXHqf-!80NRQPx>iv3F3-8)a6}QXAqg0;0wvpA{CGlQ;xqU}M8Hq---c_mr#0eU zL=qL6*BL{dum1uT{r&ui4V$Re7ZDLb&u_k)NCLt<-6K9rG*n14P33C60|N|jEiM)s z)hsypm6w&t;=exTT@~1SRP-P$TWNsU8%h1=+^1q&>TXrkr6SAL0Nv6&BU2Df3`G&l zMUT_q)s-TTB|;4bLhTufvRpd+pu0I6&m|alPr3@hUWuBGDs!wVT!U2_9VZ`)ykzszdWTo;YDC%C666q%ZG8`4z<0G zii;yaDf-QuzCKz!UKz;$tSMqz;?J`6f##pGFq4Rii9JKv3UXMOU8hi#&Q@HjJzvg} zd*{v_y^bAY)pK1+v{?*;kDlA_qGZbKb{sbMgdYJDi)C?ZlH8$`=ZA_%Bhnt&&%uft zj}j9Jfi>vr0Fsh-pnta*_FZ^9Q0|1l-hspj4nl?sBBQ`+5+Py&I00YEet0BAc1w_) zOz=vlRNfKd;@wO-hHyi~9 zB%~hr7FyykVZ%OCOO;*ucAw|^L_1@m#r|FhR*8wBbG7TL4}J>s^EaT7hT$_P{t5{S z#wOJvRs_nNVJ=&UluX-WkpsK-AH8Bj`(ns<9JgQ!;KeV-gNPZle5v#aY3bn1(%A?g z_gxRVvv@8KBZjfFlt|XTd9wp9-{{gKc=Bq)orM)WUmkGGjT`$63 z^tfyC3=u^|MULxV(xGbZW)KM3&v*|yBTavnEu>Wl2t;}!+5t*Klti*#0MZr3$g#x9 z9R>`(lpeykW5*6co(_7yIus&j$bbFog^JMG>ZIgkKG$*O`Z8d)i7HiZNNYqQyS@y8 z7CKRZ3-I864?5Mw^~-wMps76w>uQ5fs|qP-3R~d>)m!doo3dMb`-fR_f$GAz1TVV?Wlb~Typaje#q}4(FT3_ zlt^dt!&|p@5LlvLyWd^)%a<>NToBwHxJ zlBHTM&wNh3&aql8&`N*!aP~{Tgs3RWO1H^qr?b@M(1V*)+$OYYeB2X1ySbTgUGpA? 
zmV;~6L8s^}G(KNn7L+m3YpERX0kdZXX)ZDa6DBJ?1MkqOjj>-|J4hg2YKdxuyItfF zan4i)f}`5dhW08yO0_3g4eh(_k&x2p$ahG9KU!!AbsUFF4lYG!U0@)mQ8hgsx4{*g z3U2q69}kVqEz@lW%ktX-7A%{tct#sBE^m^p!l^@E?f=qK4|$A_p@3;mSCA`J-nxsP z)X-rFm-R}itrp_P9ZB3#j>av9IWoyTj8=r>^Kuds39)y5RRIejnX=obv?8&Jc?IG~ z9}igB26N~FsvXhnX9mkJJL(%tA@$6eAeD!mv@)5QLY|Zin7tRxhPI9MFLW2bhgFr= zM}yj^C~>n5h_%Pg#=-&(hwOBw)X60Ie4vr#o>bJ=Cf3SyA;%$lM9Qe_?}vqP_Do1D z`*Yd6xOZ@pCEuUiRqphU1t!hU3p{qTFxC?xZ)O6ryvvSB#c2ej}2?v>5@UJ<3X;;WHmMlS(j{ZYir#pJ#v?e zV}W4jpdsa?d-j5=zJAx|fdNHJOLlpH67y&#^bCzx|9~|~_dQ2SgI>ZTPkn;GrlB?l z&6h7+AbB5ln5Db_$M^3n0(yFsNSSRv@@f&r@PnrUp{1pzU8()V`Gl8KUq(U4&BV{I zKDij<2a}4wxsr&Wp;Ia{o=1!+G2t`RY(;k>2Uzs|UZs48`O^>V+#p|MssC;!u?Ei8 z$Vm|(M1U(frx1lbzY_xV_T@7eXc)S^0^$?*NVOIqH)y{VjxHhtlP8 zi1i+k4(IKbp4E(B7;{+aFnnC~^17?5hR@P{|s%;dH$ubqhHf=f!cA`mzm6Ls01m zWjStGD_CTX#Z4l5Q(9mqfd%5cS|UBU{te?Q8wj$kOyw4S%-0to0y`g@RSv;zy$dm- z#mxZL#XA@Y+%E!$Nwv8hhL)9#Z~ZtWD5&XZKQ~x@K)=kMh{ckDy(Ze}A|=d;1$^sv zFZz=dYuzWk%1e=Tb#>Z(7Ja0X9>-KRhUrTE6&^uhRknJG#f`%u0n7*JI5To`Y(Y_8 zo+vFuNDj$9MzE_=bK&|Rgq&4*S#GAMp9KBQbqnf!`&K$5e;fdUe0ik8HS?=83~^q> z!_(W-RbXd5{OXKK7Wbq$jW9Vr@#PFR5-MNPzRk3CU%R!jD(pk1GGx=l4af=HMkhAy}0!R85TU~}uC zk^U=y8AYL@iC!>TBG4cGu~i~HoacANQ+aoOcJlR`)=zIQ{vLEoYk zbg2#4)EliF=&}p-z-&XhF5#!P`BV?Mq1{#_i(}^J_vo8lq@<*~W&3_`=$CQUu8to` z@6hW{;e({(x;x`Y{SGt@4)v{wdlHZ3 zM}%8(Sy{aDj^l?8-D+xT@_U%jc@m^bL)_aMhAAAVNabCar6n9P$e|KSdnGg?q z#hD!%ZtoF=NN-O42UYhvG=Ri(qkGQ*Y^=M|_7GeiBQ2T>jM@+S`*m?j$Ow+vm9C^^ z#KPD#X?~qJnZF>;-GvQFoK!^1D?V-EAV*hu*g1b$Rw{IUoPr^roU?-&7=@|*x3x|s z@T`wr%X~ zFU+6uLik{g%iT|)3*-)o5F{4v%$HR@pbS_egcr(d*+w6B{JiAIbR4SBp4vG$MtKWP zA^855c&;;(O`b4SF)PvP_fJk%RE@%fxziW@1$mfyNIYZ0+YL|-7v`X@w4ReJr%T9D*w4&t$mTR<;qJK> zrSpVCL$chaTQ+?Wuxvlr2o(dcAxg6>BQ!(CIbt@xP1oI1DWb}_Z|vU zFfMER-X-hY>a-*BP$M#3Zpom%Mrfe;^9HdTBXIf znK$KBi!2)S4sPATC0IWkfpWx9$Fpb8PgMklqlx4*}(FY@|rnmCblU$aISKT643 z)w^12dy8Y_6!aZLN-&R-Z9WLnJ!a%-Edk#S4g^roP9C1Zi;OAd-$@>H+Y~?b4MAhw zBdce}>^uSBjiU3P^yUC(TQ*ZQS##|P*IlHyh&cy^7`Fcewt!7>Vu*=gT%rqq0f`9l 
zfN+Qw{AF}?6WSIIlko`(zJ-}V&0U}Nv(`8{wd%*0<;MvLS;zna`4qt=j?4V<=_QU5 z#cUnp(WB&Ho5-Hj4-V?{v)DL_RVK{DM+m6wJ$=S@J^Cym>hqdxuU(&&Xf6Z6CgRYS z5L39PStgE&8YC6S3@hF0f#{uof0Kc(xeNLx=XFhA&~GB=2eNKbA4EpJjIoo0XSFT0 zPkUp)ij~pzJR*1q3%4rw^7wP>>knQmeP=TOZ;x!`540hU4=%gL+V)$3ZNTwqE<*?{ zJ?6_mwH4d*ARDgFza}R`iF6n*??LSK8;};qoV+t*wO-cRNK2EF6qWC#oTTxtF{KdT zvI-cB%T#6-eOS_cM(KEr$Y(C6wJGfKMEA?E19DxQTrTvRox=bjh_K>0Id|4d0^4q^ zIO8Gj#C%fxY!@GNVo7kRz)y^DLq2xlP)pzd56`!oq zLelLXo~a1{1V_yrD6&qr)C~R=N<2J1Ig--gw=+jGdpn|-QqfzmA{9?oadB~J zJt{Nw>8Z*Ysp|SfbFbEuhP}tg|Ry zr*sQA5VyqH?rqAuibqTcn1u1gG$FYF3D9d{Y!9)sF>OK-Z`Ty}RjUWhzv^y+r_XCKDg=c8&xxpJWECpvgbu+pUQhNpBjHsxMY zRt|frwvPnn>$8Q5vCzm!ot(^!oP8l(1j{BScU)3ZIHF@>+?hY;Pl;D`v0SEhCw<_U zR8mxQTJw#vLQ+UiPY*Q~r0B(qTbT4EQtL#TNifcfTkE^lntN$+F}voia5v^>S-(|F z3ZLaPKQchE3oE?1k4AF^Fo+0M0kQWbuDfWBe0mOuACuETb7f0Ud5|x=wRYtj z1u>CR#D&%k%Z-GF%>l z!$oNAK^C{moJI;3)TM(>933Mdr=;R!5Mnw%KmYZu4Q~S0v315`ngBB!yJf>@utgj)p4*Va+mW#TeE;UPf3Q5sXjL(yGG%Xf+BS)unJSdm zri-;LNlc^U7s6YFgC43PN;ts~ARxs41Q#G-f{2gfa;L3}=|cNn{LIpW$)gXFlexP0 zzk=XU+M_nS`E02omhnfP!_=$D7|$e*Pi+5R;r#-ZPq;~hHH=*O57lzMfDNyTMrdw_ zV2T_>k%MoNfk_KZ)26B{kgRMyC)!0-!vv)`n=@Jt*1OD)EdMaPmKz;QyvxwzO>CLFLaxmZ+L5<7NKzmh3-sEful+Gz_tr(0^0qb;`C!NDUK46DatpG@9OPz%U7*Dz70D3pXE{2hM|A)oEn zjietU0jodg!cWcOOb0`$(&zUPgkJ?M@AX|umL8ucLG(wC5ao*~wM4iB1M(N=mw~G2 za5dU%RWMd$`%5hD>rJ1YtNjOIIrx!`3Z=I|F133FIpj8kah}4b$ik=(!KlDa5pUgl zq1}lXK8J^GG&Aj?h~(?S7tY0eb3X|D#X8_*yb^BmmOFWh1;!Sqx7sk5@qa!U-xGGK)(bPr92WNK{@7 zrjXLT?$RKqiF5OYZS7+9`J=G+v~$k=3`c60FL*lxTc?AmX%Mzd1J)h}&Yjd$mc0CY z^j(gakCWi+08yEp;!{+UrDNnrj(DG!lh$&uk5kbwzjt* zE5a#EgW5gx%4;fMT?H$px)~-uq%xfFVg~wC$`uLp`E6R`u#t*11qEba&!_BAFKJ=7LG{_m|Iarv2T&J?FtE zskAJGVIV2^HW~dfD>)D7tS&z}tEOL~59gniHSM)4?K@<02o0$DJMM_(^y%G*2&hea z3+6*TBakLUu~Yp4pizk(HEe@?f?tGXrPEaOpzGfN^&#{U`{C-tgvAc*dj;3BH1qio zh;)pvva<4C*yW^5R;=6bij=g=Vd3FwcnQEzU7&HvudU5>M8Py#Pd~!gz0_wCqO`}! 
zcAq?XvYC~Ig89p&!tl$h3uW-DWGrjCqOB3k2x`Q@#cRx@#Xi~6>dn?sl9+t*HP}#J z`SCQ#^!CY)gi0eF!vvSQG!30LrQ+{aLF}|LC&&80 z1`%k@{)&WDF>+HF$5zN*gwmL0P#&$gCX{1XCA_h*@zTW+BSSjEo$Id(L@Xd=UUV zw?^`j;t*2%M~P^%n>mrbK{kPil$1V^BTy1?MB2pbKuOvA%sqq2X}*q)-3ISzF00n~ zK-UY}Yo0Oh*kAN)prs=hsf+|+hey}4Z-F^}gC6#?EB(G~`_+TqjM6=$AhQW)y-NmK zdt)*D_8Lmh$EaV(lG{FZK)^tETu<$b^EiZ0NZPw4k3&j$XmhKR47Wvxz4KNCm&g|=n^ zzeUSe>|_iOG@=H-{X}>=_)KPNjrb)& zM1+vAcY4)ptEW9x-X^;3x&@&69rtc~J~HDf;)gJ`i&TdHvhq%GS?nq&!AS#D@pD{E^xn(y)G2m?PZ zvN;!-9sc$Hfq|AK#{+xz+>D7qwxp{JtaSHqE==YY!y`&j#C()nNbH-gE-EN?7ZJra zAH;fS>6sz4d}(%VX)O=$G(hwx_U&U`PVOE-4|ru%XSF?kNvJEY3B^9~P(KVz5&ehC zwRydOYY&PsOrueU$R}h?p%zF4gg8OFetP4%-mcg z!q*%}!_;+&ql43Ykdbj8(*(o2|Mz7a)84Tsoid7|d3|+}(XyEBK<3@}tNgzppqFpx zA(Zwnii@S7n%~!S41gm*U^>6eKcAukzwBv-lI@?=_4ND3EKN)I}Rd|x}J|m&UEga zVS3`Jcc#T{o5nUZkt|)i^o|p+)5>vnak)q+HG_}@?|_6-XAj|{z7f)>1DvoR@OH%? zV>R_x-eU_}Fr?9Zd95@eIIbcetN9-T$YGm6S}<=x3_OA?Qpv2uz=!K2Xj8V^1~fSG zb2M%?7LJbGF>cYvu3m+DMNKHRMft4xgE_e=;eag^$DqEouNYvWMh4iW45c(jaCm!x$yBi+&*j5zSgxsxo|9dvf`y zC^0}bE`nfp41a4xHrRwAKZ^jCt1#Z8bD)g;}Y9V*Cw zko~Z;?Rk|!AiZ&McWgG7%nt2$5CSYL_i4T~&lXq4(Ar172|^k(&WtDR8j%CpkE=>? 
z8%$seyj27HB@$wYkW1}zBZ^+U!Q$W!i2WSphk>zlECp+pYD6g~BTV73HMiI|1e^c9!ZDk7o@VT z=>tIs4QE}YCQ1_!JJ}#ozQhzYL;!>vAQ=-4i$4&UffDcKIvI)KUqXpaZ&k&l{9&&2 zW*e#dg8)gC2M<17+Cn<-;KqI=G?2Zbby?&4IrMyUX9N@{lXu{+cU@lK@I>2e=v!I z?)u;?zWW^*xu8oyP}8)*%;>1vUZE%p{Ay-@RXDg<7ZfSH?% zlg?r8=~tnHT48WrUY_{?Rr=gbiV|ee>%XH~m{3d* zaXLmoW=syKTaI8goHYkU6gSQs(C>pLn#WcnK4{`wAy&|fmfs;?hLP;QM7<6tO>QZvSoF!qiB#(Qs5sEkJ!HO2nV0`InQ#;vrwGnE zyY})P;QDa@Spa2%KNW6bZ(5Kx1KjZ$)@<%HQ^U3Af z!Nx|p*)4cjmvAD;1_5Msen#+K*m5vKr>*$<8xNy*SW1EM)5WgkSDv#&^|;U6o?_$@ z;uAj?yc}tyjPuaD>`|y+zMn`Q;#5E?*;lN~xqA5+CYobmxnS6v2phL_lT%T~(Ly4R zJIr?&rVesE29w!7#4Ex&B-fS4Lkgj1^8Y`aeFt36>-+vEgp+kpA`&UmB%y6ZKFMl` zG$^61ElrWgNFNQQK~gFj(q2d^qqO(lLsO-a{@0UL=leb9{J#HQ$C1Woyx-4rKlgoI z*L~gDrixI}vZ8nFL)=S(E+AoNf#VHF1>7nae9CpOv>le;B|T4w`P?^k5U|i1(<>j zKxiDVb;1`4+HeKI&?F8Q9X@_&;9&aVd7=VH=bTO^XcVo81=#7=&5+#-DHiQCvan#b zwnMEr6L(qQ4a@onP_hm)DF`$;qFB(~J-pHaB)Zr9KwzBt*Ye%~IV9dDtGuQ)-YCZz z7h36B=8O({O&nUSHEDJi3%XG1sC+!@g#ZJDVu`SC19AZ^(Q`3|HEKbJlUr#y{UZ&iG0uT*oPkWZOtzV2ogQR9n$30P?yc3G_Sb~$))iMR;8N_`wYjrEs z_}WAyjFO9gYshCbN6dvBln5+BDiO{Y-fr5uRr*tTjJu>-4b`PG5#66MHAWdb(COiV zNUzm(4FJ9)%H-?@)S_Mk=fup~6&g+AbW09IriQFtYyoJHG)6ZXLBdKr&?F_%>`wZ0 zaScfi9x;)8)IClV*kpB%9;PTSYkK00lW_?u)<5;EdTQwi421p3t@s8JY$M1-ulZfX zA`?BYxbd&7v+9ct*?IwAUx^B_6NC>064!k9# z2K66GB$?aW1E$8>|`H^x-p^zz5K4S?ft5%oRBTY5P^(-CG^H)!NZE)`u zl?C}=0tK4LKW@z^vu2aGZ(D9fp?1}b~;i3HgwnBzuAr*#Rc0B$S;;5P&(t1?v0g3-xW3q zF}s57$pHdNEX4^gE+~P1dvRqyx%Wq-${~T{v9giHH@V?Fb_s7`g{bLl{5a{TRDOPT zb;4|yt5=Dv9&OBaS(OyD7Cn7owtqR?KS*07`Uz?JI5)_h%>xflXi5Mg3qK#o&C1%2 zo-t(iSMW9oLu*`60M+kKiGy0mkse@)mPVg(IaPst{ZF`<;X*$|0YM9t!7C)rg#-)` zbdE&D4jTixwzW@Dm_~w_=U9o1iyRKH5L^PxR#09fDIo#mT+-u%iXC{(7XgIVgEsKc zBi?VM9S9lX0^}=F==XF5gNxWz50;eXx}I$Dj}I=L@D5Ebuy2bdHyvtQI3?cEL*M3d zA}%LJND%hX?%u?8zz;;iYR>EMHqukwcK&eXkA{RD4pz_^qJ7~a5G|l>^fflf5ui0b zZls_{%xg%yCODjZEXIo$Eh5}F0ZDMyZijs<%rWheYbZxbS zxH$2@B8c?{hc-lfVh%?-l}Sa0Lzj5nacn^&AAniXhL5f^CekK;;gok+7!U5eBck}R zXP-dN6Fo1^VG}vSt 
zp2IC6a2^h&EK|9aTl9{mcDxxvXE{n2+G{I(7cj;}^xjW{L>OG-8hfA1xi~nC&^<=2 zmj+RTduJRxR-^^5A5tXG=y=73b%tFj4Iqz@-T)wHi|P_nLvVq_t1w(W?G| zEP4_(c#p@P(zBk!+JjAKB^p=#o7p8hTp;#qJeQ3xU&h&@)3K z34ueas#tMP9J=*!ETN(cx~ns4N7^FJ0yHG&(~05$>~EPpxx_i=^bGaG(vQKMeVAa! zI|gwvTJb_$-+7$I?SlCC1ggZ{-0MhWb79LwV>gtc)7aS90E)9=3tYT(>C%Z~HwXs{ zkPuw;D;&S4XbrgE1z9G145(%Z;k0y{ZF23%SG;Kgo4{Uel5rbN_a$&Fa!cVI!T6lH z5w#5~@iNqxWTJ=|B@zcxlQQ% zSz5aLh|`ckl_+6*&V3*SJOOJ_TeKF2EC!J^2mN*6s{?_`)T^MBraScpmu{K9)kS}Xn{e}a196SLmb@1h9=MkghVhg0Xg@j=j2O!Mj$)TR$VQ5 zCM9}6cS25qf6m^X!~BKDxsP}uP(%t08a+FSEh^rjD>zx8_8U>#Uw8*!N1VW!#4Dqq z;T!NKV%=x&b7clPD8Xtqiv`5v>VH9CWje4X{01^7(w{&yJlG)|H<^(vz?M--xJVnz z+U=Vp8#XW)lqmEDWcO3i7EHQKba#2(z54=Bg_ej6^M`kIQ6iviF8PF1IfCsuSkx^A zy9LQUC^5l-=g*&$TxcV-pYAzlPM^MlC7_A@MAIkTcEJ29$6((4;7+yy1#KAf!Gw~A zr0iJma3rk-4&!Nc1JUeAxXSv1FyU^lB{ru)_o4sd1db1fw!8i2dw5&>E5nooyh3th z2uIw2jFQ%U3-OIm%w4<-aL)xcW*e+JQ+yM!y(T2hw3#)IjGzB^$mq8~g@$E}k zbC1X|khJXNJABv&V#i20A`lkc;YuD}5-`+b8?^32m=EY2q8*>^Ma&R(3d+B3E&?ow z)cTJ-I%u5L08~WmP5_UcKws^-wi#IJkP3>|n7GY^E8{%U*hajjotzqQ%FVdlKZ@M= z(K*vIsDd_H_BC3+8|>+M0XzaN_;lH^C};57EAT#?93AN)?QS|yA`C#9!ey3i0NE|> za1g=2HRb^2#b-v0 z0fHfapuaTFc^Ay1-fwn8osDF_F?Os`3vv-Guf}`UIYoeSh~Et0i|D}jSTQdXZsLxj zX8=}N>}LEX=<5Kox&ig!4zV4`JxBpax&7P_K3E|_@AmPVsmxJ?LmgyPugAX)ZKGN_ z9_95gb$+YQrug|DX%PWo7l25{UE+ULX3104JmkQnixfvCY4+jBdpZ|43?~)(KutdE zXV15SsTy(017XFrtcimUI3o5DTq6K-(H4sDrkYL{S>sv)MksoV0pu~2ukY^Vc!l1< zUKG_~Liy*Q96Oq1&IkLA#%~h1N)Sp7S4#ob=TYHN@P*GJIsDxO)YX~|fp#D?c!mT+ zQePaQuHL#QjC8a=={?S}jDP2Ze^17%vw^jOQ8Z zc9zTqZAh3Z0vNbwUD>&+CFl!aO@TfORikByF&@0f*e&!kaDh2EII{FBpTJrQ?VXpQ zl-7Ret)m3#Jc8>iz(enbqW#b~jQs%5JapcpjgAR9JO4R}VYKShhDD^(mh&K- zU~!0j)?he;&8JryNry9fiQYk^cBLEO#3bF4_hM7Ou0Y*KgOvdJfz>-xng|_lV;|>7`W-xnl z=g#7G6n3?n&)`_W2jkjApsX9w9svd~L8@i}?vp020%_^i)6;A0x>FOgih|R2A@D>(!%N-~blH zBM8^ob94yN79OrG_gZ7o#a|lhp@gi1JU(=CZ^x_*Mm+*LUOS{l6q)fAB42>;|47%E zn!)Jf9k;Royd$Qp zsC485-z>p{l9K^d^tJWaVBtP-}{=nb)Pds;(Kq2uO z84a5WHP6`^2ea#;m?YMk$Le-D$)^$WBV)Gu^~kkf{@#*`x`(KEGTKx8S`M*EXN3WG}@Chkg%87IZ}rX9lF?Y 
zz#FR>u=rBpbE~|CwB=e`q+XEH0ZbI>)dc$U!-4R&x3_ZroweT1RGZWK`Zv*-M^IRr zKEMxBXKU5(@Li)}QnD&Xj6Dw|Fs@8tR`+q7!Fixo9HRx-%VAsuS>yoR;*Faah!_s% z>`UGa!$u}1yHNM!VHfzu+tM3%Spo68f_$* z1ZNQJuwASQ)`#eS+XY17dWy+YkPB!8L6M&b{R)$(u*|s)$7-RFW6Mr*=^i>d~Og6 zJ17)!N+B_I0!;yhG->|@HXHgVF6{H?5&(n?#Sb7i1qa5-{1|!`)Q(aB2AOV22|R%f z0G!oUL)L>HV<<1KAaw)wU5mmAy}n)xTQXJ;ZMKBe8qpSpk1%AkQoy!HhaK<2d4Gg+JR`ON5yb-sBtM6zBewuO>9xwrs(6M!VWx$N3%EV2zuRNNRz?b-QC)NM zObB0Ne0Y%cKn#0+sEZYF=4~2n+MGFa$mA3tJ1|aGzSe3=W+b4*UIPjPg^&X}aftse z0G#v>AD+gCNlUTZk2z(5*LNGrcvzU7fLE78+WI@4elSvkW{`qM6bBWvToFTtL>Oeu z7rRTL-)WI-Kc3?jj<*db_er_AIM9BLCCb>Z+yAL4eADBz-boIyjigZUj1OZ)@t$36 zZ~NuHuc)|pZPM%`*5H^^all)M@?ZS?{l2K|>}dgcD>l0XK$tC>Qw@ zq)2<{B0SiS52nRWIg`Kv(o<&7o0oz8rzmQUWC>=YbQH83KTa{gYY1FUKrvLVE08nn zm6d&OQG;@fSdqU$MMqzO^f3>;4EvAV`8{sw`HQ{B*RjCjoS3MfU?K-3N%c+|8miYC z0S0>mN{6oTNrcFW7nJ=NYjgFgOe7X7ai+P1P>FoQ5grNm;oU(TVxi|DLK9RTI9vnWsTe1R`_779ow0Oi4WIh?9;RbpZdfX`Qr%QicuZ0V>9U+#Z^b{Q2MjRIq zbauc;2Z_LRsE?K|8&ls*EOx++tr8Vwp{!bRosI&^pUl{KeoPg2gqu(<(~+|!y znVXo&(QI%9_in?HxqtvZh`UB8z%e#yDe#5efXPsP5q1Qx63UPOaB$?5M>;WGNvWhh zoUE@OA0b|Qd3}!+)3Bj6Z~($JL=eTe);9x`L$Fakf!B{ABF8os-u`{J$!9X58vD#! 
zFr(xZ_A!|^glyv_&;b-Az&Exa;c$-D`FWeZ|E;hS?E)z{F?<#~OQETr1G2LiBjul0 z;QQa)j_?sPH#2kmJ`4n8O2ke1NlO?n`|x_L`%f1`PeY-g0(ovG^Or|>gh$|43SVLG zqy2vBY_Y4(JAti0jIfJ$Ndxega#7Bo*Wvr$?s8a9SnX^SqVHRq{0D5bFX5;|wRfaa zarKlZVhuzvz#%~<*Ff;D30&gp_3H)2At5tBuS=lE2~)d9>%$q^E+Th;Fl~v&=Bq-^ zMh8ahC;f4Vn)2_&_j~WH!wzL4z5*Dl1i=8AM<4}O?WC@5*aw|IUHw9^ON^8*?rm7w z61ZT}AAzMEQ~v!<3SH(;ZZ4h&y9BZ}jnH<7&4iorUV8Gv7z=E$yCxzp zk=AT!X}KEVA2b1yOs=kW*yH{FaJ`jRp4A1YX&-By8Wl-AKtvCIQ(7#oo*7*v68}drcGWM4uuBIU`2YZ>^5bRNbN({3PJvO`|IJr`MX$-|H7F^rX4Fp`w*s*XjNSPXrgbPs0Q$iv9m`Katr&Var65vLN2cXxV}o zibs4cQZzEL2Z#-M7MS7#7bOCVk4Ivr?%l^T^5J)e@VFbN1LZ!BHW+VOrCu|joHI6l z1^#0rjur|zIskwM_a-iCTjY9moauATo0}rzOcoNRP1`1KUsW9CL%4ht3Nob$7aCD> zoF2#;W>5%zSfw~AIFnyweTZK`l!SH8M;w3DdgWvIP0~4o!x%;cLJGSkKR!-lCd3k0 zUQDN~U}c?SW{t2-M*(W16E<~n;lj4HYxI4yV7$S$&Gt3Y90oIML~si6jPosGXP1by zG#RT8pXU0eC?Z!lINxQ`=K0T!vnB~P`?Em|107zyem(V(CIo>-pUOAmZUZqmylXSZ z)R(FH0AUQ(FJ?qisdu;8rOIUT1N%arN3lmW5SpYTrSMkqDT`C6VuN!MixdkJJWb)O zR3ARmbnDCpaB~9O%Qq6D9sS*N zD8%^sLo@*$uMu;?z3DCJ~CV1h_p@r@n_B((O;jFuL(YV zTJ{OedWhr=KtXV1yn=|HbhSP=tXTF(g?hh zvtgLUH2GuVpqfWPKEPR>X14%VcZ9Bh$31FqGA5#O3om2_**`DxifKhxh;GxgQ|D%%h;3#-82{IQV)PgH+%;4b2bENeogf zXHLKQw-v;Zle8grCK@c4Q%-1VUP4$xy>oPV@S{a<{}TD6gN^E^%WeJim3#6j!F02@ zxbqvFB~V%&N;21nH|V5ydG+e!z2ta49{Z$rTuQ3EzKQ7%frzY$#k70R9(F;R(Sb+H z8+<;~{rE$#>HF(*e9hDg`55|~DE2^A^nGbpXH#lFvCG4>oeNg%W_ju=N2W^!66LS@rq z{_H>dNaG8kn=BfdrS^9cEO2E?E-P)sD1iu-;L1A~67-J8`=+h1c1< z`0?lE(chorYpQD(;bHWBvWkin`I@l4p)T3Or**iu7c>4~k?-T{3pw4P%A!1N%aPlE zhB%trGEKgZ&UOpaia3M~oAUAB2FJ1-kG2BL^t0x9fnM7tL1HEeL~S~S zjQ@j1C#KkS@|(SS)%4CDS`{&4MotVLJOYTx`UpoIYQm_8FvV8I&wyF z>XP3(OPixC1+CZ=dsG1Mucg4`dZ)W4ib5e2hCxhPz$09v4B|T>j38u>;uY!VO{D!1QxRy7Euk_TevY%_2mijCJYK*%Si5z^)BH z2@s~PwRJXy6sF2=IoQWGo;@^s<~#-rNBvV<0T*iL&ZR91iT`wVgxIk3SFV4L;b)JE zjd7CQMDa{SpgMe>S7o+YjTTOvhmV2;b3#6A`rMH`@%>Ngi6_54Ctu^2AI<6lX+XJs zmu%b0m~}}QX-A=unMizF^&L4T-Ufq9a)Q=n%$k2x2%-Pv@8o7sZts!+JG*1s+awz5xfmKh#`y10nkw?2=9 zQh_%-1wQrkJwklAwD%$6Lww;Z_Y42Bwve)|1SAE+QW$>EhWUbOU>~#ymvD%FKOBGn 
zK7I%ntH~dM>4kosv6#JzLYqPXnFQ`l@Ed5To$h8B4}mOT8NP`13PFN6Fu6h$%K6h5 zubrXs>hg<;AP(C#_6@iHEr^$wbk4LVN)X~Lk3|IP)EeD*Lr072`PSE+)qaugs zQYbJ<*#Tz^VxO{fS<|?OI_6D)uGO@h~YdfaQBN#JTY0nY@lbp?6)+OYMc zA|ZoI$x{KJZ0zcXu@0fI@zUrh#1tAQz)pbhja_KV(V+%ZfDl4!yASv^4qy?_7465s2XdQWZqcZ#D;;W{a^j%01;X}o)A~`0+2dcm&nJ+j4~owBg!f$ zG+raq-UdpI7@PnScnvg`xXwVjuqzC;AEB3_H4E90H;)31-BFCHglKJjSuCiFgwZ|H z(wf-+w9Gk6c>m-`q(h_K3{;!D0LoGtImSGBkI@VzP>ymB;}xcCG^`f)Dhb7*N6Mha zUhsu~E?)dF@;wxNtDwuI)S^_!`OdJYib675DE8%_sSRPp!*3(Q`mcA19K%7eozo6k>INY&)B^Xny09YgHgyfhtX> zU|l*A{Y59Su_1m@6c85nSV&wx&8oah(law>;q6SLzzdWOP@HF(0MLJ|dryue=mDG{ zQRKGnf)aZ(_Uv>@Yh}VF+wn0A$I-V9O-+tK%u$A~S+#2WBTeew9m0oK4i1J;>`RYE zYlL4j;7V1e=XNeE^by{=NfYTp#y_N!LA|b3!%>(J9vvNhU~MAZeUM5tyLD(9t!hEp zM6+AKBDD7;COjN#5$`_R3(Af0XpoD`!PE|~>r(8*n5iquQWd7gv{oD*psXTZ310MR zp>+Ur27!}fVHZ%(xLo&*)K>Qk=r~MX`bh4d3gIV5?^Gprp)jP`B_$?e0xt%_;~u+< z=NQiGQsGm6e*3GKg|DGD7(pIWFM{hTD|^3}w9IX4YND5yFR*^62htlFWV%N2TojIs zM;8RtYZ&@-FRR+Gp!|rSEhiRDL0kR*BTE{ z`JaNTquFh<8kFslgwd~OQX0Eu?mcuc0W4pVAgigV2`lus5`K>ZmkDvrxHQv|k)Vg% z0HIf}zG1>$H3??=4~Xe*=Mi}QFc5(n%0z-JA;!<3C_s{ejxG1HafExCotUsN*JgUD z>h;+$870)#%NjI+aTeU1-Y4z056Wz2*#v%pjlOZ=!~3s?KMt2QpMqPsZdt};K`-+R zeTokNIuCah0FHTbJnleOVgq=t?~iYG01`+q!M?Sj{T?AQ38<9raOB|W`kscFvi9+8 z@=O+VcLni;OrtsJAX;h}6t_%C&)n5;8EI(9wuK`qjJ%H5NrLjA+3{=&Jzi~V`Ho_b zFmFAi6r|E77#J54CaXoX76@cH7Hl{oz4WmYX5GGXYQemD?#H(xYp00F=Rmfc-Khyb z8>HORDR7%=THERC<5Sz&Id|0=mIao&Xp>c0hT--Udmy(7xi4YZi8Tl6PEnX??@9lH z%YTf=u*SM#^xX7oP z?u(^IHZ(K7YWzxq4mOhT`DdJN(;%i2;wL%sUhAo8623<@5aH0e2x4*;n-YE8+Q{ePkL& zU$-bcZ*dC~k_gWKBH1g#G$z8iZrvJA&fU${iC=&JfLQ#Vk%YFsU%!4QS-51NMS-Pe zon&x;#5zl(@Qz&SO37f_fKQ=gc9%6=U{9jPppe!BL^U$n4(x{%QUkaQ-ayu)2|YRJ zH1GGlmNinu1U|>B!k#ArbIx413TVCikmMimra=Y5Af5G-_#+fsd*umSty1a!aWj$< zA~Odjy(^ zOgrDZGsLwN%4b4zQz*m_77`z>FUZxzK#x!;5P9r@#~cH%3BoseZKqT26O~DSKTpqH z6`)7hG*Ylg?t#L)IY}URW3K;Rh04ub!^YAQEw@eteeG4+v9)Qi^WCRM(h4m9RLvNj zvRI8UOH9(CD}RF=k@I14uCLfEoNL*(psul=#rlq2NK4kD*h541{@YuQq<#+`)jeIY 
zm8UFDwsS|wYC%G>!D~NDCdbyc>1tPLKT96+_kWZr(|Ucr_h5dL=pYt;Z!63#rB_kmNXlIunrwX zme!yUUe@%7H;@M@yxN9bE;|ilZw|%k9eY{Bx?%+Y6CTSID_5$W@PZtM)Ju&<)!O+@ zMVk#--JXUe3tp3P29O9-QI&~G1Qi*iR7#aatMa-22^CnIbe+qvUIZC(PaHDFmoHYe zaDI2=;g3uPW-9Yd6>71B(Z>C}v*_tV6oQaC;6PPwMC!Mxw=sKhLgr$3AsnUfy8Ad$ zWjJDxqLACaR2j?eRPAPDV&WW*`>arn5P$)hu8qBjDRX#<3mnZZ4zoDBBypZ_nR-=M zS8u=^&a!pg3Dc%cyY1)aXXr(QtWSFNkqIuXJ1{B71I?<9$hHGc3=o&_Sn?X0n&2$r z90%ZNm)Zn5Wi5h}v5TTYGphSuEE(%}AFQe|0XVBF7^xYC4A4GB5_{-H!!sKG(Q zG$w7E5~&=UpYP$iujFt)_b7~MJ)3WSHI z5QCjgBUB>-Hu0@MMH30x2%2|f4ZNU=+yujU3I$t)4A!U|Bc9+eVX6h)g&~)(AXtus z@pi%}-q~_UBW6&L{E(qj#zUL7YYV;0ThTXd~Zh35h867t&Vs6fwISV$vU5Qzw6zCa)e zNPxL4OIBc(+;wz`tmicW3r%$1gscYvg#`SMw6VeE1RsV*0!@eVk%L3s%g00;?`gOg!zbTXALA^XuIL75q$T3?r<$Ab!n_!EW69F zqIiK^6GdGH7(l4f=4D&8bap;DCna*2x@fsB_)%b1n>PI$Ji%^sGCGHw0hR!eQE?WS zBK!N<$dHnfaze^@N8dhPW=DU2Hez)5XebU)0{C>|!?Bcwr8&tEq&V>jB^tjvNun@? zDl+Jxe|X_Ax>G<^UqV6-UE{7gTil*EXw751rQm6f?l>|84CDjD5cv1`C_M!%e2uj* z5MvVIsj$p#SNhnty$iHklu)St`gnX6p!GMn$3~)U7GNo)x#S}D^z?ju6QbA$)d3Nz zK^>KjErzvw@$%&v6p?Iw6pMwBK*Pd^G|mECROB84MO25cixd2%1Nn zy;0tTskC5(2XR}AH*H%)rqrVtCGP8JS|Z9f^n5)v_99#nmZc*S=~ZwS0IK{v@crOMJ=n?m9K#)ie9{}Pg7v^(6Bx@eF;7%2N zf1mRV%8VJpAZtyhBavP&D7Y@i*LPWq!i9f2=a!xAkisBiU^{2ao^dXQvy5VO_2Hhu zTig@KABNWj1pFKF2i8dcL13ydatr z04oo6Iq7tKmI};NOOE8};{H<`@c4JA{h#viAN+e&ywP+@56&QBu1551(A$!>T8P=m z`HYtEkZ1TjC_9w4Wa@J4YHtWlm3-!)nqXd)8kod6efkp=o zpd1?e*V_>O3a?;>i>itWbk_Of1Q#ohx_fo?l40U8<|vSK8Phjknr>vAN- zvHJgU2Zd}Vgl53G&*rvvYuNKS{s*L6_t_{Sz2nI8DYt&1EBF8Rl`c#qSDI?tEZZ~z zw=0If|5x%5Iaxb5?{@`BJNJS02nV3&^V>y2S^GR~X*e$5AiapfOdoXng~jNxbNjB9ojrr1SFMsZ!AM=a2u3PR^4}P#m|LAIH=Uk~ z_8ZdE34kyFtPP38kX||>pfDy@;{PX*zL77YvyL=HaBy_=r#YiRxZ@6}AhDnQdz0V~7FV&nb#G()=}!uADITL zV2tR#il9ikWT3p+jQL+7Z8}p~+Prbi?$q=!u7YtFyS29uP#<5Mv1Wgy+%%b(Y1{!v z_fjqv?Ov#0x|{cCNq0b4z$$0<#3c&ebgBy5XYj7#bfYZ%lRbrF#k9+m)sC0nce{Kw z-m==!P2M;sS-r&loyZwK%jT82AMRvVp7HBlb>-Lw>G*p-qGmEsSkeLk+@X^s)3@%$ zZnVRmiY{XW1q+_Z!l@x|ZhF;11-l0ij?n@aubWo6;@X*W-G=lE=&`f)n>FKhOOINk 
z$P;^UC#KsikqpMHN4EQ=Gh~?@rL&xWu=%rCl0YB;KgM`EVW!;%N9ooDL{95l@$+Zs z6Xx@*$~{T{{Q=hX8(B~&wR(7}!K5TQLv)g9jn7~{Bg0kpi$B%)`?o;3)HQF$IrhJO&kZ>Szg`Q)KGUeox9Xtea3Dtr(z3*k@FebJ z!0~nwWEj(xlV{;8aBKd8-@j11L*+>4t(!O9PcCJcKR;X{N^JG&m({Z}COu~4mY|KZ ze*eb#6`oaPs)|9{GPJqr=S(;#;vcRprt6wdcP`PM(zl}8<@;hG3t>n2&&7h$c=5r<_{@-4cd~qYBJe}^gw#kMpA}_#TlmydRnYw@ICmFNaC3#b6kc}j zw`-}+E(=UdIPCjN^@c51F4^OkZspqY^Hy+YnUSUaW7&#($vameW998FD-Y7Rj+}wk>qIOIVr=QsDE6VCB6}7zjQhIgI-$L5{t%mHBHzuObVp1S<{j1xct)(4$9ztCtN3OO^T9y5 z-rO?~Ni;1G=WmjZ=kM=P1e%^{3_)0-5);OaX%zQ~9WWp~{hCx&pqQd2%{PE$JVIsl z80Q38T7`Hy3_}>Ld)fvzW6IH|Z(?p1FI6W$@ERs$g`y-6g?^T)YZH`RP#xrqXt#R@A#PfrV-{19HyV`m%3T#%U1#*1A^HP#sv>xt)@CuY2t#3 zpgb#hYUd|eIK_+2c+mzrtCDqEt?7>tbpz^M`f5ZrvQ^Z!ypP59Q~HL?c3qTu0gONRu9gDxhn@zWmAQrTS7+lujjlG10Dcl zui|fH_)2?qN)p=Rp91~VuTCyY?aJRLP=wix7B7>c;`A$mlDZQ2VQ!}@$1sfgAXg;! z4gxfEgK#%BH_On-@8Y-)12VvTW5Aj*iPh=4_Bo-6_@}V4rRAJZPA&tHieVG5Y!qxyd4+Jex{{mx1PS|6ur84 z;z!R2%0UE)ysc-EJM$#8-djip^ zXK4#|89Z|&sl&Nv2v-~xl6w!u9NJP_-z+y&0IdY#jm#`M3j+|&!9Nsc@f{DM)@!gN zAIJAN4(zR~`f*^_eqC|o<=vmLk{sOY`V-FGao)JU;G2ALV(3@leb{HDElzGum1EU~ zj;HOg6d4;JCIcEtm~6`>B65tT02@Jx-q(vC4T_+VdSXxlMC1?TMgbu`hI;9g-Po%+ z0*pMS%<;W%mKR|zEj1rV@#i8Xb@#+^2r6EsiW@g($@%x9Zq=0d{BhS{2|x{jn1jCq z+qs!e)n%&1pX+n{E>yCDoeV@6puY95PU67Yx??kprYUfQZB^UB!HP>?u^F&2Q`M^ zB+g%Y2AA($(V2^oNsxF5vRNQMs3(;KM0A}9j_P5nB4CCi9W9Rwo|O5#P+TE-Gr=8k zC_S|jnh%YRgU~|3*SAz%-6(^JxwHQ*PsDS93cW497}9t29@dRVX{r!Hl@54?YZXed zx&0OHh3kI5Y~ky>x;sHwd$^5@cR&sx_N+r^-TL(*SydnDb2Iq$0442K!)!hE$eR8N zG8hf23xSv#q}hF!!!V-+EBE>9cn&fx+r2&d1;Ed@G4pEYFW(|$ZZX@wV6%a&U;5~e zWBA3e$PSx%W?hTs_sd3&A-qo*T`+rgBy-Z7q19C6>%WdCZU@W5(ZpN^FkW3l4-Go5ZrfO`>)IOUT)up{)PGZ|-T1dfa|Tg+#V+WB zpkjCG*+XRL7BWRzAbw<#3z_=LFnji)BS$|+k*NFN@d3VgxB7>rdfaY$6uirWYPfzgo>4=Xlkj0T(f7uS6)Y5ElRMDkHU)DnZ^9=SA}}uQL6)fD-8Wys0kqeo zrGcR1pn9rZ2>CWzGaia{#~C|b=fozYQ^-XfmbZm|DnVrw?7eTw*t-UgQl|xyzInURiW>gX7M!XN z(BGy;Bbmb3*Zy4Q%AoDv3l-!gfoK!(DD%*M|L*kCu3PsF~Nvm&_lA7fs$hU(KdpeM#6d}h*ZhkNb znNA*Tem^;OAqV;3BNAP>GmZm(wiTW{#w{jwv5up+QFPvyt2ly4QWGZY-;$}QhO?Y8 
z`gc91aCg`BDw5RU@z+u>paX9Jk(c?Hp?460$DxH#y1;hjWGk+);vdnkpqJYWOB=+M zgL2<|`jJg0oLIe0NT@Isw34=#mX>OdWhwUcV2jzljNluy=gxhen<7Ak8jP=GBcp** zz7V6MCSQD5a;VL;UvbSo`itx2mJ&HaWew~F_?B#vJ!TlDS{Ef!S{q9-9txl}dI=4>iYMPa?W z>svLN8tCJo$GUDK!pr;QwRS#E2z~WxfFbmw&&%|7cfI0Iw$U0D-&Txu|NEKG7_|yD z(@&(=NRD<_!^h?J$)$;yaJ3nN;WM4Z4?v2U!Y@r1!~m@?c)>Ufi_D4?l@uTQV_&qT zjgLieXhWWcOIp&cT@?A|fmsHHe=ECly1$N&t=p0TNS+ zkHEykved6gV$fk5T^Nh|tPJCku>rogN;!=r@CR9$>&72ez^n?kdhc$m-m)e3^?3$e z+!0r?Vh}gSPy0KTs9$J%H23o3VuZLnKZ(^{^QWH+h10a-<;(G;O#l0|#^hL(7_56S zh#2LTSrI=^6OQc*pYx;#kf-jY5wHh_25sfK5lJ;NDgbtO z5K&QI_vLOZr)!^Z%O4*%Oey*{5>ukaGjOaKV(t>4W}H;m{Cxz>CQ~(zyLLw14}I{i zA-QufGdT|jxlHAxx>x0;Rz?>JFm8VS1Ek;&Ge34RNLvS|zHWPG6{7D8jzjGHUGY6%E}fr5v2PQ!ZmhZI`guk2F@JgTm<7ns0^SqI&0 zAd*xQV(mV~l*m}y!BkoqSrcK_Ac1Pb#L!~0T0oM>&>&HJjIy(=VgY1Ym{qFPMS|-E zgwaoqpCJ+5LyU5;e}bSmh6Bg9d?@ESKMACKmLpXLcl1FJGd5p)Gf9POiUV99BU*_2 zE2vTeoB_6l&|48M7_J;7UmYLLwNr>*tNnZPy@;t)djv1XIE=9h1=;y1I5-T8npCsl z@%0QbdS&^KqE^7*EUN>#n+`ITrs~w^8gU#cV}li;m=1{m$X_6YG(DA_-hc`K1J7Bq zPas7hd~DuI@uNZ7?f&CmgK=AcD+NJ~$D~{GP$yJad9W6lxu)t7+ zfRK4l>v5HdZ(uwUG^Dgh%PFak=jcOpoC~DECq2E zhBhlvGd6he9fF<7it;n(&&T^+Xb)k}of#2|w(csB@+2~{7EexjodKYbaW_}yE(-u+ zF=~YaOeZhEk#Jw2zO_!DCRX?5@M`Sq>*Ju}k@Il}!k~6&LpExvmG%3@+UsO(5HPih zxzyO)+!yL7D@(mM(E*0h2kb#pa3X<+_g~n^o7q} zc7O;w?tKV?=|M(jW|G(nmm?RKwe#@^3GJhiMD5<1-nrq!PbtzEYn$6Yfy7<^4n?{2 z_s?ngnyewbZAj%t+fS$q#p`)NHl3DRoNL!|Z*&!RvRLy2ef=-#J3=WiEh>#ap^+w|uO6woW83fBN9iy=7FAJFqC|58 zj~em`Rm}0gNkyH#S;CXjs>qYOyn;fIc`NFSI*<_GniVioZKkx(7U;T1}_(d3>T6W4ZnunmJ8);XkZJ0dr=EC6W>?y7{Tk z%Jf8$)FUu6GBO6D-I#}$_r2^N^@~>@gsHO$Vhn!+CC3gPQrcN z?!9xa^0)50HaGnc(wFOuiIX1VF&DP&V-y**Km^G6ze2cTNid-eM6uh{*hs#)iYjWv z{-SC%4-ILI{ds;m*Y7DgZ~zpWW$+)q`FE@2!}nG4dh#lH7qrpk z+BH{>PcS2g_Qs;fVA6|MBijH;9m%e-7F{F;??Nrs5~?~qG9J}rK!6^}%ee1-SbOj0 zzin9q>cm+D^sfzFx$OnPE^4sw#@V(XC}a5C>Gm0>?WR&d=#9vFL3S!7eGBo!;P2t$ zBGaN+JAgJ$=1p+Sf6%O)ywSXm+&`Z^D_+#6&0T_^GT1| zV~j^d2T8n+66MhAt}3LgAX%^`NlMN7_iOS>_Sd>|m!Q@Gyo!~zUQ)8Us_wkdO3ek* 
z4RNS`{;~;aVo3x^#g2yQj4Cq9V$?c;NLUgyN+!KTfyU<=76u04lEaOQ$6B_HAC{4k zi5eg$IsnSgFQzZZDk&*Bc9LR$VW3=qsvOIk|G|}#6K!W_>nFX3_M1rjFDU)6@WTSw zMi9f<3&`(G;bVqS>QI(H$HQfXNI;32)qXPT9p(9^S7`i}d*`98QzkgE_1%#HA=f1Q z;6I!gAU%-u^^qRX`wgo`?WeWKQV`V~XcPbIvEnsx7PX{SlYK+n$k{%;FvkS&gNX4sEEpCn5Qx?XS_aye5dsC= zVlh!h%54t)(vPJiB9z2${Fl1ydKDa7XdIgXVW@n9pFlVWp;2>6EpUvJ6U#Aj!z9ix z2PE!aQVgeqn&>n;MwKHdSotVWv9d`;jQt#=^HIwwp_no&mGPP*krZNptlaPJ^-E9s z2SBh#`#s(L#6RYJFedn+mX>QvkL}-YYojw$m|x*fIbBH8v{6+v>v?*3oL28DkLjSV zSbpUJQZXe?q2E{3-pNEcN(|cqvyPi|beCS?AfZMpcGLcX>LtH_?1U;-*u9{(8A7$j*P{I&r&6G;l)92|N2o%tr3^` zr1dI(>!f#3Ti2=+5|Gj0Ca1BoYagV z_ds0Vo{rA0sZj@ylLtI97}OD|Et$Fi-ujY^ip*0+vBrb0dcyoUJLg08R*1uZVaUtd zI~0)s;_JPnr1sYKYEj*jDXwsaO(uTkANP-%;8m7XSiP(#DkFHRTG z$xoX4zu@D0qDI9bdM*a*3v_R$tiVNW3m*)uWhVhEaY5+;G2P1JWdmIg{SUEGsu z5L1+Zt=>xcQ~d<0P?Cy*6b>d_I_l&gWR!~Gk_cR(NS8=Pw!516l4ATd8A)l=m%WaZ zS%s+Gj58U2FN<3{@p6EEn4W(iwr-vD0`VSu941>z#R!L_dWjsV&QO4K8%v za>L{Y&{W@CwyD`+EOnsxxHsHy$IQTMnxWEshpP?HEy`~38BC}U1+GtQFp8cW8>+=K$`!^FH)Vh)5Tg%gDDc%uu^aHqxg?e?I%1ECbJDDjuDI_@1vxv1X4Mgf>1 z5CA5_qr3$z7ixhYNlxG4b;i#X5cXIKNK;*4G2j{Uf$U(?j+@~1R&d%EzSKDZIE56d z6?m2cxk#pvTipsL6%Iseae$9qlt$% zF54am0v_UAIv(;3_(u?YwM)N^pvqn^*XqH6Mp{B;CpO#?0ZHI3@=*zCkIxP4)CfuS97gv zQ-B>y>Rm$rI}LX1#g2*nu4CtQPnC`WO6W>ekUG|j#H~BE7%FW&B}r`=pnBPN>^W=^rs2&!l&|o zKD-i4iAdzrHqI1_Z@G@;D8QxsBe*G zp*>{a{ifWr*jM4AKE`E0#|UiFRC7`_U$wj?sqZb%r+4UF3?)_?1!lKKE zu+b<)p3B&bK%jsK0uUCfk|XuE zHKEtLu1*~IVfh6G1IX{%0a39?TE`=qx=#Wkc-=IBrVp`0A0v(?cBhVh&JpWAOTZ@3 z^q7M96x(MPz-qFoK6ggvw&Pa_Jjao~;gomK#2^CnQ7Age^P^`gR$%MdkK5_!=s?o+ zxHY0_1tsEn*8#{DqYrN7rveON?%cFt!y&Zk#HI18l=81HeipP_fREt$q1UTZhQc5U zn*ghEPUw!?B7Hz*MD%#m{!6+AZ5Db1pV)nEU=xKIIhlR~U=-I12BU2V3WrJlXw@wt z0m=cb$6k;|uv34?C4?{z3{jKpC6ECYgAd%u5R%S!^H_wqO17Y_n73HEZl_tDBu~B} zc5tU-zk^|`+mepqjc%Z+X_LuQ;NkA=zf%SKAs{GZ5(t5|2m=!AcOi6V1xkrK-XbPJ zgxmqg!d|g$r1jxQD(j*81n?L#ks8{C{aOPsv;oXC@RDDLwSA#$IUK$Weh(6_>?BBchDljh2COh4b zKnEMFRi-6N)>6sI54~g57Bun=hDg8&NJjh$evu1(#@HeFHgD8^Ef?*^tnrw#6fvoH 
z9!+fW5o)7F&$N0q%Of8|)i?CJtY5qK@Ph{#F4|x`M%^6NNPGM(g?gmkcRA2At}R=} z@1ovCRq>^z@v3HG=d%V;)5jiTFG@g9R(}D9t0)#k>#Hnq$zHx~9R>zwD#-a+jcjzktQo47 z*~rCz?~OIOC4ok(CYl~b-N*f0AA;W^*coHdK+nN=ObLCDkUt%USZ@9LY}eIF$Lbd3 z<>p4HR4}T%;0sx>ew^L#B-q+>N%{OMnR^BX%=Y^K4n4KJglOe4H$~%52gFY-7+{O3 za);_Z`CB$G!&B`;*1$KY*F~}s0HYo_XAvG69^bx>zWM+h)Ew)_hZ~TyCDm>r6fG!J zMXDTt+2!;A^UpO@ieEr9mHfr2;u&49 z#{6oeUs};;biIA8Ea%@;*U+$Qm>^rGEI=`80(v0PKVqmSK9#Y(A&*% zHlWLG5&{SDQ-e!$T^>BMnO3y>W%LNe@JNp=(~EomP~SZ}W3FydWd_RZy>0!qbu!7j zk~k1?FW~deTsQvK;7A-V?>5ZctXKNch)!%fY!g1cLrCQ*VJ&d<7hAb#Q2=rKJ63}f z8}lbizKx)4L!8A)2?P^yR($^jYLv%NS&QRel`cTuFnh`$4)PpS!pjk~6YdTk2_KN~ z6e4FLy3&b~e6%YmV(&dzpr6GtPYdLUNY3xQsI&vGrK#c0cv`8@2aQ}7fou@mo$sK- z=a1T7_o@$h8c*RFY@b5JEAAn$d-uXHKFBRz;!u^;?lMLutqE`w$W56ZM6t{N9_7J) zOeVY*6cV59#)s^%F=3!qKqwd}%}BmQuxMysuzJnV8A#-RlKW$|y=j3s5n_)XHbXof zY{EvWCPx)cC;(OuaSE=Y5}X8(!Q0BU{QNnoV6ui5RhV`Z?*~FWD2XgvwM!DC%{c7r z#x?W-e1_xvQ>Q^79f5?TJk9424SInJ|6W!EV9*@y%cj@jFIHEMj7+Blkwq^oZ>`AA z?VkV+zEc$c{Ul>b;?#b9bzvX$fH>|?!&ed&gL0y&c2q1qXtBQj{sNO+iC{PzGwPPW zuAhkOXr{oAMA!GdO=|Ku-btXBQo%U|unOGWWuAsF$$T;a!9oi}VMH{oI@s(XW9Z5& z_e7@CjCYBm4?~Lb&~s-BUBMV41tGu_Mvx=~V`#UG{+!sB-H>p9{*FQzMl;4nxV zGavO$57J&T>e_d`60~{&`a1XVn(oWjkE;qo4oG16)B8P(yWVk%ik4&GF_IuaONu%b zcH^Tpu;%sQSkKKJTBu!yCW#1OMEht!N`r}NpiuJ6`@0XY)Ic{w%GBE0x~KvWP&=3cC|`$*(=Td||e{e2d;kR2`;Y zzZj8o0T%wC`U^m#hT~3=6pD4Vs4Qj|K~J|p8#x;-5b72xGTMA>hm(ddX?z%0ePJJ( zOXJcY;>dL`>y9^S!1RJu)O;dTMKEq{3-d%vA zE5bE3gSRLVw-JJK7Mt~U#fHC{E?Gdf#s#D8D#?v~=?4=}VQ?4Cg)b!(OB z9kFz-(~ztIY}j2*MswSUK%|Z3${$-rB)#?hbN$ZO|M65kO$`LB9OO_dZNHo(Jn7Fm z6x*cS-)R-~pu~V1fM4a)H*k#$w|(nfkb>P!3tS-iWjiL}+zd}0(Sf2%7kj%rO4I58 zk@hFxRK8vNIKGi4B^7C)B4kQP8H+@wWGu=Q8c;HZ219cx$&OScAyd&HQ^rV&N~R`5 z8e}L#k~06!r9N*x&+~qtIeUJW8TzqIvK>rdR`{wZts0*Il{OI5^S_68^%zQcZ_H4X2JLvq|LDi8bFL z1{pvL>#4je=gyhf?I#)m#I=g)HCxbKz@(uD`|J(EeDOA1WGkPDjTtCc~CuJk7pG5!m^15ai;qXebwT} zyl$Y6##o0n@%r;H4A}-cogE=!)hJ}d|58_bs-C4jmcqJj3#|F*9>7Ae;F)OQTtiDp zX7sBL1fbMUh?l^%2L~^H7NjoE2&eovXAL7V)y@9DP&`%w 
z+uHus|M9;NzN=E-7$L0_COH@MH-BGI8LkNMOu3wX3Z}InT~B?pgdvJ#90c%K(FEbU zun6df|HAsj#h#Y?T-}z%`0>)?& z#JJx36BanYQI0CG>t26#+jI*qw@QzNep#2nJBi-`#zjn_A38ZG};~& ztTZb0+5brx{A(lDzX8rpN_Wy=Bqw+?2cnYpdpOaFY$!hL+3zs#535-_Fstk9fe zS7E>vnw2X4tc&;0YqqQ;MIO`fQrKp?=HGwR_A1_eMGEnCD>)0T?Ck5t+DqQt6{vVO zj7wxcjbi)Ze~Nykvm55_hX2~X>M}kBB(O0w5sF-=J@inR`$4g3{%Q0{Jp4_f2j>7Y z-vNZ$7z-??m=P(Cq;%6JO7=dW)$S|b#zLR}=wmVZGQR1%o!tzcnde%^G*qZr?F50G zKlI_0z@wm>`R_6bPT%d`n}AzO=zy(Chi=v8T?Y(6VvufXGXaS4&jBI-&!58LEbIbE z@NwU8f)p5+bOOQr92>j}MD?_PX$MmXj(AuvrwyOO`?!6N(*gDWx?+#*f6~RJa`TeB+t^dJ8_qIjZ z(p_dBhdE@XtU)tZ;_j?>$C8h*7arR8qj9MY(2X%YLFLGB|K4Yl#B%5Szn{3`wO0+8 zjA_?}>;a8m5C&=^t1j3AO)LQuxEQCpj1=@IY)p5kESmG~uTY_K8hvYsmlA!};QoY? zpd*_Lwb?SSzjD`G6Bn3u-ne6O;V%Nm@IrOeYu|i`YO=vN|M|1D zY4QEL;%Caa?2>om|5p;I$r&V8)`Slbm3-y<=ULJxg{E$4H;2QM~4Y`Ol- z#6Q$C?Op%lO*e&AF)KW zqPPe$r=|bpi9tn%8sL(&?V-HFr;_bx!8A!&#Drd@%j=`IUzCUXWS9T4#iW*a`2)`< z()!Cy3^s*`3aBFfMw24J(qM|;oNB@e4QP}O+--?}zEObFlhO^!1o^{y6ng(`SU#;2$?4N!g#y!Ks&;H#rr`jp)pbF2HGj8 zH}Kh%nE||S0G^GdnIL)5y2m|Z#(RoocK^?_;10A+m17hH!A(8+0DOEEmn}2FAt{xS z0y#5eJ>QWU!`6Pov#HzxFE>GT&>#>UsM~*rJ8)rzM3GzrKa2QbELE;KI?%qz{AZMu z%1IlOb_F)nT4iPVg622QoW{v{UI-=lVvWDfh;AXgGzVHDCA3D8yX?ndiefz|?>!$2 zhlgej<30%MAO(5GF(bgNy;j11sEKoi&$8cX;^dLecNX8fx1Vl_3ceU#cHDBQ>UQfM z%gCI<|7obcqnrbE_mbbz&n{}#>1#$94$taX;@^G6 z3(<}afO$+oL&sG%r$kdd`|o~iA6YCALfFRuPcN^G;bBtvkh!4ar2%x5^+JTz4o;@; zJMojmC7wICNC(7H#C7%G+N3S3kKH%e>$hr1dbG5`&8G;djVmYK_GV6}l1>%ImeOD~ z!OuXQoozs-Ws;yg38e^WbKoIz6%`#?3Za)q(Viu~_0K|5Ti#5N&)()R;^C+P5$8K~ z!u9Qe2M$~nOfY^Tww_z~&)1ajaJ)UJ4z4R1Vrg;ozoC;N1VS*BP!IVZNMV-P4K&SyWY#Ggy|Wt3 zD>nqYNSQz5iRwB7sFV=}k=RL;uc~PA*2mKbh$-Uf`cT{kifY$X`h3SuA() z=hRUTY|5RO-N}r@!lnMld(a7Y5R&&D)a6d}O)}%4E=sY^+l=%+<+I%e^7qrk1> zXn>9w@|k@h=o>+#F~BhHY(nOBuzE=vTK*TOxWySND40>2Q?DLI7>c2^q<50`9q=4< z_w>9gg%-~lbyNXC!G5P83gbQzCol8QH+1|<1^TDSo3`6O-UL`Ey6c20-Zjide>#{z zsSBU(!LJJCi8zg-#f-VI;XRn5d$9wufa}l-L)7kFT2i98d{wkK^+wIT)cmiF{eK%J zK+75$2I)Z#;?6lF=KMrAK-TD4D`mt22*wb*Fm6aW6L62s_CY8qA+vjJ!z&f~C$0yi 
z;rD2WB`|j2dD%|ki2QBP_096KcMnd^>}!Uy z|NPDfJkHnQ)m2sE>2H5`%k}@cU`9($w`DD-oYxm~bOe14l5c!Y5(vy4Zg4B*zXzUg zLxG5t10t55Cj;|XChV}N;uzuu1Cjgm%qsb7v85`^d9px)^ee1E18 zB+%$&)H`6Z-~BS;rXX%*oQ{2odb##)%@) z#se45NctiZ#MfW20*4W-pN8z)Yj+kvu;#Z84T&2M92k*-6iHm}?oXK|R&$e9?pGi2 z9~EAJ2sH1q!t`jUSV|s*9$2@)H`IR}JRXv~yWW?Q0^80WG>~{1nkfSP*Pl^{SK9Lo zy%|qN-po6rCZ;=&AAMM3oh0F7wTc(=u?OT0=eRgkwN(+2}E{qJ( zqoce7K8*C6)BGJTCosqatbH)8umVD1W{K>zftn%Fg=` z?elCPa>k70m*!I0oo~-2ET~2VKh;q^#sDQ$ozdwzEu-$uXo9j{1g)1nWXV(m5_S?Jt$t?OnF z3|77K5PvJv&*Recwbax-w{gXJJ>%+l$eWBE645c~9ud5%Y}Dn{EaR@ml9yK$1WF!$ zTB@a~1Iu!kwEdZPcWoMRziAC6V@Vwl_ma{UC=EWAdgfo#yS8$FPdLZh*+%BOZY(>K zSvhior8 zbEwW66KUh{GTd{`GIfIZ)8(}y4AHV{)faLSQ_tvPwqwT9@E=kgMIDn}Y^IO3_r3FE zuinqSu9n4T(eE?i7m#>;;Y%p^vv+)O_wE06U_?=>`K!`MJn~%qTM!>wge~h}kNYZ| ze`Ud{KWAoWNofO1^gf48tt@=a!*=SJfb5$M;)64I(w`UU0w-X`Az`iy_x|z6!|s4kkI)kW_&g8WLjbziN#m^bP44_Br?&IhOMBzdl*?$+ikWoRFQu z*5U?OSOnhnAV1x_wa;_&@!i1dKSK`#UgP0I0Fucyebh3J0zkM~^=3rH)Y=WW)`7kb z37yRaetGcsX#aGAQTo51)xYkOLCbkNZ+u?ytel7d>*;Fu`xv5C&n@%kxm$3zt}bL~~&}3Ioee zUU>RpblSIv0yBRXXt?Av%DPPd`B=E77-%WCrI4`_O=|;asZ9l<{$^wjjqb+xGlBl8 zVGc%M{PTwc>H=j1_i3s_x9~K#U5W?1LFw`^8uIT!QdVA%X8in~uaBpFM<(U1nRUJv z4rzF7?GQSIhx)e+)BS)`{E|&~G;s5nB;F^n+y?Y|G|Z9u=YtrGQ|_Q;sUH;9^5{n& z>B}r=qb9x|86Z$hP;{rmd6c?PP8z8*kv?2w=a|)scBg7cOA>H80q9sq1QSWD(JAGG z$4YG)IcxqL-CK+(;jy}B%ETzsB`cv@=4l*Z>Pb~%y>(Os9`oq$?-x^2+6!vH?;zTl4W%Iuk3_GH6r$)e0h0!+S0dPbI+UHYqGn!Mf73Z33-Vfi-aQUZW@E0rkZ+=n#G}xeO2+sh{1<80aa=GS4FARa# z)z^pitR1qSnXyN%{~R17i6%f5L*zIAVkg8)m$+sC*%OHYE+LtOdWq~x;589rS%<1H z4_Py_=SrO~G;>Nax>;qY0sv$GStHXv-8uPVKgt~`_}|Lu4`Yq(gYyWf>OQSNM@21) zt<=y^ZWMD6ALxf$x}Q8H_}9%hH6tO!b+7X%2hbeThEj}4LOxU-YhWJy0@<=yv9ugK zOW>m<1gNQk4%7D<&HD-+cVa4;;qK&@f?s(gRiTT%5c)d!vy<$piO$Pf_5}#7) zEp@L|F#N$^XiH+FrS+Hdad2os&ZTd2;OkLT9vhuLb`E#!VoXS%E4$%zpVubrYKW7*k z{hgsQ<|o7}d&8?Z6`w$jnSc<@5HN`pMWCY2J5wx7{AcqsuD{-<_AD+AHa*XwU4R;d zb=!^|;cuSd6d;-w{`t?(AC<5iv!fh^<$2$p zDQj8>&sp|RA3YN@`T3n~R!ms~!{gnj$Ge_EXrHy$N}-Fz1z`J_Hx&`u@xyo zfshVF03-^N%KvF+c4SInIgFhjNlzL4jIDG}W^SS4 
zss;dBw9f&ub%p~q;*gB~*< ztbAFz6;>Jf-5^8~tw=AhMwiv%wUY~Ve`-#3G3!1mQ&@*!3`mLmb=y2mrbn>BX9V?f7nPsml+C(6$!_%AQmL zdEG@Ex&{VIJtDBCTS*Q@$RVN-aKP6iUv~fXX?w=H5MLRA3l?(35lCn%E|E^J@0fuA zAQ&oH80?S|h5Ww|=AEDTgJ% zj1IB0KK1&#HnMt4RQOm|UHibyP%OA?JA15&65A|=Hh`te!P;8S=4ZM}isA+-4VvG5 z7`5h_r3VkGCYKGaGUPWEl3yBHE}Y_BqUqSAKKt-5HVI^DYP5DK7Pr-2kK8SmEA-Q9 zoXe`myDlf-PLWdrrUMaeQyHck_r~*9JbhYQbd-bexN14H30#Ldk$bNOrNY!KbNTX; zV(E`9EeY7|$q<E1=3rO9i^!qIH z%g8b&Fm@@Ykkns;f?y*q!3>c`*SP>js&rBhK>ol3tN?+|7|EBX?E|%v%ZeUr?H`^7 z#Bd4{@kD~DkW2}G8h|2aIoi-kb^(~CCMB~m8St|Nq;^w7*qTd!MPD8|GV>>v)oHel8@K>dN} zL*N*VE|MH+yJ&Y{P#p-|E?RJq)B;i)iK*f+sZp}>dcEz^%KaGt<-PBB{B}YiyE34) zY1Ehk)q2)KyXg0zsfb5LjzU@qJb1B}HBk`#-!7eBJ=FiLHcn?ICAiSzNu8LT7>QiK z0WHC3%6<;4t6cIl_bYUOkX!?nQw;~cJtZlwC6}0?BJrOn=wzTWfhxj?TmT5&g}TGa zHG-ZFMbc+5(p!AxfHCf@+v+&AX7Yi88wM@D@9^-#FajgwVqjaSYqJ?E0@f2cpeEey z;?!z?A6hOgA3CAn5dTzhU~Guv^g6s3YZ78(idb4>A{ele@7Cfq-Y%7L#?XR zasv&MOYYaT*;i~GXCH$_;#4f3^6hDkh>`2ha3|){r%`bqp`ARL5JSud9xG73AcMWn9|+v>pA5{yEv#uL8GEnv6@^L9}nH2ZPxvSqIW#s?)_ z!=y@tY$SK5RDd1?O!OV`pGxi@Pbe3$G~GuYiTqRCvQ^6PRs+yF z{yDRn43T}Djnh@N$Ol8hj6=epBq>r@IWqLk01+4Q%G7BPe^WY)h0A$niYwo*I3j^5 zhxuh30NRey4bSUC(<4Ty&ieJ|$yCUUPD2#nUv6Ds09iiHdd><^fcI?opK&6)`b?%qpGrg2gS z{5&?tEr?bUe^ibB2- z&mC(+&m}Z$Wb9YqH;kup9VZOF>0of2;7t(9(J&d=j6~A}2O2dyf8k>BxdoLD?mpJ| z#-Gy?0i%M`(_sRwjO5~PnvEcM9!EXUXT-G)SDnef4Uh-%z`!|SaInD)zIph4yl^aA z0Bh0#yMg1xr4hgaj9jM*IXeos`(EtBj9MccgeYXNanjNJKiN44crnEL3&aS@Q}TMcR(H1vcR4 zVM@7Ytxv!jiUOz<@1s!`jviD68^1HGyf#sJP)R zK%G?pA40>!&FGoZeV}#Hbl2_&qI@rOoV*wJNg_r-RUA%3714?}jLc(mw)-_QxFCB3 z4BQ~k7xeB6k?9I-F)PtxluFxB!jv&zQbgoo{t$>$CkP_ZbhgAJf?_%3gw^O_GqO`v z+w(26U|1h<;j9ojnXEZBr4l=~tbRMb{ai@=$l7$Q@t;7Hgwn~TFRZ*9w%PcRgco-> zbZ5@!$d3X-M^56Ajb$SE9`*1?W%jt#%!cP^cEa99#At-YZT`N9OiIG53a1~NwjdRi zneLe5AZ|<>qEHpYUV6K(piqLfVcUzX zK-G1GFfhGfQ}xh2T#L;2%h4})wr<_(fax97@`jdp#KIw%3E{@H1Cwy`%wnk2qlbu` zN>ID&7=t$REamqWg+PxClK>%X-yY9oGVcOnBam1Z+uHFGZZFAN3Fneuz;2i!|Mq|h z&a2xk*+Nzz4beCWFjc=}Zxl*kta0+*ts32KIA`TP&ybm>(f4HZO^Ds5X=P%gdcjad 
zzC_52#cts#|Ax|#z%ybBeN|kjuMfwD;OP2*_G7;i{W_BQ(dQ$f!1qC0KlQJ-!j3`g z>%Ce;Gz4lD0=dw=hm_l?KI=BM2F8!mOz)#Fjz#wZMcJ2@dc+=6XhKg}xll*b@F%uv zxec*vr_wpqUYB_@BA8W_pAyQ?OK!0yaOV&l_sfWxtdm|uDtd5Z_|Q8)6vuJM0(JsM zLvD#C12%RsMhV?`B=MVbyi(v0GCD&_@BZ_bI7Xggn&AEW_fseGBr@?Wtk8Rv-r}-p zuXWZ_v@G=Etj!Wy8!dij{>tRmRtIGJ*VX;i5G=q{J0Y{iu@y3N(KtS$#gXOKQ5ZmY zPkS3@boK*8aN_Qj01}J)lx;cOS7OId;{!~AIx>D9?4ct8);VIxq@Ije>)7vmr}u=E z!!VIaS>-!0_PM{4;!F=8GLv4N+29oc8w3Fg?gI*SO25|Tr(jQi%QAF| zl37;*D)wxlwT08EXhLqFjS^f}M@KB(3aUP`>Is@HqmJ+EVT&{VpOocOx7`o={>T&| zj3k)^6wwA@gOQM8W8a`-g2K;d$f3Q|;uyJ=*qLL70pvr1v@r~Q!6>m{3}8-ok`$a| zz?PcRX}3?n=PtgNBQ_)fvlzg+yW$%mlR%Wr5L58)65hurnZChz9yz%F5_9W6(w~4# zY^O~N3W9Q}oOJ+$s^Qy*b^{LiwUbSMM-}<<$B&J`sZ;iZtY{7LhLSfwfBtMuzMeYf zV+3^hQAI_?)DYG)w>?_c18Yft<>Lp*a3T((b#5HNIR38VyvPkHKGgtOZUXhs0n8PJ zf4G46I+*Py;?k+c1B)!@KQfd&fqI04-JtOqEV+wUW+{VU2(Uv(S1d%|X|KXl$(MKj zWp>8=BY03nD2oqYI@9?aE!WahKu;*{a@vA(4gX&oR(&lnFaD~;JBgYm%nK_P<)G zyPiO=u}*)>S=2XfM>ADnw*~ovunzGATO9(5e;sLzZcf~duu%V$xGlSGo`46J1M(b1 zK`a6cOWoDsSt{~{M&75`x;d#-feCmLHDemrVfcX!{R@gD ze!e+P3Rt2f`iJY8c~S>hQOZAXxJ}R+ukZ}aD>bH)NZh#2ADv2MrL}I|>BEcI_>TYD z43jbG0Z7L(s$9a}_~8tMKI0-Gf@AN1sI&*FD|o!c7}q6SyVi#O5)#1t^9}$n%nxD( zK;3i}LnMS1@4h`Wf)2l~=YA||%yreC-V+%C;?I=(?Wxhg3Ak>071pU*8{tm_B z$PO3lkOLeU5PgIO`#H*95wu)k^Lig=QyAO4SkmJ-uV{$?S)>dD>+UMfZ6`odmt?mE z!vE>h44yzkauGxlGkT! 
z(YWM~kcEluX~6d>v$Qc^9fgr-|6-%B7RFuQY{W!jjd$62wgvs#;MXHShDa3*xoaJr zWIBmXJe3LJpCBgMoa=L&Cb*&qAV(QxAvGpeq*ePpJRc*F6{2RU(A$%75{v`k%>wSC z^WJAZ9uY%l>8RLyv@6P_FL#^v!yb)FQoOQnW`xD=Y|UvadkwQkwpeBEzPxJWr#&Te zeH{^Ujxn$l`_?hNum}q~aAnTT z96BK+FjHI`o&0N&-G~O^Rnka4_&B{#lo1$`>W5_k96uRqmchN{97}VfL6|Bkp!Vil z8O6J+F3Zh+iPgQ(&)JEyelM5}ibKzKi}@=Z#t^%91obNPiXi8hwbc3sF7!GuKeENf zQi_FM;kT0(R4~+Kf_fR0!#q2^NnCA z^rNHB46Z;a=LE-#pZOEV_+EsQm zrhx0vGIoP<1(+TtJenG9898*Bzpw+o6oNX$JfT^46Cm>%41LU!lQ_K?i8=IeuVW^3Kr4UC@$KOK) z6$8$$Lq}0W?5_YyI67;BfjuXqmj7-4LDa$BUYNiDIvH;Vrqq@0E|5!?9~2y6Lm#KW zdUMAmk(QQjC%Q?gnT0cd{x5zy|ATb@|Kp9r`8UZGP*vnL-^80H={9oU;K$JR`p5v@f_^yxGI>xB9+&Y9BA)z+{wKfX%9==-E-vBtB>@de+ z>lQsT;&&!(B$_Sxkjim+iJxpIe}fFbuy>&?`=gNL!Tnd8I(|RkFzQv1iTqm1>;L?! z(xhqATE@*d4^^PcKk#X!ePNyA=QC!}_XxP6DqWKRPvAfvWaflEo)_dz)pBD;E^F7g1*Z(i$f&Q&4 zQPTxW_#j*h=};kyCUf+>o1u>og{)2$*h#**zpNAj7R#=yhNO8MLt*p(rb-FWLs7E; zEwV{e`$5vtiloR8d}vNFtCRzJY3IsrJ&{^4vd{ZCtAhR*HS&*(zM0P;y!6NVJ#1&w zu9_)04wFhQ{9@+v9=*LvE;EEQpwG$s_=M0l2svMZFG>OOa8H>#`i%m9$ycSIHRe=3 zKc}G-SX-YUw1AB1^%0=LE?h`F^zP}HKrC9j4)}s24n1ZZx=@`816ZVjc|~u6`)UW% z#1taL8=$u7$qsA3{%yV^{VgSp1!%}z?pP&+?&jhoH`WbChT~`g0={J&7zWI8sc*1l z;lxtf2oNX%QXR5qrPB@r6tgpf&YbbG+X|+R4=#P=r6!bQcGH?j*i-3`%gT(A6cb3f z%L>&zRsYK!o)@;=W(@=u*?=*~l!1hWp7%()BVk4?h(NSh7IT})y8doafqD*Ud9(EySU6AU$Q%^7WG71FCET^rb@o5 zYTJfTqblM3@R1PT;3M7MYY-oE%$7TcGYx3rO7iAF*=;pl%5Y3<4Mt(P|HuJY!#csZ zk17ik$@Tnmo45!uM9GN&G=J~MkQt{Mkmx~*dYJ4}x2n8N-x)rpn^7I@4~FqVN7=1| zliwu^FoJP+mB^5jY!4iNIEAw@5Y%)uR+|!D1eoFlwPk;N6k?n?4`>?QZa^Q7@YMk` z63!7jzRU&Th#7~K(~1;bDr42sr4@c5PcSwKLI=thNLY@pAYdUx2hmXujJ3Jy)xjQe z79a;=lrTDFOWVn96-c=uI9EEe;ktZ*@7OienYzpiGq`ITaeCZ<5tKQ!LcBvW#0{;0 zAKv>|uF0KaWaa$o)@tLCy;>;HPo=10Y7+S+je5Jo5;++_9CdtvoTDrf(q~d#U-NUE z=bHIjW0hWjnaTykUtsiz1u4ko;CtTyj`zT8?)e}+{L+?&ugGbG>Nf`NX8Ti*A2}il zLJ#+B1KN)<2X0P^Ww!AcJ~Ss$8r^g%n}0VxL-e3mcBS1F=^a5@WH<=cJ+H=@w9Pb7 z08|m=Q3+rrcH*CylP?MG2jqJOWezqo5mCPJhj0TdVVTW@i4Wa2fNvgBLHwd* z?4nE9A!c>`qo<;$DReD9o(i)Ur3?ITpu4L9V9Pzw(PBUE8zZIuV&ez5I1i)=ZAfwC 
z+&^kVH}LxHP9RlMzyvT2N!Db7imqn<4Wp{dDZHBvzK*MX4QLfB48VxR`*l8$5o`d^ zh@=?(w##jc#tq(sIch6%xG*7t9r0kCebEp-=5dB0LNK{`?Q-cch*gYhybIT3ptjn8 z?Q5`1ef(rps?ZQD+9ShcSiQ)}+n#06^=6c@Si|UTIIaBmWtW!^^0TxApmbBU@HZdFMBB zgH{5>z9NDop~3z!!YR)R$ErAv3>_@lP%)D)HqU7Y2F??-H~sd2LLYZsh0VWl$jQ{f zz=WV*&$7V1S$DAKEpTnYHRn-MMA@r1zLxLThLV3Tucw1k&D`s=1#ekgxfHl;Fod(m9IyT4@>Hh zQ7f^kw4bn`aYwFVL7zegTW|&dNF8IOw!Qc$oOo2cpdb1Lss%oRQeg6kQ3zb(I`=~@ zsK=-;XfFg0DGVom|uNqW4 zwG@xRfag4TJ&oZ#d)fANC%wNQ_FzBF%2ufcF z;qpmkLftdCdJCL;FK{Zie|mKA4nNxSnYzB5zW1lt;Z5#Aw{+^pP8`Dk`R<}2eelUS zwLKqyakvl)3P89DrvTITivsK=#W;tQ6l4qC$oMNvCCCo@6=xs88=%s0tjC&196I;< zkf1=5BTZR>otgvKeK=GxQ?ozK)#)D<0fU8#$q<6PbX7nd95iQB_35Cx>D7+ zw95dVebzAOfynZ4kQRGr%9gd45GjmXkG<5@)2oJZ4L=bhejF8wXxj57&2JTd!uX;c zspVZ4|Mw3j@oG*nwY>lKJo_?UZZ8;Qz@Or!-^zF)7<7e)j_z_PtfrHj>|V+3kr*2~ z`{1iJoU4KcEK&!Me;AZ|#65phN=`Xc>OU|z7@p+^m>n?$OEQ}p>~{Iy-zCSc=@G5; z9y1*0r!>7++jhljlWXY)&8U*`S(~v?$w&=e{C1x)0Q@Fg68GPp*S#H(IJGk&WuYKK zvYu919y7yv4%Gli@u9{|IH@F21>fnf@X!3 za0b{e0&Rw6Bm?AW0tXT)WB@=|QYJfFkTF(Qqm|xyNfMoidd0q>o@^Lp#L#tN{cDay z4S`nrQcicQ`$^X!W!~C%n4@tX} zySit9y(ffvOiE40TqH-pD-bbBw(Q}3PYO_-oNMWx48V>oHK~maJBu3Kkki~T5H>?& z4{%^@Ov(uv&ZBV>5YSX1+IM(|GdgF?pP!0RU^vTBE)+T<_Mp)bXI59IAtON%u(?nY zOdUbBlYqeVJ%5_o9yg@tdQ9l&dSM+1^D+}=FC^kS$;N)l8bm3H?|VzoFbQGrJ@{B! 
zamzHRVBqJNDJh70h;w$1k;cU%-aEjhg}lT;Iz|%$5^iX?%0?bwa3CzXeD>@Xe^tyv z!Cegz8ni#5dj}$JD5AT@Crj*hVQH!)`xjh`%^LtAoS49IwQUBklx1Q~O^vMoMYh`e z_>SLEc}d_sC>{{eReRIMh8P~ee#lb5@J7u@IIavF3keSXfzldzjcKM8U zmKyffg+*3~^peKP3_t)JBK@skNG#NYF6S zHuxo3WEI@T=x%kw4XM^VK7XIK@yPcw@QpcN9T;sWiO6dQ&lri-m?8~d;W-j{Q0Q-d z&NNUldY?gg&hRFx8eKO$Qaeh<*}*R`TZRO@Zfysts*@R2%6wY{%l+hx$B> z$rQw4U+0|JMzR3g8nqGtog2|dT1&ht(EGtb1g08DIQ545uA$=?-%hCcN|rSQ8B(Ws z6K~+9jtE~AI(?HL9O7`I{DSfx>;!m^CRmmffnjdNl%-l2>RuGbP_MM@`bZSd54@UG zI1l6#slRK;PMD;I0A4rTw9cc(L-JL@I=$^bsZAZ3>t((rhKdlX00~PYSvRrAs3Vtn zTL;gRUk?+ezMRB>_Aj_yFQ{pQ26qA2Gy&iODOxgCM%&57QKCKDs#)o^j&8$4LqK|u zfJ7n(V9H=|SiATaG`UogULLA~N+k1eFp7{Jq_I|%y5U%GLO3$(`j=fGpGhX zADG*Psi36NCgA&I9m>i#;bb4CZ}e~5q?`X zHg&o(5-|Kx)VNpTLVHFG)C;P-a}sycJS6IP0NKop!`adfA-&nf>irljW&(VpN7YjFILEtV+E*^Mg;G$DR-nmR{T zBNK81oa5p(KWH~%v-fz5CI=l#>c`jKS&;hbrcHryg$;=)F`N&xqsS8SiE)5a5DJ8w zx4?80#g7S$%sAPli9A#GWBy_#?rrKRse6xvWmp-yW`=xX2Y7$rG;k?c^t?2o$KY*0 z#vvqO+l8K*;+liyZU*S!IXQ$Og~}d-s)Jivc=IF<$;_*5pz;d*FZbhb*HiE5LX-;= zx+GRMHuB~A_;D*8$QXRc9Qom5i+}_%P^5+3gt8C4ZPk6vfSYn;z+OHZRW>ED*cdj! 
zuXIKw zgO&|9V#v!Ap-Bv>A`DZ{T@N4wtrFxY2q|JKzB=iMutkVn*wb16T3~UC+8*-jq6QdT z`j$_uKBut@KBL~RMQk#^mXmd`T@pHe2+K*(gVR`*L^++DlNg2+$q4yF^5K6!1E5Y7 zEfu6P_ZG@VJE3f)Z$;}A_r`-b9ZUEUAmbj}#=n?YMxqe;6q$Pb1_FB$ffDA;h|YR7(6O!MhXSD?TL(l z$vDP&N_P2#fk`t*az=Pa&Kj1;cpSmZI52|{#|9V4=h6^1x!v^&I3pKF`NXK#p_9W5 z1tZL`C`}_zSmnjq(SZcD8$Jbce>M21U5&nL-H~Q%rg*Oy#(a zfKF-wI?PGNs#Ar9^~jv_CYc2d_8{exxZ44=h^+T?s-R&KLslOHUPoF$PnCBjEJ#psD zS|=xQ6ik8jsfZ9x>oiT3*gFI_)L*`4z4XG&FkVC34!K168F%HZXEVVY=cz=S66^&j_ z>$OZ}q59-fk47|QpFeH#WaIp(70z=35lmx9FI!d&mrYw2muWgoEWWg3Er{p#V4RI4Vq^tOk z@Ii+<_G%38J2)px zoxbA&)OW19ymCKcL2NAB;>8IEO0frn0Fk`M>MF4o3{SjamA8)v4{O7$HGvkbB2`|J zUe*3O#>O+S>kmRIz$Q9$Y5sx*!BCUiqxf|7^n3tt4A0K!ywSP^jK7FXvEu%(2LlGxY8z80Rt6Y-|b&KkzQK zKElO{&Aznz-L0@N9x2PLXvs@69b>`io}RyQE>uls`3cvrpF(C+j5U1kUaqsSp!iZ$ zOVa37Qo+p&8uak>^_@LujsTAQy}_{xi$z5z;#;m!Q)6&6#?|ui^TSO2AnJ=n^76c4 z;v1?wg#^PqyuB-a+|XbOp|TK!(dpZEyDq^aTW&|(eF4Pj78r*IrKR@RE|AxT#xI(l z6q%pD3cF(AxyQOS)|KnEw6wx*pD+zHn0hkNGRO5DE3&ll0^@{q4uynF2hPK0VPP># zE3rsgR`$?35tB0y4%DJd#@?GEaKyd8=jEJv^Vk?4-Lp4)%m{l3Z0IC5#hQsMGiS~` z0RCaNjNOr*AK@oXlm!zbk!>l+=xwj%0Ia+gmt^bcn542)MrJxTPa)QUYj3L&qY&VV zH0Dz6LigjM*Czw*ptUI59}Yap9zepSGa7NT*!4x5+VxCBko+z{1ze0o7n!MQ;q&KQ zNQ-MdB?ZHhH>S+pDmw>okV#BmpjUnV(Ytf5t7({~?%ls%!p6I;iXFg3*$+lgq9AV5 z`1vY60jRX|+^}5i%U&Mk$>G^=fb=(M@xqYF<0edC^HKcv0o{0?@>4W4nyHj@Lu2nm z0W6{bBoNVPq+pM-NZGu+x9jZ!Y)UOdLoW2|9RlUNZ2?y6(ht2FnkG=rN?K%0frPjS z%XZbZ_R*pZ^oP`zuP3_ox($(20kk)_UgV=JU0hraAnw4nWZUgN*-rQo zU8jVOyo04E{YwdO3k>MXU+RSm7k&cy< zuFckq67nlng&I?6qQG-N6|&lTrm~V=GmWy(U$`()U>wl&C+hMW%(31c;xuMq0E{h0 z8;R}i-JA;+EU3@%o~Aw#fYG(W<|7b>yS6;z#~Wd_$gW>+>WMqWFuXDSp+@tjb?c_& z=H}x3>3-oX()<0p3^w$-h!sI;nu(T%Q`W+scU5F$WMHiqBy8q)wobz2g}5)rwdbLj z847x@ZE49z;>H_(=6nKzRh)r#QSaY8YaU1vbaQv7b;5;!2e8Q=hMTxcN3yf`UUiOA z^*@7}@dGXw2M2mhXRkWoGbdg|eEzH)?Biw2muuSD2}(*z%JCo7c1FyZpscKHdCwDP z-Xsw2Yx6&-PsAmr6lS0%I$Pe1Hvx{Z?der??5`tl1W z5{J_6+$odyIhiO}_3~vYh*cR-Ka4_Tw3R>vrtm9qLr%IH1^1R+yQW-RcMQfiD1XLd zRO&&*oIV-!6$vhFGl-is_W1gc1q;Z?giI?=EP^6!5y|5$5#!$uDk&+A9a=6fHd}H~ 
zj*Hr0^4o@#IVW{vK`LZ&QqWeIJ_XdG+fM9~Kv%)uRjA3%kIDUDC zk>aP0j;UC?*bV}igZ*eQW_p+w)|i6($BCFo9Snd0DCz`Q1&e#`?ZKh4?vmUj{9IZ_ z=3r)~_v`T@;>YmRiYqIpyJzzulr^tPF%>|V+j-8!H=$REx*<%Cg&S)Oe(Zrr%>lX2;U zcDZGa*HlDowOzX;k?4m+L~s}w80eP@3dRU3?dug)QBgsH^Zl7eN=nLYu+a(%3ZbiI zW2$WF2u(*soHARQf&8Hql$$ibF?}uk<_rw{Sw==X=9@Pcy?n{LV#Nvy^~Qm#_o+|3 zyX*A~pe0C#_V37nu|QUK_B&uCT?f=_{eO~A9C$^cn51697al_|7gY&JGF#Vo`Z6^(Fwu$8X+TiZbN;GcRaz zUH9xM!2u+2q_FCmo{am)N3z=M@I}5s%5uN7bV6aoU=^?Fm+=9pY6TZAWMIcA_>ZVi zF(k6-C28TFA=7en^+RYi|!k8NktF2YbVGvh^N^FqTi zM$pKzf{)`Z;}Z=wO_BXGKUcu<0Vs=*KHlkTry42&FF_k-&Xh>$8_J3X6-`8XFsrHtxsvjR9fu=+UD>qzJd&o6GK;F)-bQMqkQz zal*oDjM-(ry8JuM134=+2`F+A;#j#XJ-B1VaVHb&B@!nVxv7{ML6bHMC*;Ow2UsUx zVHq6i_wa0e&Voz>F<>e{OA|`3_1!u?Esqozhl=)5v-_j1cV?uU-KEJs+mLUirKJ@k zbL>XeLX{FzNMP~e3ErLc_rZttTpny+UH}}*Jl%0WSVR`3Fnfgx%uqfZ}U?ghH50=-v47aWtQ6L-FM9% zc?l4dA4z{P^n<96P6MUl#=L`VJjgk%i|yp6hwZdndtyWaHNOnXZ+gyZm4OFm&6OILBau} z#6{4c1%fjz#;(GtdbF_si7MLy-`?q9=NXtpBfDPLREaWM)JLvJaSpHX58c@PdWUZ$f@_}4YIt4Vqls`!O8jAPh*qy9Cdn=r`Zy%3?Tg7i^HYUxB-fch- za6{6BT>m}R{%ko1!%dnZf-x`0;?AY;@Ng>sPJ+?=fFCm$s3%U(mSM%=dLJ1MqT^Nd zIz?q=dxSqm0Kk{i3sm#h^~{)F8G&TzG=!-OSFM_l`Bp`Q8Gp<`q?C_R*~^Mt30+8Z z_wBcDKZb_8Es$zwVGIESwVC@RiDbI=hXo#&O62Xk_ikua*R7Cq zs+q`WR8+o&=q-(G2v7iy(F(jeWIXObab4X!hBS0=w8Aim__1;wI)Hd^sU{;|-oKAY zJ)z-L4YB*I^d$9*7|uL0GU(D=0WNc`uI^-LQ3Q?z%2y^p7HC zPm~d^%n{2_kldA1QFLbd25zNGoN5|~+%51!$Q2mIoeVwN8H47PIYK%`h#NK2gHN0Y zeDv%YClc6=oi;eJIg^UPU&LQLoAO#nSiA{nKumo#R8vR{QE-mK(8-pDNN!zWo zwCY|h0l@)-96U=Pdklebl1r9+MV&LYy5Kb46N&vxSwa?WFt+J2bP%g+X-zobU=7P7q#nq4Fb4lW$egd zZuGH`i8#7Dxm6)8;skOE2vm&8K zV$cMnQ7ocRL)Z{hF6v^5u1$f9=nkb?9z^wiX}O87f*?<}|7gDZa?X`2^XxL^mn~ys z#Kp%aE6p{!4T(W`>ue*<`xgn}XejVs0I9;OyN)wq@O2PzN3_}tb4%d`!1PMtWROBE{mt$!jQhv_}e`>e{(&yU5oxcl3;!#IB8RavK3`dd@6 z7L9O;b^uZL9E5`pP{kb<()o#OG8D;XEcP-AQU{v(Q{t2Z<_+K(?!S2`A))wu}h5yHUtgDA`^fAQoB)d-A zR2D&>p_~8m;yMvF!=AhsC(1UjT{|8f@h3qcy+a}4Gcu$IxHa(Zm-AeQ^d5!0SSBa; z0JZ%g(0!C(psV;kqGAZf$f+dYP;zL@RfJ;y5V%>Ai~ws3y@Dp@{3|vMZ+pJmUo(pI 
zU3Rj}*wa()2$#!~R=@OD_|+hc(<`X-LgGoK@N^!Yqe$kX@hscV=$Q(prly+TwqyV3 zu1E=^+|Cy?t-*H(CHz}(;sYdn@?;9oqNRuLY6zQL)8zqhW!YaJm&Gm7Dt3N&F%mK0 zBPYa07v^cOh^(ZH&_M=uvhwWOgF};k>)y0I=z5 znYo73ZQ#|kn}GQ;vTky3*o7}$3UD?|H6boeP2}ADSFdIRQk%POZLb$#zW%^{H)mb5 zQQbI~&@kirgxpm#yK9AAx5>@{RIz!>AU;-5R+eaYLyNUBRcMmpL~?kfxjcN4fB*}` z8V_&DZM$YVUPnQI{S|{Om|aS1x06$upnTZ8 zz@09uk2-I#4A15hbzFu6Aub_7qQF8t_nx~sqQupb-6f~HARkZPCS-gR2T~KzO$FB` z2G;&T%#GLra-n~`7nX+PTfdUP0+vNx($`i9N4KA0>%Q95oFgP)AjlD zDHK?!?lJ6bdAyJ~|Ni~^CkjZz<5kl97N)BJA4#>meORI`B*Sj3ia}Pk?xL(_cKz9; zBr$SCg zJ$;J20xYMYy>3hulqN+$vli;i5I0Lb4R)`nwwA}*+Pb^1?-&;=4p8Jws7)Ut+qt#p z^F}CL*^up+=lZxEOj~zCt^@nVyufcIu$!YemzxJs*seibkcPI7b8U?K4x}N%N^4Dx z&=EOv+JaRY00Q%W%#(A7KH%Hm)2x0<1do@|-7_c|vSOxs6S(su2?>igrdpnaHj5Ep zDP_i~Ykb2q+Azsj_y7o=q5W#jMbKb%Lvg~w0OZ83s#+lEEZ~hnc0*8VFc>(#(Uh0C zd~ry;8_9sqjB=}9AKXU*ySFgytb*m7OP8N5Qknx#DOz1;IK&sn@!ydD_TF>C==pKtF9K`Pur zwh5n?dC1M4WkxE7WB>;q`HU5aAI#O?g3h_|4uNwg`*AJ3e@V*}_-8Y*mvJ{eG1%~o_5 zf-Rew9HbA0-S_^Bt@!jtI6{tvglY+QdaeS}dw@(R@z%$rj(IDFUw{Mv4SO&`WHs&2M zTx`y4VhYg}Y~Q~9g34a^O(ly2V~nYs#t``c$WYaP1w}^p$Y59Tix;y{a;`yQzx`R{ zNiZK`X8g)1M3EhZ4bJgs78WVm^|$)xMr0d==H@pSBR`_VR(5M+a_mcXBlX@^)(lZ&kWoV zd~>($>Nw{k557-0k;s>pnW+Ul19H8?5WR8dkS&@k>Nk{Y?|@f1X=}$RK|XaK0++X< zm$)-_#J~6T_V%VCeA`TB;t@Ek;D|6GegTG-!9Bjfa++m7VG)bFmQTvIJOhp1LSbQc z+{XLZ;AOH(mGhtw07*PEJmTm#O?uo;*2=x`TsPstMUX3M8ET&w-jj z@kDs}yIu{AUMZplQ1zh7$NdjGoXFCIfI>(}PopKm_WASYA3b^U|50|{aW(h<|377v ztV-FdlyS0UujHWY>LeqgWL}}H5Jg307Req7m65E3h7qn@M%hJWM3ew-q(_L${o zwoo$FJ4*le`t@t~u+Zoz9{aE}(-V#yv4DUdjVOWV8e?K&N5$7mVPH42&$6#I6cFvr zwnQgI?v89`>d_6CMB9nyjkxYMnYD2VouIqlXH?PN?b{pB5ni+I9$uYzbA{qQxjwmN z=g(Ip=w}>h@y>-w7;8AT2Zot7MyTk~AUGtQ$h)3Dk|2uZET&Gd+{ee2F$iAZghHXZd3d$msnomxxGh>7?A-JCTF|2 zWXBCM2h^imATR@jxd);JeLg-p3O+YW5@z13$`iY{KpkmKUzU*i&D?fXLPGb8Nl9y; z9d<1X$F{V!bCb{?zbOcQYv|Ukt3Q4EB)9=Xvd$~r3}V9L<2y-Q&C^FcV7*H7Z*nMI z`8R0jHRyyH+F?issm?it-IuGj5A*+Tf7>l|5}0Y|T?RryzrWS`b|tc=q;__Q4w z;rr!tbxrgMeEaIV28csB!fwI<7nf%2-Wi!u)xaWkot&KLYHh3h8+6}agXM{_;AyTG 
zJv(Ue07PRsPd;)6lz!D}j8-G7vtjr{z-Vq`PUOT_{T#;vZ1z}EY{p9S=ibkch-M#0 zvMardUsVGQ@+U^_%9AJKp0sb*uI9&&AAw@t(`H&-uTO?EPjPvPsxx}rsY6292Dm=B;52T+gn_y% z*@&*7yA5pyMXk`Z!s><;NmfKCQXJ8EGh4aN@NFHnBz#oIU5^{$I&?x2;ae}UjYC!X z>kIB7NTcPKYC$;^%{8o^mwn_}vLbXoc{T7#YwJPZ-Z|}U)iAT?@8VKc`}&!Xe#=BXWyZS6KwASTLnAxkA-G0%B0Vp}Mg4EVwQ3>UTn+J@p zd%Sw@!jkzxr=LM;XQ1Z+td`=0hx7=(_Do*a?aE%a-?xnPn#+!;-+8JoBLQpJhRp-I zZrp2ut^!I?K`l?gD-_I=!9_aB^i4o#;e);E7(0S*AhEsNz*oJWkhG=%QYw}aaP8!| zbL0AS!x^-zho|*8MIuE*a?*O^M_<5_lIQzQ_xxBMA*DCEt&Lh#AzYx28_3 zk_t#v2eu{}b#y#hb6iVj==Y{1K~N|w>ifJ0Av-|0!YQ@8C{MWS;pYK(>iL2k2-3s{0$i*~eY%{;5sS&{iEf)TWxa#yQ;y>8#-Fw{PDrcyg-M zf(1RCHl<@h>96Rpw}YXh+WN(&TBflIF2<@{u3QU8Z+Jj%Rq4>7!^gdY$Iqp#7kC2^ zj~|TK>|V7(vtGS=MW#+YaO}BlpLB@0`TXC1t3e_HPa-dP5F1EAR~M00+neW9fy$4a zJ)u{B7t<)y+&68q>1venE>8JjPj;A@>?1goHS5=pi0NV&bLCx183f?Fhc$(+Ci!gN zk(-I;>P~;aj5x|uDMPrvXhBDDsP%u*xAOz*{jqe zrN%%)gL~>SI;@W9goX3xG57;29(rkDkWq2?35)g?T7i1p6?lbX#W6m6)b`F*K(eHArc^w-IzaVy;q%KR zL06v4l?URJSiC&#%*0WnYV$=`uU%W63hVQ=G0v(iS*^e^R0w3oZKL>V^ycaLh1H=c ztd@fr+qbU5T|gRxR4co2rv?R`cGl2FZEfv?r}e33QoMe6OH5RJGFQ)s$$Ah6rl zkI%%c1gqRHx06X_B54G}mFAe6Eo1c1$jfa)o^ew>#wHV=O+Vk5i^e}V*f!a?c2Ig3 zh;-pPdDYiI5}MnFd0A`kP1tnSYvIB{G)DWg?U5ZRxD8QqQ9p1GS3-2Q5e81W;vS3M z*ALJa>sUv|Cf4-Pj$C~2!2*S8WaQ|7{_>#Kpa5GV;&Jm|6bf}9r8s@ycuzOHekjA& zmg|82I-+~i8uhNiCsUe<>Nvf512jx_8TA`B6l8;?gYZcUmMip}NdfJ4#-ftCj7&&tgSjBbk-$qoCoAk%dF zp6F-cmX{tzO#rmR@vOa3hjd_*yLRvHgl5TySUxTl zq~X!tbQW@zgChG_bZhf*A-dZlG6megS-$61l&?WUG|(wx<9nM^jT<+f2VeqT`<$rq zW{h&ERlWM!#b<{B22rPlMnveCROUCOH*XjELr`a8%;|PU&oMYWws^bEPV<&6wH3Id z?DC3$EVLB>4|z-uEF8-*lyTN$g7dj~D<{}*8uM~@7XzTRHQI>ywpq?JWMi$7Uf%xT zPzY$1D)z)*Pfm9KYH}75M`S$i?p6L2BHg;xWst(Tr6o#5b8d6_?wZ?gb<4;)1{o9J ze*}qw(z6*hY#?w|XqG1({2kDxdAtO_dg(Gh<5@4u@##;TXR{7LI1%Y(m!C9F_zyt?vtyEoZw z<1YO)tGNZVHh{iea>cBb7r2(VYBE%PB29@w(Dt(~w2+e9q%tWwlk)bpBnzF#Ghm!$ ztIWZgS08z9`xdim5HeSRUgH^=4Rt;2%HONQa zPrkWsf%LmytlOUX6a{h83zLcF9JNN<|0XHx`bu0iwj}wRw}x1Um1Ra5|rW;W!u| zpsP3cU3ozm2QRNxr%uh^KWt!~nbmpsyjYS@FM7U*gs>Soa<#s7qEG2N_Li2yE7AE_ 
z&)}h&4#Np)V(6w#evDmHbQrP+B)m2b0V{TA*G3zlJ^q4M`;BwTj)HZ_N%W%1s>w>< zARYeWgt^U7YsDig2Io4r&c2R;-2fz49f`xxi0{g_4sb5ttkmX2;|061IM4Z%p5#(0 zTttDBhsHqY1!{-6rwg;rSP(`O0yZXfyh6Wz{Tqm8pHeo-O+JM#$ouPiI5cZCJn}BM zncnpN_F#4*B@O~Ly&*%cpWHB9Z3?I;M<8AceIug)P-;b~Z`_FC!v(1WNjre9ru6K= z=9$wlT8L(mEqtsy6xpKTrRPt$E{1UQix@QD)FVwuXp1pp#(Zb@3;ra^Tx(skmMt~h z3#P~K9sSBT)6zhz2r}Q;!&aRm@+ZaQ88d>Iky>}jT0Mv&9!-5z6eR|$0yb(;(>j;(MWUAK;rXI6h1vZbyQ8D!ghDX&lkN%>T?ZIIuSt}e23q6| zh(4Eue*M-m^EB&HmNkEP5Sm@TpH=1n{|Y1~_5P!{W*mu@#^KRdF?a>jX5{e+c#oZh z$ji%bE-9zbT8cBk?iiW6vuzPX*Pi2?)4=}947gW46rsEfaCa7cT&9!h(N05GexS+ila zMNUG=MoN!^gSDxtGK4Q9e4e!~ZfFH7&i~Aru1K5afix=oVA_j+(h!PbA*G_cH*b%m z`uJ(L8X|?9@fL~LJ@nN;+iS_A)xP0UAGdK!DIugTKm6?4zHXRXNsn@T! z$}KO6BQn;SRsx@y=1@Y3MdZb+N^Y4;AtDgO(8^B>&Vq8D_@p+>;|2yq6qlTIzd6~% zb@YAQb#mpAGS%UvNh3;(~Aj@~*VjpYY=GGCW|w$0p;ZmzB!nLC4) zb^s`px)s`FVUVdhZA2o_+D1i5{|{1G1mKSstvb!%tKt2XkLWdEgQXnF z51R+Iag{wGHN^jrElIBr926+$z0hc#pqDB$KSX2z&p2q2=dR>4wvHIsR>|t&edzW z1d)~5YaKqxh#_Xm zP#zU(%o<4B_LQ(|mcH!6r6_GGMu6|~hS=bUZ3ecFcuAeeA>2w5q*jiy`Z5R1;2H&% zT#JyXO>ahY=-`zNIirPmnmy7|ZhNRkqzksghE@9Hb?E*Pj>qEN4v)(t&YnGcZh2WZ z(W)kIQXQ0N=@CP)z8X*7zq5Rply?YYa-j#_cZOje?och#_%XnlLYnPQ2YeN!+PXMPJcw9E9&!= zA837#JoHh${R%X}ZEXtJ$r3to;)E47hh>ws(N@CipWQyXo~Eg_**z9iPwyS!56nRD z(CGKs)(G$Tf1;I-=g}qHu4|Ub)DoOb`zQi`%cVhvTM(L+_IHq)C-rN+jt8}ebUBzv zMo(^J8`eAk>h(H@Xih2MFMrK?`m|;Qn%^9-SiPUnu*wN@bFZQL{G60~e&Ou=g)4ZI zA~MMx+v^J_x)kmK`q3Q9+ud6#k8wMK;5VmrhYec((ivM)4EjwyTII8@1?4Fj5X(ZD@_|9%~n)2v>Gq3KVu zvs?U@(Ol=phA&%;Jb_!TmZ(&vT*FLUBX{|?CUCHuLPJ+l=0OmSdNPXL+qJ|XDC9kT z)wZ7VTM{N+Yu~4`Fq%q^CR2W-R5@ZXu&_ANdH7KE30}_ZUfQ8qVo73$PyTn+hVa1y zei?>$O`EkuFRs=r?_XnQ&ePVKX<<@S&BP$Q(!+-jDXzp&bYk*=_-xTnqbIsV{ZlQk zVvb5oR!9;+=Z%dIzJ07x7BM8?Ip-%jt1Da-ALL?2E=^U%a-|$-e1kgYq)+Xuv@St_XGv$0%xuwDHRPd< zDM-#yWwEApWQU+s$!YEN4#4ZT#`H#d`qBe3a^nO-1fQE_bqqq6(nedjq?G?GeBgHQ z(wmT|nQoWI$_?>0Oo-PVQBj3kxhdR9^v|BCPC!epB2NIDtX{8PJ+50% z^IFPBh{``P4Iz)WW;1;FKa0V<7Kuj!z%7uIMLXN+$e5aRV^jCG6oJNF@JUIb^#q^u$MFT(tWZ$JUEy~cP`L9~Ml4xj8d(vRaBG%h{yHbq5zDeK 
zj=8<$V8{l(X0&tb5C430>WjjXtmZ1mT~hYUZO~#vs+k9#j2;XV+7I{+bGIz?>#mIm zFQ8eAz8AT{;LjU_mas0ZzF$g8e_=%+-`eCltvem3jxm%D1UljXF3{gtqUo7LGGf)~%O&yGq*RNZ57yoe$@F#G+my`TJWG_>g(PywoL+&OThsfJ^oB=`j2NEaBTOQifwor(|?%TA9RXnR-0Fh2BmHv zj4hKvC;q6Ch<8ey8m(c;*X-W?tan9ClgflM?cY<|3jJg_rv%6TWf~Jei1dVur(P^| zp><&eP7!7_>-MUkAhisE5oue$?|GFPGphCR2byqydP6~uIXE2$T5CRRMXlcm<}TL; z6_47K2c#@Ly6;;~;IPs<1~WHay{h7@k-rI$t8_z~;~&i19oIJqorO6|AC#HrEP8sx z;^+*dTEHwM-YpISWh5AXepw>#HL$70dZ#idN>lr;6G!N9??*0Pmi_qjstulP8-*_x zu`?)yh~mg+@#!)#cTjklok2L3#@H!9A@#e@X^c-LKx!9|5cn_qN!f)@6AwJ!qEc0O z@Y}4Bas?+uj6RKj{}3dsaW;okc*;g=UGB}g-bXDb6{4OFvU$KkPoL?9VsXa?EA=$? zb^ydjO%{5)qJ1^%UWkdDK}jYovS{!50%=?TU7LXLDume0kd<-B$T{#b(4 zK50$Hj!@g-O%c%of0311ZNzfhM0#S>*{2OR1lgSW^EZokeL>~O&f>iRsj>=-GVDLr zA?Qd$>8|o&v_Aek-+?VR8(%o)I=hFdhhvsU8;UM^ zb8YGtD&d&c#-X!3@4yQ3c+PPV3Z;{7JxL_~umenH*vHGb`=H&A`nR}cj##t|9iRW| zl{D*uxu6w8kfQZ!ucIUIdyNL2D>Kn6Q$5^NPG%h!%>`0&Ek3h#G@ri9{MW;|t;Jr) z*}zq@Y&wEjUC}V%US0*xy|wK)`>M9VK?NSTWVKvea_ws>^Oskj*~RhcUJLA``)LpA zcl)b8@K9CC*YGdH)Pi!dL_@F1CCvE_R|1Wx}BVzMre~zmLD4mAz81mk~ufZw*_{o#}WuF*v zwo)DvuZpifWX_!SqL4=*BQ&-DUSK1-p;aLAN}VDyU~~vHm7e{dTes{Iz4A@Jr8$<` zOo(|~?mWP*N8H2<-QajubL_{S_Kge^At&--25w+)JbnLHPmC zPo}!VZsq||dw(e~08HKT3*Ge;c*6p^jVCKw|JMDhqVfrs7aOWHeyR_6?L^s5#Sqvf z%4W8ZOPoET3lrD>zdcm53~&^}$HDL9(W0b(bNMxN8q&Dw)2G*H)U5_JafWI*6vUd# zTPrPJemC=7|9Zv|#jQ-0jY0-IqL95#%QJ;AGHmd(Og8`Bk>Wu5FS)UZs;F{_D0qrq{O-TZ#JZVePd$32D5(iUcGj7IUi#!V`Y+`EZ2hzzsj{lHCY4G z@(9tNxq3f(8v1DC{NvROLccC4dQk<%1~wW@DEpVAap|$3Kp!(oaOZR~#*%BMJpTUf08B%Ea3JzAK=h(H=ioU}Spc@0&Lrl3^&X(LS?z)^Y87 zS{WMH_4LB-LGx$dJFS~V?5DGldZ8`1@7yVxVDJ`=6(1z+D&^n)0|)%!uj-cW_{-fr zkpIQp)AICwf<-I1Q!=;I6ja8yK}3zP>2pzk1OP|_LFUfnG?RP$pS}H?31j~0 zC$+OEfqIap<5e{{Jlt3UP)@EJp>8YPElrnrQ3xiwbosJ1M6`M&b}1?Ve_;r*oN_(G zzu5O=r=tNCgQ@28J{`5Ey!!hO9~?W^AEmnza>m%q)IN!YC|?TAPlh~?C%}vf`6HYJ zOX9N_gw&XRDUH3rIZ?|!zL9sL*End#PX!q*@FR{mxIH*zt;0oblhaRuvT9IXyBCdW zxmobuZ9P0qF)DuE-N?uYIC+gv;73q2LHo>eKJScfD}X0IoH|Lgx0X%T*tENR`SJ1t~**JFxji 
z5z)(m4YvHcC_}asew5H*)cjhS5+Yp-a7&IW(f%eU_*Sf-sLVq_E{-lln*ypx;zF3& zrK?vf)0_5Y_~)(Vn{3}zGdVHyUQ0`6WQtmW8-UMs@ZYg2CE}2-tQBP!(azE?-X#=O z%(W1o*J!4?Ho=1s!q?OTjXrVq?2323?DYX832gZgppVATsp%WT*HO11vjp;?N|;>! zvlKc$sxFOU6~$C{_weW>*JY|f{o@#QT1mm_w54N{&{?{{SKb9h@9GVlNthk1mmdy= zdWp{{>O;40ANPKdY6tN=y?&BvlD<>T4iKjlUd42~&+<}HOOIyw;fTTJ%CmU!+KxZ1 z%M&C{f#e%MXacxf`zUuKjbfp#e-tg~ChB2Vc75rzA#IN5ve5=p6$u`S&C?oBh(o8~ zAe%H0fD@?nk2)tB(s4sCueosH!XY<`PqWdK$>nYTn<3)L%5U_#@^Q#`-A0mG2`2}t0 zn{*ZCS*8cf5L!&9_+ zBC5bV!TH$$CvAl%j9FoyR)83k5u8MK@7){7&CeXwI&f%pOMAD`S-j~ir>c?g*(Q}m z^iAJgH_>DAlSCZi>Sh=^~u7g_7lQ5>jYXc#^T!bKtgFpmn7 zT6+9L$TA-D+tl9(v3bt0O*RiC?ZXET(Kl&*-}Q7<1NI1vOLcdGumXzK@%KUt`u~%a zb$+aWw7KnzeY0ani^&PxU`*nzfQX2Cj1T>Jdl66W3d{`%gqBHVPF5io@{6FZ#j4CD zU67m7Um{I!ZyfMtjD}b2MlKcWK@^h%sY>Frg+oNpFU%j|B=?abmB4KT2&uy9cHkegD!P03ZRN2Qzz%ii3;tz#*+vSOOFU(~ zs{<(5g|q_9L#|pGh!&pOYQ~J{;-4g-x?#&M9TWPL){bcxaIivJ3h&^*H>~{Kg04;c zAXzy%4*&F@U{KE8fJD6M>QcJtj3QYA3COk_rj7| zk;TXP9thVv3!=qCl5$nIMhz81#EN%8_B}`2^qn3&e8PDBnd#ZblP-m@#{KL==sUMb3s_Fjx(EVz}VA!sQCxj`IOH%2_oC-9wgKRfIvbU?VjEr$Gk zng6KL(!^DET+*PaGq2Gr6tQEBFZ?2;ZwY7!Yd`q$cd`EM;%#wba2g_DqC$=p1! 
z%OqP1es;IqJ8_d1i{0ts9*03)-LBOT5kS?dRmou3bLgeM!LK)cx!VL+wz9Gc&6cVP zlK7>nVdm0ZPXI7>96#P3hT^x7+T3G^daLwhFxaA&4>J>N>N^RGc^^OSnX5hsbqib* z1YaRH7SeaoM9K-zgMQ9ug=5%LwSP6c=SIt{hO;NO3Vq_m+0o zj0+P}Ry0v~&mE6qS+|yJ*t&g$#T}tCNhg$i(ka}82(9C*)~RFM zL~@gkj~hF-dCQiEt`2M5&-;@VNcY(vvgpdHjHfu|szmL)eE%DO^XmooW9o=pmGY-1 zWv9q@L_V|=$gU-8K+q)dL@t{4eLW%T8Kr5@B3A8mF z92^!7Hn;DM8kGx6PEyKg+%bxM7vGqeMi$Rz1OLX7HH2Fe5g=!vvd694+IOU_tq8Bs zbEizVPdxbfqXV^=6AB3=4DDT#*AZ)MbmRj@nt3SEbxp)U5Z%TkbQYN{v`8|t=vd&K zKK`BF(8`>= zMm^^3t4d>>ae*sQ1kY_3I3!-vY|9c@YzU?SG_Q5rI|l(}aKFPENcx3I<)vQ=_G6De z2z|JU&&g?+IQ9b$t{RlS65k-UHnFsUWO(fVjBC=r_H zmc`AnMLKSmr>8@&wFaB9G!p`K;Afe2YiQI^{;l-U1Uh@Un=4AE&C7oBL=Ir4r9F=& zwPO6f9yd$gpo^t>=a-_q@W&+TD1hV*!-f=WN zw{`Z^>C@e)P(*B}kc0~EfObc2sM6{JsiMdYKewFY(UUbLUVP>7K~dxRglU*&IvFH6(S}(#&%w#~aUN?2CJGp?=Jc;VlPp zTuGK2M>cJ)`DM+w1XBFYJlanHZ|M0@*SF}=qdxV?V6<{}*nqhbzP>b&>z369L~uajeF=Lx*80TBBrau|)gp1x8*ObM=avR_^kg>zl_=>UMaT zx;{+b8fGtlFy2}@zXAdR^0=_6{nFk3$Rfm;E88PIeW=JHHV6>=O_cRdL3y}3SLti> zh+9-stm_#0%ov{ap7{Wh z%!eytLn_$Nh*|;ZfKm9EA>{j#FKXiJIH$eSp29gQhmQF`%LPIYnM2l)Y{DjSh5tJhY1{zvgu3 zI!VsK*KiP0s1{yaY}feCty^g{81m@;+^Fsb;nAvqs~5aXW3A|NYdC!9mzGrO3M-nOj3Z7R!b`+MPVuB;ttL_;K7zje}LWIgbT1MvrA*k9t|53HiAav|;9_ z&HBc-FJ00C`bJ!4jUZvz{UIz!3=7=d$6qWUCWTXE9Yj{o9&wy>^1G-62;thm8D1|@ z{#i^X6c7dmk_rcA)w*yZeN@g%Ss?&==#_CMLpo^K5{bo z``vA)9m#|d?b4So3p7nz&-sUnAM;m2#|HuWT06Cd?d2%F+P~tb!{hE*l4`MBCq1j-E7~(uG72*^I?F-zB&{V;`Sgqa>~jb<<*f9h%SNVy!)7KF(5w_ z{v8SNL?@M$F-CVKywpp{K{#?w<^BR39GU+W@FQ*&Ex&C0O<$|muC1d$52Nt3*hyjv@MP}SX4Sw@=v z7ijk_E~;W3aM2&`npRLS)TZ}nWLYr(!C-4L2FTfrvp*U|fc7m@^qH(di zt?hHh#y-)@{g1K^k<6>MFx89TbVnj-KBru$BrZ*k12`f~(J=e&zd zky_&-9aq;50|$3M`i#pcU~;}M_f2~I3!#G)2`plPWvFh*3bKnQY)0EESm)n1S#5rp zXjHPH50@=$=47HuHg`~;x&H3mFT8fIndBok=q0)l5>cX_*0~F1K^F&BU7c{>7tl>TU-79?`Px>doNoSu}~Fh58{4_1>;FsjvVC* z(2xdX3^QAYL|FU{lTINN4i=u3&x;dDA}i$}z-(6&mg9z^G$FOdYFdY+S{J zh|Hxgt2%fdwbXkHU1s?P4qjRX>|D+TJ*jrD@}n6OR*C9(A3U)8woTDfB`b{YEwVM? 
z`Olm=BaxiLd0Pwz6%9N+g0}amRQgj7DFlec!gBGtZ;$W^$lN3VL^Lge(9+RJK$;h# zdTZoZ$UFWBB=I#SdG6s!puv#fNO_~5N=ni=-_d(XCe?f7`g948Qd>1wHlz!X$gK3e zjk?YX;8J~w!5&ej*ZvQ(evnsC2~sg_OL-G@hGSPBOAfL&)B=Ve85UmIY)CoUbhXe- zX`&OF2xX}ZNoW_rRUK0fp8r8?+&+{b-b|YbJ^mb{!d2{&=l*IO-UyAba1zWWq(0hOQ;byC7Dd}wd7KofuQkU3G?wSFKxx= zMDI{*`0iegi6L5cgUAqw^O-ny|Apq@+7eb`Ke@jut0h@Se3i-yj$k(wKRs6b>>&zM zoZ1)9cmOEwkBh6VaBdfNdORJ8mI4dccLa)Pp2kEi0^<=74^l>~OUuci63n*Kp3GtdzTZ5Cm>4k-bxxS$n0Uk=ctE-6jH80o}L9_nA! zZa-!0mfCeuDZ|Af052k@B@)^}d}C5-viobcGGCe?(lWx(26%aO_WSAYAS!S6lPnvK zBTb}?t6})Ih=@nM@BB-C9BVm91l06#^pSlTJ*4g6aL+W(`O}YnCsbM1@k6CHU+et; z%B`3TQXR~fDu9ZpvXj#z(}V|)9*KxsvW9?5G-0FSb)@3;{_&-aTxKYxMHmnL@3-Oh z&d*cKZ7;)pp1>d`#uIB@aHpilzSV=$8KI|&D#YAAl?AzkN)f(G;Fen&m&u=;Oo(X3lGv9)wXC#nio{hSRc3MEu)9l2K7 zP6y~w8jc*UKF3bChPf31P5fEZoPyPDmxiQT_#J`M{}a*WGK2FM&zQYhj$rABul z0D$Vl2b})2{>LW|9RGp3Na2T`UGTZIIc)Of&WT=CXzL{UvKMNK?*u?}#FHuB4$i6i zT>X^!(z^O=u1TV=Y8XFAw7n&7{YCJlIl-a9R5DSMn+JbJ7L+j+X=VzA#tjlcGpy1g_! zqJ)(`1*+DKlH_sHvhMUDM|{qaDi{oVFD@IhL5z`OZh-57J-5E{Z*ciWeQm<{@TqkJ zO**ja$uG#inIt>Kd|I)p{2ecHHP(JM@L`px|7h%Gh{Ow2Setk3(96qDT4F0nOMb`O z-)tN8i|mFF#G-O^e5X|qsH(uKZ<>_{)>VY&DeE>H8t>f{();}u-sSzvSy|&fPW2{N zS<_R^`gd>iX$Bdr=j{`$SP#OZ?IIabDv*mvyMRo@i0A$i*zxh1+4V$W#QM)K3rVTX zQlW|JEumy12AOFqgjf+ofI}nsn{mn!X$N7qq?p{cb7xA%B+p!M%0?WOhlh8@Taoc0 zH>H`#odoxWczT?bRX6bi7q@sD6cQ;>B$g+OmUc0%lh-g1s6OFs(FF9r!16&xIlG-wT15E^)-VG}5^Mkab`*)2&m5%^*Jq1G?& z8r2S4JHLHsHv+b+`WY4$F%7J%B5#kM`s0(VtjolM2+YXsFMNjvjI?xx&tEz07D^XJ zde;&Spuk-Sq+2^Z9{qG@z?wBJX!H92`cg+M!?4iuTse6k&J*oY2zd2nWaqRf!Uu}= zgk6|?Y9h^upwYHrSYK1EyU*;3xmhwUun@IblufD64+c2k);?lFn1LWe9K;0-3=qe9 zWdZG`T)lc#oRSoNL(sC|P7K|=*&*tzwJtq(bWf9vY)atxjw33@(S9LwyNZzzRSYz` zA~p27dJ16Hu?gM9X-p=?cV1>ZFNHwWr310MOD54z?j2cULZ{P*jv4o5>&<*Q?GWV- zA2C8+G+TLgkFE&Cg-+{886&%fn@PSbFu$I2W;gC^s)fsZZ4uAl2Fc?`b||~6N)w`x z7n8^O6EO|sEa5~5;{nx2=Ool|RBza@f!nvGCUN=xg2s{Bo-`iHZ`=rSw@XEiwfymi z#EV_11eqpUpE6j&2B=JM3$1>t%7M{?~Z3VOPd>9VH4SJd7pb%jiGh;@oPUVK!Q=2-X6fF5-=o!*Tff#e(5n&^uiV(bs< 
zEm*jaCsIT&q!`(-CDI))63qjU&`h$df?fIAyz~{P=ZzWL{!kLb^ z+QMEOJA7&Gr$Bn27M7M@j#VlGR<8o=PE3OK^wY|m*;r=jKzi2MWciV60zj4V7cpq| zBg4`m){sSL{O!xCOuXXKmBFBLx>(Quqxs$HTE9lCt&T2&g=j0KH3 zcMl5K#(GNdO52dIev@xWx35g2xdPypZfa^5{CB<=(0_d#tz7Thybk3ZNx2EQo`!8f zz9M^JHtzZDi4a~$I#;rX-b^rzsD00-ksznQxWuA~Lx*oxy>EB6H<-dDe%I%A-~O64 zOAic|66E!Sw{#N?#DlT<4ruD2`LvWGOAEk? zCG2@uShzRt?k^e~iNuw)qmYPJO1ag9K2uC^ddh4J&h*$qd^dLS>;y41)2zsW4Z5(2 z(&dk91yw@FjvenV5OG({-)s{%IL{bIyr^zW?L#S+BpOtlX>z-Q_mH%10M3EhNXnAG zJUlYEkboZi%MREjO2+Wq2inBtasRksxgl%y!Gb=f>oXx*Nc_jlR79Gvn1gy58ekEi zih8I-@#6h1kp@mNsn)bu5GrotJ#$??Qh;5?TFqrC1)ZWfppnap(t}GcQ?mp^`P%ku zPUh+9W}1(VQ9$Dc_k6jUi`XDkubRnSrXtFzOK!b7mOgmiqD8BbMsP`psK)}`lpAt^ z9UBG&z>rsxxY2!aaV`Z*C@yIw9c!ghg*hi%e>*}(_p!$R6ute&Qv<;$8))`7|F*Ft z2GgUPT3Q-0MDDK0fJ`H`oAmMCbZQ_|E<&LV;kGC%lS(e~mQ2x)rQXz9Hg`htQ*<{Z zj9%e3@MM%g_-k|f1lX__NPW(&_|;2}>aAPh0qcN2w04^G7Do1vn<2QMZw<}&6&)XS z!*x)bPD~9J^jAgzGVFp+(4DyqDs?iU1eH>IC+wi$9G%S5?gat-<>6;O_&S<~mvo#5 z*W0LDYFDZx+_H%h$K-qwbVD><$;xU7TqfftZgw=W`bi3Rj^>Ao`KaFANF7wrKlB}vKJ z1Ux{b#CLVq_Z`%btT+hQQ*Ikvy|UNzT^ zoGX)nLvH#rH5Kef1hf+2!M+bDuD#T5@KhkZX|P<{-aR|7;B>>q(AKU$H>s!_+0|m% zLNbrpAfMr=dO#GO=xWdc4uDJR(4lZQin8e-a^)pLLJ(t% z0BkQ4LqFYKq47;p8^=~37Ue1#79gr8@4i6?$^7SPXgLC9M)ze9NvVJdiQw6YcpnPf zp+U=5%3Z@-UO`JPtcXq6mbi}52M+k7{So3)q_xn3)a2qD0rgYf*=(kX;3Q-pvgDzm z?_P8JdXD|;P1oA}=OVv(1{dPpkcN%3~lc9YJHrniFwP`7EZhvQ6uYK^4wiM5(jOk$7E3w$o zj~O4oQX0d;IB{|CAP#V9&GirmES9NAcsJ3=Br~20TpjdHY}~Nt16X=DUk}tzN~1uv zY_ihEjmI46C0JnkBk>u+Z(D9#ZPOReB1jl-XPEkZKdwrdpZ43BcL6`ULI$$nAv`$r z$6s_i)R-Hfe)F@tY_`c}?b2kmAW%bbmuD*g43wxwVY|eOKBAyth7|VjK@`osJQpwa zeV4f(27El8LwSNgp;bteM6Mv|XkgyfG@Q`qhZ=7}C239plfQd8P{@SR^Q}*%23zLS z3}C&Wx<}vHVax=o5wX3KKqv90B1rI-k$ygXX(6fd4n1BDV&F!(C<+WIJ{28Wy$}d2 z75swHCYGz^oPT6A?D^SQ4?+Uxm_~Y8c|M!-@aXoTWlW3jiMV(JMSsj`7gNU{w3i5Q zYSP|vG0RXffQz8uU}@{M6lA}YW(iz|_WLT>1DmO;$h-KWnJOO+%_v;wdC11n-z^4) zM?SG-S(0*^F7m`C@L?-vyTF@6AUKr&^FnuK7qlzVwUc9GJ}Yjd!u> ze+SeMq~_q;x%{V?@W2?wGs=Z62_J)I;we72z*3_@Lwv8W${$oGGDOuWJK$zcST{%; 
z(om-*DBJv4R`eopXc>kiKsGY^`<8Rz>^oBSG5$L8BRWe3f72S;n#l~V6$W;(8ry3H zT{=ac4hp^eU#DM`j}J4GU?xhe@2vdXClNT;fCCj5owpz2E_hpgq_!X!Mu-2i^7svP0ZB?c2)2F|r z##W@7pRuuEm5N*(s7d*uJDJJ~X+Xoj4vQKZl3VsD!sJm#X-bs@@Cl3xp9W_mFRcy9 zcLjgAfhG_nyb(a4ApV>s!WYqSb2qq>uTgq?vzcvAKga(X{gH6;^y#ZC&!JC~$+S}r z9~d=><1qkwSk5KXLk%gg)W?s1>qeJ>_F@RpR}@}Xm(|VG<6gkrJ<5%T6aHmf($a;( zd!ngGFGr!=MRUA-kO+3xc@eW-1hnDMqppm!U$U@v$|G7Cpi!4_>@}=Z2?#_tL(c=m zXXiLP(vTd`hRh@I{QP=E_}69pr>;pz*Ovj&N11T1r2u>RoHTdp)F_`{6bjZltx#kv z6NE=bl%PUUo+jW%GJ8p)VLaXAH{TcXzlK2hZ=zcS0hr9u=iVS3otGPDC-r8Yd{`Jm zk(yAs>LB=3p1#F#d1CSGW|>=>AMT>WgKN@Uz@@L@rn#GF3mxr>y*5L5p$9re%2^Kzsh~9r_owYXRG=ZxrjI9(JHJt7O$BBa zoL#0GHWPs?FGDi0S$NTR7ePTjh6hqKrwZj%?XdYGS;Q?zjk?o)&tWb$N>o(8uhxGy z&UFgZ2lt^u8yH&N1fyWv)*%_O?H4)4*jGudFuwPnYyVbGPQToU4x0GO{R#b477WEi zh!DGBi25)7dheHgkxp=bu=`Bg4Qd}Cf+LibJQN8bwMg+Zm?`9(NTAul5)8$WwCcbC zx8Sng#8skvP+atxmw|7`age!vBD+kI5;+$C{+7&}10{hIjmPOBAmH5h=5%69>FH6h zqiEUU{#T?cYB;_BF8HSzx6)l>(= z!^2^Tf!Ni&@BV;qnJc(syk*bp4T%4?_r+R!+v%x>Gk3VgoO5dP_1}*u!9|Di5GPGK zmJgCSud-{U(&iG-(H8Y7Ef|{KHWhUhnz9D(e9qrkK$grcN4(Rz`m;d~Ky%PF z*Ww!Z6N$sUjbonLgHufA8zNaFkPI@p~eUu2YT7BN7YQfGby^Kyn17 zel~5F(2O#gf_n<=WBRHPsKey%FV=Rp8Kw4;SiwDenz9+==jQffF59>z@6i_xVRQ(* zf^g1Mc6A>Q!6(@ft>wv(;p)FkOiV~Vx|1uVP=eS371*jr9!vyvlV?oZRZQL%wMxzz zBU8IUXs!q6@7q95r??KU1P2Yh&M*!EI;CpnW`eDH4=o~=8orhUfr~|pri2b+HQGYJ zIFY(BE(p@8()Sd{KERoxGrPhyx4GHcg58oa2zK?Hk}I13PIi;?0hmT|VhLw5ew@sH z8+&0MkgVpQZy$5IUKJ?P!Oo{q`Z)&=EepF`%fnGm0oGS zQl=Ln4~TO>ZMX>=jvN@(5G&8)Q`2ihVFcl@S9p)R$iacParpiZMwlBFn`j6$LH;bq z9|P;eNPehZ-)XaFuVsD7pfssHPtKspT`oW~_f3Z@Q7ZGzEMA3_jCRWyc$QZYk^?`W ziWNMXQ+^x*4h%MbJ`+_P0Mg3cyLSsCq6QS^junaQ6j~MNj>?Ms%vW&^M(4F;dM~uX z6$F9%4*nwCfiE!yL|8)%+m`6{K0;=_$djL_qPjN`%UzSuDfxsqf z?Ce0`e<6b|^LwfO`;#{s(zuqUS&JpYdFmr*Blx5s$s`7?Mqazqa@u zZP&J~XzF2XlxG3qO~jBss*~NIspS8`L!CQ!u5~U2fWxFKqBN8nnmK7&;6hA@ACY~RN)U~3_k$5%8qq>j=a~sD2;YhkHXmx!7vOqjv zz&1BZ{`GZ2{rdHrTUu_pxzy0Wu_ITf0EraRSUtgKYE-XY-I9?jKmj{ArQ1rqz$F37 z7$VInKSLOv|Cm#zZ_UgQrAzPsGzJ0jy=14PW3@)*Sa?ix$=U@EyN# 
zDejO{8JWOOKpYbAD~j{*z(W;MnFNznpYI(L@?{gcGT3aCt{ zwP0al$9mzlaZCLUWI!%O+!~WLqq~y7M^@Sn zx@?s(D#J6yCGRSQPU00-sX336=_H{9TGS&n&&IP0mzq=-ojNeUe2bo$vouh81T2Yw z4G8n*{)~T!)B-vcZ4)Sdc;PmEZIRk?yN&(S@))E-U!N}-L$Hz#qFj?ir~Uj*SUr7>iR|2%sdqze~CX^OX3OtMzbO;D_h*goNY8rAz2VC0&us()@3Xxulyvfd`_duU_P3 zO)NzYz?=-u&2RROu)Zmv$Rb1G91VX%h^35ymS8#ttHt`H5~{nHzTDBFC^*NqXtE5l zf^xCDT|PVObQ@7&($X6*S#VIa zQYJ%>0HPmbUXAfh?(cYtR-|Q>Zn)&F_+=Ns#DdqicUMQ<()kw8hNUm#H-T74bP43A zAl0cCb4*{G5ig_XTl3FMen>ch z_80P0X}`tyA`s(g@CR3})RKa&{8w4ZsfqtJH(1w1CcG@5x~XAU$Jk}SvV(25mbPu* zzS$5}R8K_85|Kt1M$5SJwRm_X3O9jpn51)Xw9OCVqUg#FsQn)!SFk zpKGBRgBF~AyRnG6pghS{e13TE3X~1Nb#16&qw)BeL!dByugkQ5QhCy}0r1j!uU@HN zoyFm}t+Trx*$WF71|B|q_{S>4aP4nHTOj-q*CA#JV{2WR7>TSXjppIg?M19{$2so& z5HO04?&(`PBT`ukY)@mml8r%8-L=V@=&OGM5L`bjEA5rD7bi#P`I3L{>5z^zm<$~B z32Clp!p*9&(?r=Pf=Ql2*MnxtCYqoP<$A7ig%#4NwT=Z<{jfiZ1{U*uD1nO}miXL% z$8e!4+E-k{Q5K@+x3PWP=z7wZ?dEbm1t7YWPefa8*~C9uEje$E7t~Z>v|R~P%IREd z{`~psAb zWVhjeg-3B}c9%6FLd?2JYljt$P6Q~4T{kFdjQC-SiUvroGVwiy-&`muzz>J;14M(1 z=V5loHg*|FG=jfFAMv4yt9cAUEujfy;2m-$;|ocW(8_e3lkxEbC_;$e@}py=V~@Mx z0flDJu%VtwWmu#8+rpxejcBkU2r)K0&wC#Dg6o+iu41Hbh@-&J9&C{);DUo-j%dI0 z*QLI&f10pmYFyXIlP$?jU^*3|Vq+QQL82ngX(F17Q=qs}oVbCQh&5E^7tuG)?A0ry zcbjS5JQr%m7Rca9x}Phgd~*(z9)F%z3eawOm`$SaqkfQ}gh9!63bSz&QLUot%uHT|B;*5(-BXu#MTb^9|+}cN=GeePn0UUSmp}~F7|9ThM_T7 z=V4&30>hDM$p-Ai!~&Us*3iV$yNN-#8}J|ptd2<4Yz-N*rbio2DJBHg++BMaV2}NV z!7k4^q!oT3w$A}#e?|_@C%~VWiCV0s4b&F-x@D1II!^O@L7FzgLdqYxMB^=nd`gNh zx4TJ_1-|A$^vU8y<-lw0&+*s7){y%T>Y%Dfg{_e*3I~IRur4J#ViSnxUV=f=6iHMM zIR26qR8N%7(f@9=R=1|{1r(7SFEa7`K&wP4{8ZEhq#{sbNF~Kwz=6m?flEl)l|Tb& z?$D`h*_`rQW+<827KL~4rLk#$D-8=jV48^sY%Q;U)j5=;F&aPm~x~%!^_;-t{Hn=IZ~!Io}tZZ z%iKkfuoDRhcbqC){*N?1G}+aud;=*|SA2i2gOGdVJ?{5eY*n$q|EG#s9W|~w{IDqX zu)PQ9i%y-dce6PY6~H>hAH;`V1?K@2PvVeGK5gMj-o(p__{F()b`uL9rGC@;chCSM zgDsD%Xf3NW;XP~2C_K8;wXpgQmZdE-wncZ!FsicN&h5Hcw-fta3#$yWv%B=Ty5pvt z)hlxQ`%GV9*rw7y9V>mjGyZ7TfBL>TF}+>mHD$XZnOQ`A7)i(}>T8NSaa6}8|InjW z2ze8rA9s3L;?x 
z69vmdL#HJ8p?XO4+GC{t=fmsh^!f~N;w)9|-SVmF7${9R<$L`Eail3^ZpY_N^8)V%ETj1Ya% z{~Z_$RcGt+Q?DORP(Kj+zdRT4cCKj2W5Wc;IMaIc5^+Y|^z`&(2#ln=fd&E{UZoPM zf*P@AvR-5ukHNUq$L@kr>wzo$p8yZZ*#{A?+_KTeIHatvlX=|>w)aqUuOjhVqS)Z8 zTFopb%~E?{1-59~0b4zAi22z^SZ=9#MF+wOCHfKc$GvE$Vlu-|sbbAK41SZJ zFY0SS1vIg2Lpc2e$VAqQia+=D%Hi)UUv1;U_ONhwhNeKZV66bUMem>_o>G@?WN(58WcHa=t z=$cGwvmoW7p;_hJs43xbBp&%0 z$*M*W8V>6;Mwmm7R)XO%?MqBE(qmHlsVL7hw^2}mu@tBVGZVQM72XFwO~;h$z3X>2 zuhEkfaZRGGWa%A%hcG@3#w?9EO}30A2@d=i!Hp+g0y(q9VgN*V`Ps83ihZ2G5;Vqf zP)Pxtu~i;Y*JbaHB}iN$(S1hp7+8~7Z~X1c29an8mIoX!+H}^Wg-JVujCbvjzFd7L zmKx0U63Q+|q5KXQURfm2OBPASDaVc-Q@)TCCM-!#(JA`c0taFX;*gXGUdUAAgMUi( zEVQi}KFQizu3DIW(HcB((j}42rE4orO6ncNacI0Q-2DtdpzgEc$CsJ$X>*pO!0eO+ zn#Jy8%~VoIkc-;xl{?`#R$Pmo3}S)8cd&b3)sIvZ$$FORMAYttMyS-M$^FU`*p+VO zZo*s_DM6UY#pUaJ(1WrP!dSwfa3ePX8kbZcWq(V9jM>pJ2Se)}#gf5GCfq%~^RF5v zl__0s)}feGPe#ErH06=SKzIQ8b6owzQXM_%8LX5VIkqkq`t=a&l*I@BS&q}^U*GSl z*lE+JuYoy)ncIK&Gi8cnA506K!QMtstIh)!y3uI>NB z*qaB`yuSP2A9FJ7Mz#!FDH@EKhfT7gGM2Rp5h9c^W9CpOV_4>?RBD-ah$P7nTWk@T zLMfT2vdu%!>stB#zUMrDJkM|aan3#twLZgr-`Dk?uG3Th!6XVU-yOWS(m)`Z-QQ~! zvNxbW79S>W8Md{mXvtZJA~EUIX-M&xp0KE3RUJFl0n20tswHcOm_3F|xv2lom$a4Ou1H7V3#0jWN#S~;hTc7{OQ1!%)9Xd2@ z+Vqvy$`^w})V_Ty7B4O@rH+Ii5CZ5K{|~&MxyyMd0`-^)(T;9FoFP;D4w-XexJRXl zkToS$8rknblw{wankDlG20c!S`203mrF{*Y@7}-G{1Y7{AvyT;rfy9pHwty?*RLjs z-T6xd5@^rgy0!7`ag{3PKAofLTR6FN=@F>-dI%; zF%YV=y_+|Dl^u|#=kKg68GM(Tn#z}63``zaVq+p5mL?Z+Ad&fC8Gnpr|GLQWe;qmv z-I){hEq$r|K?trpy%u%0l(%MP>L+RItnI$CmS1XLmN+BZb_sJdv4eGo@iZrd+* zamub$v}4>5b+>*)Y$iT_`qcZ`w@tI!e14l0F<|E2idHjhv&K#)?R1DJrRY5T&W2Vc zCZ#9V=64Tf};&i8;6^rt9T-}P(=+V|PDq402bTFKC=V(PE{`8HE zcQgoJs4?*{|A@p={!Q8R1lizaSV4WPE(i}K)KT_z6g0@`qYLh_8OfMh0A{n?;p?o? 
zTo%%59NPcAads`CZMeT+6>tME_XR_dg!OS9+x<0nna!O>sGBsDlAxFnizST^pkqw#;A0tnSu|9cBpux$d-qB)7 zylN=MHO)O<1*k*jRNz^ne-BJx>kz#VPqEtk_=+Kt-~+WQXFY1F6|ZE(I?8{PheQ5h zXGT(4MQ`^4u$62iOw0o(>|*JN^A;(jSC{rvAOGzMzT=vt!&tj+9*TLLapFS)X6heU?DAUr| zVc@~_9Zm}v-Kq+EPTbHRML!-FrtXJmk)xYie0j@i*S3&A(e?*uhzMPlad9HX^{6DL zH>JX$V-xB~a1Uxx&u{G2BT$uZ?l^5sWar;gQ!_e7Hb!@(5D(@;CxF64=3kL2roN#k z!7}&rSnIZ%kdB@}GPQ$!4HnT`6t3^zwL8RB+s&XWk2x=t-S;a6%FDkrYBHbYJ~jFc;Q4j-uEKGKBW?#1^R(GIM%uS;xMB_^YT!`v$zRWRhxQ* z-+aOuA!@r~Lh(Fv(~2+Dm2@4Fs|HlP1QSR#GM9?CJ&^9ysb{G~+$WTtF8%Qvct(YH zFP?7Qw9C|41nh)|Y`=FHHTUzl{<{u#IR1_By7O$u`=;kn*GRd?{wrhY;P;|lvTdoj zk>ZkrzSi_=;%VV6Z+;v{vM?;@KQK1CaD1{gRbi5oq_1p20D@Y<0H2qnWXV(5v}teh z!M%IsZ;08S^>~Kbkbxuz&YV}N+JIwcRR6qba>d%3&o!}u)26-StjqaPUtlo;_8 z=F2C(FCZH=a|QkUs)IYp-R~FP3ABBh65mw)F);mwMW;{bH zSZvZUiQfxQE`7zPckrW90$B9xuMPspJWG9QR$5uoYpE4v>IN;8NN~rDY5c3xXS$DF z*O%}I%mQWN!?1IQ7Q4=_RH;((!qTp^J7}E`exC1jfz}qRIDR^%_F_2nP@`lHcd*xp ziQh_elYm6zSm6z6dQ*JdNXRZCrf;Zc8DxUXjI@eqj53&JA=yT@Z|Ymrtt&Z%Vxget z{f)ZyS0_rPByImB!`S_tzJ>B+B1C`B@O|;upQ8|o~A8U87QK}&!81Q<#}ae zX_KK|?BKcx7xhSD!Zs74MXg|V`bOsWO+QLD1_6d-KuqTs6nIt*NAWU=@?p@CF{}Jm zlz>j3JAeMMb|%}Tc!FinS7WsGS6*-c9d#I zGhxSgjz`{=Xu)dDx5W?T21~%MXvl!0{NJ<)9nfcvRrUs+p45Y6b$5@_JS?@*1CwTB z=brv>%ZD%2H*#VYRu5w4Agbg_69@2GwvqQk#P$4Z!v~$4UcLOOVU_B?hjc^Bp*@l& zR5dG2NoV%wfCBd*s~Lsuo~YDC<{G8x%Pu=h%r1uCEMi#-_5;jfmgTmtA}$euKV?EA zOdApf(X|7!h#8zTj}~nLL>J4JExv!)s5Zr(%k3p?hk8>OgrhSS29Q`3%~<5vlZ>Bz6DItQI6EB6Vp2G&lJ12gx6(?-8CE zrms|jE3DMjMfeL)qY7F-h;R$C4QOHU)eTJeF_{jZt&aRM=9p##BSH^NOIu)*T0T0$ z4S1iYO18lo>fw#!OH907BjYX|e?0-QIjltQ3FGO;XoO`n4UYT(8yjoST|807ru6Ca z7m`oM4?oEJ63hv$j>t#9@9#gvfh*?M_we%CSa1SNPdu}nhVdRtj_1a|SbON^v7<+u zChnN$_=m#Qwwp@B=GZKPIs_^(H62*Y*q0a+c7+Vix**(@L&-S<)|4Rbo;@q`n@0o$ zgoEry@h_!frN7$u{BM=}qrH>a$K-BoV|5f#>KHkvqtJnWXGrq$>>q@V%A|I&lL&MO zOC@p~=A%|);d_mm8Xdt5p;~gTQp70!d9h1eeg2+o(VlU&2_F#^pJkGo*^lMA!=$GZ z`uF0+kXHvQXMnVICrV@r8ASbtS{?W(TQDFGL!Qytfc;6@f~XOPKJIL*Jy!UD?`HlQ zL}&Qjs~=BO%v8wF80|%5bpsh%MRDKlP3>kw)N#(AF5?zAItD(v0dYH*a zU*leHxyMk_wc 
z{EUB&fEer>eTDzogAIbT#)#IpmLHA$zVfz!&m4*HkzD#sbn;U6v3*PDMQ&L1^gIy} z@Ut-#zsTv0F3J9Hyn~|b9(-QiOqK)PlA>JM`KNwnDpau9ZBr8ygclLc`%-#4jMy6& zR{#Qt3xJb6@G>lZEGiIy7KzbqQ$NggqLlD~jac|BHpFE8!^iqsP^*)yC;KE5TYSPv zn6e32wbor{xtZ0Md*-hO^(Ri`SUg4Myd8 zkXDLqORA*M+xMA;D$>Da%c{^|0J{m+hSdm12lWMXwdY0N;`r$KpaeHdtpH;O@z;A| zN|(`El8PzfTTL}dP-m@&a~5SB8MZ-Fc(&Oy{5UqXj7=Vm(9_5{e%`kB$Y({? z&d8?NLX!kfntPFMkM{Qu=wuj~DT9Gez387d#GZ*d=VzAGy{NjE$rBT4#DZ_H+oS8i z-7oB=d>0O%kz>bJp%P~gw&D@+TH{o+mNWBo-mdu{>okd7=T(;)LHyX9xS}MX+F_P_ z^i;{d6g%LYf5!nu$W$31eD%tr7GhKDNy-oqxvfuYKrXOd+VU$a^;vNe%SiO+I*n*X*MJ2 zWaBpi-cn?WiDVIz)*sb#mG{&4;7^jnG3e+xV<50{iiq0LxQ`e;TAgq={O3Lp1IjW% zdXOw>+EI~DW9(!FQ1veJzpDLtE3$b;*8!w?E+Zg6Qb+dD+`GJQ%gu~)=X$*Ewj@59 z7i<|g_Tb_ZC#~VD&LfIz1&!qH+EbkpA0O|T3$`tFo(oDfCVi~{RM`ECq?%K^<{Lk! zd)DBdG+5`5n@WHdNCxLe18T*PlPBj!ej5C9JnbCn&Nd;nePR}()Ok^ZvEnxC7RKE@ zy|A0KAzbDjP+A!dLPP`ybHcy#gBDD()Xyx{1VBqkmq$~_`A*~QK`rk2EjBLB54cz$ zfKVeG8JgVlap4S7FQQk(nej~;8XMci@-Bs?ZL+Q^jUNi3%WZ0owGg<) z9=Y$-Q_T{8**gY*InT{ z>~ZUrrJZjKTChs)f+HtYK$#djthjZJQ#JKV_#dv2o`HVe)_dXXZa7#q7p!z^VI$a< z#&@dh5xc&2(%}23qknz;A&*p=tpp*`+hw-xK{BQAF#7t3yc3VRiGTMxv=8u5uY9s` zadFXPW+f&i9oVo-gt>e)wWTSFJ&}@901AKzQJS82jE!=5FpI_&zZW+PcuOsog$E|Hp-HXNL01z{^AI49gck&@?#CagS91~PdM)mva6 ziXN3ZmG76K>f--yvt_nEF<#pQi4c+&jvCwfH~Rj(b(;vE&tJZr;bH7B7VQ|9yj4QR zMI%H-`|_nv+^9q$RPg)acoRfLB7^lm|6f5CM6E9IB|;!EJXibX@~EY*+n$f>q!@d+ zumZ_p`VC))_k_KJ?rlQP$e11yap-aRXfIKRlv8L0{OzY#5NH*JiK3up(9CWpPcB-f z1B>8BWINm;%O*{FC476$DL@&dL)6^>Xg`75 z?%%&pi&T|9kx7eIbAF`GYdy$dQF?m1)gjIKg%|ZBv67JN^XL)sY&dZU<(tp(3o!g5 z;nG=R%r^EIiCpUf-Ez$5hX8rb2E6#ZL@-DEhm2YDxLM1YA1O%p)`-5p`1-`-$1k!2 zo99t12?PVb0?&7Qgrk*J1YY`A>BV`YnmoR39~~bgcV69$vsPhf#i^+uHho&ZriXfm zxtW<$uVvd-m>37R^QMTWNof&rzxT$b!j%_8ADsi7(Z3vu6KJPIVoRe#$tr+3+i6Qu z()(sK@igofVC0cBX40gZ6dDq|a#ZD(>#Lpdt?8t_NNiMF-<3q{Em`}?;c~|s9zqEy+&%xxWx5q5lG4qRhhX-iGX5I!cPE=96Gl3xI4vP z|BN+;U-~>c?Au7!oMH+%f^R0~W5-X!+2k?p6NklNn8TogKX)>yNOUYYw>WVyzz6nI zsawfpF;sIPOqX+pKRm>1pwERK7}w_vMK05fwoU!ef*lzMF^E9qoXU+z2#x&X!x$Pu 
zWW@FlCUd|u)9Ng5sNfjPLjQwm*3-LBwBg9IL^S&_WGE*N8ecDc)Vy*`UrrIwe01rG z!dV=;XHTKSYZ zNLbXuWpJ-jbqQSo4zl}A zva0Cp#4F+P?o=+)I9-EQ!;%}@XFhV@mD2T6>POU_F8ih`W;+F-AB>7FJC!U)jxH4cmDeYfqQvZyqKamB+Hj`AcT;;;+so8YBJgU5VTs zE`$;nz-IcEuXBQfsc$RsVUJ#d_8LMEedUE{V}Hg8r!vM;i_%Im-I4zx7s zD@wP|{qRB)mo_==OI$&>4}cr{I@Dnkhz$Ib@e>M(-3RYBR4EO*1c#Ju8}d#eL-f)O z2k1{6PElD&p#qVU>&43cYD=la0C%dntz*8(GBy?&pF%oMPvGRJV;s5sH-FMvf}k29 zxd1RDR2f9;`Zb4!_74Q4qflc`reInOoV5+|PAxt7Y-8<|xfl-zM`8doVB=AIU#-hsLl{km~(|n>bW*-fHztXoF2VVzVZ~#|TELB^w{;~F4#`f;ep#SEU-D#sa z@FL6)^Jc0zr!|hvBRerl^*4&*vmCyPf;fawIq@9`V2XNyTgJ$(As!7kyd5FK$#%|m z8^Q!9Sp<}#@Nanj+e@y0KJdVtqMyBGAu;Ay;P~_v&YviQ1fc7yYGM_=NjxNkghgDs zF%|ny#6{#WVJA<1bC~}J8I`EA!5IMP-^8nmM5Cw)D0w{)P>Y?V>yiUapZ4b&&-wE0 zn;xQJEhSErcm*s+XRcnYnK$o!4JacoX9=K-6E>pc43zRFu^E96nx4 z4JO*DCo#AvawgEbdR^L#6PZ~4p6q3l?k7jBYZZRe1r>y}ON=-37+3c_l9n#>J`X7V zu`o;bhvR4i`t8`>M| z7s92igH2!RCUur!KL}j&eqv zcNiO8uEl~wL?=U09sGAti&CF)xGH26ns&6c&frU}C)@;R1Y~yNYM(HnsVH?0b1O2K z03s)>L{nfM7I+Ul{Ls9mty;E}s(|2>%<>I29nqBw|1R{IAV~xcgn*!6Mq`_sOM{2F zdE3%~G-^Y|vx0!7QIjTBh=^`F-Rv0)Oyqw`I!bcpbPSO}Lqu4J37}q~Q0#Wy0ivjI2Mibx0@dOCn*TPcSFgJA zS7PEK=F~MVFtK~hNcA(U70b{N_90@^fg&Wal`O|UxEj%0@Iy$|i2!i4pJe-%z;s+t zpXqVh{%~Aw7P0NGs4J?~o1Q}{6FwA-BuPf&{f=_Ys)0usn_PJX#BfhGfYE_!3MZZC z^JLYFn|QtGaw|}_Gs5;9BoT0TIhGsUcojMSp2rwPXUZW-H!h(k!5?4M`API6)J`Fl zUeoD}9_>g#wT#?E(T;t0P?XGX?N~$9bCpE zqbgzT>f>3mvb-l8iC&qS+9hKPSbV#{K5j7t*3CpfODz5ylIOSa z@IL`Y9`)A7%^9Q-#RLKjWPigqbb>ZVq)!PpHZt^RKk{1?B0QsS9)ld98TAMp+M~xg z=R#AB4b+w1&@;*?A&h}W(jLruIJ*i589_C%1s2sLwY*uwhP&^K$Hz1dXk2hiv(!y_@1O61*raN3J!bB=}@HC?&(7G!Z9u2^C^ zV!bQAU>YMNB&@2$-3D7x6(z_+`s3&ThE1AbXw|Lu%qA#Xvvmr;*;Zs`ibU~VE zKYq8cI9oPk03RN8UmNBXi;9g`fF>-A@?8|d1?4MRO+4G91{YVi>%3$?mzO*&L49Mt z4&w$5u=EGzV=73DJ(Ydd%kZGW5$wF(M4 z{bEEn+5`zZHoxd$`>j-oY%Icfh7IxPSEB*YZ2o@1+u3=s`^8mj)>PSjKY}PN-8`&@ z>43$w>(yZ3zO!`X_0^cds`uiOuQR>8R!V~VBXs~|fEbb(h#~q+RzKU~1(rlB(gZ&?wdpP+~VacR7DxRNp;z06!ptR~`iib8fOH>Wa8*8Kky|K`~c&xRkeva&AHDfOO!_=`Z}umv@xad9gH;e>Pmp_CE6 
zT$T*4qheP9wvQmP=@(82`3lTd{@uc-_24%QXfm#ko)_&Yv99SWUU%8l%qPxvy2E7- z6mHTvZby#s8dn)fMEL&jH`;k63s5WLok6fKsRKyWmePJoLkdD`-rzm8vTNVIVMfk2 zg~pntayvm(B(-$KM2ViH=QW(1T3i{koOHHoKg^?-nmE|aRo}Y&c>VUw8~X9V+T>Lo zjWx6RO_*4zwCXFVpL;OttqTE@fQT26ECq4SaXPpL)d)`faEmU%%qPzzz~PJT4e;2e z@lQ@^wdP9BBE=|>Z%W+o%BP;t__nPN^?UObt)G;B zM&;=@rfvBudJiNSGAo18sQl2_ooIITU|3zS6ez4v)JD6!}Z^wLeD?OAg{34?>tYA9b7Pf2Brc(kA;CZ+kDG8i!UL zS(K+HQKFSoB$?^+1MndqWO0SB*TqRq!j1#EQu`5bWy6wZ58l z^TNinah&*?molST`ilBSbW&8*WG;os$;pKZ`ig0Gkl!wukqj&@wr~q{7xW_OckUz$ z3DO@KjHEd5f;c1<9jJHQ&}L#APJ-kUC(0@KePrIR@Wxb^Dvr7E`R_|grZ8km{)~Sk zGEnNqY15YM-8+ZAT8tmgM@7zA!QuhwwpzLWZ2Rgv4s3e4F_^2>7H z$xORZ6d}Q9Tdq8DqNAW=^lr>lUds5EGg^*V)sU-1c~Q@c<_216#F#O0)AJ;AkVVKo z%(g-;p>74iq=Z0571AL{v^|blaS^k&&OniX57DB_*gwt#{OvV392p42-yemuKlbN# zFk{`fSR-=>@C|TA`bl`{XtnxhRp zMf;AnwlX_X2Hj9O1;$14SAX$YAaXcuh9<^NBfnGfeQ2;Uj2%kC>q%|S*=2U}o>9QQ z|4$yB0(_%|2|_dDQxXqssX9@*al*}bW3;;On6Y8rq>(3~DB??i?E4`PiX(j_!OdXh zt37`J%?V!QkoHH*5g{p7CYsIj9IkC*3=CBVJ78+IukY{1&JC%TrQd$>6bN@K!lvdt z1~vj*Zc=I}MrLL)<8F}7lu?MMrT~^eAAgJcT5usb6|XUADRXr{HQRZhC>SLZO{%;* zw>mt4HMOI4-lVb%UW2R9+r!hg$h^{N}OR3SVWi7cc3R(E>CBSQODId+A$YOCbYBc!dM zNSIpeg5hwOk~}PQs2=XyO_Je@)$VG`*ok@jUnON9Thcgt0}rr5aL}#d%j+ZEJ(+Qa zGHLh1(x0eB&H*!FYS3NXtn&Aw9}odIUM6R#L$j>fAAWwzd&5AU%ke?Dxxo*w|O^2l%y&3ddy$%W_+OIKqj*gL;5>$Np$H%%4 z=YJZ8ghl3ENbLJVC_`r-gZe0!Nfk;aj7Elp;t^F^Ty^$6JUSdJyIq)1aP~_g8_dYA z2)I$1ITG%lgd0R@vX~dOQKwE83l=Q+e&2b>C9?9+0m>v8jvvI?LMh^arx+8=lh~44 zoHN%D|0SoSM6)4nMQ=KWgH9#J&29^Q5B|irs|-HhTD6gPOV{cD&{IH|j!nA@bmk-x6;=WTJvYpSI3CQ5@(;^mWBkdn*rPrMu9b|*HRfC{kf74??6<_g=RQinmr$@~=2f^(5||0NAuK#$|cmqG7K{2Lf@ zJ!#pAr9(g-g+NF3Z|SA1Mpv&~Y5H&K_g7vDu6U2cA4Rzj8{60<5R4zl^y>8M*Nw^c zby~0>jTxT_8DIod)r!LZt|EfZ;5E(r`T1FiR9TY#6fxg%km~+)@R*HCueu`h(GLEE za#2D^W6}CUD;%B+fy-#;@(MhWL@fkyStj44-ew1P!QsNKz$-_rjeVo$9w(;MoyNbi z|C_Hu5Dr8Ur4L!m}oBzE3zs?P~l5w%(cr$^d+)xDnNy@8!bgc8M^ zXvUj?Hx(n05|=IXpxAtYsb!Fw%pLo$u93P2C}Aa_fE8P^-aQTDz5l+8Dmt@aUS!<< zeJMqce_-!cZ)D5Qya>S!=-UG|6tu}ZMiWMZ7KlU!#@c+B51phiiL|H=U$+uj0dBgr 
zTK|}e`U7LRs{@a_5Df()8W+|i648<5T+?4dhRb}Dre8g@1q7$kG~|P7hV7h8&&wtS zsIZ9MT&P>1o`9%!ve{7lfOeoX^S{v8-V}Z8`HT#^nT`#02c;zA{}@RxNQMig0xMg1 z`xO&fbPF_4L}mrpdGv6!iMBT&g%R_dp=73hShH$XCFSQr*q%z_JmAcf0o5v=Iy-6@+sJS-K28A@&5)Vd$sa{d%|Ge^1jVM1UsyQs)$b&v z#mAgi`M1Z5LY9}Mt*rr#GF6mJhVsJGDAy3drkYRFA5lMme<)TUw!?G3^yp-Wgc@yK z-6zIO(r6I1v~6n+;BkiWOdH;u1Os7YJ+Zemh`Lrbkbo{+h<>aswRVSDZ|X_s$b-n} zH-|<-&=qXh<**C&y=dcU0Y1s4qCM$tUw0p8iICVig3t(DKOho*<;<&~g>S=YKFqh|&sFqefc!=yfulhU-UxIorQ?78T1#T3b=80LXoHCmB%2RiTRa z_8J;JwHO&2r~5l(5fXaLONMkebbhO0ocQ<;0r{01M9m+Qfg} zx&B`&wEi+G2O9Q`MbDhdWmh!m@!yg5gUeY-^PX&IVw~`hwJ#;OOVml`IEdOr66X+P zq-Lj~i;}1pFt;h_r`V*SQTK4T1sASrZ{P9H$P%J|d|tJseS^)G(WEpuy}(<^jT1Kn zRm@ZEwAApUkrdv-tV)t4awcc=kCN2{2splz1y5$tk|h;nrC^(3oT)_X$b2eUr}S&5 zvSru^AsJ_Q_7VtTS&??afNUxP6WA_qSJhvc#wj4Tqp4;FZCRN)mn!Chn2<+}D)h^J z39d>0C2a&&fI=B0hI&>=O*)K2srjlvTG=X4=G_Sg%glgER-}FttbohS;7ySfSlOj` z?m|%oHa*zJ67e7r@0C!9a-bMfDTu~XKo9!;#ivf4@_QP~mLLdndg+hX_t?mp?yXv1 z%uf2O?HSoMA~z14`7#K?25%`PqO>*62AEhjB}c4v_0h+Qi)U2w5YMB-L=PyLM08G; zG!e+8{1Uevy#Yd*^`huVOSKZ;WGeKnyrzp^?_O>)1S8iE%G3bzZU>O$Y8km*OyI?l6f6M zfpL23glg=0k(~^}duJ?aW*Lcj?oGRDKa*~p+{9q)`F!7kP0U)MNqXR+b_`VNdySZtX<+{2IC)KVzm-RHJ z8NE_53>G2*0C@?4gLboj-+^T(T@mJha^kyj;{Tzkzsa>3>yTr*rYP>2mu!1%7(yT! z#tC-iWIs$Pw9{ z1(skpru?f7e4q!=>#vF*MElQcXFn?v6QozGnvusG0pW3X5Tl`&DJENN(`}<=C?6aY znn|L1rl@^h^?WaCABuhuxe(H884>0G?8OTiB*2V3X#}~~;9hGG!7#k%Y-Xk*MrZGN zjbWiO+cHHoDeoz+yDnXZ9zd6sst4hEY$=#Axc$ETC$-c&5Y{54G#7h|K-XXZ(d^1H zqAmvcbt5)RCe}&y?|pi~d|@5z*#Fr0;FFvfL~!#$Zmti($!f{Em3O=hL?H;sH>y}- ztiZxY&})JH_n;we;^d(nHGcf%=yoWG3+7LnvqSILT)b=7uDC`<8aL!xlJ_Y<3m$JI z?w^fOME{#;QuZs2&I+&5221Ll39@5b?<@4JP>p;Usm7ulmKCUjHQmR)TE*12EU zPR=t6vXUZESNdH*J@UTqcHdcgazN63QU2%-ORxZ}99wh`E-FGMYSL>-D3{a`i0Af9 z&y(rwty&oa`2F5(L8`u>rn{3KT!PU&JUoV)yQpG{9`D-q65%wPk9Or<0AJxb*!Bsr zOL4zI^%{LUm-Pe^<+_K50c!l@aqjOK85sc`s(KVdMXteiTqwpNQIb;Cu(OfFdGt5#BokY-? 
z3(_pgVbQQ~Is8;s_985HP_WXdfB}#1)L5F_F~7bads}4%MbWJ(Cea1hi>h)9q}hIl zXRrXI<19PH^+UtEN!`fMkN@h4&tl4~0tGP?nCkqla|cejuICgGS5N`X ztAxy<5F8*dm&k17-0l_A4%3lzg{UrMjyHHF4W{_#+_mf0mCw(}oTOQ|X4CfV`?G_> zZ`z>Z4=1h5ANR=3l6MNipe@DzREclBoNB3%Gn62E;(qH713y2nYhYzJPeY>TF(}=s7D_lL0v`3Tz0N0-?!CDwTQeQ8OZxO#Ob#E-D=k6=LYF%^>0`9RQ`?atl?j3gd@Qu#$lj_ zeAv~wD{j1uPL3Wvxbeg(m#1aV6g>~6RrKdHP)+%!_`Th?-;qotdOy07M`UgaXzW0f zlt?z7#vcFcLPhwh|{hnJsXj)IF)f143r2|hWQy2HEnk+i%MS1Ndm{LvmW^{ zb@sE|<1L!Zqft}iz>wVj932_0ZFEl7~lMKq$ixlW)R>Labll?yeC8gk74yc&^tU^VMz%;mdvdn-8|# z)3ia2UiVgJkRK6k?{wFB7A1#x8vqG*gO3W-Em;gtQ0v50=iFr<^gPK%DlePZs&iG;^k@{>0s zT@E)>(>*W7sr+xzn#%IyJi0@St5kzPr<>-cvc*WXtSD?Q6qLO*9^vXY2xC!2@&KsZ z{a^S%Z~^&<3rO zTs!mH%YJ2c54qU*IZ|XPrl#u+KeTVza=_rm(Ge0G$eCHyvLc1gJ7yG0wg=#?m#&oi zAUPz=d^M+x0SE))fSl1x!x$8Kdd;TegLbT2=Q&Q|ZLDTI!ZgW8XU3rvmUcWTsoBRN zE~#CZW20$KzA23_sj5*!%Y-@c!%iN<%c9*TxIN+D2z&%YVo8BU$beBAnClerZ6-gg z2T~Tw$Y>fZ@f%AX6q)2~2@drf(-$(|8MgBV>o3~>^AeyeJU_bl7p z)m1(7sau7aW|V|m;#{JIT>9Oa`=#|Nge78h`qqE9B6j;a)5*RZ2~o;<`NnlzCW;K zM~}O0dCgFkr9+N^yix;+k5PQ9aO)vEcGM^HpCGqzeh_@_IgEorK|v924f!Wdl%dkhJueqNA3?1Fp$!O5~ zY4f3s*zH7OAro@xWzBQfk3|>8Y)LrFhy%MxIBq%ZNgq7#<%2y2SZ8}leaI9%f(QuS z3u6}-4GWK8F*AM&TDt-FLP|<{E_2`p1({C7CA*oB4H!v}Eh5=1Q?}{niq*5y%9G?I zlflUudHZb4JNy2edyL{=hBQ$u**CIh4LiXLL0?rNLQg8s^nIP**7^N+iBH(NyqDS@ zB+d1u=E6r?6y#sO>V7;mGQS8ziY-IVf`9G-;D~IE7-l@oGPXo`MwAZK%$4CYSts*8 z5bIN;Mi}Ww(h=|=0-u>1+4WjyExQjyx4qizIu>kN^iU%KkjZq2>YBbHydK<~cIOs6 z-;Lc&{zrh$&5tb2i26p!hHIC*&V}e<2h~q|U(-NSFY&|FO9KoRQ4$yKnDy`sdk#K~ zzI`)n^a8M$504#ub6mQ%-s0lKj^?=+=;C*jd)KaC?>=J0ZolY(ZW)slmgVoAXExb+;UlFaXY&KTRzkc09lI?EwR*F(JO;ue@TPuqhYu07h|gAQml8f^;Mv zt+Gh7^z3XCN$sZ4AW0?pWI4K(lY}gjJ1awn*+9z$e>gIm9isSZ=eHwx;(sM2<*52Z zJBpI`Xanz5){n=Ge9|&#??Dck`sh?VFCb%5e#VIaabk4uUQXOn(7SPm{Y*8kkk_)H z+0q};tREW6fZeX*i=o9n^YEc>g{q>m zty`|wqN^=PS;Idk>$9`AZx`)~`Q6jB z&IaTZfH&sUUgx?B9li`LeNNUipPXADqJljjF1A9J>2&?+lr^}(P z9u8`Wz5pYZ89hwy(k2ja1%(^v-b>vICnGa&$h~*?_xt6H(?D2eKCk-q?OW;HLWlE= 
zje-9NseSq_=UvljrD#WBEz0FTMxj4(J*SYg+FQ1mZ{3eDY3p&ckN6fIQHv&V%l&-M<+#O3i#X&uEeww=Kh zF(WzrJ4}CO#F0sMYg<%mweC&C)A`#kC=;vsVq83D@O&-_r)HciqlH6_!rsK*BtQ-b ze+dKE2&Xu3X1eM{h8~Kll}Y9jMpMSCtl5aA*ter@xq8Qh4bw6;eU=8l?S~| zPo4c9+tuLk=i!{$egWUlP1e%|3OnFG`QoA*$AcG|w7peg(b1>+JDH6*7KGEh98ga5@M z)e;tP9xVbX9rd#E<8F)68UXhRYLL%QwoL|2J>q?kn$rNZp)+5ShO<0Jprl1eM;A4& ziJJ*1Kjp8yQLpZwc&|NMHL~5{(+f^Tuk{8Emz)$1-0Ct_`}sJPeIebn6x59yR|cpE zqpnqCLcruxA8JAJEMZ+%=eGf_E(P2qZJ6L&L)L9Z2Z`&8^OY`1GwOdtFDx9jv0XX0 zhs|k6Nh$bEa&o~-&SYf7uJbgdp2dBLwUeD3Yp23Dm$|p>yjxdeO%=E!X&o3M3O7VJ z)iF|2-a7pf;-DM}pnJBz%ihxpD0aVrrO(3M`mkEiO7=KM@s^h-`oR@C_x=)dZ9~2DVmYgaoUr7PB*ky8L$TL$TP3i zQ6^Xta_ZpV=n;GQ(xq|=*v9w{RSB{Cm9KELVE*sH#h+rheG((jLP@`JMbf@Wx$MPB z*Z^e_N({-{x-Q`n|2tq|xo>ZI5jh*hN0A!*+i%sYt9XUQ4@o7KPO=l*aY5i@-Aww? z=s-H;IwUE|Q%0gQ>|iVYv@Zkm(xqEw>vAb~a^$(CHW7HCy2b3sn^WapTuZMtUI0R$WdE)w-l z)Oa+idUCPP7SCr(i!aNPMY6O$_v^rLR7nA-ZRtrbC8mzwMXs_;qNIv3F+ zd9zzR7xti&H3dnPG1Q?(5yg~$e_vt(fp#>QA1^>UyOtDK90hi@E{IG6C5q%zNDIwP zJIDPYM$n>P-xv!$VpjXVb>WP(;U2kDPNn{69T^9>Ur4n??M=4Q!k=H7aKU?%7f3ok z1t6!Y7(k^>f`r3(V%EtN&w6z<7NSO!bYTG^6jlX}<$z{iRIzXY0Ai7ISdZ~{#&fr1 z@P!nwA|m4{>t`bG8Gd>JSWz#V#x}Yx;1N<{>}@|y%V;HakNfO&YNZO29#v8|@FfyAl;d^^m%!r>&TKR{R{S6AQ;?m6~dWiCbp zCWusIF_2NU1-jZe(u7L?UFNhBpXx7XOlQIlpt)e1+t+$+p|VS1G>}&b4V@y`AuLg(Fn=lYo4>blt{-!nNlmo z&EfAR6KOmp14R+-2ivK7ULMcMH(G zgPg3|T40B#*f%1wi`0j}PRM8N+|0LSP~`6>;tlmiZSmj9h+JAcdsnKE*x4t3|C6XL z?aD70RBRjYbN7wz0|d|kC#J~cg!QC4XayuH))a<;{UvczA-MZLr1V{r*QA0+Zt9=7*Ry{+{>TB5dVF(hUdaNa8l zek?+QhS`jwpRNx8iZHQImV z@T0Xj=GC#BZi-XX#LMNJdOa;|caVc}aa7bJhxv8e28JRzY6!PXTY;SB!lkhT(!PK5 z*^qtERHG4KmPf&`iB(^-2PP)owikIjRizkj?`~G< zq9bw`DWvYmIf5&4`gC$_(%dh!f$;&37j5D1yI^0vu}X+2{K~Fwf1rq1#s&it8wNI< zG4>k>W{thp_6_vk|58>B+-i1ItL+!8U%%c1Z>H5Z5l3@^v$sXzbz`QcX33v0q^rh;SFb>WAw$Zk$8&-4ceL0#k!r!_Y^NLYHway+oMAQxl&;%WHM|?YtDxFuj^}{)XZ-a0g9%G zmPJ>AH7HdR8Xa|0{v_Vr;Z5~;&JT&#XLX2us2EGqnaoVxfNivJmdM@#_Tar7N11A$ zvFp71__6uNIj*kD3xeEUM+EkdaU&Hzo9ReNd{O(`NhR)m`h>z}z&t1lE=|dL1xEE6 
zFktts2a#;aH`}$Nh%}awESexrTSu@<}>5J}%NN>yEOQNDeRN5J8GSaLsQ^c_xY@ z06_ZlbK)|2{5ypRFX;aLf2(2(X8yzK@)F*U+vE8(*EGQgtWIP-P~z^z^}6K&-Sdg; zBn_BmD~Vh(d<8Lw%)7YO7Vj}~hzg>z<%>lAI9XO&nX=7jjW@-sTy`|;PZPD0b%GIc zIGjI@$tsvRu~QySXBc@H4S*?Y*86TS`wrRnZBya--RObBqiwR+?EVfVUuZrnQSAKKm7;K|P&SobS zr0dIF&GoE_mDPb+#PW!!T<}r!6vVj-s`~Gn2X$$jemb0-%ZWcG`Q*t)>}=EvY%qi^ zE`_&IZl|vL_9tc%i&VZ(>=3fs3Wkp45@llBorh?}!8evzyM-M)uRY|{iZOA} z&mi;r)2^xPNsXzXU8y&CB)h{3waW175 z3k~hE$KXosPQM2hTZxKAf*$1)9k_a#zy1ctc3k)~wV)W)uY1$!7cX4!qudFLxGxMd z=8_SF%kh)-6IW%Zh~x`V{;1zpYpB~@VwSRE>$C1L2RK6H-V}W6EI*RBNhfd9kQOpo z{!@#|v%ckxH}|;LE`@h7NJ_JdEFt^E11w1dw9(f-UGMeBNYc_H-j>U!`3Lv20y-Cx zP{gw2v<>V%GdhB_O0{l>wVSj>w9|i4K#6=14u))YAIB=G+{VkwdrV@^W@Dln28B?OXMe^}zdtpA( zj!Zm5cV2fU=ePvR%?oJ}^uw{-C)9@f8fTKtkfQ+;e5m&jC;*$9S?eYtdXRa$WrCq; z8BGJ)qR?U>ffcS}i3Q^R{g2;*4Z-#0-Fx?Hot$$r2K7PAd8T~UEKW5f3Kn~VBHk$TwB(c9%70pX(Dgr1TDZ~CbOYOu zeAH#Bgd%-MttA347($A$fhli!E)q1$K#P3ZK|~Cmqdf5?htLI&A|P2-Cn}+q2=)X9 zhrHm&2|vMjkLvjux=GB5^}NV5UMo~y4MlajK4o}$j}!9jpC6++m1V_|#xb%t_#us+ zuy?(PF(`Z0?3eMX>9T=JaGh2`?zg7Fu(&kfD{;8<3P@TP7>mvhnc^kV0`jr(pTkQB zSI7plgd1)a*hrkcIDXHtsE5BG+vttD%bS}C>oZ%MCS*dynUB5h@UrOr3S$3-kdWBl zj7$34!vD}UuHUyxLo1!Z>_z8k%>hO12j;cUn`cVxsDyyn>!SzE$I=RmUDn#o{$Cpa zvA|pV!?96pOnG?$NM{PpG5wsAE41;aN`F4w^wB2N>B!kl)}F+t`MIgOtH=EFPPCpN zCr{o~Dtnn!susdn>arh?J=zHYUC&jAPsO5iJ;N9rwCoUbWdm9>@u$mJLFwB0b6h-L zW+%RJtirm;NWj?DqLmW0h9cAk`Hk_NJ9(Vo-rGHp!If|oOJI;t3+HU6%g}$~UzlWH zzC4cW-wbys(n6dH&^PweO8KcG;i9M);0w8#u}1**Q7Y?DBTi}C8||_vS&(HZ@1i@z zlk<3RGcs}$nxv%Cm15=jF}AD*@yNM=iAWtwYV&4OOYJJ7^4z}W z-ot5j%=dDJ`HO?%%Ghh0%;kK(XGvz7aG2s4!X8J2oA754G~~uOF?T_hjBwcMSe$Xy zKqbKL9lT5F9TX?V>1gUmU9vp>-oU3lgI+X`F(sQ=rW=9v1B-Smnv=E3n&| z>;KCx-Qvc_B#Jii4;hs&Gfc}PQ=bBgcG08y|45B3Z^w+O`ZB_$tj0#fOeH9qMM|_0 zgxR})dO+=oEe4bTRt`^~e64YC`M`MIx&v^W&<#1;m%DbiT*n)P4}6cX@EaNg$s$uiqM|Gb0gu2Exbfaw_84*Q^=XW_UAziG zK_0Vc4y@*^+CThs1AWQ}8&DKa`OwiV*Bk}`RvK?HB+)OdpqT6d zhPqx_IH#)RhEBgt<6R*net6h>IVrT(D7KVq2?Luvdp6~i8GjO>AM%vhx_x`;Q9-Pa~8zpZj{> zp|rCI+r&4@`y?lUE&^Cm^{=6*^pPJilLa9jANTVCvsLP5y 
zcWEe`Aw3gN#aCjgql&!nl)uhHHy9&L{uKCzLY{%&Gqr3 zInsFnfGjX=6}c%5F|`ko1G3Q%EgoomVih@p|pOW8h23 zQIC5Tju}1L%mSQbx$7`miQ#r*danzG1w#F>DL`6k{vSKaPMB~P- z@$g{c%8L{0Y>#}EyB%*Ozs|k|%Y#$OdC;KE*$b!FsasdZ4@qhW8&(kAFJ4kzM@dzU z`(G^ND#zN@!i@ZV1Ds<5A>kzFo;nquAe1J=fmW;4gDQ+&m4>EQL%H{cvbPt_XYzrb z?=5JWfJ^TjVloNd16*41x;+sj`+ZMYC{u7g%S~z1l(I#TNty{|!~7hB$YWw%kZDR# zPV}#&^(UMiMYf0dwLu&v=6Y4K)4wQB%klttDc#Fjb8c!rYIuq@!b{m0J4+_8LU2Wk z#L1MS!eD~?SV0@^wSpZJ=HVWSJCH}Q$#P%Kh&WaBg?Z3 zL*aeGE|!(s05@eRe|^)Y`sj0aYz-e|PR0C0n=F}|GtToDZ4R(|L*w$5IMM|pm#MD$ zsS4MZXYPkKLD1BWr1Jk5gvFckDOmU!kXi~el&)g&PWg+g)PFRa%QkZr!)bv#gM#dC zw|h)$QAvTc-KTdSocTZW7Qp+aut~bZ=hM+P-sUseV@U~l3TfD4lb`*{bIdAopYYmqie zT@Aq6^>cm&8G8m`v1rAL-b82LPhKR$8c1=$QKAp=mU;BrwL5uFpJ`}uouRqJY6#r? zYwQEC7`D2l#Mq!f-T$0&MfLC2vImW_Rw0XL(uuEV_K48#+gGEva|zj~NX{3@Se`X_ zs3I7lEbmDf1Q`l+lSmsOy8-Qk3)_-VyT8}1+lWQR;V!B76ar>XZ(sJF?lXAsNXDZ> z!rXj(S+=#?13;K-weVeunA|fr<;01R@E3SyX4qKED9VQbRzWxP3N|3C?*y>s`jkXNFiW6oNQe3#;{l91px)8la^GCP%YJV8|YOIYy? zif3(~8qJrHgiq{DX1hGxMSUo@7@tA?SuvDi(eC2F&;+p0{nFVpo>~)^(+`(GIvXXW z3f=Uxc z)g*-Fnyz0BQ1l7EFX4R{mj2&LEg2UgN-ZRQE%oUE2kFA4mQh+XE=+=*!GzKX#UgWY z*qufYB_NaAc}tK1$m}i}j(whv-1_S;-CQ$Yb?c#uiDkn;0QQbmA)0oC5zq2YcT?ow zvRxpL<2gli1ud2KTolrl6)BiPrQJD^LJe=9B^gQp2~zw(3-v^1Es4LcWtMo^coHxN zJ%s{1K0?wX(C1N){8vru`Hi|BdQu8eMlvm^TmfTW$b}g+c(CL(i!57iA4FW=UNCu^HXMCq)V}Nh(RC43 zK)-&?XC-zb39}Wv8ThZh*orVJPc705Zli7eFbJl6gkeC}`n|byI7dVz1L?(Kg7~>@ zpzXrC9q}d6V{*zMWJUJO-;_WQJ^|%`ddf~`M#~)fzfz9s-3Nq<@QE3T^+zoPO~Y@}^vbZ*3mv15T0(7Q@7T^{A*FT zw+1u$U@iod#5R#~txrMe0!%aL*Zv^G@C^B)GD7M3GwMp)nGbEBKare7(6k2Nd9neE zToq@6VmAAb%g;&*FN*jWWuBNskS>q+l$21-Y>v&dlwTrDG9v*%82hm1o7r`y*E2gB7FVOT&AcHQ7U;0EZkusN6gOcIlT_uN#q<39tnh(~I7qZ6=_~47 zlGcazFKX{z@8@5_86d!2l%a9lv@^U|*HdRtD{EC{(LtGBJT-cGynAlS3n2x1Wq07DfvOy(^%_;*q;cTRsr_EQyQYz!9u3h^} zdJ>3W&3FjoU!UxnZS_OV1%y`=*rcKtZ?TedY*@8!Jy6THh+he89&bCT!hkLtt*cUH z3Ozal5Y=#%yzj)2nSy9X{d6`!|7W*)t?WCVwe{p4<$nWu?#sjOul+N8Gvn7FS8BgXq{pg{hAl`uHkgu|6@iv}rvibM-J@=Hn4xQD4HB7m#d;8z3rdT3`- 
z*$BPlY#_PBG%4OGZ>gzUB8Na+f(=??=vSe6DEsJ^Zt|Xa5t9HDgFQYICk%u9DE!!xcQ6HY`H7Cjb#Jmt|Y=?wMB5|HU`qE!%&HhEd1w?S)RG4KCCmiPRrRV8MI<2sB6?i@0_n_Wfgg}Gev z0uUyQgmQ`)&ub(PT2BO~N0>Xv{HC^P<+!LR90%Q_WE4K_YzsUE}P)eMbJuHjV7a2}o#$1{N`weMNL%rt%8wBpDN}DD~#l53{CES5fcMh4`MhMXW2o z2^(kPVtvP}Z|B9ruP+AdOW612E)k~X-oFn!xdc~ZHt_^1@CbaD? zJwNnbvBPbQ9p%2~88VU7DmtP}Rj|CxB&sJRgz{T5J6sMbcHtC*RQDp(HfE?UZD&#m zdV+Eaf5>(tCSGC~Wz65g?}-K9Fcz7m?A*0$4M-H>zeED$RATK%-xh@qpO~N$%>x{9 zy&zFD@8@M^8Y*jG?@^ueIcO8Xssz z`G`9p<%u(Wgz44&u$J&9#k#pXyg5U|$Sotd63IwKslfIWT8=d+%GGt%pDVNY#TO0#BH?Q=zg(xgveiVZo9KKt|H5M9Ro<{F1a^-VqkHOEW^>M0;$eV4DzWsn_tAiXfJ zbtZMTsO5+0jnRO0BRbpHXrTQU4j$ZVKnFdx;f!irvSv0>U{_h9-^XUqQRp1`xl3j4 z0W|eySyuI23GA4~(&|A7;XI(7E)TRUF#;ee(6E=h=a^3PqF1cOslBU*$3?KDgIs2kV}Dj^FF=_n>udm9^=K|j^1hJyY@fzrf7Zd(bKzr zfos>r#vQ*ONlUMezkT1NmHAg5t$e;2*M4x9KgJci?J*u#e@oe@#Hy+r2l{R?|KaA% zF7Y2PMrCh!XKj~f^xHqXhAs-R(HTD6ntdLqH~ox1HluardWGWo*-={?Qvn8maAR5W z2>a)MC{IEYC`jRkDQE=l&367_1ndL_oteFW*1#v z8mtpj@BS7NH4Aqz`o&K5d-uRze*jim*k+s0PWwlAT-qXfAX0PhJmM_>9H)lG=tms7 zj3MNUpC}YROorp5O~N_Ngy8Gf15xlaJhmQ5BVCQGx6SkCA&x%SLN4s6OobrD8L!5T zt!959OS_StK8-62w2TPc-?Bbkz}-UIyH2F=&O28|Qr>ZRMf&V<@Ze(I7}RJsv2Sk= z@yt|*v(lP%9BdBbGZ(dR>>y^F*ZwVq#*H`Gf#l zTWTp*TCtYPXlH8$9MfmwRN~#~WK89`2j&zYtwwa_P?kN^jBxS@6tWBUpE!V<_B0GG>Ng#nm4QYq{~Y*%L*f; zYCE|oHy7Qjx{t};k3}()Df~-vxD*l+qSPHQ;KF~*>MM%UnD`nXoGty-%qd>?wA(gs zX5*p>IL`F=s`6gxIK4p&>T_z}L~edKSODvA0=0~8yLZn3VU2s+^y-aw4phJz&pY|* z>$xpKYP4IoJ`I;WG{MI6vAXo}BP%s7xSI-&CqB$o;W~=l-_ej-dKjEcS_y?Sv}x=1 z?fTujk3lR_`tIFyu)K8IBlNdT`TOCjdOWh23=*^c#EeXV&9LwJwt@elpk9yH^_=>m z(X@EE`EQx75Q^yA9zHL6NY)|ew22MXXd&DoG zSKRw6r59Q4l|@gSKYv&wk7C=rm&eu@>4u+CxLOUQW{B);Jhmq`xDvGOTo(66artN^ zLFZ~JaoTP>0i0a$d-LI@p!Cm+ik7#{DJ%>k9tb4xJluQUW_s842WZ3d6auc?C7;Ws zDB>DzOB5yz5|=9m{1b5cn0ov6?MjM`CbnaKpyR9iCzusDo|}=%jw@$1jZd68fTGfo zmDvtNDxR?y!5wC<{->Uz^6*a!w*7kBD*$RXE8`P;z^aHdE1uq#yQ-%4M!9KIb^T=G zs(U=d{%AXWeSB^_c`^t0FuH9^o)&v|nWXmQW`^TE=$bQ|3}iapX3AcL-O=ma`)=q! 
zbFjAljOcPHZu_7A%r#ohkN3W+*@)+U5);F|&p2}k@Xx-(zDIFc+zK@uIyrv$!kg-o zJHO@0KWKK@QF^_BK=hf|Hz{NSkBI?OwCsr>K`gyA0|Ew*(Bo?F=LH2O51J_k^n6?0 zdEv%4yjgMiHQ?-FWdK>C#qCFQDRJMYR@-|O78RXk6D9VWxa|J96E8=xT>jwe>}t#G zUep(9^wtAzQ6=gOf>u4vdMTjtV;Wai8hdBa@eO{s*h78+sB|rg8Y<3B1eR8N6mLJu z2P(9@PMuodnK`iNLB~#=jul?w1mo4x`?TL*{IzOCBfCbdk91;C4AdAuTPCQPZJT!^ zHTA9Mu@{s&RrI5wkV^e~gH77?T@A;pRQKS}l&Wcg=I8C%2#U+^_V=1&%|l?@-?7i; zJK0O)YVNFlLM8a2S~+^`*wz+fcC}*E^A%p-d_}i_neGFj#>aMYQa;L}ZC%Gf zD~;7Oj-C=evYx^?U@W&?;W|IJxcGR^k`8{h6pqX$yvbblIJ0-tW^MaCpOhnu%h#G) zC>slaRxha%201Y)X*?#Z@i*VxMOGcm{I{91y@>L3^z_`CpTZT+A6awqMTcJ)9=)?0 zA1p2WBNpF#Hfg?`bWa*pjLC=kjwN0n?DGB`p^Q>@fqoQ`=3=t5i?mz;Q}M>(20?@% zmQCXpP%~$gv;hopsqb3OVbbL4rN^75e8oSP|ISGA3l69r_swf_kxo=i%F4FSYlEIg z4Jjh3b;1UJy!+_yTk~?AH3VzvKB|^Xjk2Y&?cqK)Zn>5WyF|GZ0H2c=HGiK+NtX-& zL|bgnHSM+vg@>8PW8QjhFi;Y900+5*^Jch|*1Yg#irAe?TQ%hwNQB&3|oSKIcLqCU$ zzqKlh=D}s4LyLw`#jzsU%V;XTcl2S5ibAIuB(F`IZOVMCTM7w;x!;6C_3vx~rYxr) z&W+wp8#ZdxdfEhD>iH9v#I<}`R@4-hbJ+AVGrRQa71ANWz%qOs^v%%;skh!0lAC8B z>e?}|AMaS%I_Zo|+J_ zAR{PUYlRA1aV+9Tc3-P@?ewsId3gtN$_W=N+!J=T z-DeYj86YIcrLt7*1Hi9sBe%@kJYY_IocKUeobl>sIXUHw>|KI*Z2BaY!=B$l+h3j? 
z0GeG|QIRQS(=@Osi~>KQk+r3?-ulU}fg;PGXU|D$1K;vwgx&foJ*x|62Csg(C)e^& z9~&@>zF0P_(h2DI?dqGR{)D#0CFDt@^6`0;Et0+_w8AL0r;4ja5R^zu6 zrhus7pc4|osI@wDKnCDkV$AwG*O<+{)|U!6^cBmgC%Po+Cbn=8G(O=Fo;n2`K{pj3 z=wX^N(|97-tBt}W5-U7TO{4U3bT(GEfuyDbs~zx|vQVO$gvuJf9Y}@tz3mOT118ZR zw01;0^4)xPzTN^57fAcV>xnn90=7RtXy$DWpBo_~+@|@P{$+o=chgv+VA)OD`v3}T zDI52UjXFK?h#ypPq3;3f6bXir>0=#H^U~+^r$<>NqAy9~)Y6fxwPuTWhnJR=+yjwG z54IYpGDCNZtvHEXaVqiN^hsC;lP25(Uullkf9_=kGd?YEvpR_bC*YvMSPHI6gn_nT zh};j!0l3XGCQR5VQKVH!>^n<$hs-k;3yhWncGP^?A5`|MR-Iq^;{jV>Jxh8MYfr65 zy0%3U8DM^MNc}Wa@x#z0m_S0k$wI19o}Zc!y4oq)KG&f7`&OMgorPm;t@>4QIauNi zv~vbvqlf%#E7RFMh8Cg9q=F8ICZtLtqqI45zvS-E&9&DqmEDxakkm_6Z;w~|GaW|V z=U6G5R_D9#GQLwN8lERJuQ4s3b2?XxIQX|0Teodn!p$Y&^itl$)ruq*{GlAHxCFEb zx4Q>V5=b7jrI>B;AHv#`)ZO`EPp6_*3S{pBpLrI($K@xrd_tCE)an@*qF6rrMd0{` z>F~K+^VD9kWopc+*)IY7DYVZmtIq)q6=$T8n%p7$4o$&^Pdhuy=!p^YBTRviv|F_3 zn%Zw#w7rcXp%KRYxF*MQesx~9G)bm6(Q&KtNAg*G#S%;)cYf{K86ev$Y>UvbMzG|^ zTMQ@Wz2eT3o=cQ)x%<%48$XKu#b)@Ui^v;Ng$Za!hjEILY!3V3S|XrFC00tJiTguc zrSca-kx+x*1u@VZuIk;x1Poxc;cG~`dOSJ5>2vu3RN+BTgG7w7GuaP!rmr4v#y#Y; zn9r$S1+e344pVbY-iP8srJmQu$$bc=5UknLbS_q<;Ppb-f=LMpJrYWQc+@^{2Ce2> z*1cJw)n5}KZq~u9(sj4grT#~f-*!XSo%4-a&SYn{vl2HqD-W7)ocjK~dI}RyjJ_?1 zADx^IvTe0HcMd5lD|5+X_dp%aA17(P$0UMOH)Nr2;(edLQ((D_=Xpz(C=rdf_YWQM z@U%*pLhcYY>&f{A+sEu|a{?ur%8Zb$j3RAf?jP@bJT_?6D0Qj9-}MyN7V#L{4RJg} zi)cHe1C_fM4?J76>g|X#ZMStv?e}(eJqe~R2S<~WQG@1<45zw-Ti9o|heYr({KdwVmAw1c(V^9cJmYBoCk`2rzs z5~=Vj{p!iCcujV5u{iFdJ_#p=AfePbC za4Q4sL{TITml9#mpQuh06MoV0b)W>`^rQNDThl2nE|+AyU`IN8@`C9u67^nw{12v> zhqha3sZe zdEL|B8ijBC>@0DXSYDz>r9en1UiN_Bo@K|BGlH4RO_gaV_>?g`>y+4vZ2i=JYoIg) zyMPi>_ZhxwD3-vb;@#H@tzeSb@oH4U&x?!acpFiBdYI`S;P~aF7&pma@rKiUP4>I> z5gck|rd8C+6QFt6ovQMXP5Bhi-&%79)QLj-lf?QA?VRS!kl%7zaWRU}_woq(_bak` zUR`)fTQNWuDHU*fbK+b)-%^*8#$vE@8ujMafjymg_-i-t&TAfR6nl=aaTx_MOYj^b zE5keqrwF1aZ=0t>Tk(YW%$#S>Bz)@Dy?a4$WOfR>en;QInk&pt4>}-21{-VZy`-Yd z^Z3;jpDKr901~2sFjA=k&?jsq{+}d$^lrbzQ+Uj4>!28rl$4}zbG!}~zxPI7N^ zFFubxdi1EZOUY=Grqc(fHc;5xvv5>xt<17+P*q{?h3GS97Of+g&@`QOkC7YQT7z76 
zyM25_FU>wo?J#1co(frq(J>1B^Td!-c$s(cS_d&K^4V*B>0fM%mggbGNAv*-CI!%; z^B|u<@U#_!*n^MAbkEcOi zY;3Hd^hLiJ@hWb0zS^hy^C>P@;rb}=*L;z;7C4tdxA{`)0pi(1*d`R41bK~p>rCHU8`M#W$@ zM#B+2oPA4w?OwVS!GG|I#!YHcj1%}H%Q5w{U$P}5*%MAX-#Us{`0PU zMPh03PD`xm%0*Pk4<~?u$lwj|8c8LDmJVJYK_~J>x-+kh=j&~}(yejr`B7u5`*?O4 zL3o{e>G$7%r|CggRb5Ifi6)hRtgJ?YH+(a0e1FtIR-I+y;_y>a1y++Ha!WcCNuV-;>c4pv`3RCeTBf z9=cNvGQm&ND7wL#41IL$H=-PK)GidGpLyoils9EX&>ih01st=s&0GA}=7xBueYp?W zF9JCx-FZRowvRTkEn2ojiLA?#!?+j?9=wF0CXm{q{n-ShI?Xf*v1wOSu6kH$3;=QB zO?yM~P#N4Mi}A=Nmo+2^%whwx=rv?WtDdsd8eeTWdFTmL=WD1qM`gIt(+-i1X1j*; z-BEn&c=~IYJQSZvdJ}p1=&hlcWyJuQXY_y0u@xSA1QTh};XJ;&>XAghbQGF^G4rzt z>dK-z`HKJ?nj^}P?BBEbDK2p~bU=nXkKN?^|COUp{kitl{O8{seG*fb%@9GTeP{rg zmS|i18m&f}cWbr(A6P@QJ0g%EI%(*r$n@@1#K{#R>G|PmZv$URhB;hg+G`e(iC1-1 zMZDmoO|R;hJ}^}I*|u6Gmh)**AD&y+>UIr3J9XkmM{rWjI~B&d{T5tr+IHI%pE?WXvW+9G=GaS zfb5Oz#pS|JWPcwLMpT`^B`-9prgX)QB_g>vQ1^~*y)>sWRxN0=t%zqw?3VCw3cp+a zE#KqFI~q3@ke}iBod4O{+P(usatCC@!xQYVAL&rM{V)w0$dUJ$o;*5ygC)Zi1SvSS z@q;Z}(_#ol&^YeZnT`rY(chFDbXVz3_1lS?P`}2UWtJn=B4#A&B6tx*#~H$nB4F~> z2wDu%y3Rl9RLvclnNCMBqV8^*6m$)Z-X7p5vE=t6`@K*o4h&z}^y*#OHUx4d8TP?Yy&zv6N?R=65+d)CE#w9qI`w3BJ^p+# z*t+0sTw3W?QET$_GdFazu6I zZrJ!UUSOI(G>`|>s&nUF0wyeV&xCl!fANNXX1_VE#O1aOqYWDp*z z8MrWIJrBbc?S`;9jmNR@G#Ww4FVdtFB-KHDB}qZRq|S7d5NbZi+tS1H+NgcP!^4+m z0yImm&Lab&$Q|^AtWW*Ed0)*3xVERGL?Yn=woNj40prQKCjr?q>_^sb0a#iaEQ z*-jHu!yU=eOcRAKuoX+?ao(#k87{%&*s-3f=ptQc4L#i)-4&YiD;8#+9NlE%@!_-K z9Pd)a7r^|;nE+v=qQq`|m;KNQMg&u752p~Y>gLRSjSuKi6kA~}TL_WZ3~)vSeTJFL z2edN+-CX1TmN{_-s?xGDgNNPdQb zBne!JlY-h1PH?kJACKzl5afnc(Hg6iZ`)8>Q-20AlNCs;xUkLFNe5VFK_FRkQw5(z z1~v;<^MQDn?!{k2J+gzQk}tpwN<8S>JNh2=bigoOh(JiO^?e$-ru@0-8`)7y$J^V7 zG8H?-)m6b9+^JM{sH-Z%Ht-P2AwS>tX3gk0%CqOTruRe36Z`qyBPdDCZR2LmB0eQ# z46DdG#X~Skh?Z}c8%1z&P2E~y62pgD4iOXOTK{3p+7P*;Y*3uL$%i?Ay{5N|AX^}8 z>2NtZs6y?=;DdZU=9MmP<{ERE-6s9xgdY-8#YbKYYv~yox%NT{Foo<76l1wWAo%J4 zFv)AOP&vGRF(Pf}PW zx7N%eZvU?Ha08XSqodGxJM{529L9g*JY~X@F5L(>eJa_W>+)hmM?!3?4Ij#;pqs)W@F488p1HrN|C|Nn 
z8e5%5@W|R5%^ado%xr(Wvc_uoJ_dU!hf#0+VS}!ufm5JLHTQ)zf5yMlS2tXi5EFX) z^WAmQxl`RBFDFf(RO6ydFQLm@@LKZewIDvm0gh0?D)oT_2lnr)If<66K)bR05cQ)h zx9cDk4zaCTw?2?p-W~!?hfo2hd5?-Tpbf#GOn!~hd(=h)53+uU#dQn(-WLe1R zTk%(xuWhcQK?bF(1yNX`d*7OC;Vs_l-Ymp$?n_;?y6CyXkjcZ!wMN%EZ@&62*;;*t zLm2kg1{w#Fm!klOJR1IFeVc7vP@xLF4~JEd9UB95F!Pr`|Gg6Pnb$kGPE4Bh(52@7 z2S()7U8TbN;C%~jYL@Qsum*|?cbIPlzLp6KxWcZAEjumNc@4#er}hBtX%3wL&B^Fi zqE_xVw8}_SDHIvkw;!+ht7{5T6sCsux=LC855XQFse`@bF4r8_ezUn%?|wK|=-S_Z z`y)uudaN*Y<8F>?Q5|3V8lBMsx`dMwR;dka6Bbvv&jiB_*rS0R*P1@myGt`;N7;rq>GxefF((2xSQjCtpa zO%*fwcgx8iKT z9+p{H^G?OeAH{t1I^9rjW_W$Y(j-doyV<;osf&&q3dZMmzHE>ptK)}?;uV^WC~sZb zbbVdusrk|UXhiK7)|_}{5DSohk-l!K3P9$=Sn>tZaDxF!zyfcNE^4!lj?b1&Ddp50 zGixK?osgo10QantnB`7qZ4dWpdg>dqx@G+*D=urlq#_^@V9R<)lt4%&ZXq4XefDhr z>vam(4jn*4_<1TH1|zlB<=6AG_wcYZN6~PML{U53l_i6U>2VN(6VNp@G)uSH0uzbW9%Sqk;P`D0dxX?@n z;cnyM!8DW|ul_VvT}C3h1h@+iO2-q67Zj=H3uXbcYJOp7z^J^dlSZW_o)6q;m0psO}ylwp19GJOVXP-ei#jKkD7}V$}=0 z<_v%bClxfq+G#X+rbdS#A7T9MD3->HT^)?fV8+RKbvL2@5@-o{*F^LXl6#Q7%8wvT znYh$m+){kWAAfwq+fovXxjriQ_zkxUC3(>0w*c2=fIfk&Zi0k#nvS~l?pE`ST-6%H z-zvs2Sez_7Avf0-r)rCXK*@B%Cb|?oh(AHOGa|wWy2Fk?WY6SIFq+%#O6n=9=TH}@ zw0UHC<#y4g5of!1@BU&?Cef#GxVDEasi@p*L+&SsX{g3@A?ygseNXs24RWkWzYzAS z=(-F;bjK<#Uj)BWsz`J#ZUbCdxg$e5T=)rQWV~8p=CAT=PNnepMVylKORIJQPeM_hlUo$7n;9U#M~N_Pq0yWZqZ%fapb&L9x``l8l>)Et_5(SP9Ik57 zO-zh&sSH&H@NFae$77$>RWsBOsyW23duTu+IM^W}aHV z(0m#_U{8WFSX{w=Ze;%4e3%k>Gk0z=R@J#MIMN_9o0&`KDpUlUh&D1&^4Z5G@FZtX zsPnip+iUDHY+qFFdrG_0QTsIMC*4S#>BDUp0=i)R@&RFV-m6z<=tj60lPu9v=q&v? zahG66Pe8#X^-Ep~7|cEXK%2!g-`72&{cXCpe_a3EDjZ5G=Y_CV{uL1n`}F22Qr9p?q1ENMCJ96G zkoE7*NVbeMgef87P@x^4iYYNjdS-obwF**Jz0j!z!>Qb)0f8tqgL1)xYmUpo8ZKCx zOjydjburh{x{Hkn4^H`CcQk7vQNV_0Kq=|C;-Qz| z#n}6jZPPABP>njcf^?UCDPcM_Si3|?1)wk!dQlg4qeTm~%`0dgAtu4TNsOi+9+xw@rdWg+}f5Iuj#si{a1o?4s(Sac{DP1Vpm$~mp! 
zbJRYxpVM-hUVHj(8bpBNA0Kc@z5e~vOSK|V--vJ)y|<Qfxz?!elxW;c-B$f55r>2B*{kJjxm%iL0(!w zIJKWR&t}4sC4r30^9Rq+1GUDhh=(RooqO#$=8>_r2Q*ZL^RqvDEO**SWFCQdlDS9$ z=T6lH2q%Q(m>9K6RmxeG5J+3NEmzsETzMBIXn&hTj}Cc|cV}yuMub;o7hYhzHL8pz zeT5<7aBZPU#F<-#wwc)ik}(`x(5P8EAAaCVfewTZdR9+SWIBKTN#)Jtg!*%pjep~wD)Vs@k^QS7Yt%l3&szWct*u-4 z$lqi$ZeyPWo7)W(9!p7QJ+QJM#fEo~x2K`(Bq{_aoAdsk+7I;@CvIl5?P0ZYNX%v| zYMU{h%ydx)vl`VtusW#j{_%XlJjy)5qLulyD>Wz+hFcDk?nHqkZL|oG(l>8R*_k`~)P7CZ7Pw>HGadS%zeE9mG_-=I$SD(8JcM?QOBW#g zlL6APWhvtYwXo6NJurLS&#b*`9skQx+I^~jt{yxWiypwvl*s>W$9N|FAch+cBuz`g zh9BAy&c&o2#8J(KCM?;#=_UAk#L}@{R6NuvEJZ>k1Op$n7$AJgsVy9b>^c3LZuT35&~+NXsr=v>HrcOdF{E}8D)f01YcX9wN;SnTr*R`m z5R?~xt2wE{W1YsHDHN1Q&8QK(Ydx<0TjRfPKa?HeO8jJ^21;C(tAT|0)zXG6PEm+C zG%o^=l&nm1%+=Q*_(<9;Bh;+5jh~_-PSyNLaqfH87HIKBbjah?lh6W#y;vC4nG@5!ewuh71{g z3PeMW9|YBq1xX-jm>~K*l0h&;<=C-fQFYyT@WANF$2JKQ*;o4|8&qh{P1Lzob9asZ+>^QaXM-`dQQy;FYriI;yarE@SI=F5 z)vA4){OF*;qQCsY)oSz?CV^j`Gt!)*?$1Xw_~D-8s2F~AdYzNAuP|cBs+O3KUk%U9M=_g*=?^(}85ZAXp1s1=o> zWD*WKbeB`DR8Q;WMgp%)c;Y7gf2_F(SM{zQWa&FE4L@=<&rxyN@l!>7bvjXw7!aaS z!uoER=No?hxBNO~Vd412G*)!ou9+$hs$@Z--vlsQ^wlF+5%4VD-Q2%YUaG=D~tS3J7-wL-^9vLgv+Sfu0|0+j#u0=_=i`L*OP1j1#H zR>X@g1Sj0y7?#Sbb`D~p80CBZR2#n(7TX>qK@Zg=;%&ns&#NwF06&^2b1v-3rXEQuZp za9E%k4-JzCHvJinX|%_8j9aYdv`-lPR{@QYOcw2NV-R}1c{qf zZ06~czEa%Sh2w!*U!Bz1BysYK&)kqE<#Dy2Zqidg07D`p!qXe?%rAid6FNMn7lki{ zOY$2=f9-UDmqX;a>&ZbxsJ}ZMemw?taue(ysnFTcq>nI&5Nej58}-_abK3 zj({om{f=I7Iv(rAnR(_&`Ay=m+C|{u!UkOWkmY80wRagg&C5S~96RUO0n7(PQ=ILM zeiNle2WjoaQy(Q`2O9+M76<`#UgI7*HCN!;g2(PUkuhSPN|gAQ1wm`byzrBBHX5e} z!Vr}sZ!ft}6ai2h`=KC+Tu$9@{@>paKx$cj39B^0!1RbE(^O`FRsvc!UV>;jkakh{ zELMy4x_=sq){{!Yo_9a7Lu{!E7QW15&mPD@$l49fIuF0}2eFps&`~w*`*unj#krBf zXCiTs6)bFQ25M#@R4U&Q1dJYyHQBnI@-tUR`&)ITm)1fS&g9N-2VL!Z_w<~#W$N_6-6(POQ;H87Z z-rv5tYXJl^`i1i3flvGi1FxxT_07gN|LXGskP2Q&YK7RoxEA; z+b6W!c7j^UQ4FxGJzP)zqv4cJmwveQ-EpGP+9PU+=1v0>X1Le=PNC4Bi>%|z;hL;T z0lxgKSdP~9-=8kO&}2t-zgYU+|NP@1f#tRQ{x5`Unlrdw!L2_%dyWu$SH~gK32`F4 z`l8qzvvX*_|pY-pQSJ@mCJXc>gFyuPYBA6SxK*s+)=f?R*{U?s6FPkZ@SN~ 
zk!!Q@6cBDkx#qPIY7s(fH30hS$1yV?8zt948zM4*h~ z(dwfyyAB~mY|({AG-+62G|9vyan)HC7Bg2+T**|?8ikAIEv}=`?ma!r41>6h~-s< zxBt5$J$%`d4XC3H_>MkmIaPha3E(G8skP7j=bw467uS7?BC6T{3>2?kjQ#RA zh2jVc@ZTT*4?!jFR68lNq@tpiXULa(;5uXxh@Rz;uN2R(!4eUiRc3xmCRZBrPSinb|&7l-NKzFL& zOSr5+i!eXIyYyxAxDvzkTKp9NW=T2RpQJmE)*;w`AmV}kpj4`lTfwQ z9IjVT30?LhAu>^?F^H4GkS8fw+y#L-aQ3K(d%CTwd$mHRt0wdy1_IjvesZ*WOT)b- z(Zc-K97`L?+G<59rbp}#qA6WzJ?)pe^DDIK)orFpJKGPOgus<3Nv&jx;~IL<5_unB&?41< zR7m?{d!%gQF3+AHYsgbr_|wAUe_=q7D@~Vcc7l2ll`J82H?&XPXi@mdt{4eu8u@s4 z%1WO7@$VxT1&rWUCs14Jvtd-I0(|bX?kHd_KP<%uBRcuwN3-JS>e1a<6NRifpl~i9A9y@_ZctI z+|?YB)M8(r-3tW)ODTg~JsfANYTK}47tq*(UH@e*CbG1OOt7Y;!*WRsB=)BuGO1D+ z56q$LM$!7y4RCUDCLq)zCAl2Th7@4U47{>I^uP*7`ADUaqA{8+7~kpN6B*T>qNF z>B12wTH1q%1)!OdN*m;=PT^zPkFwWSTaXb@nZ4!Z_-)&Fp9J?7`g4 z;L_44o!6(Js`fBmCD@Du%UDW4aXDLtMSWROCc2hg6$7sIZrZI|w|v;t^uBk7p2BxR zI%W&^TYH$^lH6yDSQHi&Ten87#0D13i~pU$rP;DvOeRl`bTqGfn?kF>)k~`y%u?w5 zB{g9Zu%Zq@x&%H#bOEw61WX6lO6sqn1m<7+*Re%u^Q}yLks{QIqUhr#f|S~0 zX;V^gvtk^PTq*wv?)YW(DH{GO80F>Xj5K8db?>C+;)|q3es{kEx9rQ|nk=P*UylFR z{wOspa(Gx%M)WY$W``rLRk&@oGxfeGlspI*0Rw|!TVL|Drcj!6reo{1(FDU7(_Tr@ z0Jn2^hv8ip5~ZXP)O@*lc{2`{XwEzErwOV3f;(#Loase+!4w=>5qD2Cw*LP9^^Ct2td(4IZs9 zU5dz21ka327^gD2>{M|0Sh`|6p%`<$GKsytnfm;r@n7_f4@~)D<$ZQmMcTb)> zc*9w4Kt4s5dw1`ie5``5fuaLfs&JRnjE!A2Ee%AtDQa*jo*~E;V}m-jjEa&)(e&fK zh(N`Mrb1H?6ilam6^S}tV*IW)KaCu82~v}u@LD%1~Om!*kTd&g3G8y1w^OAPMz#zJDqkAj|M}-VQW2WWydM&`0=R~bTfqc-`4Dk~;aMpT6W$6; zf*3Ym%2(;2UOId5Fu+UL=DkW%e^D-}edx)#1j6_OpQzQURd-Npp?48BWw06vdqT4o zfwz9(dRL=FmKs88l|VW(xb8IUo|W?^X~z#Jq5((o+(hw6OwD1G%WwR+doiW8B31HT zFZs7VquNN?DHh$gQE)t1c!&Mzd$8`ea9RYFiT7-W24=pqhDE{ zoBNw=TnN+L!Ha-CJ$w9ym?037sSty3XVsZ`ZIA;7^0Yh)KQHeYL+ygKJaP&(jNG9` zGwniRJ#%fJAHntevt**PbEKDvQ~*Kz@5_ril2k+%VMikmLfYG_E`daD{8TcwKb6Oi zB6x!>Je}HSeNnW`6-*qsh=s;xTA=%oUaZGo|K6zKnw;VoJ_zx=1UvautExRphM|wx zO>d%uIiK3cw`Qi$f#b*LT==PVFKcs(b}7t?aCu8t)&j7?8xH+{=OVCk6A$(2cIV0_ z3N)n|2bntgq^NNS4WAFLV)tcW8IG*TmWWokAR0lGc9?;#azn|r)=RkgaTT(YT`!3Y zki+|i2ovmI<>X9Nj;^2s9bLwTCfQ*_@Trq%^JB&~p{GoKg3smjIaNA 
zNZ3oql`AEZMa5H5$g4~ku9VT+3*PtoQ zhrV&89D8qZX8gL6lTwV!gm#+v2}L8_WN06Q1;;;+S=ZFH!V41j zNX?jRJ2XE4*gcaz0XO?YiiwbiydxM|W&GYFM?}-^Rz!jnND|*L(b81l0|Qv}Uq)<7pI8UP~^RT`x`*!LfjjovYt(2Srf7zT=3V1U6c!l7hMh&4=#E zJ1#R_7PCM^@#0bPn|q^9>x`vZmM^Q_s+Au*TaQ{n7QZtYRX77ZNJpzK#2=95P{V{N zbnGRv%8ddL*}AcsBxI59MYQ7~B)ZbZ!qWi{?VNXXt;meWtGVVN{6-L_+$sAbrb(!E zp6Lk|co5}do_3P^Qq(3T4XU|vzYmgLNTXWW z#oWdI^xYD;?n!QVH``C|OGL9cyz=?JrM?cPrhBCN93cgWM1rTz{X(WPP zkj<&3vCo_h<@@DQ(9Po8hCQi}x^B_UF%Tmo zMzMu-;z)W=jd111kA>*Y_bNBgdkXKl=tAzDZn~^>QTh44q%GkNzez`F47S8({Tyyw zDnz~XED$&H{)8sV((%7i-OE@Wxnj9JKxNV@v<{8Sv!z@H*wG+lec8(sM7zK0Zs``l zBNyw76a3Yvg2k-Q%$YkMQ3Ds!RwbRg5~}rB4H$j0ZQo!0HHe+0j?zy<6el6^Rf8Yd zXCiHpmIqyt(5X@|O9qoE%_7Mz;A%BbqF5SFNK*lj!y`ErIb#IpAWup^J3FSs4pb0; z9Ap`1L^iUmvTkk`D!pu1)Tyl*-SHF{$(F>_L< zC!nf$orbra^f3|hE8iGX3L%B@S?^-e)Anb4HHwOUnWY!y`vQ9U&K-fzM7Zl&96Qy_ zOyqjR1~!l4AJ=FMHASVmIX}0MHic4V_nGyIUPCwM&O1VZrIleDXiC~~=(^qsAsPxjq8O8TKhYQ-WDmhhwWb2}+j!^${ZdQnt#Y}xLY=y`i(c1_L9 z%nV%}4Ji(t7&m8x!|vj&qI{w=S!veAK-AP@E{dy2PI9f+z#yUFgCSOGcVuNrFSJC-N$g;fI;5_c*q$>zzUZh=>ljh#;E0H|%u0@`SoQ@#r{BhYu|I z@JCC&HljzJ8&X-p8l}V!_SV2gpz@~|NSkHPyLrUKtoO`>KG^D9ean7^I`_}~PfQR^ zF>~x}WUst)9`Ap9&CTKobp7$n*=&${EV^H~&(H(;x6?>kToM+#Cw#_CM9%)ehH5+ zymGH$@9r;Xn7FP-wsWCjQp*g{w^7Ty1{3nQbc66=v_M-z;a{bg={O6j3Y7R}v zdcP>*hfNn&d7$BV0G zO)Az$-33FKN`Bd7b^L_r{V4s3;p=cOtySI4Q3ZU2xNg9I9c>Wzt%5MBq)cIK;N~=#~59!k7FMF>?aSsQ^ zteUlvZpR-twa$3I`Np!zxkqv<;&5!Xx)+;WHKm1-G(%j1g^}h6pd+V=9xXT8^qClD zJSV>QlaC|`2J4a1zP;la8|}NZ$0@e?QJbZ!oOtMl#SS>*=;0%eAE987z^IQ+q9_10 zMOic)6u=t#-?MlVe%8j?nWlpk7p@;q-+X4mp5vQco@$n({S_Ed4%e=kq+=*=CYfC? 
zP2HM*eRIY2XG3}obdPa5RIb*c`>t_Lsq>uLnlHnUMahSK5qai1hio36Jlh;Ao_>D8 zSQ5gC63bz~(n+ILDM$zB>q^<;S=!sh-?taVCbq|il3Byle&q=NY)Qo$9$ufH(oKVw zi(ZK=G>{lH+CB1J@yL-`??>*G{*sSczLyI%^+2bm`N#9+24!>_k%0)>4`cPs{pT8e z*Q}Z0T`zd-neMTdE;TkBKP9?ExA0Qd`>GWi=RjD88*H78eHyxBxo%;_4NK`LW<*lB z%01y)rTJPv$(cPXs_0l5BxtwsTavhCP9||gn@+7Bo85dr?1qRra|d`B{tK!8%5J8EB6(j;>MW~T<1oS zwpV5sUhftjoGari=yIS`N$y4&63u)z*-AdBBq10^xODAU5`9gn`Z|B9iliF7@%7Ka zbhtlI{Qg#jg&O~vX8(zs+Sr8P3x{KkMIB^YGoZl%K^xa*xZj)pDkRk_v!lS z1(7gfAhyq)c8k#O@x}fLmEF=Q5+mpK$~#DmmrvETXC^68lKn7;YCGL(8wNi` zk4d?bi!T#tSKwH*PDxxF9 z5Zp}wU6WEnN38K2=teLtz-+Wyt7Xem$Wj(sJe-yI{@msG>dz}A(If{SUABDrL}KUk6F)bi zxx>swMNMXdvEgrT9lj?j3}Wr^diTxVie4X~zEWzqB-POF*zu^%gr!T*h(|G;dCvRq zPo17GVt#^@{QKHL@JtfB2t=N=Z=KEK*hfJhYH#ko4f&zLqkYGX46PnxeMA8uT%S^cL*ZYM zP4<8&7tOPv1!k`C@1N}eOJ2V>?#Y!>bZ7+xvI1yHZK~8G!>6IMyvxikbT4~smO{#f zKE4VwE7TXwzw}j7|JKRn%}5#?j>3USS_z(aibPtKLS?@X&p2Xc_MD9p)L~qIH5$ku zHhtg9)pKI7&Zs8YyF&PeWomkK1Bpx!isCXCW{nBZ4(3ZOFU zEydm|nM;w+j?W}+5j}cMZHyTurP8HS`m0em!GsNhxk3ny3tInNIJb*?inMn}tqDCN z*d9q{A*ak>Q7v!^?GGRY_7{CSh~9Go?ob13kRT_ByA&JSfBn0q-Fo!Ur+6j7CWX!< zvD1^FtQd1&vFl(!Hwk&ZZa6#{u?Ua;gj{4?W!l&C8A) z*t*uyQF{JKDqRNQ7KAf=#&>{%`6?2~4UvjX#&M5X-&L`O&=wA;27!C&6s1X__2#Tj1Lh{cKq z)SAfodDIhZ+O#S5&6Oq^_?y_j&pd+HWI98sHc*80fkHM!g1W*#A{b=tWbl}$v2N$D8{1G?ppauNp{tp?PvQ!~ zWc|~LPsx%Pw>qVoTiv){Hc`Gb$8{!~6x@|sAyfEW(dOmkoMLU769t+R8kT-~Ggc*1 zC>nN~FeE6Ai;@Mmb<38Q>=;ImPL-$+m`TVa#D%|8v$NRZYnn)q!ET>-m@=T6Dvuw6 za9Xpm>id)e7m==?v+r8?)5NJFni8p)bYXLT^H-x_FAYsQci9&%WX}Ptla*T$SW+OU zt;h~JbiS?SR*nCB2a9ImzMY0B5TOS(q=>0mZAk6f!j8HMmfP=2Y5sR-=v%Ce2G}=g zTt{svZj^!Q6tZ3DzI;(w0RfZF3UokUujvzNQghXp&s#-6vVrIE;5Hu842&ssR9Cqq zV^y7M*Tjc-OJr)IK^7p8A7@I_39Qyv|3!GnFW7Td^Q5Tm-y=)!{|lz)|8Jk^Ro!U# Y!GEHrEjP)NTca91>6f#k=5GFf09v5oEC2ui diff --git a/sdk/ai/azure-ai-assistants/samples/assistant-6Q824dJfHkRzsy46hPatQA_image_file.png b/sdk/ai/azure-ai-assistants/samples/assistant-6Q824dJfHkRzsy46hPatQA_image_file.png deleted file mode 100644 index 
02f9bcd170a6abbd4ac83d0be8852797baa3899e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 181757 zcmeFZ2UnHX7B)&WcF|Z7P>4!X5m7*-Yp?;*dlwau4$^xx3c&)1NRcK=kuJT1f{K9B z`=*F=HcepD!TT(8#`oPZ?q9fP3{Mh8c=vkOn)7LM`6$R;q2J21m5z>%Uh3*aB|5q- zGIVqsxi@db?+6@`%f&y0>@I28DO(xYIU3wCq?0wUvo^P~GdH>Mr-R`gTN5iwKAy8Y zeB6KDwzIRg6+U&!;{SXBkJTOHQ?;}b8otRE>#Lf!baX6b{rXuOB>aDDEAyDI)%uW*VoqJ=w``*wZ9rcFtIJ=khm_wsI4;4$UfjP{ zKD&>rr-tKK@@R8XlHEu{q@`&|gs8L972ku3c6M0-0RgwtHB>TgNj-S*AVDoftDx75 zS+wh-&Jg$hN4_j#iG3A*)UozVX=&+zw6rwoAnt_Ys)=vpB80bY+csmzCrh0jX(TK4 zU>k$o;y!T<6Yd7*+?V{~I3vE=h zwt4FjXX9`0?h3oizKI^Izrx!5M)CEuYMRA8J@SylDY@Nu@=J1tUZpc`BSGB_*2^lsUUxCyNu6u$S2!oB zr6l5~40pRus$vOevP#BEB*|}HdbGc?r-!EPkfNR2(wwB8()fU;@a0EeMfUCbBPn{t zS~fN|`8E29D#rxO+Y|(Q+%mBUv6r7P4dkU^7wqK^h^3?ZTYYIDSbr8j@i+&^r-a4E z#>Q;hA-VC+eC5V?`BuJK(btw;1^cW|C?4?mx$1c7@@1`f5xe1+OEjwM+e_AJ($Wu= zl$7k&R-DPrCNegtZ3JVb?Nq0wc?fO>I@C;4F!>(KVMy4Tb^tg z!S_hQF4<5ZCx0S&{w!yOA8RZ5+jt-4u>;uMv6H1g^?E#i7Mr%FrqsW_lH|TTx1mXz z&bq6}d4_Cs>g@WRq6)YCnm=0)RPEEvzjJA*CZh5EKbx{|Rql3Q==HXgP^Xl7mg>F= zWJ}RV|Lf@G$6WqlKd^cY{)Z)1lGIM-wnjwUtneMdK5FPLcGcCIXi88PwjT}jSR7!G zYVea-8_tOg+4|jWWo2>Z#W|Z-YH51ovqGBY=E?iHHD2PtCCUWzxU4M>*p8o$ESK^_+aQFqUd4%CkG+KEJ$elZ+`2JE==u4ipQP+lsy92X%Y?bPuk{myZ%p zY0oq^o9N6hnD2HKaGAZ0;!_wa7B3&w^8MqZ=2B13+$ME@tTw+z2fzFBcy0l9dSkV2 ziJK-R$D%REg2Io|M1M$F<@FUmo_9enb`JeTM;Ee3@F%6>0@^12~bCdsTdHBs@kS9ELP%ap2cpRDgIh>Z^Jfzo9`bEy82B0_@Rg`5*Qhoib8a*$j7Pw^DeRZPTSFr z?4(amnAQKZ*g&W5KHuf&rKd^`nB^ZX(O+z{Wy8*zG{i_bPi){yN*Bon-YZo78~-Yjn^t|QloX^h+cParaB*EVF)@*rl1g_yDJG`7je(~r zPBv^76)Zuo*u~PMHCW#xp>VQ9`?ueIvrI_)lZ~Byq>?jA<>JMQIWwfz^71P9`1nZ6 z$W%L&+*{*0bF}6G9UU*juS45|N%@5;$aEv_0qK2)xFCV?)Yv-~^%9U{K=ch+i4Ehb1H=q!@h;lDe_Bx}0rX%R=_OFiuiQhr)rOzS5s-g`a#6nFZ@8 z=9p`u?8{=!)y6xrQ+zgT*|J60VJxI2S@X4OlG+F=M2jJhUeTNQNEhk8PfyT2BBnDc z1J1a(+ca%c$5yO~5Q!Iaox9yr;_lVTqhFHDpyzZU)9C9Nw)MVD0#zdhk@FK>NmyRy zDjlC?G~&Sp7l$ogN}oI;{Qdnc{G<%40&NmbRtKLFob_@jrxzT)yNSix#revzt_Qcq 
z+w-WGCA!WU8X*Oald1S&R4-;rg~|YScEd55pDQzdq^PYbqhy-}V~vx1_Hge1Jhdaw!@Q@Yt5KFK1LcRD5s?b#9IKwx zR(;Pd-Yg@OUXE)aFUc3%?b-HqqN|X;(S2#wW@%#|gT9+0-O1y^(=*+JttjNfOliz#_vmj0c;rXTT9+Wwh|+oMYQ_&5tr?&q{=z>SD=R80 zEIqIhf&Pwhb6>*-Ysst?Pa^AE|LT%fv5S3VheF*@->zM|TyToS%)<|G{>ZeGn;MRu5?m6r)iJ8 zX!Mpn9Jg^UTUyg}@?RN8I$zE-u2p-&Brx6eDXnx(3*G(X+eq`7zT6%UzJu~?2{=?P z=yUsa@BVHb3N-$}+xvv5sLoD_mA5rf5~R+UzQ4EOLER1Dm~fjbJF)j3kRFP&=as>1 znRfx--ywu+;3^8^K|MN3k#nX|ZRB`P&N^#dKouGaVxms|9k1yp%z~re-v7g5`h5A2 zghwauSO2FIeF^gpt4p*c92a^+tndp_=L}S%=N7L5k7k=TH!S?d6|>C`|+T-LW#kOmUO>LoBMv8uh{vp$ok{ITfu~klUzXmPtKo zsEJ*ZZHliikCZG=*oJUDj|j6YkV-3_dl7U>KNX!fzc4gl+C$u9q=9{PNaANYvzHU< zvdb#1^$>@2`_>&hT27=Dy&8&itx<>!3X1!=G9=-3U)a$Ml%-_h`?f-sH#j?%t<*rq zHjV>9Ji1!zH|^m6a+6v8(tA=A2g9tGTnuP4GmRpXMX&KvJ{i0UOsT6=Af41f>?S)K z+i+~KUX=~k_}IeYBBRUTBR{`Z;4hujcwRTW9j}oBd+KPPpG3`B#j2;%1J!0z-`;B# zIi+vhe&E~SelU$ZuUp6H=oI?V6Quc!zVHT;2B92xP4V{aH!}8yJjSk1-LP(6yfiuC`7Bm{$0Bi`%|ro0O}khHA`k2WC*&c zw*Q55>R)?DkDITU$HfY6JYY;l;OL27q^mN>B2mz|Vr>m)zXlBbXxh(9ym8 z^C|ZRI=Y8NBaQJV*x5fS%;6~p7t5~UDQ5AnOjY2nRez}ey~U2&@^xaPf+EwA(?UY+ zyyMb->pq8ac2lfMSJg;W3#4a0dkw$7ICkL>69_n?i!(IQ6f46}m{BxWl6m9f)^dMW z!A|RcNNtG}b7dN|b^Km|2c{p|Qs_9@GWa#DOU+XaR8ioXph?5UwY9Yk89a_Q18$M0j1Ocf4t6Q(zpk>-oG`jjV$=_ ziJ5=~NoU1TT~pu}g2JN5TC}ZHOFdSnb%q|DD!JYIs3Tm+7Cf3gn99U^{cnSXNz|`K znx4m4+i8&BuL}EFSSF%dAR^>>=X+|RHT9F1NZOyI=FH&NW&i-UW(u~#>O!AiN4{-* z_82-=J?Ku~9V=DM*qE5xAY!Ds6CMxFr}{?W0!m!9da*Bl^yQ~dpFVu_sPW??234?j zvY%&rboB|NVC}PEu8YFE++#T$rU2 zs!`|LeGh8!gYY)_KN#8B*+q^*EFg@={`-e5*ws=cB_+r8ON!7qCUk;}oqoKU1JY<~ z)A(3Xk>vE_`*H1@_}^Ef3}{`Be71wdsGPhNvA z4C8U{j4n^0JqV-ypopJhWo3PSN?#94cfx-%V`0IGn$CUfm=th2v<>A)`%iW@XnS+| zIX|!&t4#Xh&IeS>z@y{;@rGi1hM{V#^gaQLj;52#gQS+#O8guKE|tD^Ef9b1vzvYI z?%l7j;?z&fR{Yza+ZN~9jRZJMcBee!ls_&cq>19J_VMAq=2w><9ch;|zz&%kYk$pW zTr-T~Kw4{}O2Vr!tCIEn0&S+~|8e#9iF4;k{5yGk<>kiBFF&0WR+8>#z;eNGI5INFe+BBB?E+FZ`;yznc_VI z07y2&54CsKZ`wZNd*JjD`JrbbgpqByC={30{$LZkH4UeWSJ-z 
zUd&kmgsdam3(Ox|h>GXhx^=4(peMMx#X~A5$WI5DH>97}d3onz#nz$Z@G@{@A&^pSr!gZhpyXrPid+4=tOH;RPQ=#7ncIHRGrYi2mh&id{J-7rZxH! z5)Bz;7%$bK8hmaP?W+ZfP%;DG%#p)~FW_u9CaNacegEKNK9r=DowzhNp0qqsC=YTc z*IaBjqEx&%7}hcZ3O4dYu-l*Q&^hb#=t*@@iwx$SRZ?}@4KkW~%e`sTw9-+$lP1Uy z7P&fPn>1SFcE#ZUXq?o_`kb9&RrJHF>+ADtRwavZ1|sH$U8%_#%jTK3V5?O8rEIxV zni&^?73)EFvY^O(4duIHToZ1SXO8bBU^lEl8>a}$3+~>%XODpG;I)&w1u78eb_|(7 zrot(V15=W!8je0mwGJxxqHn}Y*VS`0czcMWvHq+j@bT0K7o3jd;_`@dHh<~6{dmxl zs+*Lgp4M?PtnpH*fLY7!?jq-oT&t+AXSt!|{LPJ1%-Y@>5KQh%31}=ievA@7k!dL( z2R*cGDgx(-zi_qXtZybzx2CTJPV=3+;&&|v=g*!xYuR~Qu;?;q%S`y#bA3W9v7u8? zYL5vBH1~=U)eabopVasS<%*F4+oiPy;7am6$GZw!doxh_8_^<1-g52+JBadJ(*!oF z$*J%M`5b=0pjQVmtzSjkcFFSGo&JC0u7$n=$rlpk*syV9BJ>%T=}J!31SR(Pmf(ax z{hT%T&81fys_?}+>&^_-s)IE9E*k;b(mw2w9|q~MPFN%52`iulbxkfxJfg2iTwI(8 z$6C3T5XW5heO3rgEio2~@I*5*0k6UDN0hsx_{pPgwGP+6-o~xi?GRNx3a(OV(@w}o zEVIS({A6pENn;iiE$=OF@g|jXExY*d^j{_TZp6i6->mouz4^0PP$Zqyn}p}-1-rSq zZ5NZKjkP!Wa}P{>@Wb{pL%nEeBQ=l+kKPqYD{o~>NF~*#p5m$r_lu8>HNleohhlTP z9JEVXJpnJ@phI|d$DpY`pS+md`TK?qjBzs=Rt#(u+n!{}$}am>U7|WQG&HaovgH9H z>RT>~iV|Y#P;AoWLQNrMsf0Swk`gAq*uSUTx*Ot^y^yAKz|!oL;e z1~$mC?jYSkI4Qx@I|&I19a$#NBAvc(DF!NWDCf~T^}Vl8;Kql| zvLU>ODPhe@pDtB`(+s1c<}wR&3zE(8?(TX49CPzMu`!Q-&oz#6R|6ZdU9#odMp>{P z*z%KXeUykOyu*R1d-@JHZrtEUF?e!FM9p<>tnzH=m8z@0@GuhaobjQ2#=^xrtW;Hc zv?+bhKq!fhHavb6wU-b?yi60E#rE!n;4=ZzWG9gd{({%w9?eLL$XM=>l9|L@>~egR zls?fvO}|+&3UZijHjnL1gJRTwE{k9U+VE>XQ*>Zw%cQ`kD*R4U*HiRM#VL~?AZMoN zz z7>)k7j~KFv6f76D=Qsvp2k0?uQO1!1o}>8RIx-M>?%P%hGYue3?c&Azlu^eIhn5{K z*aTh=v1i?}Ws4#fl)k!u_WFBTS^awL3F+^fH*c2Qv7GpTMcf)HbJ0XqB9PLgogt5VT+GoqY-&x zrt|g9xFS8l-{{_%|MGG!scC7w=%Q}}951vOe)0F;bcK460C?1fuYF|Y(f=LFPoP`~ z9V{rnk$xK_gGs`psBw$({zsR<#Ol%QEL$J>1ls&l0)K5sE5n{WuZc~D#jw5e=rJLZ zsK;OZJHPey5t(+m@S zPSXf@T~1+}I&j#*sPhs4&MfY;_2zrTGad~!HL2i{WTC;d*yon+4|=uIJ<*X}&FkLx zL@?33J>x-#7vQbf+Cra$k9H^?bYv>TdAvj5q)r~Mz7$`S`&=m8nVVeOHg6_IS^44+ zyc$-Ig$qiTl^Sq9#(iZkT)42qN;)n+p88x}uO71KbW;mz<1n7;EZgNT9JLqd0Ac*91{5{v>B*D~*EsIv!k 
z*o{wITu)w%7TC4uD7W=y)a-uLoOIXoADSbjK>thyn_OD-P=P-9wWMe>S9L}#{6J?s zP$P*_IymcCnrZIJC55AvlveVhFTH$wxDacu*xXe{v9-KMuM5m;OFNH&TJqd2eCyg( z=`+6OIrCMJl3jb&7vIjfpgUb>;I@RVv6W_S=bcB&R6D#+G2*6B?MzJ*O#(8R`EOr$ z+;RHmr|sk2#f9%$Bd|!|-g00CHcrJUI2r9MGmemgv8KXB8v`QpZHG*uYP9cQebKlS zQ^wdWcJicbTG8~wlBLlT-aa}YTD5aJ9u>ZaT%a@Cqg{)ph@|~$7e{?{K#D&TgRpB) z51Nu4Rha3l`R!rZQPeD9kCnW}@;$`Y03#2GdHhIT(rddSz)CWM0O@I6+fD09&blZ`hj;BZjQtiD5 z!7@I}ec53Z=YT+i>=gn)iBAAoq0^uk50}yHq=E9;`$-+ls~N(rIYmXGyy-EsV2#Vs zQfw?HQKz?-h2v+bPadGPn?P+Nd+)~0n<>?#lB2;nrV8!{I+>07xl%E4!x5l~G*Wnm ze_@e>!JD8+!Q@H-8sGv)0?)`QRJ^GMJnv8(Z8&cDrwBM5sU9(pnX4dXH=%2$0L8u0 zNWTGt@1d`64)r7l76lJq)b+6y`<@!@&7y=QCk{8bE$PINPm@a4mdwiQ_+)~(6+vNV z(S$m(Pf$yaO9936&2FDZ%|CarJ0AXfzGdc7*%$ov@M_o%**u|~cTT++rw>fdC_E6R zerde;>nMaGhvK%p<%Pg7&?eGOy?3&n4Qx%;REEc>TIevYf`cX1fSs1*Fs}9M-S*%6 z8YW16?I+#<3GI7B!Ow-hov6tAN}uX1u}kU?Id&QxDj*sZF@E@+r*DA^y)Wp*K};p2 z4m~)N$FosZaHQ9pwFUIRNHza8DCo5 z6_^bI=#>Ii-8v9Vq~+yXVOi49H%9QCb83Q7?Q^?bWMDSfFNME+>FX?XZ1|?UUO4nK zn8{g(gVwKKZ%=pzF@0WNev&NZMu;7XIKNf5X!(;$Si=dhZH0Ceui(j;S`>?%Y-rS0 zdGruf?4s@Ee{jNXf{a5b+F7ga@FBM8GV6sY17Z%~CBg!1`un%_c4YGcD^JsC&p+O% zCl85rVDc)JM(N12>Fex1(L>1BEBGW1hN(OFN??Lht+h*ZNUB{vOD?Ddmjr*C@crl~E~1CJ0X+p8xVArBRD z27BD+6YxOLdNZ_J26$>B{U{u#mzS3d`M+3MqLv(mV^gJ$v?9<*J$xVfo@yvm z4ckr?y#%BE6uW8tv`g3Tbaa`&Je|FozL5cm+PSIPt-WtbU~CDwKNXxAtSO%FP72m@ zshoJ_LXL3TO+T{GOlVrs+vaM;m#FF^a1pRa4<6UN*{*PuXqN~u2AZ0 zBpM)`y_kT&4&*6AK4QsPlx~9LcUXC4{bxP|2L52YB^zYgo)Jqa7;yXuagI&uf24X` z?-Ya$m-&elie;fJ(OVDSy#-exzx(YPC=Gq}!otGsv9@%FEslghYG%fH?PN_wz~T0X ze|J6f@kyUZGRie=KF-e0&RBIuf*O8{BNaev1XU$N`x9sq5L^p*#`XaKu`K46Y1G07 ztbk#j!Wgg=Sp+0k$a@2~Gt{sI5i{*7$VR;Bdh1-W1u<>6S7%%GbSI44so&111Pha9=fLpggvy(j=sXES zI3Y*Kf#Ev3bcOoJ5oZRH#_f4A-T%3KsEDmc{X}mR=fiP)iPT7=%d^3!S6J$az%7_1 z4~2)IH!S>!=g;M+Y&X1GM2}Qpl|)#Yp?AbQ4*QMR7hMWI9v&VABXMDNsE}xFaxT3O z7?K5z5XhlHwlPHw5b8d6eE6oG+}5SGzC-{zVsM;hi*15Cw}<1J0(8&u!JUwmkf~~l zmfYkHiQ3CH63$;%#fiKiYR9j+k8dwSz*uek6=EKs;Np0(+U6!zK+L zZj&}TqKofHgS*hd?!L6J@CJa~WhlZ?1qPd>-#(&e1CJ1UFgH6MtJd-pg0w~6g*KuV 
z&ldzZ?`VZGEgODL5e}_j(ZtYDJj6lGXB@H($Oy!u(rM{yBM1e-Apf8%?F6Z1;plZe zZZl7!lYy%pBs~W>s6$+aoga8KGh({j@E%T0eekPbalM8vm_C=~v@~l%Bx$B|)-Sb3 z(-flbEzM7+0XWwvctB~BjTF^{cBzKFj^@9o3oA=fO`|O-)TvxO1DiRw&bfIP{ApK5 znEsFNA5}ArWF>Fp@kyo*e#GOAc{rl#QRwRjkNNK2UT8ORLr_2c#z#A>foX5qgO&_@ zjb3Y|QchHUVQoGdb|RGTa+@P*1RDA*0$XK88sC~zIA>6H-z`_56t&n$Fxh}eb*N6r zRFQBVaUz)nEt9dG697mS<*tSC8TxZh8+(@mV+QUov^3Y&` zj_&-^=h>U?Uz=e0{<;1^6nwjUl`Hu0v*?9G3$r%RVTd?TZcTJw2ZpZ}gjIWO)12?| zEG4g6=TxNk{rkg^9?pu!7D1jUWVw2$51WHj+drl(8D$`z}eyzfxA ziNdby3?v+p+6P){BYa^H<=bJ>4ABAF7rxs5kORA5fE`ngJK%DIU<-A9_W?VRM+L9e zx`l=1Zq0pHIe_IG_j(6&`9Zf+-;;w@O2ii=Kcs8JYSHXPb5RxW&1&sq@HmIjU75g2y}ZFzaBRAg|jK=K^*Z1vKcagRaL07r;29-3?flP zh;~s?u+1!Lb|Vog!I#IqHn57j<#jz72yf#Mk@j`{{PfIj3B;#NxiV$5s?q@PsDST$ zfdmygv&6MLMS|ma1lrzGFv@lvz5xr(Qx9+PeILjtjI#_Q^yHNl-7bBU@oh3e~NO9^0?F$L3YCvGut9pY5+z)eB|o_fDP z@Zmhkzy3PNA#FH-Eeo-%exh&zEkVJ}t*F|kC3*T) zqF&RCDj=0&(Up7!UHQ&m8^K2j*3O9xMIrwi&WOU5qUiV11X{mUMTHG~8F;Gyi zj3|$#%8>!~1pQJE9@@93`ozpPZ~+T>kTbDDjIcl5N_xgK7xAyv_Tu{b z`p`}(3PxUG>8UQG3GsIhmT@S<()#6JYs!h;-!~}kT&$gcC)Q)3cT-_0K-*;!|AEUE zgFV%L=n?jcWw7gYBDeL5C4-&$7I(ID6zsqFy#kMCz7T#B-4lLQVagiPBBXB0&3m9& zsgm#~(HqE4A$~`gpp}ERYewJbV&3?of1ziRNMhsjo>8zQ7vH&YLfv7vxb#20LX<-Y z79ZYrqBNu;TQgL+I`Iwhk@(-IP~9lBwXq4d^0FIYTB_8BsD{z@;&bB_^A& z1%iY?+HTDwJS3X~f)Vb)`uf$YU!g;I1PY|WA+2*R;@T2E!GRldg#Q2=gjW`;Qe zQFx62$}_pd-MI_|J&R}3u(~?dnco;YFj+jW7d%*-#%xtoU$F$w|DG ziH0YCi%jH6V#A-_PG9IeV-)W?cns!mWBHtFidHl4{eVE5yWQ=aIaUQD9OZU{f25}l zBlDx|b1M{jVX^hW7rS{e4K(sZMw-I`Ao5ccSWv;8C2XK=NAkJy%sB zr!^suf79_<_uR%Fn3U6PrZY%C*^f9Gg>^cvEZEGSee?J8W6Ga;6_nadT9QLcR_C;- zc5)Q$kmY_F;b%NwSPicaYH>C~*t#AR) zBf*voET~ysvN*_$!dYwK!w3tIq-L7&CV`NuExD4z55oN;r+OlZSnp<~Y(B{bxJJ0kt;@hg%KoAlsW9P_u>? zM3N4^=|-1*UCtc;*Vd9E%;$Zp#t-L!ikMxl+)w;J3CnfaQ1+ntBmy+55^Na6t@#Qv z%em}M#EEA~91X<<3X5B6=^^tpllHmqLyr;8X|(QaLs9{m;ah?|$lPo%_66*!ANjqx zEDP^ZQlvWYSf7f#cIa^ce=Npk=W?&a`t-Yha;LewW#$&KnHoU8^Ggx#ls|{V! 
zVayow6`8O$aR+=OTEce0i#TRfUd^hr=?PFEcv57l@}AnD!f9CXunOI|sGh20lPKs9 z)i+OcMNbZDBNqn_V#{ikh zrouPXss?$u`Qko{{131B99~~}q4e<$Rn7CQzyZicoMBTeXvkE7FTP#!Mn7QV7k}>O zG$~i(ps&XZC#%BqT$16S76Xu__{W%<+^yh8~;)S^XT%B);xeG&K6!Dy| z_94pZgu#F^HUS=CVnGIVf6(m0v**yE6v%JYz4n{r#K`No6z&;5Po>vGdjzU24I{f{h5PVBa zZ$zV&^HOe_0XSf0#WZ5ajvWprpZl-nR8Yn|GTT^Y@c5=L?pGw6Q+ncLnSuDD{C3fjzmxc=8)_lV>mZoJ#)%)r29DKrLT4t>4eNl8m* zFmJsMDGhZmt(scx^4l_A15!D*rp#SWx$Kd_1Q@wSghuicg1t|sYj7jA@VW78NZFvd zk~{~c_FerS>ya{V2EBuJ+mDNui^eO8@k^M<34%(r51j| zLKapA6VJMD;gymQE<&^4rvJ#wk-RHl+H@6iO_hBP1BfK#4 zcKcv%RgIy}oQmC!G(9{jmqmNV$E7YZmSWLWpbDd{PPD8!(xu_s|?bCg1GKy$275$KQNAK7ZZ8CpJf{Z-D)D5^r0i za$cXK4yIY;!p}ux9}N{GC;hS)L`dNN{L_6*Os4AlSy?Ar>~D>a98rF-bzit%cQdn= zZ;(xs7ZNVz57?!a07P~4f{#MS%A#tPJw=V|Hxom($P`~5z5UpEBBm-^@XAog=0(Fc(vp z0stJJQ>lj86na5UTgp#S`!6TnKv@3p@grj#W~J06twf!l>zWgMLBjJi07|9~Ue7f* z5<>zm#z2%W84m*;9}d#Ctenr6HL#}feYJY*q~$T+8=jND?=oX6&VqM# zZX$m5RyNh!)_{&q$6)QpRg}o-&-EXlplsyZAMzc|EWR7Q zhZ0YTFmSbLsK4~&UeT7mv8m0A&Zdr_ev} zI2B41WtEt;ND#Gvzo$X@Q-+0KJ0rVAea~m}o7`!WetbX1EQ)$TnB4Bf8po}?E#CEw zN)3yjG^5N5iixcjP@jHce31UrdHTOT%Jt^{r?DDsgvJDbk<7iLugS}+L&_$DQzVLz zS~$@F3&l+E+N1W65x(N?eRJZPO}``YgEmhTQKOo0A|WyA^<*0R$?UXU!DNQl%@E8H z0R9ZlPUjSl+~V-aQ^x9$9Ghf&*@JBuN)*aXtzZP4X@ZN3EXG0a;)5iz6}yBnuFg5P zVI;R z?1$?vV5_UZ0VH!e&yTB0BWgQ6FBsVX#h}Ye??{m@U?zQ|bC+Por(J)q90O)2c}L`8 zG^$C*+?lgEvh4Ka&%I6gGIB#FQ8gNg6bAsT?Yc<_%Tn&#*wBFd%w>ztJVj(o>)|H5 z=+aVuYjwU6#a#ufUNJ(AZtgJUqTP z_>d6Bf^-kICaKq+v^OT=;>S;)P(-C#GT^O9l2Qz*e|(N6RDBj>Tf-K@5@K>N0Gh^6 zE7zf07(0C)N2p8{7>44qcE@hVWXZ;rXh@MnTEQ^^b`;Wh`P zdDz)6VUwqe?AY5ca1!aIYiu%vP$q$Qn|9`<8+`T#XC!&;kzSTN82G*MMmqpf-VMj2 ze4IpAtmMv5cVE|0Z!O|mb=IOI=H0vV!7T$@05b)y;p51&y3amsl-zGLeuH&R{^DTv z5QC$HvuAPUlhcmHX|3h{Pd9wk?xqkP?aysLJFJ-7omc5Q?pOx2Y=_y%;3(-FchA+8 z7156Mx)+WOuln8W{+WKLxV?EAVyM~)n^o9>r_R@yag&kzXkLMdkOTw9EvD4_D> zHbvRLDZ7RmU@g-XO0tz0EYW1WP9g)yn2spP8yJ1VBck7qz&H{Jj1qNj!05|d=OXgT z4d^o4Vsw4If-+_K`5zHEi41y~IIgN0IEm5yom=3cREvc9^h-byuMx~JGp=^+iN}Aww(0mTKfY-?E?CiSy1jA~@GirLi 
zmvVZJLLNIFQ0|G@z5WD-1IE712WK9e&woX#+Ar%UG*$eqW(4TpkO=@N+{Ep>KA#V* zLK%RGc9mK2+?Ndb{p?`MNR4GhbI-(i%8@Y?IO@6Npq6)$^=wYnr3go0)&%sRY@BLy z3AAynN_2I33bsy;4BMTde18jFXlJnOLMfxQRhTx#t1OD<=HZcpw|~N)HW2r6_jYz& z$BGibz7E>E{?t`&oDIu2<>aJ8oUoBu4TZCpNR*W-#E%|PGvrIrNy*Z`M5xba~%>-wo+gZ5R(Y<_Imq|3s1~Lf;)p;10t|bHUp7$r^ zck!Sb@Qy?7gpHS{QUPE4kX~MmMQAdN6f;4za6=UkQLLgy^^v1TN$xbbMYYZMpwNwl zWSrUW4SU)!0I=cC(E%4CPm@U_Oft4)U}q-6(35L@`r?J@fdV>4hW9ve$;)B?ndi+J zhBCpULckmb1vqRFt!Py^v%oz`@J+n7{Un3vkd4>YjVEyYmzD;A$__$C`Q5;Lg~R7v zS1Mk;`V*cll9UP{AK`k)SyWnb30^5rxBkMH2=YSw-U#2vk7uu&t;vH^|MI>aC%Yj{ z+@G85v2wRS9dGTjzGxk(385N}Vh$Q>|Q-L{{* z4iZL6Ben?f9j8^)9f*~I%qxliwPfHhwbMUP4>?HV9UIZ}CRQA&_3=?289|t)(jg)| zrea^D72t{n%lWrgAXjb8;!J_=MFzS%-WvRqZH}aF0fuVFcc0$+!s4C*CWBZ!SAK{# z+fRMFa8j?RO}H4dC{S;|7cj2l!A?d#B8klOoLolCn9MkIS(noj>vR1Yt?tUfoK|8j{b3Z@7|IihQC&B6Vo|#0X zn`gujVRF(sBG=6K9K^(%;-AYm&>Xk#{HTKw9LwS6vOwBlrel4gAC$|>4eg0=kz!`* zsPsaG891JdEK|f5Lyy>&teYdrnF>$;yvsz@WWN8nRVn}!$+X7`BTjD?Vu{a~Q(1-E z672u$=T?YzILP~hB`V@^oD(O}9Z2LlIvJrhm+wBDI;m#h+dTG@#UMT99|M8)jYR_g zwg_kgizeVSn4%G4F2PBO=eI?6@OCRDm=EDZlMo(hNhU)SpfCd$)JQpryCH}HrIjot z{`tbbEA!BxEsOy|1skg|$y-}+XW*fI2OJnU@prVBr>mc(D>n5|t`UJ0muFaUAr5mn zGV+g{eay^Bj@`}`y=D-AG35|V!W+8tIDF$?y_qSQT$&iwlM$35E<*V%>lskF)&2 znJ>vO*9h6NU&2R-qg;Un~cF#FdOE86pz)V2i-fa;7u5|ip-$2U`ilMv{eXc zL1G>#v;eg=Lqe;y7bcFMDSBwMIkLkKmfVNnB}j;w!ESA%&8i-dyi%x)#U2kZAW;oB z_jpgtEDC$B6-ApPSh7KQb{{*1cuxwDm7o2aZK`%|9I%Aonc{nK+pn zW0LG6Z_EPzbK9-Yl`E0kps9SStAQF+~hh@RoX{K_nDsDr||M~5_Szi zxRRPWI^iU`alq>{BPNsTP$I{89*RK=a2_oqN<498lo{MG^2P5sp(CjY?wfH)JQ^F# zD?U^icPdn!NXuJk(p1}EZ*W|>g;^(y#rJb%cNDFuDY;B@ zwHSXV!zDgCu zCLwKXH$=~=$_Xxi_3k-KDIAqJ;0eYm1l5PE^~jd_($mvp`EMfX#31!RV3)Iup6k}P zt1MY!-sfvjQqWUo6lB7cLzZK5ZJG+3paqRtF2vzt3Jg~Wkn$;@PfbPU;f0`;CD(~J zAN)X=bPru}ozipmJqh0co7m2d?$3=illdg=(u|P_ z$#(eN7!I6lnkE(UaJ$5xfBte?o5 zNCDN21*Gew8&>bfSeJOkHXZc!dXhI9zEOOuGQgp$l0q-!~|Q-teAEFFVnqf>fWyb zR#N&hihF|*^ufl0@o1Y%17fRi5ResAwesHQ_V=hPvRQOs2HJlamsFt}8r)$UH 
zwV4fDa3JOi2zzI7r4QV%88aJkWZI{fim1)6;cB21Azfo|LTKVQxXd6{%8XAt_0X+7vcA=ydh!zqlMhtgttMY!PBk$%%b?ibxju z5IoTolzzC2k$?U5S7&?wL6`+?mIrD{JV3!q;MBjrOCIZc{|_L7c06Q?ahLehh0yIr z@$&=%W07aazmW8BI8zM|s2*_qD`L7yI^*gkavx2hp5(SNq&9Cm4IdU4FHs1H#Yj;W zrsqfofegMuW*t#lHwDPV4>!AT-Wm0_oky9p*d8g247I?#)nVE%2v$t)1j4rpxv5nD z@2;cUTgAx08*(s(n+|6@3lAp;n!+A$G~jC+jhhjg>LkD8cX0j9@Ils?tLW3@rkNg( z<*y3j*Wdm9*Q&?w7vV9~f2C6~k^hf}hM8r`IQgm%Gr=dld66CO( zjy5!MrHqD#hA%A_d1;a)5?`5;G;0UmB?d9M9RdQq3NfP@DA4=}FeUdZbih%=t#?-TkvEIW zRp96|v)8k+8$pcDlUD_X0m^txim@jbZ>{lHW4JYjU~qy&bl_eq5I1H2DLr$ipR1U_#iyopt$bZ_F<_nfk_ts)37I?^V);}YDctAM z@ATs%1DS*+^KH--6yT;LR~*Jap4?-BDG_1k=@=sYfStGaUnBhVxAhzMPKhXGWg-8`n>C3=BDry5dmvSM zw)0<%ebsNh*_Ir>m((AyGBU|En%%0G`HOj#u1r6&-)ur*YsTDIzG2ib0&zs6F}ruc z==+rmY<~cGsX(=`U-~U#>;oVJG9aYVB0)^JDtSPRS}4MENa@j5Pt$8g<;D*`OJ@CA zZeCU+11P%kxFQW-T#Oby_KMy~QcoMU?QdiE$kV>SeqbYR-$TbGSIA|X<0;~#3v4e#i6cYD zl787_utv-^ha6}S`8NQzVGD`b=+wm_9HHb z7)ILuWFV~__bWAG^aerUGl)N^B6OyVE!l)n!2>TZrrn>&{4&bF37Rt@s$G+T(5nUu zc9Lthh^GZvA;l1rBMnfb?NE@R)v6I59$i6$*E0+S8dre%6NL;lM_9n_4t|1w!~YfpBEaGl`@BA#H?R~)*k(KJ z;^oVUu;rM9?cR{qQ>^9$p$}%+CDfQ4=8ZW(=MLo`*U^2m`0v8M*XaKwg-vdKDUAQ~ zCoUo*BccIkcskK^PLQjyx(^9EP9&q)@lgz>!hhh}1(HrKz~nXNI^RIFm#)e?;EcsE z_tqw{5TsKln}osF#6cy^kT+`?$EA(+I3*;WDWwF3nOu;7aI_HZqTvNHyhD5ta#3DO z2GAop-k2_c#1b1A9&X-R1F#O`#3nzQ|{_-C(D z-}moX*1dmWoUBgR3Y+se=o(&mzf*XI`yHVZxVnLS5M0I9^7QDH03YN)_<_mOmSXGsCv7Y!9;sz#fzy(=Tv0h1aJ%EpxNzAXtHM5zb_EKjd^@v+t#fC zh|#0_X1aHHBE0U^>-)5jj9=p|H$#R?HY7nQ1m}mW*9iXUt5j0zoqX2aPg*JFc8S1h z)=tKe5Jpi(H}$%;r&3IcRq3+)<(w_O8$uUOLlZ*ZYCpOB5wi34QeAdUKL(7^A{A0) zBZkZOK07#R$Wt+TaPOagXrMg@T%HwJ_ueB09@X)Z(QX;J%AF{OB>H6szly>9b!L5$ zvn{zp2v@b`2K~DA4j^ZH`NRA$i+jCbK_S%qe=~^>E}4P5)9z!yDgpGMFm|o%41hOm z)Z0M#UNcFZ!7B3q(8uW9;|!D$*+yHIJ67;ovMan z-c<<|Ng#J*rt>T9y(Hov1c_nC3g}ckf)Td*cKc+@@98LXob!bZYHI5Wl+vm0m*G|cSW(cMJ% zjds9`+-dY2_nh7)5meAW6?i$`GZ0v0;c!>YZM1_@h72z~nMl^hl<(!VKgf_XJU99Q z(6${`Ys(7@!@#1gWW-6SR-DW=0r{z9o2q)J>4O2{(g7j8&D@ZtkEmS-#k_72@! 
z)nhgr8UG*l-UO`1we25X88fr7O_gY&$y_wcY)MITX)qMcqCusUd8}9z8Whblr9mWf zlLnel6sc6wKoY9&cP^oxZFrvdd*1Imj{ouR*!!s6rnS27>%Ok@{7vV=DL1~fC_p`Z z_eK4ad)DB=s9Yp}g2P+Cm;f@J!Q{94Mp}JKgkV~ddl@bsp+B&W*PGWsuYsLsll!35 zh0knGftT~Hw_1w`2^2aSiLy(v%B5sxk=^E|D;FKmvWeTqgkYO^jRSDLjL^2rS`-m; zXyGctw}{Wg)0358iB2_UAk@eC5K|YSd^!}n^?W4y3`3tW6*V&-nxMECcA4Z-gZqm?}1<(8=r8Oig3BjT^gq&vCT)Y z3jPor`DSpV@ImBk_*ox8Vf0`96K`OPe>!b^%4X;%K;0%uH?D_3wzO{c8vKrrAd!fK zxJRdDMVF0&ha9wFgHs!d&p{p)kmp?vvDcC>bFJW>^ILl8GO9$Es-ox zH~ZZP899DDwQ}{S=){L^^ixNaVq%TuIdrbIYCG~_*KUPN+h|wL98_^AMjzNsvfkxU zwtdzsFe&I!)}np0eSD1KW1#4!x|Xb*_%&x5SO7l}4A@G|tS9RH3%F&*P|xXH-gE#v z^A4w}4^SDPOjC!ev@mex2&DcdN6&;|ppb}ZG%HI8ibe(WrW!gzbv(CJR+7kLZT;1T z#8Rr_0gJ5$enUn54(FFK*vToc(FOu;l2x~~&Gs!w$J(a2O>ar-5R`tt4fmQL*x*Fd z!Mx)g`-&I+1vygIp<^WA6Bl|daDEh$$~$)KI9bPg6YmpZ7!3g0QO{oZ!=49PhmJGs zVI0#7dOcGJ&03d~L)ka9)v&>lZ^Af_G+YSUXRRB8;wlBHDg<6oy+=Y8iE`|i(I{q(DIgH4 zMUW@#`ZjIcW?Q&T>TV63{NO|NgdI4(C2>C#i6g-6Ahe%q141{`cP-3OTnznE6w;+= zog&49Aut}BRqou{Amp89O^Ok$U2H`G``b=VzEsGk4>>4G60+P?(20xU9 zLqupoj517iwoxd+T1ZYbapMZXRMteeM(0mR<;U6|nm*#W(SlA=3<=urs=^&~iT>Jn-% z&;Xt=(^Zer)dp8iYf9$4@uSeSlgUQh^i2B%#V?_@s|Dm&{4NwJPj^n#NYKAh&nrJg$=6uk^iWaW(=|H_VryQg z+E}Ggx|-D#L=U$Ugfwuyp(XXifFvl5m}0Haw|$GX!Wn+Kyn5fl8%cOEiVnRP7-$RS zeK2xT5$J^HRRxMJ()A@xo1>2g6$XaxU@QLa5H9@M8d%K3Qbootr`%ZdzEK5OD*5nC zY&<&yeth~nu!h52Lt&*91^o0C9xD>C@-l5nx;1N#qwt$%G_;M_%=*MW?c1sGMff#^F5{&_JJf*2*a{EEft9!#$@=!k7zd6L*vh!60A^C# zL#E|0Io$T|-_KJ66LJ(GbExj0D?bO$frci9-)GRbzpA|9i|jMbe;n8f^RZ(?LWx?G z)?C_Ku_>!!2c(V&33GyA!vVF>5pWvP9<2pHQK%nwa(1Yu_s{vz_2>NVjO)rsR>0{< z)Ii}ExFk5XI9%Jm&F_5svy8zbJ@)s-f9YR@&zQV43fU-+BCwZz0y95`k56BICjEcT zhak^;$`NGPAHjBSinm7FG`fdi!Ylw9uLJJ^!p8>dgv3Mik|?*s2t}eKShOG2wISbs zTJiy!hG3sEs{5yZ{iK}FsbGJ6&sCG3B4U4h{CB?5)MnGS!jGta)I||c2@=8(>ht~G z(d*5Yj~gTN5_BCL`X6lOyDmXU#}pR!=yQkG_px8NY1Et%FGtN8h|l!zx39ZDTa7sQ zqSsHW82k0UeE&@<#?r4kcpWm;LVXzWcc*Tcepg__aD1kJ`MSUE{#?N)_xG`TFKbua z{JbBTrczQms0Rro{MYw1(#vhgIBrPW(%2U7T2xEe=->V z#y2V&YX>k~7ovu4wi(ShjJLJ_H*fJy`licTGS9f}{hOMs0ECLb=ZUJP$p+IsetcB) 
z|LIy9OnDUmNl}sCtRHpE)nTNEtK`UIoPWQ4{q6Iv6Ik&F1hG});5u(PprZBg@|!Y; zSKj2oXZrWs*WW(N{^KV955B?K4w|OpA!_;GzZj5LLb4+J?_7+JyHdCq6^b~nWMTB5 z`Zw47DSTYk{C{))eaHXx>u;aiIQ9CEyTQ=k!`UEzeCOZz#;x*#VGPDL70BJ!F645> z1z+=ri|@Jw5soiwr!z)g<~}>9nfq*Ce5QW~c7NUdIe8%Yw|aau4aV*U6>nu_?cer? zUfdC2vI;^MzOz!ez7Nmc-#4SCPMx>uviAH?cnOZ9Y4J_{~Pme_I4jPO>*Fev7>7smw-5e+G=^Whezmo zM)Ks95H}9^6uZobxB++xrKhDzuBpCC49u zw}^J#Cb-D?(-kV~0BVo}nXqTHNezh>y&l!Ue`Tsj3SvMG#4nL3D}+&y14O3eCdz6> zIf-HkkdqvZ)saI^nr{6NRG_G>to>qvohl_X0wm4{yt~OS2ABrTcmZTU0lsLCPcx|r zB4Gv)AOV*y-$gBCpMn)n6PanuaBAI36QiM-i46-Aq&^Yg53GenAlHZ$Kp8Jtr2l>0 z;ES85R(Zue>~40!o)n=hdTn){cwKyzk6Sd2MNJo3{LocZY3(E~7cT9jE7kz?4=xJ_ zAY6w`Z9>OE9ss4lLZIdz&UJlClYl7R$HEU)HNnE->nQAkXc>}*`cX|uh(BFdS~L_0 z1VMd;mqo9QYA}22efRm7$t6L73Jcb)W|J)tw?-Fgo6BA~e$q*Eytj(){$gL>1>IsX zwA-euPP2n>H(!GkNTT6Lig*i8iY1Y4k5M%1*r=x!oxvC!TqeY#T#xtobJS$9gTN)4 zVNJF_=j(Z)0s>BodaeX=P7VlLM-bldcvDsbC>%C*8x#leB-cm08QjBSx@33SL7Ebf zQ~#cVXn=c4_$I*tKq1ezL`B+dAHry~eX%59mM-^us%FLkrMfAPg01iGcJYHRvJat^ zU*ENM%^J}wtuZ;eTXGOuZEZhnX;hWF^H*Pn5Qkl{^rDZq^c&2aRiu zFBr-rk?*Zb(Llsrf`+hhSkKrNzhSX69b`eDT8}KhDf+v!?6tP=WC%VCJJPT%97udl zNk5-lz%1|ZO9R_b+y!l93`FnO91zE3-mwTV_rPByi=P1gHp^3mmxhzG*wFL(GGvaN z4{i}e^+>rlN^O7$N{oo8$J<=KY#H?z_!)q4ebFa`5CZ8PaJm+G2*q7K2$87&0^lUE z(;vJASc(Wpt=c2vaXV?)6*eCuJv1gCuFe{0f32y>nDxk=cO#$Wpo(V~1923I9{6ArWA(1IvHDXZ)8fYKIaUx|P%6~2!r?AY->C*-NZlB9# z&-eS&Nb)hzFhHU{l``maA3={EQ&Caj6fJ@SfI=`x@eJYJYHV9}5**~N0I0K45wcR2 zo5iUiv(UX2-rrlbQu8Gv3MPQUDnL-@4&P9a3Jwu8y6pQ$#!5(3XaJA&^fkDUl&Xa{1PKj{raCWvhStf=c^~=w6TXDO2 z{iT`Kntr(w=tM2AR(bK%XahRc19pTQXr>8Rb4l~{cpRr0y?^a_ZljMn_8%H`!a92N z=#H~ZgaDXnSfWy-7?5f>Yz{(2!ejxy7@l+ieK1lY)ZjTs9m$(pS!*#-?n_s7duz!= zDMztaU1y}9hx8v(SC8TZ#Y5DdzP#HC-q%hjMf~6O6uX4-kRjvxBy3oGf5Cx6E7$MR z%YsRCWVH6F%Zhp_%fjs}K3hD!k;+a84cXIIKs_<4qsGw7!nE-9^;vqtmVUDamom?_ zS-+ItYmsH?GjH_a`K1P95;OlUy}LkfcB$bckL4SEj=rq9yI^dGMV8v>#BjmgO6IB$ z%R?SiZbIhy$tNLeU_9RBbA7Xqd7kRA+Y6p-@_8ImztIQS_30BQHfWZ#URPY+{Pj=F z8yjpPeAIt*soMJ_A+v!(%Z+^St-K=^!e=eAbno-7^2%BaTu96?VSUfmUNj8DK 
zQW)z5dZ9<)53qlJG~czCbdn^Au`*7D`!Wpc036hr>Ri7_P^MX%E)#}A=pwg+}cZpNt1MZ4+E>L2hIDK+Y5?Dt9ybW3* zHKX%_&BWLZ6CB5OOJ9NoT@QrfiW02acf-#IfcOsv&&py$)7E7|tY1leVR&Fr5R*mk zzX_i2VaQr$jP0z+_N*|%}k;A z8i6sQ?p7H}TSBf$&(G(2HSBlKoUB6CViai8m3+hCpMb|-yI4Pj{Carf7?MHra-e+z zsvd`t4YbSRh|ZhiVKG7-8=ulN1*3-c%yqLhA?__U6xzD$NLIAcRv;Vfq5We)IUc5l z`@Ur$m;K+rdlQ)|?M-mq5rL9|34c)HO-6#X2ChzywKIM+Vb}@K&9NSlrDx$<;Elry zfLkd!PXVY{{3$K49a<*J%;%&C`kHRDwC!^#X^SGKnK9%&HNmw8h2t-q&yS8NTqW^1 z)57pr7uN#$thttprJ~*CaqL+9E?KDB0kT|H0*@eEA_dtN$sFo!utmM84%fSu*xH3{ zUb?6lOC|!@ML#~~jAjS72${Wn1d^deuN?SYRM#2It9c=#J^B=78Jqc`ph@U#1muX* zr|McRYAT9sc+tX$0~UEY#pXy0=P*SX8z%Ut!<1eOv*JFbNguqJ?HvZoT(#ACs7Kd# zyP)A0Kmuu!G~QjK-&u$d(v) zSb~{C{QUC3?B_nP227^A%Q%Y=W*TT$OtViSKt>F&0q=bky46`M(w2k2j*H%j&73So zOd*U->^oqnl>~)ty+^90R_G#9;pDALBvl3Onoj7Up%d5*pL*5B@%-At$0(3i*71El zNCDv0RpvF)09yGRUr*s^aNk9Z3jLat(VYim-Z!x*Xjt9+*OJN#G%~7OW>JHparSkkkO; z`115U06TTa4wk6&u)7A24){>b6O`Ox1H0|+1-y_F7*J`yqj_6lHl)pjA0e>uW^b%{#HB?ev>jk@u1P6@@dU^nmVyy{YhOG%OF?;)rX zLAi#uhsH6!sWyXUkkroP*nT>g|IUX7PZ@wTY+-faH1XZo3}H3j@R@I)+nO#L+KukV zdW1TOteKD-DZrHR2*YAXMZ}trxeh=S5E!>YTurb_s@VCS6hi?_6TKi_G$!>QBulsj zHZN}XhjK@|9&(g(6r_gYo$CErN$t=WK$!8a`_WqgEFIQSw$2sf3y`dZp>CX2;(%v_ zVl4=YC9N_zTOn+5q0cm z%V&}$YD*E%i zd~%SXKTB9x;BVx|$&`*{06F~YS9Hfu(YGGg;wv2Y`)_pP)tt_MG-K7^q0B#Sg&2Xb zc443AGtf=Ltp#wc0Dc&mXFRJALu@mI(N@Vl@W*RraKa#e{5J&V$o>cziADxf=bmyN z9S%N*UjI*zj_Z=bIHTh_iN`sV3#I9w9slRLs>f@Syq%IqdVKsl-*BPvK7n&dY@DNPN)SA_`|Cn z`cL8bp?7*bc_<*i!tur*o4AKf=6k_`;~|?FH3}QbfImB&Mt)hj!s=jI7zre#5aLf zG~}pi!JqMa-K{VOQrrn-L9YRYk)jaNOpLN~ChQ(qf(BXw-I#H`eupvaU~?3OE7CeX z_}nQ0pf?8Z0KGWD!;}a{KpE_Am?d7LltSGM61Z6q-OoLZRCghO6TpR&NQnt#i!&sm zpn;}~h;M(n3;_!hvpjgEL!dILrIb&aLxSPW&~Zqs*O0S={tjn2AUR)13yX4@B(4;l zOMqBPbkFzdIs*{zJ}?|)9j7Sa9dI5I;o18FoIJoM7I2TihAc(ge z79u@=p$e03X8vp@hS!XNSpdrDILkH?Q+CEDlwir-2A)cBx8r-a_8>($UDiy;E)T07FVi_l^d0?rl# zaUct0(C~U5a1^#@Z-BbLHsy0%-IMrGur@PUbINl~l4p<82P5EXE_0Dl zE2U(PP9%jK#9lN5(e53&dC=-!8s4!G48XJP>AX)8i$<}79~?3i8nc||MUUI0-A-7F 
z2;|tD6ya2OBM!YICBC;HiHa~&N}^QCz`0eu{nHjFCeVkB#9R;$>qiHZM+U-k!}T>w zN5~pBO!+zUP%#I535D^Zo6#bouim_5#LY1D3TBNe44D}ir}^o9e@5Qa-w@{L-Y1h2 zyJOMZxes5+&UYS?iT!oXQma|RHo2PQ}%o*x^A6Br4l&kf0as&ixzCLH6k1m!N1oX12Z|@@gN!E( zdjwoPMI^$H4`Rf!0tuz`mwP7l4F2tF4iJ1^1crhUb}zz#b6+hkMPPY-7fqqQ<3KqB zR-JDQ^|rxe3{=&-QHo+C;5(Jj?L{HY5kMFpG9ZJXCN{xxPpOZ&hTU)`i)5=D6KJ~! zVmUe%p|eD4$<-Kv`V)>2caRDEMJu4XdMgKZC112TIdJC~1_a&>iL%Hs(Mo&sbaii8 z%Y$5;v~%J?wS^;BRWcV7!)K6N7qn*Lt_IR%b}e1H)K6zCbb@c{GQ3}(PEgXUiw5v3 z-V8ukE=qU<1cN)brTD5mZBaT^Jb&?J+OO@aDRwMZ0}ai5KqeKcN&*akSUWh9nFW`6twmXYcXitfFO>>9K6!w)9@#&@22M#`XQO#vPAiY~~1g@Z_sd|Fz?KADyPM+;2bL^ekP1){NIr;fAI730N z>p%q%bbRP_poKKV?_fI}2?%yr$+p34CU}yKt`_^@K>_fl6$)8(I8=Q)p-6s-26(L# zil^kI+FE6D2cQOGvbsK(caaz4PCYsN5J%250bB_q+U6Kr*m16T&oes83XMq>7zrm@3+3P8Np79N3RCx&W+bb}zJ;y%o4g_A!M1CFH(AU%)n zk}iG6ZgA4BKS85)ueJNJys~s+EqKovoy$~lFo_J5@_SKVuP|@S_0?EwY(rkt;jUW^ z?Phu>d^TpB|0Y=bnRx95&>-DBa#GQ2vuPr%F(9ymB|z@YIu5$$9?sO{iax%2%UP4l*&it0WrXsNE=k zjfx*7G~)cbQNISRMoKlB1^dvCkF%pdfs7eFdUHp)onQ(A#^ArXEtLv3LbtvYNN8&p z1p5-cp+L|bV&d^_gdkj^Ljb~>3n+G~wQ7I>l2TY`vvmB2OZ|WRXwX&k!;x(h3K~-< zl(#j8g39Z=z*%KnzClhjFag8QM?AqLzdnZt^pw**%>|g9l)6Gq|#}C_( zKWgzrp4%_yjkX$R+pquf31@C?KR01{$5r?9>)fUHiqyU48hg+68voHrbGS_>W-v@| zGPwHi@(1lXae&eKS>V+9{ZSi&>!)>e=#DE1uiIT8T_bL-I;Q4=u(ULsHmw@b)}4)A zow10D=V!GzrlR`U=~hX$8<6O$+4yz%PEaLwAu1_19?7`-+hHj|BEuT1 zE=(5SmXoYg2K|Jm#(Qkp5K6vn^vRQq(cX5=QQ;xX~$`IHvAddU& z92qA%JPPi2#8J4Nhk&K zYoe}KDI}}p*$f+ z#q&C}BaVoBnMTJOg(;(a22n91J5EhmIe@qgI6%X8lwGYk9|?v2I{Yc|-1Qn7AtEu! 
zOQ5+KzT58hpc$`&PARFFn1qu9v8t-7e_CX0c=&CaA`0V1zcyJu4<%3uWX=aOL6ejB zmHXK}HF3m@LDT5Vw;?GCgm6X&gFnwODQS=_{4o5$>Jb3_d&EnZd3+u#G4<|bdS!T0 z@zRH)Mtt7E`D3;p=piOnTM@4|4Npi3XoZ4`W_W%c(WGHiI!f?LM_RwZL&q`A(A%Eu>1L$vizhi=-^U@H)P^oNnr@QhY(K>x(rp4G5=-vVFjs z;TJ^B9ZGEDeBVPCwXckKpwi#6D#RItyVnqm@s%g#)>3RS};5gCSaz>Y`w$Un&JXkRXOV*{guM9p+O&@ zLmUwBYqYQ05>Y;MY@vBI1_w+XSW8RG7_g4y+Cz&1>aN$ ze|it%WQ0tUYoK)^=%$6w?E@)|b7f7hEwSvc7k0fEJ;unvc;TMACijqkeB-1g(0~_A zY3W>qhpnPK>udcOU5kHtHx3$=CN|iO;$Ea5tGPSP0P9W8Kd)g2e`Z`+48saB%RgRE z-sn0RN`HLFYXv}#zeI56%#|i4l9p$<_NV&#@>IRTeed@Qnkwy{_x;;4*qRR&^_0TY zUGmSa4dm=ed*tQ!$lq+3$2ct0Cxri4&&{ZNZ>kvT-5bo@-~T`V#(`ByLl~}B?2{)e z>;^HI)0lXhV}AWMwa~d+j@#2z+3Hx;_PMIr{?)tovRMP!qS+0D88y8h!TV;I1JvV# z^Y-}nuW-wH6t+&Ab77*QM2(D2{`SHwOVML6_MiIr@vkHWkUK&9MR7U4?#;AS?*plg zt0Vb!!Ab0!@yE6BkHztiZ+u-XhMixS^pZkCM}GT~cj7Ec?XGd}pB!5vl2$(3BVE8h zuf|UDgrT;jCPTtQqzBF$Fi*cXM8_}6x$k1TokH$=3zPfblU%Tx))gY12}=6pe2HGH<%x7gcxrj*&WnbKZNmi>aTAzeqrtl z>jLA%Ox3(fjTjqei-;&_YhzpG`ns+D>{b48pZ^=*@IrH2x}5)u{f{G7E-8BtGkeFi zJ~th7djc$;&HtkA=iE2$3$I?evQPEpJP-2x0rLD6cF=0@ajE{|pa?4M&ybgxut0Pr z8k%#=_n|a$ z(6a7xV&1&J%4T8l?(g^SEZ#Mj;ZlEwk;9nzaRD4T(Av*BrG)RDi zg@q$*A0QrwV(}<6XPEbV0fD82F!_m-WET~)D%{Bv#cD3}coxb><#27~-Li<+O08mW zkFG52_zVSgwx6F~^g3bWr~zP5K~NeA_m3YRN^*Mk6~oNG^z2_8aD>vnXhacU~ba9qDc|K4vB>G6HIbqBg*X|O`&Qm2%w}NsvV&pVFy92 z51dyE?Pf8s(klpxpyVz|NlA*gqm(0PwIUa{_EN=LzRXndqzF>7$l6k!jG@yhuh!>TA6h;922s!zj@*@ zD^SlmNBk2|*J+*$Pm#9%=y93rH>A&_PdG0+b|MCav)GRcRCQyRGFAtcTExX=#h+y_ z`I{^0B&QVTRA}8NQ};pTI|@bT^jgeFM6?SnB-=ts~Hjkc1%>*6*)f+*1s)5cxeA>ye+%aifb6M7 z`>2M^gUOjOjR=LlB7JTzb#gj_Kn?GL_DO32~&g3|Ok~EjDiVtX3m)JcgBL(uGPOEiJybh9}P;xh-6mK

`YBf zQ!5coGNvpyC#MKaPNW;sS+`qB#F^oy z``D$s(W+P2LCHJO<^cqc2?!;j;X!!ZRMB$Vz~4ZrQAp%izOPyfH`og4mCsw<+w5g1 zhYI}+pUjONfdwZYXwQwg*c{Q)-1CjQ9&z{H5{`#%H5+?W?T%?xX!;^$6e%WZnABVB zT}$2k)9>G}gVMI7yz|3CBspHlm5hsz-&YEj!y?~d^ytz1(7$`cUcxF8@Q%Mt*`5KL zU9A?n>}KOA0@}aqZ&Vb&T;SW)#1tBy#1pS}+DKb{qD|R* zdE%$SSrx{YEcNJMJTYe6xEgdDo!!OzPb!DwD@?p-Mx$AWCHi37-3Hp)+UHP05bukM zD>F_M=|VTd=Xq#>?0_bGQdt@RR`rW%?VbxicA3!2Ee9rRc&GbD~T%?SHI{VJ@h}Te+oJk`pK0!145|*`CS5J64}q zB#hm^|LoOxY4MmTK-K^?2@46W)6mdx+gzq!@UgjzcfQMRl}q7UzkKvAIeq^8#l3sS z-%CGZTo~qEg8Ht?Y_-yG-uS*+G1~nym=$x+`PQq`_ZEv=o?$b zjSQI?H6jKD=&g==#CBS5QhafB`fiU6icv?zFHbMN>#a4~Y||h9BP7s8acsK8g>~M+ z=N;>Ww7t|DtT7Ad+&S%fmrI{82wfLv@@LPY4k`RXDY)YMK9AG0IXMeq@=_j|85zoi zKRG`~eh5VlfG&PllUG841#I?T{rhlH$_5Pwj^MR^-8CLl2q&uTL|}`V-%>VMZ8M(r zXR?NE63-bFkTQDh1I?0{U3=Y@Gs;f-`_E<(0+j8^pr%OpZNX?v0*mR#y=`pC@;L5f zk42$ffy-s>IT=NpYI>-8Ys!wsrE(Ev!t0T&7Io$_d$48Y#moavbntLb6Qs< zmIhoQ7HQPH;0~F#MX0j;b04puRu?8cHQ)r+zO9e^A?{VA} z?^@HJ(=f&&QO*R3Vz%y8;!YRVEp-u?xcFWHTll=w{`*y5#tv6pe$OrS6z^+>`_Nyv zHf}A3;n?t_pKc}Vf{}I|V&rzWT`Mn=K4hd#PEM2$kOSCc356DZyb__DnVnq#SZJS{ z3e>UQ81lZ^O@%E65R0_VSFc`W2T@42PP`gonIhgSe1AmAGaZ3paZlSk7fG`vM?^W5E%iz#Z1va40 zGp3tCfbVOxI~@cDbfNLvtV&ZynJsVwS?z-N!u`RZp8Mok=2W#AG{t%M=hE`JiJ7BEKN_(P$)9QIekIOb2ByuF$4>CfY|Y3(oirO;fczk+_(mF z7lvDwd@#KRDu|LE*g;?kiljr!8)I9>gps*HVk<}~Ar|uMqO5yspqV%mrJka0qa3wQ z@M!_5?R*R8UrFW+|FW=}=&~@iQnOAPJBsXad8+oZAG!+^y(`|W=D%+H41ejTicV~}_0fG@v5&kYI43yh_i)bsH{wn!PoeBWzDKJLLd%&# z*ea=*Lg|tpR>LXGPM#c9wpLLwdV3jcZevHS>kQ1!&GpKwiL`!>VHp)4w72@HSDtXqGd@8R6#v?E$ZA7@gaXu|F` zGqv+2COFXZF)EN&M4pteSB%Nwih5&t;(Hg z_bJPj>mn@u=BSmVXTX~39 z9HnPlq@Ixb8Ij1q7!{3-jMzbozPuPgu7V;U#}#LxfH6a~9X!7$nDk3vT7pWnwU?)7 z?eY1!`G}I+n}E2i$U3NytRBF^Xp1cBdF^Cu!EV3C09N{oq7did&D=Tttemr>XGu1y z*jj9|B}g7C52(c44XuXwy?gtR8VUvkuk^}?gJ$>!z|2QBOq|>>!Z0f z@HCBkT}pc^4U%O&y4H9j67`M>#u%y~s+Q(Q%`~_nBSC90e^F<%hg8k+qema%dPQMQ zix!z0!03?&jYb0l+l9NP2oB?Y!*swRQ8@LE$8N}@|MB5EiKk&lczvtATYH^MC>;S$ zFlsp!9`o{wN;6ZhhzE^r*^O-_pPW&=zr}o7%vK9oTr&ytUd&62`;L$axOj0Y&Ob5; 
zKTW2j)N@&{V>FUJ6!4QlzrE`J0V7i(pRo z+x@AQhL?#$gH%;r9T66`3YI7p^nkXR)6Tp`HzOCN$&cA425?*)AlVyi>&tL);cVE0 zfHIP-z+Y{E_$4(k2qW4S_2^y^K{xTazeXn74U)mFtXZR^qycgkbKq)UU-v{kRtInV zh#Ty@K^UfmSlCK2%QA`d>V7{iID=94$~aza`OKA8B=MCRIVDJUb3Z`x>c4SUdm!mjR(QJww zrJQT=`zBmB?SUS1z_B$$wfn94HTAM0kYHI;uYdOhi_#Xx@E$KCXf);2LVK+t*MJk^ z3N$C4nYp>e*kZ0=b{t$dBp)CwN=d4gi8S+P4K2Mcwb-bbue}$QTogNsLnFF)JIFp7 zFdB2UdLpI!?jPIhiW{3Q>6~~2Xb|e`S_)7{Ph(yh(|V@7v4o_oXxTDr-W^C$>-AEK zt5*4owqg$ZdQIK!2TDIRgq1F9uHN{bRyHK5oc0hI$=f7bRZj}QQ2!~ z&z~1?gun*Gao83pE;Qi)NC5h1h~-10GkPKLyQ@!1!!|z zK*V~yvga^{dsePH@o{X}qX5j^boq6gy75V(#i5tW03W0vlhRPz%U!a1Jf&pQS(chk zZx=wuRZzwJ*>PefkC;#c%r8$_g8HG!quKAtNY)hE=WBDm&;dnUdq;V z-^5^MIQAAw0ygXjJ+&FLl-yK$1Jofi4Rcg*HlbmbsyeM&hzoQ_>I01U#X_OJQ%e(( zgAlPIVkBdtfsT$lx-<@g11v2o^UnwlEEP-^u*@%`cn!Ae-jiWii2=C9_}qV=C&=&) zhJJv_!U|bw^97QLhzC6ga@@y50(k#$h@G}51-9AFc_kFxexkk1oAPJrCS!>ShkxPq z#ye`>$om;(sxbhWI8hOaUfd^x#~qIx0h6~HfQ{-Qxx{v#`}$<73mBBETq)CY^)T|4 zPvVjT4vvAW#thmo8ahmkxu0#ZHiV+5yAxabr(+>)gvod!mf}B_*_F z%AP|R?%5futY`uK^%u70!M{b_8f)&H%ctFm*qxx&+xvMR&)0j=+y= zf?8ID9W}Uj?_O43o+cmy&%9~1)Ei>ViF{yh>P6u1xRNGyz&iVA2O#w_bsCg8iyKrZ zVF_(@3d9){kEwiS5W{U#VVEdO#nJI7?PfDYB4v&V2|-0L>DRUBlxtfkR8j zVykIr{2ig%YYUWD&d|TJvo1W8*O?If!1V)8-bLD(-kieWC z3Ha!_bHDGA@ZhWiuxWJ-x>axwG;=TjOPVI;k+K~Ny3Q{>PL!g(-L!>!z8}}=g18|V z_W+dOR>x7CHMGXzehhpri34O_cOAE6UmDxQsWSacSNR$J9ZBw18%x1vb3V#O#%&P_ zzy73l)s0^JiK~GKelpaJ!-~Rtz9J*Q8+!sZL2Ca}HJFO}w3XlL{}YV`zs%kAGppF3 z8NZ?QNIQ=oKl;asJ?xhA=E}Zod8*L5F#?F24sD~q1G467A^;tIrl(9$JwyD@seq8p8% z0Ak?i*2C~x9G|99^B6Y;zldzSIrK~RkngniO<9B##<=yAY~&s`KF)&Vh3527c5qg9 z_9=_f2>Vildv-XwDZ~N)C-TyzOLMarjLPE^J%hn+b50E4h*?=!%-_XnyH?!f(VfH* z5`E(*9t!5DQ1U+JF5}=a($;8tcl4kT3zc!AMLqhX0+Kt~%Ph;%MgI3rNgKFXwsjN*vuN6%U7DnRq z5(Fnx6d_YcK;TYoua=C#pV7P}9M4-uoG4LBU=3~Ep@oY?`45O!Ny)vSw>U=r1c89B z_CuNt!BYX2N6d`#_7|TwlhE$@?#~t!k|-@*nyIp3&Nn5;Xs3x2%8WQVj=U$FhkxYr z+ib*lF-zW>0AU2%n| z;9BpEGbogfX%Tn{2B@BQo3b5sCB?r>bqFrN2QrNGaAVXvIc0Tq3| 
zUHNVe&D}|}9(D`+eD635cu&NWN-xE|_9y+@N3)OKdubBQi`xfuklc+?6$s^azw;@7%~qK85H;=0`Z-kbZw?9O9m6bTLf#gZYg3=+$f1MD2WwH(g7qPiC>HAxq1hlI;?% zb}i)`Ov6d+M0!mmPVKB3PEDHmkfL`WEu=JvI-p(qsvl%zr%*(NpgTp_@9(&dx4j zMK}yIz)cn4EP0x`D{0=md3)FH*NHPcG7EOCRHWS_( zkOw*4lf-D3&UgM;q=lHnroJupf0y!puG6dkPI@S+zK;U`sW&3N8eq3(`&)po#B`$L z41M`>6XYt-oZjWokUvxBqGyn218xUj{0U%k4DOHyzP=?%hN_Or$zA!VfoZ7?fDt?s zi`}ffyu36VRu@uuF%snD9t*0^tpREkyQr%@?^Yg}h_Efj7VG%izZ?V+EsOnoDtsw) z#V44kluZzgs7}2XFgZtfFi3sO!cZ4KjIGzM_wD(XU4`&yFyU-Gqi1)HaOcLy*13`^ zLw$Fww(26VZ+jWqxfno9t2m~t3AqjsPe(wE&&k&-k*YzIk2peQDmt;^)(5vo z#Ql@*mhHmuhJB@Epv3hti3cVv#2l!~vzS;S zn(Pb-ir|6;iN$31BRxt#Vw4Idrjr>;%DyRpieGKS*@VUC!iUm7sV2uqv+^wISimrg z^Qo5mQrJBjmO7ph0lIc}zH#xDoZ^6p$53V;t8v|h|Ic=isn0;YZqe4T`V0bJ+`S#( z+rCnOr4c)GX*xH?ld>t#G5NWqs;cTFv8@N7$sk)S<~rzl&h^X#*eB|du3F??)P7BN}j3%1}G7k7_!ORrpqcGOI!b5)Iujjp0~uj+sL1?@U+K$jX0 z6APfbiF5H! zf-~JpH2PR(XJ)<_8|OOZTchxk^K@~1;V2Tt?~8q|fa#4Gs2kyie0y$73_8$(DWx@c z5`yMnbnf5QcaOYhBSzRjs{dD6?3cS?DQbK>UxP+|ijgQQtIZE%umvfuZ9DsTvwLf? zRysBGsSy3mz6l-G&4+>*xra>5VQ=AheFk1HW`QLx-K_>pvKx>$8Horm37J~2Qb8Y( zaCw+td`QY}#c0oTULDP)sOc1Gg;}qu>M9u`kW14u%lt>Dt-KWSwD`2Yzd>+k$fb~_Ah~U> zMAhcYEI1%8e$Rijndv99i2T#Vg=1WlEMvo4oHr+zn~4>BE6L1VWN)sjt=eDJMd=IQ zEpOBH1s7bDEC!oNjR8Zx@{TviXmv~^w1^3GP*iNLx$Av^-FWq)8O4PXW)Twi2gzvH&4GHi4*b&aw*~-LQ!rv3q&8;Im$~EEZIbj} z*tJdI-i5A8X`8eQ?~27k)e?%cHrK0MT^to6+3SQk^;KxS{mNt1-Ybrezku7vVk=3! 
z=tnka^vkwxmDKangl=}0fp!w|*$4Q+S}yk#iRa8(VwfFj{KkEm1+W7tt~;z;zH`>k z|BIY;1H>@DlJ#V~D z+5C&M{-u4PP8vvEize^nQKK$^dyhmJN3Lfq(?D3;wWoaYNdfrk#wRQFSP+_%Ac&?4 zpF)3y-F>L#{H~9t*S+S)(FD*ZFNwH`G}}* zOnxzdx;gjUU18YYD5r~N(gn4&iGh??1qy4+Hx8cYvgh$zJy4OL&{wUQi<1ha$I!Q$ zy9Zt(kII1Z20<3QYTNQB3VTS~GxtvUAM_K27yM)ATe;9F?pp}L(051S$lZC{aBDc(p-^2@;>{#a~maiLI0)}7t# z9^UpC0ITMca-^H4#y-f-$%(dhKbi-9AFprx6ydz}1-|Hg!2895XFPa)@JQEJ?;J=7 znKdbf>+GIx0`F&~IoN8PuU!(lKV9bP>-+{FXH@wFt`IQsYfWk!;$kPtz!*g=Y4nHNEetW*)*}y z7?pq&X6a0_aROR_q{#p*0M;Yu59=+2k$+`mRa)6x_|ZEJRQmv*VC=u@+jw`UrDdrG zVCCra=|Q=y5#u*5k=Y|8t4VGn;@Hd z6xtdqRvg5@HYrapuR4h7)S*S9BDEu}tF4O$BRxYn#MfRMRm6}5BPA~c1uY{^3P>P- z_?On5pjA^q#J#%Xy%k8_tp=*UpEo03VvywsJ{g?6A|A0h^eC00kCnpIqB`IC($dn8 zD*&LE$KraGtQT>O=>dnkcPt|&91&(>lUI)D+;b5`2hSR3?z`=f2SKwI|M;Am1ov0cLAbQ=c zmHynW)PmS563mjivi&XOnkt|-n6*G5J{Q^YAr*nfpV9OzmI^{!$v_1cQp4fuNkHc? zu2GbQC+iHTuX8%-7=Nb?IRTqVgP1T_Mclm}a9J*x$%;WD}`1j8BTm4)EKCqX+kx9B^pT$8h(nitA&5HU71 zD4c@#Azj5{3WBOF91>X4f!koBT6SF2fIQ@^xH#jw(_umbxQS-OcqI&P(j@QpqAn*C z0V>#?WTJGqR=um4aaFGm!*gLrpE-Sh)84>J(V*%HvsRDww=HVg&Bgx$Mc2;xRO=eJ!kA-Jk+-k7F-mb* z+i}iH*dL06c~tV@vtJw@V30if@)$%l!@FD2_Ndd~g^TPdir zW=JowL_U*LShR?VM(Jwz?>cFut&{4QKoi91GNP!f!;oRLj_SI)QMhQPm`}iwD~7*( zq1n~g)v<`ALRr^A8F5?sgQ~xvGn~P55}X3EZ&C}>t5&-L$k{q9E%gDujHuykHf@-6b)oX=1!?&j*d6xGcjb zIH*OlB^9+aZOMR%u0Rljlu%$-s~~$;4 zPwRQb&=T)pDyC@s9rlfML~z(tfpw;&5J z3SfTasJzATV_XA^AQff@ozbtW+%ZkwK08Y|ZE=C3SlFl6ed6vrrCu2m)pe|P;oKr; zJjRF~QugbWTqA!LTNw?kgdP~SXNsgCxs6(AFCPG7`Tn|P2*Jj~4wU0!Z^BVNLmQs7 z3li&45D^6m>&xrQ>3l=(32poi`}Z<%&KBdiefedq$`6Me+_i%9o8DH{?=w0M_2g`DuX|{`7v&hG z6KI$m#7Ra0)j&N*nDJ%D8TTIfWAzw+5~{#e;;-iSTsjOZ9mp2!CyrsP`l(##XHQ9X z^+P4AJ^#YkM!ff06Oj4UR3&lf^9nI>{8f6Tk>~&Uw3B_7drEA{?AVev&rd zCNsO|A7F^>56n?Ps#&;bHk*la8h54mnm3Ah;wK%b`YMdGa|jJdMmM` zHrHGa!qUkOLWz~4JBYz^_4x6A95?wCgo}iH^LGF2eMA37OQN{(%WfCHa!oizrlSXx zw61=Qb^yv33V~BK>LZ`g?!-qwF|!hKqVGzFpFI9V4u4%j5g)Evl#+x7J;2Vp)&|f9 zr-g|1`dv3hJk-Clwe-VAXZ^|9aWrt3Br9LUjksJfv>Ot8h`BiA)jw@>ZO2_D|AmPU 
zgpmY|4fMT8?!cWOEeFBMsn%LvG~5a8VmJc-NS}>S^=0F$@N0qjPe$E``S$4j`v0cz z1N~@<_OytjHe}Xe)j&K(pK^H)YkqW}w*p8_9 zTQRw~7CiPacWn@}XlR2GDZ(ODWH<00fI%d+(A0MbrlGo6DhDGBd)+O?RX7%&BjyKO z5-JMWFHLt+D)tT;Giu$t+Vz-0K%|W~EH-qsVV5NPC_I8TVOhQIZ2nz3QNdt(l2Ifk zt01UuJ_>c&rkj0nT+v|%bZJuLvfeMPg_j2N0*^9kKH7!3;N` z`}a87CDtFFXgbi*yyK#!Jyi`BUbqsT;Dq}qwjsL+W@a|FE!02B$j|tq{toqeTbTWS3Ws+C0^Apy04j0>YC8(jQozk71uG7S}#+x|j~rQejAKY8>? zT!>~L;zCZho3~b~HmKqQq;JZVm!CX&0>JHrkv8V+8s8P>3fS)U9|~UR3P7y2N_fOJ zATmwwMk`4y2B?JFh%hKOWMp2%XoP-g9;h@|A*w2{eM;Yo`{Q@=b!ca4X9a`D@ftU2LO4LN8xPH7F+L(LF7EWX za}&PMO7A}@ky>P2%mFzhrv<0mbSc+`tkF<84&zG$FP{T&89840z){t8bcB%_5?d0f zFgsNZ`8u*u?-U-PBlpAMBy}g9!owa2>jhpSy&_{Ay~28}oV{y2;aU%_;vD zF|%Z`hioBjbv!(@fmcAfaXeR8kHL^4u-Fh$Qe+_wGE)Jp9at<$`JmvdVbZoNd?$ga zDMDnx+~=k{mS*V@7m3X_<@Lp5?;mgW2#2C#n71`d`QCFj?uF|h(&;>m`d|*70X==f zfcJ0$uR!zdG@se?d6M4@$m=u{=v8QOBe|(zTLLen{|L_9eTphxMg(=Gz>f=m#)%cN z+p!kiiUlEn5Ycn24Q_QeL*2fwdKV10$Y*+0u^W1f9%)jyc6#mxVh-m^K5k7+Mn;Ou zE4s?C?}y@{RbZourqC>C-t?h*Ae9JUHspnpA-WdAX=F;7D5v&(!>&GPdDQ(0`=Sjh z(fr9p0MWQ~VW6lX>(=oH*&d zQX&-ODil^VNN!XwMfRL?Mw&9W2eE^IyM;$ZtpS>>F6RP`B(eG|6^uLHl6U@DKs)6z zK&iEwP4n~swB??Z%9C!CLCLj*O$mmnO}B@LyYGT`-WHIWg0M?m5grUv3%OkoGPc5V zu$t{I`$3DqoTF}T(F-{FXK*o1a%wH7WGOrZ9hd6bdW2gXvK5_ps7LIFt936Sl@L^* zDWD2$khD;HaFQkWf+uGwM*%{xHSyrKHUz3+!UC){@EoJ0(m)a6%M6EHu6TMZ>}$Ac z>u+^nXHUJqLWso)$=dNMp<;{dT>*YFj)9GruZ6Ah{G6S7E@@JyvIYc`#xLIZdvY z#&P!pY6n{06^z&5xSy-DXyZZyiYt$_f(X?G+U1a*SxJkFCp>RPw>sEztD#}ol`B`C zl8b9#QmTF|j@f$Oc;u(vnz0tt7lhD(&!rfv?i{RwEtrSnPJ!BO#|}uJf7h@OAPX*+ zSQ&^0!jw#W@j#eCDwXx%K_NC9O611wz(Je9WTg&UJ(dib6#&sw>v@T%@1HaRk$b?g z$R+zx-Fn6H;E=JSG0ww9_=aoGl97YmOTT?Xi08c-n^->(~E756ovb7b??-g z4RO$2H$tMKiMEySJfsN7sD;X!_Upq%QYNY`BgAI$MqmtH)^5T*#atlci13Y!aX^Rq zI>UF^cBGd>C>ES+(DVJfj$ZP?kRzvYY0oxz5Bptx@%n8=h&T}2aMM-U@3}gq%25>F z{9K8pGyH7W;0>ZOGFCxMyA-f+0phc1SUpSF<#qw7Ajv{Ss+a{O(2GTW^_w5esvPcD znV;wD9X|!jLZh*3INa>(8{)gY<`K1iQLv07aqXm9=dxJ1Jxw=*f`@gK8UE#2jJUYd zG$s?k^(rzxkHKI9{8>Wc83eHq_-EDHqUEwJ?)m3G<2y3PRxCDLsMA+V 
z)jleGiK>q3|3loHz*D)d@8gR!sWfUpX;2|4MTUf(q=jV6R-{OSp%ltYwVM=b8Ip($ znJOD4WynsYWGhoD63JXqkuiSPy=WiWo%Z>j-}m?b`@QH=-Sga+{d3#N}Sq`PR3@8w2lG zlhA(sY++`h<7l+L@5nXzy`Znov*nDrcK!N!Pfxx835HiRNZXi%s#HXnICiWzVg|!2 z{<>ciCKq~xxdZ25<`HEP3G+NAL6D1NB@g8O+Or87wDVBf}OBL)y?zKP*I({18)2XbN65+IF<92NQJ@UT z`iLi|<{jkP$8~yuAU|Q&-$J|G^@Z{KBajOoZ;Ow;T@$eRu-68f=|v!o_mXWLtv}8+ zT(@SQ)T)}ePdhdLd@+HzaSN?Vz6{B4Y*r%`k9>I{T|I-+uR7LA$7#GSu72Vqqqyu>wR5? zW~RZKHSaU5NQGDYPS1WOd#(iMYm@iN56Ic|Z1PZQM2?*0Fgw*9+b#y86ksJ1&VuIb zGqCQj!zh~GhcNJXfzK%C8=>bqdc`5k{IA;?yl-uG?-7O;IDkJ=Quz^mBfz*IcFJ6k z4GM7UuCR$jqM;Q@V(=`EeJO$GnL-*+M7e;}3Z+I7&txEa(D}7jG?YYcu#L_Uvt~8Q zYO5R7G@#@tf3dO`qCJu}m06W;Ai@>L8!W*+(M0e1C6ZS}mM!m%0My?Bzvh@zlM9&9 zFvIk#%*1dv8bNX7ykT@p2dccbEosOd*3;9NJFn@3PdB2g9gWJslbH>!(eN^k%~H&* zql_u6e3FMgEx0-Q&&st1_3N5{_c{K#UxxDN7q*y|q;{3a;Y3VvP=$#GTJj*ou8>I&-6v*f%fdz(%?3~k!)fbY2nX;UoA%+hI;csvk-AIb zF>VSKyMZ}NG-M4h??LLneLYY5BrUEI=e5ZyAda{tCFmShW?Gja9b-MI(*>o4+T-9n zAv03KiGyiHe`wc`WKIZp5A3`teQm-etVwnQ0MbCkU|(Yr>oQOe)Poz)elOmQ&fjy} z+PbD=zX4#OBP&Gk=#;SUj!%2aw_o&UT$f%litjvzNk80HJ4(n5! 
ze&f+2kte1)lMK1a*Rmy9CAr$YynLJ=JbiO6Ex`R) zU>U6UW@46u$ie`aN>1npK%b^r;5_D_h-OZ^8El5Chrv<}V3y5tsN_(svjR(mL}p(( zRC{^nkNGVPq)b-Fsl8d&kR6x=A={q~-AtSqH<=~_*s{|kZF8K^9Kj_Rse*gv@kK)h zK_>8^<3Vxhagx|qi16yu*nD?KYN`yA#N!374XNXlWB`QgVb)&gQT0cGf{y{9dRdhn zxH4ptn^8p=3mB-ZNEVAVPtFa;6RWrTH<7{Njv*sJ!(u3tsr1Jdo%JO##hRq;(H_H8 zlm;8w!L}v&SH0f4NsJvo1E+BSQ@M=+_q70siV*^ev8+Z2u7tV<+%I%7hmBKWVmz0} zgPPAXU&OEDkZD?HGG=TRk|egU+7A)O7nU8={b$x}I7v&I&m|aR%&+@;4^mVn$bm0H ze#k|wH^K*@(ilZ-Zi+P*@bEF+&^N>m*J zaJP&{O!z{Ojlar2Bm>(Ms(d4fe?bWoj-zX+Z1q6RXTLjLdX7BcL->fNp)(!l;+7|o zQxA&RIP+VK%G4i<*8T|dUw0wuwp~LwX)U9%o`J%PP00dtfTh>#xr#~rm&4Ua{h;U) zS>Fd)^kU>WS^-1Sqp;DVaU(@ZMwtYg30Z)K#>15No7~B;XYvA#r=*T(<}JIR_`Am4 z>ebK_;KouSp>ca~ON23m2uTTqMLN%cn!wapHDrIT2)V|igYaSP$srFUXu3gA)Zv9b zr)hMuw8=hs$Z}?E%POHBoAdyu1AyW`VEf2;|4|=vXa{PcL8ur=yTYMQqJd3`fc6TC zAH_5j@Z=JX@qZe`-xDx|$rECi}F5hn#qxMy`1->ryCqWCsg zP6uMthvY|3f1~CD&Hfr6d^Nu;V0!RK zhWEz*wCj8tphE)cK^BRM%`EuHo1>5EdesQ*c!;;9Qh33G*3*HkSuYt z_hSn@2fqPBU_H?bXow)`3MDcWnJQq#K#X{)S!QJ^Lc0MD2g&R+&8YibOr2kPkKN_1 zLtbIM8WoUzD-=uGUz|NXq!ZwA`_}^}cQ+1A18}BwOXsnYp z{c7Fc2q7wyhggU+q*YSc+U0M5BJljf)yZx)j^huQ#{}jrb8rmDT_a6R$EiS*%NKJo z9J$8+GL>WbkLa*}lX$Mo^^JlZ06?n+;F}DHk)Vm8i<}*t>dLjuAvY)}6+44w7)XnZ z#v-xPH|1a%tp_$)2rVqK7}oM1Qgw!%v~1lGi+qqOoTnHuT*!o~Om2ICxqhb=A}gvw z-wMaNnKao(b{GJTpFa>Ea;KCgPb&!Ybq8`U*ZnS;I5YZ|tT?UGIx1QqFBSzd(eDzE zxjfE@{6tIaM>j+)EoD!X3EA-6t~$mKo5DXTiy|Ph8H;>99-RgNYuY$!N>JaDAThXP zIvY0z(2yaCk)oy_Hw((8%ZYs^hXHH;2))#-rnX$nLRNo#SoissF%MW7WxR}h%i#X` z;EO}Bk684-k!ILI`i~gc*ymKVLKw;JKCeLxz`?dzCLl`^N z?Bi*5}FH8s;!dX&s z*q?6LXwuhpPAC>DmCEy(=Rb#^5+{`dAtpHM2LJ*M{;InFafb+``X48~ZF7R^N6kZG z1;b$m&j%Qr{SW?mEUuQ#eU13MKuBmE3sA+(kRb|EN)9LNobG~HK^-dGWP7^`a88&k zvkmc_4C(DVbBN5!1oZw5`ePf-{D7`e<&dlgm>NCz7!pUWyBe5)7eaNyxYT-o8k2p% zQLXfshSCp0$k-f{?1ShxA~(jFsR1L2v$A8jxnb5onmH>!$fyog+nTUNU~hmN(DM0A z*shdfPVU{qVDc9P;cy94uKM(Hf%B6YOTx^D`pPa<{0yv`MLK0#{$7O(*v z4qXTbllG=C1YofOAQMdDOK29g5>N%yh30022!*0G+X@6E4r#^T-s35VJS;oZn8a5u 
z4ejgMNi#sAd)*+6PTq!)I1J|ViQjdGba|f884mC$Gf{0whB;vb9|-B9d+jK1BWU$BrE>r6hVy0_g)?T-PI=yB9Z(fbs_M^y9mXGt@8}li{GxFOK!HP-t>>u1}nN}QZ3ul)3Yh372?b|nbU7$ zwxDv;qR)dm%&L+jgZs5Q`aM@F1wQZ>6eB618?EF3mHOk)Z&(_Hc zSn9lvgds%Q-=_YlbEE#ptb$nuM)W~esKM8F7l!}&Buv+|bmN3*0QOXhO)t!JqJ}?Q zQK1b1pD;**#!0Gu?R?_?jIwe(l#zNa>r66v=`Pxl#G5G z?v7dFCj?G8dB5d5!b+f^@(a09;b-~~+Rrt))1EbKJXbq4c?l1at0Q$JENdp4vw+Fc zt7=G%zSc_LDL{UU361$on)$&N(b_pwXLIzf0J$}I9WdYOpSxhIVX*ovdsd%H?W<^SM>q0iaSRbI$%;hV1Nt{<5B4Ey(+tny zZUo0PU@Mja#AmU1`|#`^Y)OId8y1r4Qc0MW7=BaLi*NqpB#W6Bz{Ddkn5`nRp)%#k&ZR?sK3gK}2O5M5Cd^`met7{PycaL{4s7KoLcK$%Lq{FQDb zl%F$}bcHhY_Yz!{27?UM`LRdeyZbW zRsX}iC1D%qzJ!a5%nAG`fro`01*6IUk@4QkCA52-DAApx6Crm<@NLr?R;K0F4xz8 zQj8YgQXci_O6=iS#f2n$f8%Py)9we~gaIV4>_(kOecc<*4+f3(ql?0d1g!#Z`xhC( zU30yr=ALCF9^j^VC3fF4y?v)A&lR{Zx zw}wC9PZ>V>wqjjECWU~i>jRXy)*eW{i&PE^HTdaj7GRuOrNA;FrTSR` z-w*RUj5C>s0mAWnnsHqIl0yxhXR;PA_tvN5agW(!kgk}N;yC_l)w;SN(y$tr3?w&n+Y*vbkYa*&7d~(H}CxpGS7sq8&NHV}q3>5wGH8-I%KK5Czl;rhi zK|?F3Z!efOcW!RqWN7e#tyk)t2WsJwGNe6M_G#2SAcgpu&w|M`WdHt82^VeA;w1k| z%O8e4(0&?A6GG8e5Mm;tpDk^=`Z2X;FwvYLT&(OKl0{ku%i)l=Vu87;GfhTSWEUBUngVL;nwK16KO<9ljNktuAaKm{; zL8X0*H}8Px*VyQ6hS3sem(G|mL-RPiw!jSBk4{(}W+h@sA&|`QAo#U^V#_!zc*BUb zUacU{zUc0OaqUy|<~Leh017}e-DFnXegmmMlBKqPDu9(utC{ z6`84Hs1FFj=z zn1${yt7seJ8i|ib3gMG-bb`ld#FdvN3r#}{_9tCBvj2N7^PV>_G$gH_FGUJ#ngBNX z>uv|M8el%L;p9G~(wXpxP)qMcu1SrBO$bS_tRc~=CQ#=~K&^-ay&NT+8XW&B-sgDn zN5j1)WZ8pRtDNd-fsxA$vyjBh&_!QoeA`ZY-MSN0qu_2k-fZsg;vUldkc_QB?0=HU zhvDpQG&gBr7J84D5Lp`mI+S7`ngbvfnLBqi*wia)6oxDfU5Ju-f{3{uP_)SBuSh9q zu6zC#MaLb{b7>=kS$dWHvO?W^Zwc)Nfgc6C<;nLXSaGMPW=W0b=e_RT7r+g@1Y-h9 zli@6M?}|PPOhE>5X}Nh6zUEMuvI&iso3oti=b$qp0!J>OQ{suQ=vGiYQe3-qaDtY# zMpC_#39Xm52mE_?VZEUkF9G$%kHred3XBKo3xAy>n+T*)=bS?W%o|A2fmFG`F{Aqc zkNS;!_FTK0olOlqNH@gqd~%W36HTQ|8oY<}l@STZ1NSThB8q{~d9-(-6T}vN!>b## z_6_OXWJyS>`0eJj+zmia${J=@DiAW~Adi79Ezm}bs^A?cJCmc*S&uAOvkuDa zT@zLn%w~8n4s!388+`j1jFyoY+sg6@5104!{3J7TM5Pmo-&x~tjSBQ|f!e1yPTOE8 
z)RFWezIUYmdzqMxWprpqYl<`$i`;Km$|zL_NUX;FAN}J;K4AbW1DVKQNTpO9?$rI?+ek)FpBXyf-P~Kne8}_b6nkN_M4|pe%vx-@!ODVY?hBxyC(Gi znFvy;TR@+mXUSJ_SXXfofv9o*z>+^`ca04INPa|hi7#(#gC zk`7p>7R`Z~)GSSLOw|2 zVJgR&)gD8`X&%is6(^LkW#_R!-iLe3T!ws3_Qz_8sLGb!)zLDJ)=&k}wYJf=`lqcn z_=WzP)$pB*tIhDAMUF!&ixWZ@Gbs$oOgb2L_mbF)7e{YR=Tj5s)SSm&Lc-(o`4S#r^U4|*_E z55h>ic^Zt+iA$l9C4D#q9Mtm24WU9*nH-blA5{sR7(Aq@Q){heGjJ;={JWK$_kDy7 zWP7^`AH|t*XysVJEgj7l_R8@e-~2oUmoe~FDfXjJ{_Usqx6SZ?s$t)sUB!?!W&c42 zM}Olp@qA(*Bpqz`aJvaMp+Gntt*!3Or>!QPCPRiXkO$-h{ynH|1@Q-;47 z_pos;`|bYSwu%v^BaC*{MO5-R%Ho_0=%%s%;4Jw52C@%YoLwp*a#8F<59yT}bIvr5 z$+Yv?e{j3Mj{^S-%V@$oJIH=47(IHM=5e^-{`zYm;&aCm${OiUBks_586?{~TW(b9 z2C(EIEQ5TJtO4z0V4kh1%IKKI%aipYY{lFO+WZ{B<1BE9*$*7`rZa|)^IpqZ=Hj*# z*^JlBl_{&uC?z5z2%|FS9QX{RO%&WFO$x{O)dnI1hW&D+7#JCMPP%LN0a8;UrzI+w zkd)FR6BSh>X=r4T+EuQcXK+5Ugj_$bA9H*jNBLrr*rn4pU&peI#z*_N_cTvT|L9EF z{cDH_OZrZf#v&C5g))G2OfbZi0nbz#=mwE)@Kr-r~ z20AkOLR9*T8XkIN@yEXlgD-5i~DgS0&j##eVX^3`-U5UOy$bmFeGa;Y8qPjr8 z-IcQQ_8X_fZ?B`y0H7mMSs~pvB(swjY7zk&f^4=gL%T7?yBia&6?IirRg`A0UcH*= z1vn4YS=45L*(*v7XKtQa#s1S`U@Hash)+{&0xJ|0pBKp$Pr$Lx@Y1<-=~9rqV-j$! 
zO4q)QkJapHVRYcsSu`97W@H=?XVhS*b;OqWCGb;h_nn4rb*u^x@odzhdB_ zpxXhhDR6UFCt{L}Miu&tsyld4z(H45n+-{xdXDah6@|3xM!-pOLyveA-KKIyy@*Zwu zVOe_2^8HI2cI7EAZ&X!<(8T$ZO-N;&mIMwcqRzHEf*GQ@vKEr}#XgF?!c1Biy&$am zKd&WrZ;QgIE8y;9c#?bcXxaBNS?}MxnHx;W&52Qz+)yq}f9!vhSNny|RB&MzQD~t` z(|?lzEs7IBpHXOCboeE&VUol)H#fHsZ+?p%$UYap?A!vKf{Gk~&P(hskYCAdS3}L7 zTL5IBdR>fWtINxMgYO%a8lRHsu9N*pY%|%Bkevt#5$q_^!^^irQ`2h-AD`39P{%hc zCu}nk;pJOlRqy&9EjJ9>o@sXk+^?DT1UNw)^v3Atk#~123X1Y5I(72oIU#^5531HN z4xaK^l6QZFH4d)b>|IBv!rxB>2!D)tf$jc?YHZ zo|CS@&&BNDC@e~PjeqNWAqsIKSE_b9WU^4oDb%ch|FLX@>#zlJCW>VZ6BsczhMO7v zCjp%ZKRgXS`69ZIJuQ)?LB*a%?m!i5FF_g25_`HLswK0tmLEGx4P)%yfV@55*MaI& zkyCSCKkoW_&GO?ypafmq-~H)b`&(B(15A2g(#<8;Iuk?DlM;ybud~e;BjGx+iM;kj zPV%!5H~gM25ECO0=QA)so$vp$g?HAh5610MnA9nLQv|maR5wf(jC!;hB@jN=A`ax{ z=YKiS9dtk=`*`2p3&>xEB_!V6NjT@G@wxT}tWRUu$yBFPlJsja`O8Qu09HIQLn_x+ zv6!E?1{K?UACWn8G(e_t(8zFJ2-?A0iL1u^Fb2G9=6UkulD0QVB00q=?|+%8V3gf0 zKfxRLv+bLMTseBrBzUE|r`{JFm1DQe`;l~xUWr??{{7cAXhn(Hm&LhgEO6IUiOp^v z<=ybsOmxz^oGB+leT#DJ-Z+}+cV-ww#`TCVi94mOebGbq^4!aAnmyi?fw^**eV_0T zs|wV7AhAzFzbX(x)eahOle*(_>Gvn+y!HO}T9b38bl}ytFF3ScLxr=!_D!H@U7+WK z4y6BXHkiVt(VaEbzCtQ%*NQG%MoxEyh?P{mGYcU%)&|VC9yPKdqiU=dbBHNa&hxcnNm-@Eu4fxO!9IiPXN=hbo4T5 zid%$9Jbu`_o+|nv4ZH$q%_jwu>$}dWjfJ- zw&}eSMcyKl`g2GuXPP#RE)Ov8=*dPRlr_`y!|BDhAM^U3;y#`<#r}48(y*C_j=!uI zEMD6ez9L+#c&*>a8;PH^r+qzj`-JYL@6yCT!`#^jrDRNwjV;V1K{U9%pOPAbef79I zH9c82B>mON?lX}z!TUT@{!g8SAK2?xOKZZPT7z#!%b1|3u|?<&>nCh62AIZ*M3k9g zi9L2Fv;8Ac%x^rwl}&SYd!wM{>;}%Ospa@{^QELpx>M!LhMfrz+g(Kkdvj18BUqIx zboyib_iFvAn0X)pH^8fCqEiSU5U;c1vSo$c9SA!$LBlODY+I4DZQC}B=f7`DZ0DD< zh;(XeY=FQuTRCyc=9C0U1Zs*#?VSxNBek$a@(>12EHKFu`bqp*d!~pZEE2euqhM(I z_AOAnJbaj2aRnw7&{r(_uoswEMGp7@;`SFNUkGu-isOEt3~5A+=hp1b8GNr8n&F1I zOF`h4+yC}NyQ71HLt?wKvNEAd&7T2}bzbi_y0t&5eIG_Z)L+ptB46!#0J1N0=FEwn zIZO$XZ%2-ee_cr>G4E@_ zfty5+^$5cAaOTVeWH3y$W zVk_!S4hCLQlV47A=?{;MrAv$~make>T(z$J5QWIa&inu>!mSV=!&Y=nyD`!H(u#2T z7%zXH+w~^JD#6@;2QmeIN%nJ0t@lz|b99)^iWsj^Yl_!0b{=_oLP^<}-4M$PIULNc 
zx81&4HHX|~1Y>Q!&*0{WLgZzMQ$OPP1c4GU3q;_I|&Fk7F#AH>tEYEwP#5kv}CYpEu_urT9 z9L9l@U@RN6{lRIlbf}QQ{{TQ|2BqGk>Lx2$nSbV-?eYvtmfy`)xTo%b}py3exAxnmo;1C;yOnW;>Z;gw`tlV6|V5OARpQV zaSI5X9Q0MmEtWl!;U?+QF@gZp6kC}|S+7u^QlA;1JhyG4@yh)ZZ6?v8>pW4kEkL?& zHd1WE3LDX2@K1ngz25qx;5g_lesHh2Ru0I~nJ0=x&$BsRl|7~pXxYwcn=xevcvRo;=qKJidy1BV0sD5ePWx1F zC(cxXHTZiY_ZQrGh*Pk$-b7zQ@R15#R=jBK+@6Ii)0PS;4ynKd7lDyDN`DO}5NG(B-I9ZEM=;+iqr{jB@iJ z1-OCxPVY3R#aMd+lGiTsoug#DjJRBm42i8-Gm?LF1NEl? z;&UhH-#7AmsLC}9m!PVjr@DXQp%~m$1_0!VCt|z~o%&JBs#0|j{lVv74Y5Ier>)+9 zwF4@h5QwGwVe6uWOq`{%)T{j$qCU8%W(zz)MC7CkSMXYE+`I2rDq+w|i#}H8v_b!* z0emQZ7ngJ%=oTSwj770Slp6AT4f|zSboLNRP_EYw5x?Kfa(GCD9C{AoUz~t+KvM-& z_Gr>rCxJl0-jM3G@31MS@zrcipv^dx)+_w(#a-wsRiJTA{`y|~80B))G4gk2+MGXZeF!Z$k zAbX3ePoH2q;a~0KdEG!dweo?i}SYxDlqK^oMG3CzZrR`nb~HcbB- z=L0JeaFyu7g{nGLdD@_QDb{=e@`TI%q@b8sz^(KGnF+Q2so63M&C3B!z~BazKz}=B z@N43QoCV48hnt5-;e8;i3G%ufaAW-^4H*%tTqLOuY8}SzT+6|9-0!u!F!E%(#L(e@ zrLP*I#~)qC?-*ZvBM=E?i+gq=eb&xkF?faXK@6P>w&A zs=fDKQ@~_mY;0^}`K`|)H#?z_rJfEghn38u>o=jYiF)wt@_K$I9vN7ylw^Pj#!rIKkdK z)&N2--MjaDU*?uB2>_&P5xjv@ZkCXU5`-Go)LFVnuhHLlD-jia96}>NW@o+6p6h2<5t<_0`R>+pjR7m3{Vx6k9X9Q* znwkQ-XO-j@_$o#h7UnnD-kt2!m zxIkl6uo)|_H#jb6vhBw;hk9i@y92!iM6PVha2lS^zSy`pt1@G{o7WFme{>o9n^_!p zsILD0zONs?2KP8>T)K2rQCLXG#yd97Ra5_ulkV}i+ic_BnTd9-lv=O=-^TUn>g&C9 zGz)Xb4K8|A=XJJLStx?ku zb8*|Bz4ol*=g3w#dValo=cd}{+6zbfpiA4vZ(|P*ps#o-`?PZ>ES# zdD_h>GYt1%xM3Lu=r>0~;wY}xQ&n zh}pIwPt-J_He!NnHnk)8q2X zXxC7W)T#Z0>srO{GM*0p$1RtB!|+K1j`OF~p(BA3L}fV)S0E9I2S#Q>HXStkgSNQL zOi`_xd(^8%+pyY3$Rq9SUsRwW3GuhJghMkbPCMclz)kRr-`fc*DZy&-z(3!fiEqIM z82nW^{(p||3>5BAz958s4UhsH=4>|Hpv?LkAC+!ROGJNh|E>NbHrC+W{@dE7B4E&0 zKN(LE3-<_mQfzekpYO|{-)HeU3K0L5O z5B{yR`e(l3-#*YkRuKP8Kl?@XxV)PemYJ(1*1?K+ zpYS9GNB|W%0p=7_dDad~HL$X>N~~L0yMTd~T0c+m9RlL>&u9tDO&RTh0OKsUIUouF zS9bci$}K3w1dMcti#|xQM%g}!VS`>s5ER#&KVyJvCH}=8l}_j?i9H4>^*BJNZ#1GA znmsH1YsY384nS(`7eQgf$ePf4jgz4zaD#|D3zDoa+|lP4cIG&{%_I9zCCueloR?#> z1jRjOAaD|j*?WpecLT(`KY*3Y0L{AT+Yb?=IQ27F0Q(NQQGoZ-lcVTb&tOD 
z(tEcgQoX>V!pUHFlw?ycbL)*Anqf1H<~#%(IQT0|@NZn)e|+Yve=p)-DCtk+=WjX} zIvJtyT<9=8bF>GpwuWvq48X3{Yfu~+lZ!FYhI66RUcA#I5GESY{}>p;@TA@2-@6=U zJ`}O>PZ*2a%R#+MKUQ(rD6Zazs!aH0AB^f7%YLzM z^snWT=!X@$k8qFXtkJ#r^4FK8dQo1ojw>H0K~h5OF`j-j0nvX*>1T~4o*b`ML2qIB zUp~BwhuBC18e6PLpg|Zqj>Z3{Bo(@zm*EvY(3D{jhYJ)a z`q%z^iW7u$SNeaZG~Ftkocfsck^ct=vjabPpJ$I9ixQrs1o+bB2+tzZXKN)3Q6vL zZtQ{{gc-0`+MX(p-woCf4eg(__QA)cS0VWXpa-x>7?}ai8I5AjJRvYj9@7Iw#Qf=K z+17^a?1}v6(=_o^`-O%yFft#D86UVtXf$WEUzCGDan6ZDS&){GC8r`x#Oh&`qK&NV zi0)HcjZ=abLywpw-E3%RNFpcH^P(2`p@)aElnIPSz8HuR@SCy`kYJ$sm1J2n2)C2P z7@}qCy~!KuFFJqNC+9MIE9`ny-MhFqz18)8l- zZsXM;!MP(i4bWhLa{4@QOrhq_CU9+vyO&~3eLC>GOnM`U zK!cl5gnu|Dw%H-zEbKOY~A}W_5$(P!GL@Xu4l$KF4E_ z%YVAagbuV?D8h88qQA~in*A$2GMQW8^g&ftA#8v-W2s$Fm5A;GUd?qhok=3lUjV-f zA@-|yBQnDv$)a1C9xU$Y*bte0;mC>?AHzw&ibijk6FEOnyA}2xUNHkr=A18^H3+qSjD#x3<&X`)MhF#4PA{FlRy(Sp5*2Ek%bPu z@m0@H80&01{h%5BrVeg25J-@Md!$686#w2 zACt8ld_xU(y#us(1I>Nh*_M_THO~n)#}Q9Sr2ww!bX6MXR( z53H>|^?>*N$gJ}aNj;A@1Wf?&mewVH5GS+QEfs&OXRzk4ph)B&et(R_uh#Uv2HJgUwE zd6k6KX80oX5^hBZPzGxqJmLqq%`-?{1Wywsdp!Z3PoG1$Q3XWIbLNK7U{RT`GAdOS z*ynuS3JoWWr8)0xRt&j*U6KG`0B!fb46Do=y0Cy8r~8;VR1IH`m;#}QXaQfuJOdL~ zeQ;#O>=haLc@Tb}{m+EJ>6M=(OCudoV)1CQwABq1irW|DQvw}aR)`7)HOuA`L4ve1MM(|NG1u#AwukP!|j z&abN?7@XN8wt%DA@v}CIIAZkcm{Y58(a_XKS5&-wj^IH`x%cQIG~fnLABa{`kYGf{{2>&4m8jbgfL_Sza}8 zI?2;}-yhEvv^TSJEp*q4KwL{t@ZTu7p+}AwXXJWY0?WTU_eOEaVPWxHi{q4BPSw0gv5Bc-*Z`GBb{M?mZ&eU6vuj>+&AuKzQK z21G$B(?)1P(zFOYAu3;SGgZuyR#Dav_1SFmVO8iVHM)%kObmj@=L9{icdo8tEh^DQF#!nL>@Jp z3##P7H4Wn$dmdxLC@74~Co|)X*b0l}-N5x@o5zaC$-O*xiR0RGtSY7sJSNF4Z=*vG z@yJLA2Sp_0Wiolj+jy~04>SbqpsK6q(1Y_AI`m>>*Ac%&d6?1h{$h<0mWugZrfB-C zO?nF#nF3!z><{R#Z{9YwU>tKLExE~r3r!e>K34=}C>x=(BNy?JWIO{U%vzC2chbR& zx-=RvU;hcT7d%0_xo}o+5tvaaPGcF^Hf>lM!37wzyHP{qw9mX@swEqoleCtCROQ(hnlG^zU-2 zlSXVa*JChxkKZO8p~)W)iZU4YQBq)BNvV^~;K>^8sVOA(c)!df2HM8`2{%K_$jvQN zNJJ*kkzh|P49#+QaDe77%8WxG%|6Rmr>^cxX+s&2$R9(JTj=O-zk%|=7zp{wnA#PJ zMM&pS<=l!hiiK39XdvD66GTC~)q|}a4e`__Lh&dLVrChV*i>~DAtOk^PzIBz!UEo0 
zHqrwxn}a7S&csZ`wd6w#@tatx6bn(4VPV+;qbd38Eo^-`m4-v7C@HU9e%2F_*NWQs zM3~W`Di6yKq1qU<{7ml2Hb+HtFZ*>?Am!Vi+rB~1&xZzSC>|RIfG7Sge(50(13T%x zyK(tW?0;~I2$7_5jEqBF{MJ2#ZJgHzZrq`TU?Q5hw)wmDd{o7cksZsyh_7`VE_g|7KzVo>m&KFTQbnwN@Mc6Rm+IBf5D z%E&!AkP{{oP?xV{=z4kQOLq&y007V`5mRhNxP!`0&<=Agg)HAbZ)p_eOLdtPJH+^so2~5KQP>6E$z3+4m>5t~tXf0kQ4`(ky#fxi2 z!Z`8y9Z{sn5Ihv(mP?Q^+t)v>I^Fz|Q67hrfBq+wHmmUJ z=(LKL8lPNPXNuz!Li~2_u{w6C!#WV>}jNZ}(BtY%IE{ixHR>oq z_E4hDVc;Pm5Y^Ucc)W*GI5d1EZxdQp*F+*X0@+10cpzpt_gFN^ACDO#9@Sx$RxZdR z*dk)P^8zLC9X(^bGeK)T6zi8NL1~hlz_3pUzZw>%PF z-IT%M7m$;&e?bMX3B^4dNe^cVYbc_G6^T)#{4|Z5+TX!-A05bj*f5~RPM85=AdPnC zvx`#H^O+MyjNMkYqRz`B`&;BNp;~zGmxtp2KS=c75LPxvNpC(0EBSq`t*zad>vl+K zDps3F_n#25^7{&)@h(Bv~`OWb%1Bs8vXH<9|+;OKTJBv?c@RnZRtqV>{ zKgqQJ@TslbtCls^U+Tg*56^E2s~7=woCHM@kZOsfgFT`N!R^0BG^x%&?+&DrEcf;2 zm))na0$jy!C}Tg1UHGzk@FQi2Px#;H?)-B%;cw6I|HV>bH<{@2lxogHhEOOLxXkU^ zAWKnsH%MrpN9QX<@KAK(z`FLh^KCI4YD9_*A8k_U@k1qu;3_wBlE-AL@>FeT_7tEk zNSYePuyX;&WWSu)?AbjH8{7-e%uyzfla({ME1eKT$jD+Kb|Jlj6jKB#0wV!KR_Z@t z4nCPy3Z_ys(7U{Ut;z@ynAC9flXt9-&K?$*et!AR#+0%>xC-F^nwSKGRvm`O-vBf`6AtNUKz3Fq9$ZG9e~>D`^VT#PkBnG-w-A z#V+!ZMpbH`geKsS)OWg~G@Eb+fN;eLCW>T-0~x-kqB$Jb`r(W@OfoIOA(f_cb2B=~ zD5DcFLf>o&dy6hWrk5aC(lvi$SezNAhg&Y87B|Ev+6b1HP=npq7{DuqAX{N1W(T}d z3}`R}iY`??T8BJtID9lE7{5YxcvZv_Uk<B19T-Sl&SwhN@P*VcYQ1Bta z-^6acNoMK0^>*&0`C+Un$P7lUP{%`EPva+=nwp6(Q^9%za%$Ywey8s$LEg))!qndI zb2D1bQt4U$^pc!gPbw@!kzK-RmPb(G3W!3>atP7D`<4=b1Zb}9!>ckM?O})N;GGa4 z1dMcjsqsPMlkHF*PM>yt@naIVCBq#w185Wss=aisQ&0bfMM-W#;;=(+X)*;t3EhB2 zvA@4C#r6X>ejZA(VN^jOnVtK@L$ngGCF|a#g&yPCq&CREL+982p^vs`2iXBlxn$XF?jdxjJcML}&NwzH3;r zdo_!n|U~7>^m|arKd8@ zSg+Bl8=+4d+zfn!SO*x;E$H5Y;zSDDfmw+9axK>TRE4hkd1M(wW)W~~$&hxhgLV@P za!HJnPkPT4$k*v&i5BAwY_yHZA))>F+!7=?G^I!S^3+;D&!-R( z`38n_NKlZlG>F_b*3}=1I~u=~Q8^56rVYtMd@{o6@KI9A&G@mpfLdumBDxD;&msx6 zteJ#u8E!nEToUZC^MOh4r*o#$s>u=qX%kK3Z9?cZKhDi1kDM4xEgJYC30>e!aBqyq z-g3No5+eIhEV}6Oi8*fiQY9>19n{|;WB6j;pdf$jvKRcoXqUbVz{x+GURXT@m5okT8t`euJjUCO|}i`0N?( 
z32M~BTI)0GDt@xjqQ_%18$w1%QFEm=RFTuy-1qg4X*2bpFsAzyQ4`qQvD`6}Y);bM zAmb{sHgpAVG-&xwZ_I^mMnAvbWzhPi~T>m&KlMe<>QlP~|dIHzWZ;e!^J%lu?p0GmN=}crR}51p-GoAqG`fy*f~4&6d!{3ml&} zk~15Mu=9EI#q@*1U)(uz_^=&pe5N?#L*(m1T?0gtYU4x0lwPpMy*U2>dXIAj#3;|@ zn}(&v#8zWfvw;x^88f4n+&Tl7YoK{>s{-7}B#$@QiXBipJ(8%ENc_ZQi!i_|`ObipwwY!6DOKQUN^Id3VNo^<-FcBdG@}v4$k{KDrf*jZs-|z{mwp8{tD`+C#u#)P#A} z7bSI&5fccU8md>y;MvZv*qpq>3z0t^=u9vrXUJlpS-Tc-g&rYxy+7D5Who>Bjag0- zUM4$HDG2D{y-vKi?4f%C-PwhyC^^y_N$K25qJPfc~O8{bj{T$j?e0ca6sLBg`Mc=fdS(3ICHB)e4 zJ|izd(C%=&9pF(Nr?(16ICRP{BR4R5L&8Ov+FTc4?s)F?O2dNRc%r1{4iok!e%2v? zf~1g#F=jO4Vqzn%m}fKmUJsvU6vpn(JiDsv=H{08A|kV^aS=|~4{GvYvI)haZujRG z_Amv+&&$Ubhl`ogeK)Q?94*DPR%jnK0+}tKCMNc}s7zqdjLPl0mo-G#cT*@9IvKP{ zdyt&>5kQ9JUu(RA&to~@EMWw3bS<$YqgR`uDvtofV~KO93m3gRfHPkQ9eaOB?jg%- zK<07(+I<&vAu4jrjk7<3wcjh9J;fsz)*;r^?Ln$Z>$?q|`YCBM{ zs*f;VVb2hb1;S6_75Erx={Ip&;X*&f?Ye&&=e)kAhZ18OWvGYHszAd6>~aGK_%@O} z2M#W5f0NM#93Kk7C}HY=!C2wcw79v6PA}NLf?~QF0VlEeBj_db1vwNSJ$_t-6ZjH6 zD|{;mL_%a&O(FRii1vIkHeuwv)@-TjEl_}o-bfs}Mb6VCJajE_m*CwLhx4Aj!jLin zYeO+)D>o&p1o--VCQ%xm>~evYhEQzrc^iga9y&pbnP7}RgB%O=grSX2XfieIVeF37 z>^6i1)Mxg=`Y9LqA-cP6p3`k^LNPSs%S%g0*b=OJ1OAr)8!~o)L3YaY33?F`=m;B= z>U+_pVfqgs9}U^csj*my4kk;`9mdTSdX$##ynE8*$@8giwPn64x1MXc=QxJK(h|s%7S=;E8#D(_U1plG#%_theIeikoC^{36mk5WlT+zN z=`ett;F-xySXIdbj%Cm)=M)7DVK?ifs4QQHxB-N7*TXY&2(!vaR$Xk8X+(YyP-vh6 z1ty#5uvh}h@t!$SOCNt{J4ZJ^N*3pA3B`NvM$h48+Q9_U&;jaqzu^qS0N&hwkvix0 zEb$@zfwJ))%co7A908Dx7UHH^+L|0NK@I=6Blibb^~i~Ej1t*NZd7s_s_HB>SAiRfhKQ&+!y3k zH2yT7i2&SU@KDMBKKtZC+3HWsWVCV`>7S5$2u=!ZI^}HhVD;1rdmvJo#u@_KrI@;> zoe~zaZC0TtgB<7L7S}d?)Jl#f#D5@iTchzbq;ZJ2=ky%z79b10bAJq164i@XM6@;! 
zGLMFjc}@8YL`o@n#sL)&e=>qReWBgz)=@PwK!GDyL`LRUB*Et5zl_OJBpMjTXEe}G zFNQvoq+Puhs>;ozD}vf064kF0|KSbM*a7|mizlR#jXaKUbx@z2;0_EQf!6(X6$5;M z=9ym^`4uQ@qlxQ-AQVSi5EXwM38#W{F$?&rsdT{t9dH{N(QuqX}PV~TS2b6`WEk$W}lZZJBF4ba_$wp&a2P> z1U!&RS(Z)?vZ*KC9axZY#nTTdZGE8va}%45NY*J2X|{Vul2rNz#!&#DNPQL+9_=z1 z@Q}g|xks_y8b9tP1{mQy@?5hk-`3SxVmXjx14P2Z@lgD3y+@oUijq2!>#w4QC@|T^ z$os&~ow z3MAUpjaZnTQro92UZSTB3;}t6=GbvI{%B+BgS`S0|6!f3AXvMEg;3a1wb+>egceRzFWkEGFSv;DUaxt<5Ju@dL?3)^k!1HqDskK^9 z&nJxPg_d#y!b{a^el1NZk&)jMc!A~ixWR@|Dwnw8V!5RL#A3zDcO>ykK8J}$>5xPs zF)KWa7<8V0fC3FZ?mZF5#tU9U z810l$1FDvtVlB11a19mD(3ROBo3>e{=T2XHpB;oBDw2s#VJdB=r) zf|GFT=^ZT2N@r^Ad@+#W`#7fZx+82MWuoI-Dc>?j`E zR!+FI5FQY@=Usuf3MC5=!qrT(MrWq=;XI!hq&{vZ#@ZZ*)_e^7j%m@7${qkxqb3M{ z>!N#1iw#{bxtY+zpg|l4W0Wwga>>qvJKAP1)_iOk`IZxCgQ3#)8(?dEtrqOlLh1H( z(jqj8lYpa95{P-5+g*+EZGh8^kq~MgAI|WYc;MahOJom%PB!WHU{p^ZIU}-vEfj($ zL85cps6K&1*cincMR#B&w5-^h51m(p;171w8NE#;MviKQA`&36i1KQ}Q6Q@sgYy>U zpw|4gqqs(}N5VO3u)v>T0pKzHAI{!9EXVeH`@Tv@p+U*4E@dc`IWxJ^U@T=!kuhTt zk|~5HQc8wQDJnx{%v@2K-NKDbB_U%-LU=wa_wRY$_uZbqUVnVIFL%Rr9_MkawXc2O z*E$%=JZ8pG!JBe1O!12suP>edgc;(sTz7rp&#$>(OYY?EnpjOS+-u+9tx5;EbY1RN zthz9pM_GWi>MeM96t52WJ@)W281a1_2UJ&N=pjGGHPrIX5C{*HWB8Y~lq8(X7*o0g zXW%vMJXEdj4}e?e<&}Ow14_G8n@Nkf{2I(HNDwhtnqJLY*&Fw(V))gsQu=>f=JxM+ zBe_V9QisdBP33q9!$JHweD-Xw-j$^nDcDaD>E4HLR8u6nz>t3a_pFk)_TP-|HRW)t zirvW;Kk^c(AlUN_Jv6J;6`$-e!iO{kkvl|_7aCa{st$R?%0c5bnv_=!-g~zA)egf( zJy|rdd__talNoSTx`aj)zSKdH!d3nM;-oS*F zcmKncJ+XCbc_(m8EPD4I&orN*>(tYqf)`C4*p@S>3D@Lwo%!!z{vy=~d&;6kFXP?v z2BIfGMGE6qg_I@ucB{JTPh)ghre+ zTv54j?=3Z7MokeP#Gx$L62oNNOzb|1(-dvcw4g3bfeZvs7>yfQ$dX?$9Af_Yw47&L zz=iBG2}{OIR>S)vEgjO=B$N7lTL-)qnjQ<`ePGZbL=frgm3+&3y!D+Pa3Gc$OBg(a z{XrM;K8FX7&Bu((*I_wW5VE-%MdteB8d2AwW-k~Z?mTK{zmXnqgnHdU*mrc^y68Do z`mM?!;c0YGHJ80kS@u~thX9<=a=W6G={sja?N`E}&S3H}xa3yX~0j^q1q zN{p<4R-HQS?w_YpvgqVuyj%2mu`?|i*jRD1JDwqFpnF$k+hmMkxKdr+NS>{Q5H+A& z(;eSqGTY8pk3EN=T?g!w*;-$(?c>wPxc;a6EELmPa+xoSFj64)acD5< z5oXVi^0wNqRN#HZ*nQ~a(`2rI)PJ>Jc#6Mv{g=PCpP0TqJJd+v$qxvj>02nHMXjc` 
zlQYNcMzMg)nd^cx&+|Uss*wbAF645XHMR6kT3h-93)t@Ep2n&IL@efte zeQ_ROStEi{ecn#zy?Csxu3>t9k3!LWG#t!dvjVsr`(8rd%0ivf_^iI$G$@skW{B{2 zmekA%Mg+sQU_zxie)-4AK&;KCa;t)Kk%sycxe$fHjBFO@o!7Wf<$l_3xl97yOXtCo z7}t&QGY_8{C@Ujm2@2oGh0^ZF$?CkMe3nqAk@151v9bR^2WT4HRMUWLT=x4=4|1yf z^U!q~>0`aR?dWLHp}<4k#T(krM;r2)chiOI>h*X?EieG8b zeXMb+7>c-#Dvw|q4S>XpIWPPP!oq(BZ6Qm-^fhZ&8PgBaWDrT`^?0e?r9Qc+r}kk1 z7Do$&0ZMb}vKa!f8!$RK_6O%?FbD;+T@CS0&GhY+b3_Zus%r(vaZ| zJ<2AwNI4rhuuaCqZcltS5rd2!L_L-PY6q78`oW&v3^ai&fCT&IynoLPCJwXPTJjID zzJ2ZLYHSuxIQgF^!xZMvc&S~cTs2ts`AVz(Zu7dDnyOAd^*=x*)&E278cX9}x7-fG zdcAqv9NgBlbiC$ji#$AMAD+Nn%tu>}s<3$g9f|wTFkgqHrBEN@w8_?K$7F}W8Gq)S zDz483%!8Pvb{jpes?9|@H%LX-nrzy+^RO%@c#PTzx%{x)7~OYZ9hV_hJK`?MfJ5kl z(oz-enRPJ~gd=0)=sFZnt>KbqN80wLPEOeHieO#aI;v2$yj4=LCjnX1ADcw zhk@eHS6&m1iZT%IvX`a*v}x03-i+-obuHLFzVV#&%wbOIE9DAuav@m^GL!%PK|Oq& z?x5y&HPVN9E!b!osHT{6j{&Ei>n|x{@fF9ds4QyE5X5v-uPc#q`}hGyAMb{7d`pfQ zMQV)(6Tqo@@tAUpDJ)dUlU#zs_zwx4)Vhi3b!h>nsk^ZKr>56w;K@po4!6M5xV-yH zDD2I|>(kFeEF7D5qsQW7(SgIKlbt7NsE_B=9tWS!ew=>0WVph9>}lU5xvB#)=wnPK zz)A*vvKjUfBddD2xm{cMUgDsG+zrwNhn5%+*wrzc)+t;TR}IP(h=02x&d2trn)lSv zJ#-WS&%vR^m`sNLgGMhOM(9v82QI7^VhL(y{F`#y`->qm<%Y%Wb538Go2Kr%b;|r} zwFyP=8KdMzR!UqZf$ztYwts-STMCH%+0rlT`Oyim`iXz*_Z3^Oy@iqRBK&a3htqr_ zN9;$p3C0y9K28Kx~IHVqV2_X&Q5^C(%{c+PNd?<-3Z8TKfB zHbT`rEnWl9Uq*MyylG1B`T}PVt;Lrm@5H>l+q`L2|t;(9&9X&T8x{li-xc4oFvJhxW-9>HlVA27M!1c8!EjhU^AnSV9tsPl}w zt+DPsHVsxVs_s2DOPoAg&s(NN$^@Sg7H^V?!a9hf$fF61*@)dWJQj#`TsLu3q34zLB;(d zV|M@tFMZSdMcYVSewn(w%ilXH->j_J>>tV+znp6nQM#?g`zyT=kyY%^$7dcgmiA`% zS54Mo@12AhL2j-w#H=qaWg|E5{-<)~4pZp@plB66R{ZzTMy+)VM2%h7%pV~l3B$Ii zFuc~GJ=@c&PnW1%jW=bC_~IA98viiyq;xJK+uuefH2pQV%^qb!mq8AB)pUAQX-F_x zoQ-H)$Gt9GqIs9DKzWjW$Tg-{h-vV9vf((5GWo~vg6@g72i$&QO+I{mxSFCgl;<*9 z;_hqbZ7ZOoM(vEPB`|_uo4YXiADbKbSV}`PZ1)4eIh6R~@8f!4{F&ke zJPo7GRqi7_f3hJ<*rWpNqIT|V?;v`Tj6#n(9oSN#aJ~IM%^16|)ZTmZ^xPSte+7kK zpDGLD#hpULQX$)LBvm%-TA+n;`=x#G)|i;wMMjE%fU0-ZPspETYHno*j>maueOY5w zumtr+-okN)#DBc946ic}H)>W-#4Rc{2gcxxM4;NCX{6b}_ITQ{mPsM8tD+W6Y|`&L 
z6O7-$&xvq}gqW=rzM`UV9YDG*_UG0nCVTX08k>aER%}H4toMf^&>lzN0un9aztF@a z?0;dLFk4#>R=$7uM1aKD4y9<$xy-_pg@#zhZ%#ZJG@6mv8$vbZOGNhqGy8xQFaOk)8Noz^H6!uDdbhjqz7q?~&7|CrbIs z6xvRWJUX}U-1%Bve&u>{?-)ayn#D{a=+=UW|ym~+J zHsuQ!N>z0l;r))I0DMDYI{3nVWyW#teyf_WNX;qJ)Ct_Kg=v)K>SKE*K$8PhK9iOfTqtQpiciP$C zupjhq-4X$YvmX%*^XRFNe8S-p7`}hjW+8SMcN|UBX8t=}oKXWHq|Sv8o0HM^)48Ml zC-wFA2TpPHkI)=&V@J#x(v?{F z*OZ_o!@uZy0ElmLsaP|Dno+9X?uK1;z3D}hc%D|*kPwNGfJ?=ef?dlGz z5k31+KF4uNU`<89GqhAE!Oee9WV02OKI74kwn6htKa+GDO_|b$Dj8KEkC%-g)|cch z*PXM^=ILzlINFBR_k#404TtpA3?^G} zSO`s0ylA-qq-MCfW^Ias7*lphX;@Axx#d;3kg7$fs_kCBpbRnZ)rTSRkz2BBBnFJ= zG60dQqGqS6clsG1=^&F|<`2A99ddhdq-l6=(H#!Dw+K)?jGIW zWQ4B|q|`$61?MU2G!+3JONrM{dzPcsPoY=uchn;`^H|y2QnAWAmxC-g13de))db@J z;SA)0zY9(lN)`$>nO0gHLWe%CuI6THVv^bOs3+*gyw`codSpB?wxUO8W8?P^kG=|f zTCHH5n53gg4gDo=O}!Z_V*x}f{lP`&l!*nt)B9zZkL_9rrs+-V?1&4^doH|AOjQ&l z@=?sWn*+}B%g|TLPydl61;jX$u_3=o%5c^e)H^ULWwR+4z9dpg7dN{P11-EOYHIVP zr7IWF(&q2(+?E#(PB|~3?Qwg|T+%Qo-J0`ncF&c5wsw$KGSgt*z@ko_I+^`S@zTn8NJCH_ zo;Yc5p26`H5{r8DT9<@lQ^pJ~s#{?XLm=IFQhLjJOD`{IjxMzOXNC}C7hExmNyO(a zE+?6tGvQLdd!43Sjq<#e8`f~d%O%r_F8xwZ+yeK=ts?rJ9CuVypZk|&cK3WI&W_-E zt)w*G)@|-inRNZIP+hGZRu&-#ASnbl3;#!_-ghq%wuT8yLtq28R~d+I*w5US6)2VZ)9HJ1vhW)Ibnp<=mS?l1#qto7!;l!=9D^5dw9EBWL z_5dhg;nZuFN|x9(4g8UNCyqF=`Jd&$wj>isl^6~B6Q0JK!x zOlEJ*etP!Q-7{foE|2BL&sc7_54eOrJd&0ecJ3Tm}-n3u%hT9(<$fH69m+SR4kW|3Cck&>7EnEQ@&fW95jda@ZKeg9qjF&=H6{EWOW{@=Xhofu$%hEz|4 zB3I9-eg!hR0=SCe4##v@<`7UfY@(Io1nIT7i&`yC@(X`{pIL~4BmQ@w(ef}xo3J+iKyBX_m-Tx8D!U{71mecO zvw;ysA*49VYex*4H5`nD|Je%QD*aKQmpU?(-(66;~)?dvlJy$UUa4T0ZeSP3|O_ z_pyj;(`gFsZtEYu0^S&2EE%kHxAM*RM8tn0w^X z!Go7~L~9Qjai?=$lc0d~?C5I4HTqv%Ic^^`OU)O?k1zDC^f_;w7}n`U?ENpFNl49S%hCvu>?dsa%p$ zM~4L2*CO8L1`2N|?)roDT(NT+$}K;B{{GLY?Y$Q) z$fm^BFfuZ_Ts$w^XV}hO{pZc=$Y6*xGJhr7TgbBcuWq`wJ=9NCjDfA8;#N$jt&Ay2 z%GWe0W%rm^SZGr(TmqTg2A?YTRtCSm2XDI$*T0(ISLgAOrW46gsmG38qRs7|VR%rG zeG%QDVZ6lWh{+qTOt$LMrMluQpwQdfyPvObXDahjcJ>5RsgQ;jZ|&$=Q%6U~KJwDT zpkS9vH*ePEBwwThxb*ey7TjZR+@eLbmMvR?c50BylS+bAV`t(RV6aJzIZw|9b7NuI 
z+xt4evTSydVea5qYdt_n`n+PT7xPYedT&_0x|;NOC%sm=*k|_Kn04);zSEV!ON$=_ z1({#JeOr&MGyVSIrdU}2p_dOIWETGEYzsJp8eZ%>iN#DNBBTX~^`A3&HxqAKj z^=t3c)6+|m41D{{qPdzTsdJw6$X0Y!So@c?mK2%pare$0zOc^DKFg9v*BBgWx@F6j z0s>k>v$l3@pN8@=X;!hFKD~87j7~4AZPyQ%0T(e<}czTdOIM4?jU7eEp!0#J`OA6VA^%&O%4P?udG% zsbQN)eVf)N3@eYaDPVWUw6={m4R7AJhJqck|8jY<>z}#v9%b#Vr`^fQsvoG**g>BQ z+4dX>Q|0I6=Gt{wmCB8E)_$eNWXK@7$*n=pg>9+s|4amySbG;J(1Gg|EcS^D4-aoS zsa^@buc;HDM>RDyTaWrlq?W&&bvH}i%?6wV^}!&s*%1xbs%2cc;?QA9l&QnzYu5%i zI%-kC8$M#*ZYu0XeMddLh<^d>H3)XW==ZzfO|p0Hs=quZpQ}Fn7B8Miz1cQOnU@OT z0Pi>f@lquHrSTs6m?vxQaSEUNzpmwwt*u*@3^I3QudS!K~Zl3f)@$) za7cT;?rJwbmZD?ki*N3~EU6=lbg2pEjsvkb{;Sn<&9(B5P>!#?(8|Di4PRA#k=1pPJ zRG_ZZ4+a*Tm(dJP+nIb2@-yV3d&UOSHAM>+Ell=l<`%|Jpk`nmg~-G}z%KB8p|Z%^~68H5Hd9cf7a8 zG_NB9^J-mnPKF&85ckCg_wH>05?)ErG6u*FsYQq|Ux%wRjzznC>z3Wvv5m^g$|g*i z@-?rRogH+{iTKq)bpKrAys_2S3|SL%qV3s~vrU>dE&TAoJz~15$I5JHUhF?)7?|0b zKr@lNI$+&#KKbP2+~yRP)l4)v(+9JnRHr8bLlrx3O+5-u^z3wE>y|AUD1`NH2l+Zu z^hIph62!qXeE5_JOWCi8lAdhP?EIFkTD9oat4YR}I18ouj#b5$9$oj|4aLR~EDwUH zza}ZjevFo-#Sqn-J^T09Y}~jppEvogSp7aU(l+4y$+J?CzX_~-2Vs$4o? 
z(^;jfjs#ve=7ee9ZUAQy2RMECuO4{c4Z6K19Snqv-xw7Y$eG(cY*!z$jq9;%*RKbz zTUV3neYE{xmw|*8U7INXF=IxI7-8E}>Hh^LU#;T8v-Wy=PE&M^jK1aDJd1QUh`IYR zyGUzl(FBtYPT2>?ovhnpU|2ucfX+?V*zY&Ee$ZpfjvYpW zmu?hp5*IQq-CHt1?L~_gy~{tY&3@muK^1?;Z+Q!6ottfKRq>_(flNlnd4GJ`+deXo z`lkpRFqWqlEePvK2-+;R>#;WJ1z!K`R$Kn(*P`_=TIO|y%NsawV2x>4jPoRd?jdyB z4;{KTCB>4xZ1@v7E09lS?*1d^(j_??X%`AAt?BUbrSaqaLpdjyQ{E>0RJLg0aq2=U z(daS3r`52XDq>}zZm(|ouK%D$`2aQ82sP>HYet~Gs8lHpdjO1QhIgO$LX)?nIOSHi zXV0GL>FK*6sZx(0ckMM|$l$>Q4Gr~b)#&=yW^y?IC5~ciMn!#9z9s2y+RfNZtR1CT zJoZotvXyKp4EF5LJ=HDfZub5AMj2neHBrTnwej>)>P6)BT3VsotXVT_pXcN8J3Gk)Akey^cm0um1H(RVG_AFE z?OMr0@7}++h>frxb*|#4fz59Anj|V5%i+nc-Oq2a-+|n^Qhu(D=JnX_^$4=dia%Oe zTIyh(%f>5i;-HTYLbd@DwDbCW`0zoqR;~WUqTCAyl+qhoNx$2i#{s(P)LZ`V zKiO!u#W&E{_^q>|)^{4N7kv|iGU##3 zz(;HI*s^{5Uf``+GoxnBHsBI6k&}A} zVsT5~6@C26-z9B6sETeHnm{VEw;(3hy-@iX=YAGKj};w95Vu;$BkOS1?kfU!wAqnt zS3d7lpSX}VZ=JUi_Gn+uqIN4NDq79HY;(fstZvsXU9x#0q`wW6^fOwUMJ%LMO=r&I z^p&_&3VAV>^MBsz^RxEKl`EO$*hX}YRe>ts-<69sG&E$2jbGX82+t*%i}Nxv6nM2) zZr|RR)@JQx%a#qXHOte2VrbCYzjL52ZB^zBTR-EO>vm>g9bDJ_hYqdl-u7$a*yH9> zL*ZLUvgqiPkS*vznP(2=W$}51L*&&i>K>X-lV5O&{q5~HFJ2r_yyD`uYlDhcthOJ= zR5yKv6`tn7TdewD^a@HeWpw+46SEuRIS(b8wLW3=cm!^^+5qh}2M$=YZP%`g;U@c; zlwY+wZmrER9DUZ$XX~RC*!ShzuHU%f^Yx9Zp{Z${It<}OD6@i(AH!iU3P=SZ%c{S^ z9yLg1XE)Pa88LG7vFUnLHUI)vWdkwSpyx|Lf5bL2gsB7e%|>icgQE!;NvaX`R?<>- zx~QJ3dE3oOrGGV*t!*LaP!Yfz_Ohq*X zj2T)pXU>$D%)zzgw03T~Nl=Zi+rouid0=gld@QB?K4}44I)k3)HwKgvAI)r zShL=Zc!ieZ_Yctm_ zO#^}wJGR-9wvVkm`oi+w($thtb_Pn$Z^@DY!-j=}wH!0XddGDeM82S%{gtQXHpl0$ zUkC9GQYp=_My-go7a8uBadg;=sCo#ZXq?C*_Eu`Y&HTu_)I33-;zgiAQ zuLEn|cKm4ragq7|QMoKfpWC*9i~5MEwGm4i<3|{@U}w)Id)Ht5&Y;A2B9O`|xb0Abx_~pU7mNif4a*uOlqQ&2a6ru$6fo!m%79sG_R> z0_~EKpa0|ddrYNPjM<}&V1T9?U(_YuOsPm;@V36Xy1K5eZUE1w7~Zz}K$T(4wryI9 zL{>WZCplV8;qUM7?c>wtND6oYLB1NxcB|Mc*m32e*(%i(m(mIEAQ>tk=sQfz(qL9X zW7lMt+uj|O%D8UV2&{$0#jcaLQ-Y%nD#nc)_aAzp=4Oj#toZW_7=PDqEmjh%n`6Wqe!bZO*qvGV$7dzBerE1?$-$J|;{QP(!S7;wxj}P+mvmQKX0G7u`nn98&efO}F 
z;?|1oyzWeTirjnrtf3d}F>+*mhV5ytSg}H*X3Z7=oqo5~lPK9PW@lGFeE6{C_>N&g zR{@_Igjq(hWtY=9OKsECMl<{K63dRl6TwnNzckIOA;HkRCa^^)AairAj4zHKXcL%( zNq~5>f9ge2**P^ja>DqunkEw07z0s${HRs_qen!Erd3Y@&XJG7B~Llvt$R`1Si9ql zX7~jDc+Oc>xSuoV8PmzcS%=brFgJ-cO!xZ~N9y{R!h4n^#ZdB4jMYXn|6m9yKuhaC ze7G*=VNHc*?b?&m44m4)lIp^swqgSZL-gB3n!b4XQrWU)4QNu^4ege?A3Jd(Cof@8 z;k$PSKU`LgWeR2URzqX^_3NjA&)c|TN1ffT=Q1$1Hh?XVgv5}89m+v>^_{YrlvVHK z+-Er;`;hlNeVzwC|8gxPVP~(!>II8$8|s~FBx(O@=D+{eUs3s~COp?_g4qDgmALa| zq|}x^%X%~q(mR()AgAi4&fC{uN`KW=qb5yiE3CS9l@B%OoZF$DJDUMS!>GWG(aCS? zP(^m#(tUnQfJ=>n??<#?ItIA8HGyxdWuhT4<@)u(V7&qm*5w;FU_c<~8d!T9dkoZT z<1~770|d|wEWOc^h6h2JQ0y}GciqvW^OFuw0YBcRyC$~RVtsI&`Hi9Rv#GDVpI@pa ze8huO^IH+Z-N#zFA%sBe)SzN%@AKV6X?{lAsC~w>XU(aeZK>e{;;K8tLE2LN6})*9 z!rr(@n}z@u76YwfKjJ_a6>Cy>9IA()pt>C{a`pFpMdRr3p zC#4zaIdw02`*sq1%In9cI>`(K+QUrPJItPdK+_P5#?#b#=hQH5r_FxU6>6-20B5^z zR~JYuqyX5_Zs}*v?Crn0Mx()w@#8-{yP#vD0m7}JtsUC&_;L6>VvG-rIYpI+tXjE&n;2m5#i&Q#X1w%rIMe4Z&Ri1|8@F z!OH9m5}p&oKdr5;cax{vHf!m81e6zTeZWhosh2O;!uNd{Uo&Ex;9O*;8VZ1w8aBN; zFbt`zMYi#F7|*>FEZO-}r%fy3wCLK%&zdoGsLOXDt`k?exw-uV<1v_7LZ1SDB09uyXKiP%&C6lJ1Ogyrs_C?SjH zooWKYHXrp9b+Ppi6QE&@^XJcp+~RGm+p*(`MK}dRF4JDLh#+B&x@h6Ju_V?}Q`c~{ zGs~L+M4umFltp@b{p3tHmZK(V5&k~<%*T*pW^sS+K>2mXCwiv?v`yyXG~Ox|sh~x{ zD|PlO^Ln$v?Fbh>g;~{!Q4=_Pw&mI;R-j$|N6p~)_=uzg%tUp(g3Y@KWF zT`J?&bIh>-TH&>n5mFaRf_#VZHfZkJAS`;$NBOE{WIq9+@?VH8$=y3dlr#OJ zk0R>lH{7lyYb?=u9f^isdEdXhQtP!sP5RrNPcFf@m&RBI_ydaYLVE3zL+s-_nL5~T znsHCmj_tjqK7ny9$8t^R4(V>L`0>LNz8UX~N)_)va^%Pg{1$28D57~0oVVv5gNPcm zK-7YlaXx#db;W*8H%+=~AV{+l7E9~=AVMZuvB4xk{4kQ|Pz|}xdv}qqR ziOp)g;&(6V@0^Dng6{T^EFn#& zd3vf-ae6Ry=xDWOt#m?}@me-kycew*ZtJ#S~NHl{7*KR-lbo}X!svZ+y zQ3R^0$_7r{V{m)B-LPRgERpxZh3)Ts;u~V0L?wp#mA6njuS6q}aZUJ>8&UYwt%8>{ z+O5LuGVyMrQ=_5w@#iWrU+wZ3+1fRd4X9Aa;0N2jWP0(Co)q7qG>8k5dh;BJ$p9B8Jocw zO7kwv(vKNs92auR|EsxTH~A}}eU!NsML{JpYWkv&JL8%w&6`7bC^C>|Qwc;&h^`GM zXvbDJ9OT)kabx*;n9S3?3)R8QIu4N&rcV#TYfwnMOQ-YK;!j=ejlnzz4U6p$dtBJS 
zel&deo1bsdhkUaeqmgWp8*-Be^k8j&cwY}}am*-D_@Mq&a4S0{I9<5b+0cs#_;SgC-e(Y0w^BRTq5iqi2-OpxP9s}?I( zW>pWh{BS_CZbvi!E+(I5Yv?&8{eRTDT`@81u*)f_(!|Z|+AGPe7{y^`=Fbbp{7L^`$5exn+5NUF)+c%8KQy6MeP% z?$@(1?%MUlrDcYnJTItRnoN_8z1Q3|IpfZqdH}a9z~RP-2sMYujf`w5nXVc;i7;ag z15F1H(>UwYthvtlfCfmn*o;Y!$f?hTeHtycNtNe)@CWv_7qNPcSdslMexzH!0#7TdEOhqu}k^P%3|Q1%NOdMnuUr`SIh&j9bGvuFX=m9U8c` z(?eaSlCv}VG`_iDKzy^q9?%3zJ7M0 zZ`gXc*=YOyPttv=HSRh~Xk|gIy?gh@sG#5{iEJWv7)1Bzorrq9diRDLJ$0%(-5xs3 z;3zu&9*b!QhU@XCpXN$p_5^ZVvW{^5ssOQOj%72dYP!{{N~ZVa z`?ZV)sr93L=4YnCsw!glZMGi+8l@L<1M8Iq$YZRJ5Qoz2{O$o9MaB2;6VVz`PA_5R zP6ga-*|u$lad+bS417?cPL+Q7auK*1!BDFQqlQ~gfd-bUQ689~GtT=~FptZBMEdE| z%C>E5+h|te^z)uK&wRlnj<_cu9o$MFp^P3i(V3UWA7{hAT+Y&^y%5W;*A|!h*b6_u zeY7J_Dnw~^d>dnLWa!?4q8@u0xq*cZ>3DJ%wIiprI-eTueh}f|*mdZa(8>#Vt1O?4 z(p&S>OxDMLNAkA1AOuzXtd!*HmR`+g<_5cbwz~Yiz3&j|+zj1+y8-n59cOIh&VkeYk2NP~VxbeMnl9 zvOf;gT*+?G|K@rr^q0Oo^0~Ty+3y1Y*cm<4^$h2M$t!7iL7s27Xrhw;diMY&O~so7 zDZtx8_fnRu~37Swfk_eYXFW~ZPuc5pmI-5hdtqL(n zC@I{22{SALGYBNEu^S;3x{Cn8mQR2}7r^vq7srJjs5(&9zZ7l__l5;0mVv<1=$he) z3(=7wzSilkvP=tdOe#H|sH+a4YWCqkkkL=GxFbF!!3h(L54)cWf#+gxHcObZ)_tCS~RUE3=4lzk8qk+WDl z31+iq&yJ1?vF}+ccvvst-h%9}NbKh|S!lIQc1c=Ash{=qX)|{B0D5VgpOgy0HQQ9F z5XKwZf-v5E4gfsL$JGl|n%p7M01YTUX^Sd)6;*v)#&FG*R5qSJ&Ew*i2PeK08xqsm z>wptW%gYCncsw4pgvt?+2O}vZolIdeX~X&T7lEu#pFW0-T3>wY5*IQeXW1mRdiClF zvCS#Sz&3`Y6Bo5S_*nI-m(3HD2#zMbz-ijq+4205d$$HXwSw7_D8R7?M%LXuePt9{ zI1~dBel50!sHDp%cBIcXePUeNCe&V9%htt59+KHHft2C99iz>q)Fbd<;`exA=FHn? 
ztrWChR{Z=abP}m>)&BjhShl##xorLTUc8`e=p;kiKj5}Cp`qFc_qD*ZVECBRmN$P4 zbLnvP^xU??hxpeF0ruW$I_6@iM7h?$gUmlrtjhDTWg|9jRPcN*{a5bg(*m}kGv}kPBu9}DIH;woKNx*;4Ls6( zcfFFxEK47*9|}j*%^qZGqJii!an`Ib>J->|i<_Hp8zI$j(EQ&&E10*H+_Y7SEK1I` zy6WxQw;z4lw@vLjbpW0At>oIAPu-CSiFQ7{XUv|xp6+R>%1Q5dHEakn0k7`ws!^kc zR68K_wLs${J}}vHYh2%I)v7_1R)=mI^)W{2KR^r#lCVHlX+UW%564(UW}wSs9i*-I zr9Mv8g^j16CNbndsdR3-&VE0AdMyYX!pzPq4uP4?Rl|tGtIKAy496fk8?eKXHTdb0sYH00MymAB{_Fr2UyLv-1rg^Q0lm*Fd==hMtp( zSuIQSM5l;Zvt5yqtMG^Fqq#YS)jx6kcs9DjzMBtJW2Z4k&wAmz1~+&14yDs$ z^tf>|d+0~pqz%m+4qXi{sqEvk<|6Up)7S)J5mt=k=+#3jVkmyiJH8sqAAc}eL_43g z43s`WeSN)>eTaX?JQ;VeS3McNvRVo`5d?=Z`%bX6Ra9TN7&d?g%tI0aULDbSKUNIIz$eaGjDoi*_SvhCTa?T`PJqd?L&p> zVa1w1-KuxQLS35z3==lg zQ9cD3Y9zWMGydIbL%Po2$28?zg|7+lSc9%B2=(ns3%e? zM-c-M&n@z6nNWNuKDcF85oDgjexx$)$A^(toGNPwXIjBJ;n$0~R&4vD&Go!IeFX}~ z(Ruq?;vmsjydh!9Z=|@W5`bf{IM7WC!zkeKf{?dkhOP8aH z_F&H!6M4VF4oJeFy z#}T&l-KY@38{kD5!Z45Gz^RF@A4*kMm*l?i4}| z*q#`5XsjWl461|c>-O$FG~FRyivn2EiLlm+1oMslPE|E3K+Y@Bt$i*I)WTP<)?sxw zYS_-rd&9BR)C>kf6+-euOHDU7r}l5m@CQ9My9!m}eC6jspobB6B7&a!aXo2D@0K96 zr%#{aHG%5uhbvHQaO|+CAB*d1HwZTWKYDxy(s7&iE$6ekV4rvZ@x?U z^U34K?EUWCVAzDh*=eC~oa4GGBOuKD8qA?QHA2%Ko-|=j<9hX0Q%KurR{4Eobc5C= z{Svv&bny1Bb%1sujh8q?mQs9XVk(jzZ*L5>$7pg?>J(j_|JI~R#*2Ic@h?UwP-p}! 
zA~Fjx_`-=Dy6QMNT?W`Fp8%)py?b<0=lAd5)=VIY_%b=Oi`<&V@@S13HA*;ZG!XM0 zxCh$wx#0KPoQm4d-8AnYM(Ntk`!oR(J~%o}N5(*w*sJDt?%DH9$2t=>nFd#T8q`@N z#0qe;;h>xT_+1fvQ)Zt8u?|M-1~KW{Oki3n1kI>-T>0LgpiG^$hT#tr@J%OIHc~}b zMd7Fkgj=jD1F6;Y^!1%zmWwrk`bqRini8sP)pP+`iCzXWs8(^obN#CA%W$^|GRHA= z$3>+)BvTut2QB2!r~@PGNVP*$nCJzW?wTBRT6zTzD3uX6Pr+M}MO_bjJ=Sg(^(C`g z#0*n2Gbt%pB9#7ZAOjV9bz7w483~I0gS4U07Y2SaUFQu0z?>#N@wTYCuki_Qua*jTeRO zar&MYlU1iJU=MNQXF?Xj=x>=lXU+zARH0Zz(nBCaG+a+pNnK>b4D{Ctvu7VFe4)xO zBzUTqR`!3$fss2jr9QScjQBurdJfjxjW|1!$2`@#b;(|pJPIVrjj4u0oS`okaON^H z`T<(ZHw2>%VKG_$qoh^#=M#7kQhKcWR5>udDQ#jKUca7>bTIOc6UR-4o|5~_tHT*c zE!}`&y&s;Q(^hEe=-9y`CglTlno~`gJGZW`V0Vo$7y@P*p0qXC+@i|rs6h_yI&c2` z4%B5*vJsIWWj)zjR-HRvLJJYA@=44!MdG1n2ArWzCQ4oq(%&8uj3R)1Ewuv#afneC zkvA|V#zkJ9hQahq}?u)2C05_+!*(#kutV zP8)6?Pq<*RMHY%|m!|6ggkB`tKuTy(>+ZX=;muYOM1A^MJGJqdTIG!fZm<#&n>LL& zTwXlePL)mbS$#(A_n$MTJ-UssH*|YvT)epV@^o?iNE%_IWkS$?Dk~Xb+N#wg;2e`z z)#cKH1`QfaFWs)|I;ZO7?nX8h^STvf3`J~zO+hsWN2jSPB*tZTSmRL+ZQl$BT`u=w ztS0(S;^u}52xM&QH5b^m%3I9h+u@IpS{bcHY4EuB<|*PlbZRmo4Nrn^90IF@u1UL!UDK2>UZ|aMd8+ zAz=O$Jh}*(MsZ8K654Zovbbc|f_%gcOM8!;0HXU!6a@3V`g%Jpb-41d4TYXKZfRcO zuDcxJIk@noS_S=`7@D}qGXdP7QkmaQ_nCx`)%^D^h{&yjYhZEiv)3fq_}u;GnEz7l zkaiGx7^$Re(`jr8-k+Y^Vk4ptP<;IDsJfte`2CBg>~UF1Wj6`$oY-l^;tlkA zpWmg6sac*oB>{M;xV>phWif}7l8iXoZiLTylzZWkdDq5dL?lp=c3`lxn1m!RF0664 zy2L>!vp`b^Y#$sOalgQ1?k!2=5gN@$2xpX~CA!Tv&qquZ>5RG(WWTI!+qT0A5a7f% zYE{(hDv|N2O)=44M05HC#*R7to~;)V6H^nf39~E2{hkR$#@z#}4jh=dH6KO&B5W-K zP;#V5p>aT-x(+--4wy?1e&)1ll9hly|!(^|AtDi1ktp3-3#l_yhe;O0p& z^QVxGK4CF9VA@3gRRMjYQ`*v+b_VU%J zHzBw1`&qV|ad1>hGerS?1m=$R@#BEdsW7#;Ckt5IkoRk};}IV<$>^k-`WAxA^XgM# zVWt03VHC4>_U*Xp_aeb%+1HnG0~E%J%_+Za<70>)am z_M_fO3zE1uP?6-UK-=Qp_-k6QcaV`nQXn%3z$j2RC^YmUeiDy|`wAvzpnC_^s@F2H zMN7(#u?EYnr=suC06Kvt0g3ty5w3%|%(-`b(V*6GAxmh{EX7L(G@hr62K91Ut8MU5s|!z%XhCf9kw?@xmD924{uX zzi1r`7XcrtW?Rr;my*(2wTZ;-MyDzmYLtoD5ybF4f4dHWZ7I511#{=`MhV~9+h)Lk z(6jx_{kq43L}*coO4d0asC3>ym@XtJP%QXIyeDI<+q$(O?24H2aA0-@;9Tw?DQ(`o 
zj)A(Mp5fDGDG!-C=tCvQ2>0|;rzWDqwN;ts1&~9ta&y;Nt`F|~Zc3-^`VpUfa$3b* z6=fBVE>X(w{nr_pzlGKy5we1pg#DO2IetT;Dtg;SKRbI>yf3%4h<}FuvX$6_dM*3m zl6LFzWld>Yg5ef67@cN@6&f6Wsw?OeYbPDQ315E_;3g3fs7MKb{we5YlKl8DAt{m* zb>x1xWtVbuFVf^dOM~N3yZD|`KT`-{^{3t$&aEN;SuKefU@Xp%@-{l^!>kmSdU50ZhS;=;zn)^kjj;OrKzpG+SK9f zP_yI!#4%|D2r+U%VOS%6^+^(_^-S2WFhbbOC|*j-$}XX~yLbP2-qhj2F|T^kddL=_ zC6X}BrJ*C2Wd@s@w7+Plim%Z&QkmEG-?jCUnrp`fJ#9jjj7zvbKP7F?^0!E1b^5x5jrR97=G!KtQ=oCB>Zoun1dKxKGaX6E4&eiyjzJ@5ltNY&l z^ey-bf*@}xmmR+`Hnuja3}et-N*;jCy)9ApCyCP9tQL{d&e>T%;Q5+AK;65l&dNid zDn3mDQQ$sn(6FIbkGgt>`i&ch0uN07FH!FQ&(5)8D2PNV{E@f&IV#^z zoHAt%`TnN!fU+79|Hs;8UGuzO`|A{slP8+Bt%uvYQmvfs^C#g8FRN&_}rNr<8SxkPW1dju<3xw<&# z(U}rkm`4m18wV{U-k)Dw@w{-wC^Vs2FTVg$)X}}!A#`G#@3B|kD}nwI^?s+ce`@C& zmJRsN{p&2V|DGS!jk|2HdCmWbR$iMZyG$_EWB;`uG-$xz<3|~XY+cj$N$>|KAKb8ON+>%kC_`e=i z$2RTT*R|2Kk92vLvwMPvZPZ;X91&f`%*{0&(h+&CYzs>wz2wri1;=`u`2$^An&!;` zTi8%4h|7~Z)iZA1917Lr>xjyLzAd|u&!^tn-B_d*wqVwc8+9<@6`nJR&^pDA#1@f> z1;7dinlv%V_io;`t2yVDehexSG^{`w!2iIfWy_XAgK-E1Ctl3Wtw~d4Q$$gb%80oW zk&zo!)KKMLgu|iBCiM9qXwZQ;La@E$#QHDv`}H}Mx&w$pdrcT$rbNK%+k&9_wxu^WLLb`w{ z&#{i_)~~--$O9}4_q^vR-;YXgG!*l#_~B4Ar2`>*@LW~AJCy^r42vl9D~y*arKKb0 zfsS7Ecp?NZO;aHiIv&_hltrExXQ8&ASUjTVq%rMriVRo!Rir6Y5kPpBi*!gcZC%e0 z`h^lBvsP1BckiCmwnfB=g6lYW5`00;^WL`r>AFh)Bh<7(r%wNopReyG)gvz%Tx&z3 zltx+cPgj-2S$y4uNt0HSMQ8k&sq5-57<=8!NK+FOez=$l9_;r1Va-7ndzchS0=@6; ztbAXJBU#crVW#GA-+zWsT>pbDS-y(yL#Z5HIDIdRi@)YiqPAHB|26@JT)O@cy8>aH zGPAPc?jAj}*w4?UGas!GoVXZBxmH#da%*)92Yo>WY~|tr5uC>@?hLqLc2IJM?h)mu zQ0pL!h@QH3f!nxrbcb^=WIISN%u9}FBS&VbI`uIF9 zTe>Xtf$7nx@84vE0qk-%`F@Y!)OuOQidrLyd1ZH1 zLPk})80z0?eqRfm#E9k~OIl`9W{-f`79H$AqI=_}O{+O5V&s@%KREszA}5{Q!^P_i zb(Dj8DIGFg&}7>o7fd7dLjfVBKh>n@XOi zcf&Bp&U`T-{_5RwHbE98xA3*N{G}KQChb>sON$)OiVNp1i3JiOq~8sJJ+I#Gef3@IGUd9Tjg304M)$+z zf7?j;c%$ZZR6Zer)>GX2XmqVgKTIB3Jad|IN<(BE2Tv_?Aa%hrBL*~Wx}MXku8=t? 
ztoi}-mJ$D2_^s{4zG4c4Pqt6T8> zqC)B18$5xte*&@9dj8AWBzTz=lig7Z64Y$yx1p+R8Dd_+e$#8xBus?Jse{_4*QfSW z0nwzJm9pMw&YU^2v}Iqvc8Qh#qjTs-8d&Q!s#l+QHL4AMx0)y^-|`pI+9qT-lpSG9 zo1=gE=FPe~&HG$pI>^<>UFlq>@+5ZQgf+LfQ{i}cO=GFtfmqU7HMYvd>bQC1#sE^D z64=k{_51vy7KdAkTtX&fwus@7h~P3HEXK>7w+ophe&cx zp{kz92b8c$(I#Vj2qn?!VZ&$5YRht_Qt3{fI@J?`a5Q%W{TNWsYpc*gSk=W+OFpe> zXP3*M2x=8Nj|{&>f;j=A>3ECb6xOe&c8KfND5ek-btL%FlZzn5@K3#d?eWfQ%LOJi z?BUxQv@CNPkZ~aOH=mwOuND3j>06Qm%EOosc2rq1kag{a3w`Or3ZfNGQE=Qm;s~2L zgVK_AV*zFn%PeoM<+P34Sq=Mf9?jJ*NgG0gf&ySeVs)5cK<9`yJ%&=cLMgJmik-VG z>eQ*jO+iZ(w{GnbCF+Keah@7ur&au3<^qS4icnnor7<~nb74>X0&-UgVZ%{}-3mK3iF2+>`i>IVh0qlS<6cJ(Hg*1CP$g|2fhgU=zTu;HKFP*x z5A8#|gdlY_iVQLksfPMZn)861ijejmaGXjH7`xnJpw_M)3$&oK-h>Q^A3>{$WHHvu?ZWc$Y3E8O ztzy??P!eJ7NpwGVx}~SSms}JfNuR1zdfhj|>nd#BE4567frVQ4WTX~i^?es6B$%~n zcZ zHW{xb{#;zzgrXwJQR2aAwz*-01_C&xPZg|)Uu+d%leIEt2cR>qV~2;;=ElAh=Hz-z zuNbH66+?jdA2oHw@3YSf7Z%!81EH_O!X<+jXnmA^bqB7J4xnZr^snJb4Ihs3?$ZVE zOIGWm7oOXqM43is3S|}s_-E^&3Y|*rgN|#-B?Z#_X%!ezz z$bbOx#38D9A*vhdlW;g@(g|z3+2r|vxp8^j7rZf`pG4b4L)wpcw-?GDFgCC3{UamN zdg~*#w-7DqD&M$m+t|sqtbmBP@SNK{Tg`iULgZV=QWjAkUcP$O{KPY+SwUl5L}gzq zLOzDobG1Q-jwxktZOH3`DBi0nM3{bPOcsIp7*xx_gBf(ZOAH_%95gil6h=} zqQOupiLgkE5Xl%J)1qW3Wge2ELdlQ@C9JY!nG!`&rWGY)A>;mB<$3q>9PfVjKK43} z`##p~M*sis_x)bOd7jsKDGIMunk+gdiZFUj3$Snm*P|Cs4&)0CMep;7n?ZLY1eY*p zG;5r?+Cd%QI^d?=n-QG3bf|;i$?-BZ00W2|2+3_uYf36|O)!5<|0fhMa^$)Zoq+C* zk-IeIa|;xJyV%laFUMefL62wi$Kv@57RZ#S=>0HB8hN>6RDswf0K^L51|89%TQ^7A zT$6P&agp^tCURMId3CPm@_+zkpt(r`6o(LVGCB7F@JEA_I~2JcoMsalY5facEn)W? 
zc?E$`?M971am`LQ5!fmmagiosxJv*L81rn{@O+a&ZA2DRvXR3(m_}>DfgoEc2{3t$)Yst+XBvN!F32(09n|%>f9$3P{K=< z`mg$Nbop)S^bOaoe);WV=HF-0iH?2U>PYG4fLpl0>gqn5A-{%Hy$ ztOUYFN1|WxdL;>x!ULy~-Mb%e8*S+{a~R)8KTj8+RKd&Vd+rnNYrWE%W6m+g-OQHc zbli5uWarim%CC$mq5zP{s91$X7bLR^{gSY<{Iye9YY|*B0JMO4U>Tm>+kaua%lY4^ zlf|v)6O^Nv}L3_g?Go=NPi0}&)?CfGSM>?aS3$L&|UX6IuHkr zMlIlx$7}K!X@!T>A}F%*^I7>3G&wta&1wmfrzj|S*P_hp4yVhv-YmSH%oTsxYlvg= z+f<;B#LBb3=a{IetqE2);2cqy3q;$n19zPsP#^;uRu$3n1_d?3v12RFT~uFU-$sKA zmx}UKm>wvhUf+6cFxf}?MDif+C(8Fk$E4D;pQ2V*GJz(_LJ2yOLebBFK)B6Xr0+vk zZP;Ng*WyLb+OKR!-G6x0K8XeRBJm|E zDMkgyWd`))X1a*80boZ;Kuve3MA}B`svq|kF9T|M|3g$402>vzxz9F4NusAD2JirA z<)UU9SzIn+U-Eof=WOB_WU%A%j=lFLu!gfy<^vGfr9 zL5f?xJG-(R&a`W+nemWTU6gjh5_e{S&GK3(+hil}K_A^On$yCQUK*%zJYMePuQavX zvva3Rek4|6Gx!}pIZRL;XHL?ec$!i!yG&dWkwYZ%>S(~^CukS@{gh`+`HYyEvVYy2;^>OuFjzmciAvx*`F z(zRjHo4|6k4s;%|Gcw$g{}%DOl0`r4-o+-m9-&9XgxrTXy}c2qt5k<~Zh;xwV~PPj zw2C5Lq#~i9HVpnIr87TUyX-72?e@N3T&76fPsm)`TwBk;%G!$it%zj?Lp`3m?(@zs zGNezYV#xQL$zx6L7!I_I6$dZNgdLs=2E%2%Z(7v4ZQDZ-J|}h{VqNx0}C6uG8Z2j&>X!R}S?;efZn2 zBB*mRruWxW9FcqphHkt(+52{VpsFn7(pV72gbdn~FBw*3 zb-dB`f^Ao(!Gm8W3N_c(Rxhn-=-I5<7B{nXlD<35EKaeYNJKc*0KZ3MfKx84R+kY$ zC~7^+fLNpdBsj-3#*6V(!G~0)sI$xR{uyFpo%jJB_$Oo2rp@-a% za)9Mx4gG1Lzy%l#|Fe9YyZaQxZz2pynp{DAC|0b$DwoGtXP6&7frycT}2WifpQ3a~D7mLa@LC%I^r) zZSH%dEodb3{amLcmIKJ_D2e^9bpXwZ-vp-g8BB@q+e|zQX_dkZ1~)vo(-rQq^^Cme zE2dwNU$qWczFhl9nZ}f;V4y8|-Vj2rV)=-$B)s)T+gXPAN6-$z>d}2&0F5-wFX6vs zoF1r5!1I)qKP&-Yjz^!CfFDz5KAOdpf)f5cYUhUh#NVe`>}aEJvX3HOxiNja^WoLs zk;Wg(UlFtbc4NIV@VB$FWiWPpb@h|OEF%zCF-R;;LXbR4(M(|mK#y-i9VNr>eEHyI z=Ts^gMn?66VA=jLt8LR|o1V`LH2b74@l?DLGFs8pmhz5BvdY$No!*38E z7-PB&th$9L|Ady8Ai`%`g6;M1fBZ2veg&p!0_3YL$&Bu*=~|2-g0KM$;WA$wB_f_V zUpSrsEsXQ=4C7_aNOVtH2T&<-VGzZNWt+_pQJmPHmp7ABTca5*StY9yfpJ_^Xm1gG zt1F{gk94u8{NO(Zqv7|3k`KjN~U~k zDFc_%hj;6?m9EFWiWem^f4JhIQ>W_V+DdJ=o+_|qzfoZZ*;#uji-s_b{mZ-{lR-Qm z5<*mQ_W?&`M~i z;2YF@E4&ZBp_*OClXwJ@7_VU$?J0;N%x@{|X0Q+Hj})yc)Kub3`t<43NH{9Qy<4Cn znF9>LA4?2$1ziCP`U`5^0>$ux%KOzZJ~HV*e<=PavA9X_`^G&@+rDAaE1GbQ48hF4 
z@+=@7_>>I`EuCf7&)Kl938x-=%e6p6GV4@18L%4I`;TrA><{Vj7a^g zJ>+*3G}@bnNB@G`_Zl%X?a)Dm?$HWL&!o7FkgKVqgMf_td3-GbvTF#)M6-#6KeUpw z6MO^h`*QCjmkTY<tku@EXk|_X#W3=qYAKtPd_g|nYv8GcL*f$~icqy9&(C59I`;kO>W9Aag*LnM_kp9fHoEKP{qX?HK%`QLy4rLJuS zR2bX6)lRo zB-5MCpNubqY+zdjpn}! zS}vjm+C*2kY5K>tAV5lMTI?izb)EkOdR38O`NVt=vdivT1b}W-t2X#tsMtQ@SN&=h zX>5yHV!2u!e&ux9A}aT`SV&%=+Tb4Z;R2E1XCbc~HA>KA%qu>hBL zZn%g3@d$)F9qQpEkbCDLqmT;5H8EIQ^G~!V*uBHvr1R%+CJ4W)!#hZ;Q`Vf$WvES2 z2z8{(JHBo7AAp){1bz@`SeQ_Jt42eqPG=r1p}l7BK6 z9Do~=s+I36V?DvEHxo4?5u=LEz0I~3-ifv8rPc-G>a_0454WB&u8aYJ$mSU5>!mB0 z-Jv=*aeNG=2D6&2As7-#=>3mCYzs0(9kBU4@U@Tvk>kd{wozZ_tl*5X4khI8Y{g^T z#&Nu0W?{Gs$h6Qmai8^>#xz0F8mJ$G*ADSi{F8=t*n z*RD+jOGi6l|6mcWgyKKkTopLad5S$B?%BKQ>aOp8Fp!e5V6u(YE8ISkIcfNSM0LmE z{4#Ce8VWOy%z)OY=aX*mP>I*#B4#nCxG&kOR<_vD;QAR6I@0|Yp7ZYkofR4u<~(~P zLK?9o6B{_7qEq!-a0m#0Ph>$wXs9OeQp$)%0=q)=hzF7F|AHQ2c~Ns{0PF)9h2HhA zdafzCgeWO(FHQe&N+?A^O2Ak*Qgqr$oBQA#Q)<<~vM_+typEE&Q>H2=-@8DYjJJE~ z{C7JKrf~N$>V?$#Em^!HIFi*BhZ!yR4zI4N>bBzR6860}lfyJ?Y*cP=ZP^tL@_;<< zG0G)O`#vnbpdLb+6~2FUQ+-2!LKG){tq5@c-r4~#`{S5)3N(&YxxhN2km0CXGar`Z z$>6roAHOs8DShyr_`Tt~ZQXIKfAr!726q73Z`d&;%6zsgHWTS;hk7!)`}FBFs0&Z& z#JCCIL~EOiYd%11)QiNy+@_h_WCSxU)pZ`aX&P#`WoLuySiZh^+E`ovWm?iB)3)I% zCR~57n+I_9KsnJ|Xq!^t%GoX{#k*DnLY(HCc)SV@ z{x;JBRTn${B;!ckki{dTg2{Vx_SdEFYg3Dz*in%MjLjyAtX2f)R9|A${Wt}@Nwj`~ zB2fovHM@w1co6X9OcTq{yGBd;Zs(PA;En$HXU4;G{iJ{6nC@8WvaWmk`@CCk_>g+C zT?SMs+BP&C=!Fn;Xvxh*KfW)AkyQr3CZcbfAFh3VF~4M79E2u}FBU~XEWI)F!uwY+?kvR~ zV>Odr3JZ4urWY|F)F1$zbk-Dmbbg0leX<_!&}rKg4qo3CU;FYkwAKY@y)W?w+X}pbZ$|V7Q;}B6FJas<%jaJfP@hcAs7;elI zATn5JmzugSE2s|28X8GZC2mtpz7M1SZ&@nCv!Q|UBwon8sE~ExdCl4f{Gu6}EOtDC zKQ-*&hy%!M9HfA1m;0o1vyHZ&p_^cI+>%!lA^-fP7M@N><8>m3U$CId{G!#1Py>rVWiVSa z>i7@(=T*&BiQeXJ#D}6+CecnQ3?0`;>o3l6Z4HR=()HDi zZ(LpQtsmPewL78qb8g1O7;4@ppwM6`-EOj%+&(C7!|7TZf^hjukORv=!GCk`l6xqY z3-aZ(Mn!jI0bsOj~@pt*z6h(bh{nOp7U849Mkf(eunQB*Q1U4H+VwqJ{Nm~e5P*f(Y9?rfLi7iU6bX?mGAbO4z~J{gDSfWMSY4O8+8ZVy^4%C4uJr%VE91zCh?7`%luMo@*6}3mdI5GpHL+KwUfGQU>)~vuc3vFAnnSa0R4LZ=XiHHLz|^}j 
z_P=36&vFW&mR&FX&~n)9pB9+>vbniAwxP~Vg4nIb&;aza;K*WA4>};O7W<&g%texe z^_tkH$>^!bN1ktL&fyi4zV5q5B#mt7^fKjQj2=?yaVMuWf`mA{J>=(dh4AN`noq#V z^C&N}lM5!EllVi+IRG8N9TsTmFyG#g!6bMnzqmg_O<0g^WoH;ZU4y>wPK-zPg3{^%bwsOgdw59vR@Q^lzC__z#;;i3v(e~WGsSCb|_FV2ddlj>+Q_u*+r|!^cEe6{~Y40$pz|=6MQHUx)4#! z2VF;rfr4V)`t=R)R*TG8YaP+Hmfr6I41*Dz{Oyza?HNfzdrR-`$wH&O@AlbV5ps{o zUwpr3QMY7)tY|sxQb7XCR*^WzeM^u(WNK8OFPURKj%pQz#u|SHN1!f_0UNT+2d-G87GR^(_ zZ!4?|p*QLV6uY2Piv;|ZDSQefTB(CCaJ8zn6?1tmir9@hb>qF+*&0V8IyrCR?|dwt z)(qm_KP_YWQ|8QRTBqvNS+;8rqBcMt!2`t)3F*NJw(aG|yE?2XSsnWhUfOitGP@qB z5yLKMn1V=jVc-rtJ>HQQ|L$bTq3#q=&#UQ%>#<3vZeA&DWLue`R}@5-u^==j5G%{9GGQKFnMLLEsE(LMkV z+?er1H@!qGe0kEs#EC%y-GgijU@ehD+^NUSU+i8z^)O1aN5rwZl9H=A1E)_aZ8hw< zL?mcz+(;xL95=iHSFKVJ=o708CV9lIOtg9XZzylrGA0I7B$$GyZn!U?IPRspYJ-As zjmc#vi;dpy+NYm5Pr>=k`RxjKjnDq)ZC&QcSK{Up2M>Zn@K3boe?b(bDtj2LpgSL1 zzKGv)nJ6wR6%k)efq@q=EFMkUAR>dTzT>_R0qGZ=1r?%fOvKG6d-F>AMzKcXDC^V8 zpnCp9MXPPT_R__R-KcAY2m@r5saXU82MStRQ?F$h%YHFj0??9VcvQ|UP9cg@fG0>6 zUw8HRFB$!$EfGHiam#039-(~{ZX5FXJO2Fq&uW2oEoLhQmiK|7{!8;4L*7H^wtSiR zVGfugGFFxDRRpIVDZVNs<4e4D&gR>^C&+Wfuu{ye>8N--qKA{wAOt&+V4DKg5TwOh zC?w=R()Dm)$38hr2sSwn;2=A(WSAC{2M{a!2fabY3G4xD#hl61?i=X zvaUOGrk9v1)Cpdq)qZ{nL-f2#u(lW1$H;Q}v}rPuV3>TCE)hs+9>eRk6TVE5$dn(W zHk3E-WpO2n0Ga%PZc`(FU6v=rw^4x7F2m5QX6cKHLaS8kI}_hiBrN}*T4UMs{-+Ry zxwxXe$(4tBk-tloJ1KL$SP-@9KCL0gMeNv}&HcTRCxxW&``a-0>3ZKh3EFd?@S~sS+`nRYcF_`(}YF@UeWK&T##$NA%Od) zsb9qSK~$nLmse{)?GZCkUob5YI6}Fs@8XbwME|s4^NH$6<9tpg`ttZ33VI zy_Dg1zWI>1v#SrXpMr+Lh+#^}`>^J$$gjOXKG_Y=oUTXm?% z5E&r+)xo_%iCzHRy(4HEGG6m}R3O)8~RgG0Rm2r&h8^ z-0hcFg-$(jO`;-0LeyUamwot;*xrMGSk^rKW#kGpv$aE};idGQNY&Oq&N+{k0yI`c zs=~lfMK$5j>H@fDqe4YhwGnKK{G_2cGB8oa2p4H^C_j=)>Mdwe6iWj@Cx)^40w=iR z&!?kyuiZd!p>E=@FR^l0k$qV73}~w}Q@?05j$`cKBs|o14`w6A!&+p1>K@P7-wCTV zu~oSP0uM(oY8Nup#gZ17>SxK_v-lKRdr#liG0*E+YG*M!aOKg|E*hZ@)aR{It!4vz zH$+RnBe{`=#w7;ZgbgC02m`c)o=8QJ=>tr5AVdJ?zy?<{FDNmo{Ik9F+%6s7#%X9e z)y}#8KiV3y{#%4~vXBP;MqtS7OvolrzAL%U+{fIm1?5s;WMt&j9-}e+V%f4DYD*Lh 
z2vl<51sooHU^?h8s3dgifDVjGPn`CH>2cmvqXrEc2xM^ON{~rsl%UzuJJfsp__2Ml zo2K~(2kY#0fBuOW*l1QxUwt1mLr09>Y5z~a6i-wx;3h_-9tr-3QW9xKj`PUBw^D73 zu{dv9@HsR#24^-M8!jt-X*`{#e!<|PE$u!^$M|1S>a`oRR7(*ZfE+%t>D|Bop+k{4v5>Jp zu`5x#h_M;>r>%empnQE(M?~rD9keXjPOB-K+cRK+-&`ou=@VX4q8&TD>Q1aY?cEQ>$^Yq3Dn@JyxtavGX53g@|nPUG}A;w1|!VIOn|?Z^?fqZNZJXV8b>J#$EzaT40vd`?w(lZ zT4tgJLLi0{*7dm6+_eRQJTcn`aXc)uGd?_dSzQ3v#OdSel5EGsCrf!s9~11eB{%bLXQ}({ta9X3bX^>eS5qLN|haEmtD@P~2%QP9Vb17GroOpd!kyNc{OILv~2c0^n zJt=C-hDonE$hEffh;MPY;++UN%M>d=-MY%27g3lpucK%fD7LZ}!Qh}E0KC3%@1Fab zMtq5Ia~c*Ucj8T#n+cHzE;jBI5bGR6zsWAl+H;}GQ3c$sFU%2;uPl12Yx;zul%|iP zI~Y_Q(nM5-GK$b?+XdJ=p1}-u>dJU6<+lh{ZYKqqoRLacZa5DV#G;O55u#Kq4QVXf z*xFjF3&cy4gkzQPKR>-#$ZXVEwTnmDGkx1alC*4tpkz=K!JIhdD_BoTn z{bd6t^R&A+*hk+MV@?@9g>pz7l+4w@ioqIlvpZWuI88OO!vFxvs-cQRI%@8S%U}@& zg_+9-1U@cET6eZl2N52LLzT%@q3K3}DrH0F;WZ&7A7h9I^L=y}#zx-)W{iaLcWPdn z56w_AgQdwDA4>Jam(OX;>KutEcIAL&F+3`D7{H|X$59xG{mgu3US!vbjKWjeT9Lqo zIV?|~1< z@z{0A>#r9$)qJIcABd}v@D$cLAOo(tGP3=mal&X7(aUh}cF(r6?+Uh=B_IeP1VFTp^i^AZl0|@nu-K_N9L$fPZ zO=54VT?CZ5v7P8~2_pn~!a#hdflh{(YU8{3d1k%WXaW1fQ^6fdw5`&&p}y1V^C|HO zs8wz(PDU0Mno>IecZuli+~;RwF3gpkjbhP+NW)!d;)wB}3v6#ZYV>D3p;OKxMc+lV zW>^eJWskTDBu!=DZ|(K^`PBn8xb3@C|AE+gg4Ni*9dM_hU^qRQtOgWy6{r>ZTXdB; z#PyZM-zmx3LrO)yLip+Rwd;n{4isauY%(RKFMNl|iuo}Xb!c&(o|FZ=7(yKAD_!XO zJWq`qCNXreTp`b_Koy(K{tOWThKRX1t{~WNg1NIxg3uIq4biZSI4@bx!Cx(AMgLoP z2d#ZfDy8xbcGsa!*i5CR^I|?V?9Lw3o33q6p?Z<3y4Tw~8y*_fMHxI4r)E2<>lk`z z>*yRgHqN@gNb05KloIf)+ir)w7jGvcRsKcLIT1Sj`4DbB4yK7gr@XG*c$o^Lk_0j2-pLZcg8(I{vXLI}yR!Xk;K2K$ z1TMdO{AfZUuh-I2Pq0Y7D3b8p;zRPw2$%Ed5g>GHk;P7!ef-dF|Rv z`ACL(=$z~X#AQy6>EGZB$N0n%6B)S|jXGzyrT_Z3-Pe_@6K7oTOWL~?p`Pr44JKAA zSyX3}`;E~KO(?D1iSFz?Wte+~)73E^HqjNJ-)pfKM`UE0Y!|jxlnT5BC``@&sT3ET zF`ALk$A3|raz{i}2NRguDYG#{O?Ap#ZeMVhEj9hQ%1%>E^ef4)Dy21HrzkqQBsn%- zVyQomHb$Nsm;X}Rp0pJ z>)h_iF8Q5@+>Lg&i?OUeXt~_kBqpPaVYt3YQ0TtK_omKXlN4eU-g-^zfByJT$@a0i z*9)fYX`8kVik=)&DK`F6VX;2-M`|qH z?=k6#>KLV(-R2tVjRpZuP5QDmH~#y#*DH(6`nzwBA&fsqkL1FcPaJ<9Tb<<$%#A-e 
zEhxkyxrTAOIK*c<7kVfjlgatA#HXwKNo(Yx#x5N#7Qa-h*_PhKj+$J%d2`?FB-$~w z8WEIW@#uZDorXkCn*eTNaOLvl;f!+4Sv9l7zfAWVdR%)uyIW-3PycD`u?98$pqi)K z@Ps=xXwju6ePYQs@s0;q+ebV$ouBp4&n@`f$B*Y9rxgFY((WBD)aWb{`esrajL%ql z1|^JtWd{`{<@&%kS!!+i`bGkr91$vc;6M*@xGUzoG=fiGzfPn%h~x03efv=K4D9yh z-&RQ%JO8GfP|J+1zt@wY>HdCCH-uQ2j5!>_mXBCwW}c?YkJ;VbEBFnqYXaB6avMOu})qk7*3u5Wy4YUKYZ4~uw%QWM;tTk;WLu>jxNV_w(K~@sAnpcPu z#iV)qyP{_xys^NtpXs+|=+{rUC39G~TekLeVX$>MI*zm_n+NRG)$sLBHPx8xV*@bVSx~}~m=l3i(?0QMGni41i;@i55i!Xd}%~F}MT~$%C?=r7}lU}`CsprBhhT%)q^_p{5uV7}80UC&5)0Z7{iE|cJZHs$|@UorltOvWa(}ee2saaSmCIKjj5Ydran4}ZW8<8{X86`ixCK~hxNqfL)|-3WIXq{N%M-Smwhfn>0bNzwRoqR;;RjRw8J36c+4<(`IMQs`CWX{SY1(!wlkwP zrD%?kk&!L>pYw-nWS4sP!sk!sx`^Vp^ z@w(44^;VQp6QGGBG-&0dvvu*m&Hp#V;L42~H})q5wkX5Gt9wo?@4u(LicK`f6xmGy zOrQCHPG)AK5DlC>ZW(dl^)$b^*WXrGpBe3_D0^R0F@#0aHRlf5hT>F06ojg@u=OzASe!5V=tT1-yCupv;)Eh(7{ z$VJt2Jf)&hN_~#swgulez9OgJ_qxu1=dw)FTCQnmJ6fZV-2xoFP( zMeX=m31E!(GnSoY#ZYxl+qusfxVXN!lC&8S9&X?ng1YK)%2umJl4?m6CdS5B?l}dv zc!FWXT{d7F*il~Gr5<(74Dc_i;nB>*7s7vRMA~>Y7YfqvU*C`BC$fHdK$iiEViN}~ z7rb%fZDQJ9U3Fjoo(?L3`)q7%ym12oiVbCLmDzynw{MTZ_UG2h@9*2I*Q=*BWk^;p z$z);CRO|5!kuTbHo8Q7DpYynJJAGCt_+W19Xcyq!q%UI|8mDn|Il6h{mMv#>%gU}V z_Y8q|NHA>U7&V9xvKwbPNs&$tPXPdRF)C zM0>quFO!a5Po-`t`b2{?;JY1;_w~{iRmPLPc*d9RIVn4apVCqaZ*jD%VHe9m9Fh$W znRiCC#-_J5EQ>q{TvoAv*(>9y@W5HNBSwT<{uhtnU>vVXBW&4+n65J`o0?U_OQnCv21=8aT zN{S>Tf(=YWY%q+OlK@D}RuF_Ebj|vm30? z<;N6oogzX*53%_?FyEghZCF9I&f zyc$SrmxZs2@9i3`Vl>uo$HD6N?@v{nOMRbYsOIZK!i)w}Iu&E+_*cT+6V0+x_pki! 
z&z{;DB!libMuP@TM3;AJ1>eqL^Z7R=Rb1-o;`ukUU0t%g(7BAncqE@jH|_`NlpKZ2 z@;-^#{xtG;86mCW_Zs)^{UPe(@$Fu5w9dQ9wytGz*?h8Zx1EzRnBch3_76n~9?t`= z!UU$Agm4$?&&1q3F`$>Gno(cRw3 z<#6r#^}G1zMicmjKM9K_oR2pi(T%jh98@y&Rb0lJx?qOOCWQi>IR zBhNmZfhyYVt_xp3oCp?|@pN(AkDj!p$MKTMtLggv`}+ew`u6TgP%!wo{N+int5r}l zxMfcO(8V?)BOfounlE3zUAkzu)B#V@3wURt`U z!D)O~TYKV6*_UUL|8|v%4wSHwXUL`9F(DRQ&eKI)m8xrfO62)EaqlGz&=lOb7pf>J zbH;6tA?wconYw>|<#|Bfqg1Okq;j?>p&3mAjLfG{OTJLAvC+r(Q&(s$I_mB@BD0O2 zilPL5{ap5C+{2@)n=kbapcKRQD%2n~J$)8{e9g@8ut(^vh~WR$Pchd0xy$iNY_90kF%J!FZ zy_!dV-3x~!)oYg~k_#21Sop}X(@23lA zvBZ%ad9Zwb1<%Cu#yG1W*0M3FU@-l%;@c>7q*Kq~@;Asoffeet1ua#!6Vdx%|4cqH z)GQ?yqJ?%_oBB@d`MXk(NYoiHfTGC8%)}(#_WLOIFo%9+++Y52vzEXrOY*9*P%!G> ze>}6=XY`g=-3ln+-A+IR8*VX-XEgtJ`>Zo8F1>J1lRp+X5NK-K^a}Uk-Exfd^)DV= zDP;iz*fRvgItqBE<>0CE zX9F`ZA<1p^B3gSst*F@ZI*sQ*&cImJf7!AD-8$6%$WS>W z1N<3@rabCyS9bo~9MqvwJy8XlBC3ULu^&0o)x{-Tyl>jIJH|!3a z=gJM94oPR6P14D511VM?tyJ@MrEWc@4kn4$T)HQP!<_U+f`nKUXI0YnP8Wf#Xa~md4)s7~Eg)E6^^#WG*+y7;yPq=e(1TO*d%~(fvyXzF>}GzQ!*nDhBqU(s;&X2rWJN^yEm9O;L!ReS z1V&Z)V@AIKnkgV@YwPLlKZ4D{rJ+)9I67_>dXg%ppM`~@tXupjn>^1EHS>Qp{_Ffn z{Fu`5LfKP{(p#t}^va2aa2ZdVBqIn7sOc3eyFS?RWw7)lxdiFUv=AqHuMmsI4I4W0 zFJN7>M>^%S@?W|%p3>xca1*ToQYER0=76A_{B8fvow0R+34Y^7&=o~Fae%_EXlC$_ z;?=!B7N1GUKgZ|DIwt;ZZ71V>{aG0uZcfq~<23rV^cQVcuVH>=VsZ`f$ZarnTc4 z$lpZSx?_Hf;XaMDo;@9G_?B6>%+7tu?B-Wt=3&#$>QSNRfUBH#()z`w)bhTWG>oHw zRJ-RqM-MHT#=NNcOWF&{@0sLh>B>SZK9{a()^pzDzw}2n(Heb>Aje5@^m{$kvqDiOws!7*oXRBAztnPh{w>+8QO{831fHW zZ9bx-@aCJ=>^?>9;PIdOFG~sAF02cpT{=iXfDZpzdT)9=u*a)0YYJKGye=>FjBnX~-PKAeEdmO&i3VRLW;#VV9ycB{by-%CyW^~@)xov!@kSI}aU zMOtNY(KPq%kT@?>wgzsDtgQpYk|HR`i4LQF({|$`@3XnYs6&Ur#Prk4vl{S$6LdIS zVFri+J9kzI-hv5|eHaBv6;E%{!gCfmcY7|3YflxKPa)TCcUz-IP!`kEe_1Jg2+23I z9Q-Su_hJ_YI;(NIYvVwNWeC~4+_wWs#G&{wrSgmKrPJG-aH!$I1i~ zSN?*cw-IeZxZMWEB}Ex(hOKMU8G@{>3%*~o7s-x`Z1paB_>_G^53h5FmB3l=Ij;_3 z6jXcL)-&qVw7umq={s^-p{*L(=fQ$8ndOZ9t+;o9mft>10CeW`FNMBU+2Mw^au7h$ z$u16bdu=SHuHC=dZIFT5b*xmsHOD-D@3T8 
zj~o@ih=N&8Smxv0FF!n-OwKyR6EJPGTPw^-w(L&@%&=f$d8ppi|>7=y0igsO|`}pc!q2}fM90So9nbV3r=$G$>22K@E+2kjLtwOW;%(2~0 zzB8Au{OE)^so7@lRSoa7oqO&eDbSl#6BZ32I)hOigg4G;G@d7k++B}lEnWUmzj(GJ~H4C#}&UFjy5);!xmahj+RGh2*tHb+&^TzAF~wRM4^mP@IUSV!=+S30UeCT6xoS?@ zi%z?;=~W+SiQXS3`k}3`#Lt~xpmL+s7mp2=P2QX z;hb5P_NHVujC0lB9k&!3b}TLTJ6k%7hJlOcbJm~FlZbz$?!YBC=ge0%+=a+VnAZk+ zORP}76;QT?@87@T*0Ql6V=SYX#FaQ=q2}8h21X1PrR&ALH6G20;519A1rTSN2|qp;obQQYDhcXdN4?T_97RJr z8nNVxbA5_^*BD?BeLaEeIFS@Z`t9sBZJNNfsP4wr!L=@_eqI1)es}f%4Wd@YZ10q0&pP(U4Y@)@ktr|N1w_MR8bQzmum-po19lOt4(T=ec6h# zC;$E{o_%qm!riZHG^e(7ZrHHlO+;5`Sg(u{qiB1^KU>I&MS2%`Je2*B)A}v@a141v z93l3e-qmkezH!=pKtZbW9L$YLmhhMP)PNhin3e_$?9DrUc-Lql6I-)T;t6g&dnDq4 zhf6EEvT4C;yO%eb`SROGZfKv?R@+)YmHz(g9QW;vt914ZA;TQenY83l{QOp)PnknT z4sO;|ZJjLbv2$M6SUaJAGOornw%(clc7@K~U!zj!_98Fl4ikK|NcN}c(CFQVlO@i> z^Y=ncSk$jgjG`El4Nc9K$v)|s_fE!SetN6{T|3!7!JkzF&VpX}AFuZ=s5btPpxKXO zj80Sf6&Q=XukWK4k>r`v%B;THB0t^%;nIPF_4h}UHpK5X-KT9}yF?<6NtMN^W2h(n ze4Vov@=-1?5@VALN_My7h^z_-`YzL03EK@PPcFH-zrpXn z@z-(fUHtssxn54j)TiWH>(rtdEf;-R3`(+w11S}ewDJrIcDj1f$n*y9cK`|%wSxay#8%EV`Q6Kqbr*U-d$=b%!=Mqbqun~B9bYjMb@vCND z4r%RKw7d8D#5l!lE)RNlw<)V%o&T|7`fwHdwZ-PK6tJ@j{BP{-*U@;TDaJdFd(U7~ zBU&Bm2Xu)sXcYb=SSsIT^%s-e>wo};9nx}aSR7nmS%e>>TJ`f=L#e~Y&|6W|#4?64 z3;#ogVajcujNaEjhCafZ84&vDtro-Ro)%}Govir!3x-8@pbeRmUfElG*T{Hb3tm)y zG+S_cWS=;}!Z&Ze3tGQX->mfHbv~kJ2;;!!qetIRy@QA*o{UM0d0@8ApDJ1pxm2`1Ar`~PnFUq6bH(vwEzs}|D-MYffJ&~z zhW!ay8iigpmhT1Dwf!e9$pMsn5;nVNjf5cyqdlIenaS`O_{!++q=X5FTvqhLXPC&7 zM`CNI+04j-f@%SIY)ImY&&&e?h(SpcA)fKq?A zIz%Pgznh_BcU{YXaR@1#mn=!~IeKvK-n~cfg5$0H{#D<$q1Id;zYFLz#czV(bDZF% zP-q&1O29ym@+fnUK5c?76zUT;o$x{`1f#in zvI;O(T5PG8PaUZ5*abhhV!?onddt9#FrhiEGT_bC<1^mow3WJt!WNecN#_hx|}+sYIfyX<=2b5YEqvsGe+MSR`^*X zWh_I6z^$hi;4d?X8Rgym<{zd^S$-!CZ008=NeM0{050|5$3z<_g&~FNVby|v=NwQi zE$0ozNO2{3zRT4-@a1@Xlm>k$^i zV!UQCLeY!qE}?JfBUtP1DE_>Rxa*H-%CUC)NP%Y`#uAUl0l#gg zKrdNpg^^wwoqbiI=av_#W&3Y?ZW(kKkH9U%jI&nxxg`dLgmiKFM#E^ZZ?bVyt4CB2 z-h9A#oV0t-E{r4qdlLcOj@<))VD$btX*mu+VmFf~#DI-P7Nz&4??GTPf#Z@}Fm`Ba 
z;pcird!=?GM}$R_-Q``8g0~2}jdIoW?jL+e_B_k4F~WS5-vUISr4`K^)3FK2in+FL zfE&bbSTvt&CZEP_HgN4qOVQ`t!NA?ldFj$LshQVL2Y;}-auSf)&Y9XuBVg!RE;*gD z!PS#mEnEH@xFB%W6fa_YsWUWFyyQ!;LTRswQ12(EV;sg&9+A`P!P1K3O-D3J$s~>0 zhY2iCb{K}QLIKei53gRpea<{eX;8O+P4sKU_DP{>CHlBs--hKGiBU?oZhmSO82K5S_e z#}%%|NH>-%%4p4b13N+@cO`;iQGftGVJ=Wp@&h-a^gcalM(b9Ty!N`!*mGR?D*j~Q zSSNMIB}BI!1_0*IS#s6Yv2wucbBCmB0s}xPvyUY3ERY6JF^WSgIxWPw>SH&k-?4Cb zMA$#drUh0ASgfRkr@u;bK+EJde*A9PvgtO--*ECm`Gyo0KByQTK5)`p)uO}VS=%&d zS)pm3BiYs2bR(nH><}j+4>(0=;`yPcw0sxRA|I;b=HcRD7L-- z+MsDaV#I=JBeD!B+(*&d9Y+in$+F^mA3nuQy869l9EYseyvNqdp*q1>7lnV>H;Vp4 zgXms8AaP^X86w?Ypc2XLFM>MnH*%~bcL)sjU{R3|sPsblzQNmR?G21AEi(m!T<4Rl zYTsh)cGW;tG=Q5zEJRoV8nF1@JPjSQe7Z22S;BN;|JSg76yMy`Dl7ntdUSVu9B$%j zFU+N0z?;DmcPPLmFD+!T%QN?-i#XR(=5Ue-V80pjp28U2kD-w-bO$22u-zc5S{4ri z%!2Syy5Cs5Z_l1RGTtMMl=M-)zTIG&hDc5mj9pYFq%8>hF#zm<1Ng#p;o3ydmx&aH zZy>HJ9H;&RdY6X%pT!B@b!b(n#V~?>7spC2c@@MliajSJU5*SPw+nBf1V)WP9Fl$c zvT2tt$K+|zx}ThElw~8FXj5^%pyBzg1R&|Jb=jY*IPXQ%lYiXjH^+ZE_CwYy$Vqz(EIqL zsZcLjy#%e_?_;i_l_7K_WRb8xy-fdgqgw>b0kj4VZ@06B5zfbjzuKALJ_SkT9C@ER zG?5W)SDVITbb%R zoAVkLQbHGPrV{l@S9vv)YU2>S=wv@VWruJdimyBO@isI+SJEVdERj}`kS=5;C(wav z#D#!gec;E};S#}oeNW(YQT7sTaN)#Mzh~sVCQpW-{y44Mp5f1v)u}~IKt=ogs_W<{ z`!VFtbD6{U?W=s**D9zwORd@81pF`|Dg`_LFn)1AhJQX|ZF(Fe);^3{;W*hZ>yf*P z+hSTbJ8BK9gXIXo4ebOCozMx5#|vJp8?H%)cGTTzm<*nwJuFr!;3)Juxq24K3TD3(K*UW2eZok+qWO) z@375HZ=oHydOi!KM~&J{3)>MUYx-ps`(spQ5ZUuTBZlU33(bq+TrAO+h!lZP{aIz@ zaptO!6c_`S@#|9mgHu=x1O5v*xMzU!^+2()Vf;>pWsah|ifm%JuhD(}KGrjUiZ5XGlh_=K4?e3wZURGOQ;Z zv^*diZEas~Fr@;jM-aO)v7f=H>>i)drkdag9pRVDT4R-^O#I6JnDeuacQ+s+gh2LQ=iGJ z2PuW2S!E0)x|9@JAn337VM!U#x1NXC&&G3fz-?f<7?OSV7J|x$n#Cl|POGu~xtZ*^e59 zxZsr8fZ_O_1ygD6ewA9L$E?ywNdQz~ma>jy!<=Rp;6<;B2p5(hKX(=ENc?>^_adHnvWPtDM1WXl(O2z4N~e% zUhXO)bsFCo##{==RwRPDOPZxliKEcJzFDJ|4}@+z2?myajTLtj(yyt9TMM4U)!KW+ zE?)Es=*{r@nSHG-ne8S~l@=ILQVhIOMpd3eE}%BH275pKM=5X=kY!Jw(YQ6}_&7OB z&4bok)?4y5%;RA&aUT;}G}r|EKr$n$Q?XsigEKsWKjT&86VJcSnh33tkDOE>k-c=s 
zvuMV`q6L#f-c#FPk3S0#CvZ`6iFbJeCFLZwXgr7O*rz6i!jLFgF;x`>H_R4)X(J@w zl=aqDSE_`>+tlabiC*?xx8aGIpW|PpJZXxb$;6(PojVVgx{ZF|sGMK$H>v_q=rwcB zz0J`d)huN{E8#pocyQ(fWoKCf$haMs6 z0>~~9sSYsR^x;r|#xh{fZD5+ji}L0UYNuX!%19x`!OfWQ6^=>tNYW(ZnG8(ANf}Gj z-M3YbGn!rZ1)q8N95q)q_u)5kO0Tp!gnQL<%C69}XFqyfPESpJK`-tC&8)Zdc?bA< zS0aWKZgega7F<}>;utN_%m-;_PI)S3rhWU4tBoH*7)VT7@%8O+fu032C5gHMqXb;b z*3G7Sd;9+iu=vg1hS>q~yS;NS;63l`C1KyX<{|z3SaEtdn`!Zd7I&I|M zo%M}U>0fqDS9iI361?v)kMUW};-$TETJ4SrR274Uh~2wQ?8<3*54KsuyhSy|gX!`Md!RnAcIY!;PpS)S4Xn8=Yh#ro0=F22@N|gqYKfgoklwmw{SDF#o*(9)j zRyBN0q!d1U&2Pl>rnU{k+oisq0^964Yg#?WDiT{|_htv$uKGFwvGsoce_NydWtA&6 ziatb{7W#ynAgCLNeB#u9=z1mF}lTKq5lE3r10$iJJKT3kPk_GApM#FH5p0m zjN$~QY5c-I<8V%^3ENy&EOmZgV%%_F$vc0p@;Xs5C9e1^DkjZo97h@O`y!2_Uu zA_L#u{_1km=F7OJ&3r9^P2}W&&D}u^c?TiO!k2}&?j;3IJqmKl`9nHCL{vl!I?fRl zk!Y$Jm&ipED$-ORDBSCtm2e}0VmPW)dqngFSG%6|xE;XHcjJEvon!*sNXvahv$gM5 z9>MM?MKRk{u{!X*)Z6L|ZoPcjP-!R8K|u9IuZrWq$pI@p*3j3EIsfB(Izz@XKdkEG zLvI?#s|AP}@G9lfD&tjQ2bp}AQ^d8bd})NPco(Pp-sbkpURtF8QzP&Q2`bz`dc)cr z7MWG3*DS?Sz)*E@UqM1u`g@;}fKx+;4rN$Pk5>|WX1(50Tc{9c^wF}FDdSzPX-&d6 z8YJzuZB4lfgQ{*01y>0F52#HZckMKzR1{Y8c33Kr)_;E+;U>I`&;5L}`MvQ}rSZE< zRUyzq`ag}h>0w^TXL)p1wV&u|X!=(ls+eutl62N33)q(oAAQXcE1P_2M50Gl&7eLc8VHMDODwZh8c)ps$V0= za}q6y;Y(k{>=5dzoTz#WW%9y;iHqZS_~>icgS3MpMh+>*OOPwX?R!~JT3zzvyBmKN z-Aje`kEU10s=1ZHCrMdFbr&lyniN`&VnbT(X(96#4UDudLL?|s4e$q{Pf)A2VL8ev zPZhHc{Lj3ah?tkgi!*VeO24iVE~bu>Ls5tO-AN)Wwi&eaG=Y6=y(@&K(%_JY^Z>q@ zRU^p^qASU11;qE~Gv8IS&@k>i(x~G{&&v^Qdc5oL?~t5U)K(rtp|9|Z3pX%E$SQs0 zqD9FDao`Y0&qGbd9*gbnmFuxk&+|Vc+Xe3H0*V2ocN0k4d)Yst5YuYide`)qkYj=Z z(oerY5q?|Bk9-;cP@Or>#6#gBq=TOG9#U!JXPzp=RUFiIA(~QyJ(P}g??*t?o_!39 zSigS#q-B-81b1vYN;DX5f8~R-K8c{#xPlx4K_8lB9V0zlk?ZkaNka^{eEHD_jQsp9>@&9a?V%SyN|o&0HYA_#?%rCR)k0~Xe0Xd z@8tN>HP&%3<1u{Le-{-^0E|J8 zC%Hr9S$6WBA1-;)gn89Cik|BMoHE7ePvH_3vlx($bqUwLIVZrxv>CYMM25bKVT+Q;cDT;*#?E_92Jyy*g zPnt#p?KA7xi5+!f59Tj!$)}blM$%{E;7cB5W`(GvkSv%_ppnp%10v{;m-|x6?*;V6 zd07p)!Kyz2wBkyK#_8R!F*tHi?0jL}7mcbP-etCDBah#}c}j_F212Ef9Jei4(e@vZ 
zV`qV_42TvJliW?1)Oh&tp;S4r;e7jd2l>kVS-l4oS;a6*JoDKF1IxI$+O1n7cLeU7 zxjx+LN^fRG*akoxq;#A`w`ejktx9_q=ln49rsWt5-hl;;K)@z4A~+|rsC-57*fT5j z(SY+vbV&-IN>qY-kQC@YugpCJd=*1uc9ho@iwImW+&LN5V;dY%QzB=ey3Ycyx7|%W z%~luMOOzoqW`*ZAqxlpTNHPEkmJ-`nmwqFkd}3(FoY`HbRF__x&++MZ=h z?ZX)6sBEp_>y9bIQYfNoE-q4_;ZoD1#Bh%NBU^CU8-qUXML587!&uzsM<*4nNgO(Z zIIfl`<79%u%|KDwZEl;ns$xyue2jeG1T%`4u%}O+%pX~eb?l<6^-|J0?_V;6yS2k& z7$XQv=~Sz*u`%h{v*+Y$c?%+==d_se2T}82IEtuZv9Dj`w7-{+>@xclk7mezfBx#g zuTup;M_JiITRWocV_;z5)&1!$b*^hDR`XmB4pA579-S%UKZ{=OG>D_q`4FO>(vv#u z8k$}pm7AnAE}5YJBsddR8Pd(%?0U91iU%o^YNRVLR*8gh3@jDB`h4R-rx<9U-Hc01 zk5_YA*&(||MXW~enVXyI9Oqnfei~|ctDSE|p0*sDLTO1g%+L8| zcb(pY0Ua@n<1Y%6AqM(KZP#fWya{Ah_F~=zYh_RcEXP+{Zb9JGBgE#WKiVGqL(OBN zl~tCP+m^IEB%}J@Tyroe$P^*#-vSK`YggNAK*G5BXJ+A z-;w3e*9eqL{iZ#5d{#9);`85r)NI+mwA}bOwmjIo+y(NL1sUYMq7+V!F^5bBU1UCh zikk_zoS-Wz1q&%TpL6r>viP{GXNYKwWRD-2Pa3%Y4f?`f6RA(g@q2IiGh;#D_kD8# z+6Wffn1Q;R{}*R(0+(anzki>`Ff$D9F^n|^SEQO6OO%qv+~KljsYI4RN?B55BGrsB zO9+=1QY5kzMb>OJV=pb1NjF@`%FzOOW4ugJ!kiwr!F%?ZWr`fL4 zNDvgch`_Dz1cgTmMgViLhz_IDFY5OLFmZb;pB>a#hA|6XC=Ndgz%JG^ta5Mw9_Vl> zC8_hJvn^Vd3;rgfL5MU|VriP++7A6kBYRr>;+o<*8kF@tkVX+N=wBFGLm?ZuTAzADq{2J5G5-h?NJ%3n7T#y$a zbbwXSzZ&>)%Jp`hU?n>2@sxZ!P8PY=RVNN0|-T%HMneRwLMJ zcD4s}tUM)TJEaX}sHoiG3|2mVp2%00);pe|aAq5a%2E>6dcuJQkxNKW%VDjB2BgYg zdZiXfvl663O$?%@??PImR@}{KsVAdlawnPIG@Nql>+oe~M$t!rV^(#|`SjS&NmwxP%r!;WfcFUONFoZ(t(kQH8R zyP^Dqfzk+OMY#`P@)*$!amd`z&|U~%Tt`lOO|)%)te-H;n$DfK+N|ZUjH1wc_wG3Y zqK?RiUztpOaZOYzZCV=r&9Fi&QPR?y!5(hJr?PHwjuF{4<*;Gtyme#-_OQq?pPYI* z^~knHkA4?qhWyX?<94benaR7!p)p)tlL-q}j|JCXakYWr3Me>XucD%&%AYsw&ZxYF%zk4h?@<`BGP7P{-^MpHtmk$qK+c>wqrDWY z+r=1Glcr6tpy4xS^Q+bbcmfhvmY% zzX)ARO>Nw#&-knH)9!)Xh_EBZ#(x~WV6MzUkmTN#531ibrwMZbR5t(I^h%zC!4Ywn z(cjp!5{Z;nUgA_RHL%u=B>ynZ7baq@oDd>o+yKH4i;gCv-bdgtYxRLO2Tey1=&%Nt zRfeL}`^vc)N~3Dp;==Z(qCx@FaAlNp9Pbrv(9CF|RI#8YLeGPpZ6y90YDL_cR##&# zAhXON$(SP*kATF|a1cLRb?i8IbNT5vPki&ov%G^>$&ni5=tXSwqL+P~y&7j5L~hxV 
zBvie4)RSNLkzKzU>q)VCrtNl;lr%`v6SG?v({*QDxNt$1p3K@>+96^h`}&;sKq6$n?qA-`RO9=<&tTHE{E~To;OmMW!q70@d0ZM?5f0~T7Vmi;;Z}it zh^TSMX49-}29;z8fLQb#sm)Gee!(Aef_3`@p<6ILV@%*01cqdJVG(yPh zE9mV`-|q-&dj$w>m2vOj5x5DhlQHC*m$WpX(bxlc;$3#6pl`+7CB29rX)`{ z*)oorR)8dmx)b6IL-gK%vz5A$gAHJ#E75Y~0i%HUPTg%Ky!3(mrW za#T{%hOLGUa>)BMLLJ2|vV+l1=t-px6d^QgqdJ;r6Gd>?MZ?iGr_t`FR5DHkBda3! zk4F1F$nByXht?;i!^cYSV9vqFORImkfUI8Rv_X2uEdf=R0+!dO{7)kcZMR-Z?2S!K z0nj_iw4LaAiO&abm&5=~Q=NDs#ZAD|Oms;^JQ)r_?0xJjYLN8NR9|^V7ffUORV9oV zI^y{Kme<UrkLA|7Skx5tWRr)*bf6{mJuX%GvqoY3#UKHqcGjk4iP6|9i6WrSy}#C=dkqhQG5{o3Nb111zrn5ObBRW9~VVI8l}h<6sHbKYh)P z@?=o+IPxu3XRvEmCG!5)OfmPxmiCkGpD5{ao-(ZU*HDkcU|}UMNR@ix3s;>`itYN$ z_7HU!<(6ol=^Cv2Tb6WP9lq7%HBk(e840xjNvKB4JLF2GM6rl6WYm$zH9UsJR1Jh| znRC}=r?vY~scLB)yz?0u2$ygpCvP&$P8&_WUw&od{{7z;wOMe!twaDir~7W@=;tSJ z(^96Js!`4BMQiPb2&Aj(xGc9^J%<1YB5{cQ4RRByyqZMO>riQ+hQ zX8^(SFtP9fzr+m|fgH=W=|Dl&5wjaHdW+n44#F_Mkxe+3YF_c_DU_=PkdUIvD!P}t z)o@KOurPEW(vl)-h-uy0DE~%DO9q=+w1Tg!?j`;uQju1$i=2p~b7u3*F8u7z$cF1ZS9C9rz zhBJfe&e*|rDrfLt=yo$}!$wMattvf+GP;SAgh;jsMwy%n|Dy2QqhlFO7f5&Qz!w|& z`tv7ObhfKgMZaPqHbVEsw;yjs=LM zixy6wUgB7uHKh?=!_RMi^pWAM?^_JiQ2L=s^P+#w4sFON*})4lp3p*|Y)qitjDz)X z5IY5QZjaY7w-e|4LmwIV`ytYb47{*tApjr-8A{30agd=k9BC$ODlM-ChlyZ*;(WD! 
zpG`E!KnYna4@QQE{%UG2YZdpQRxO;ZAgAzroI}B%u z{MheSbcAX&5Di#%a^uB@JeI=-%l!SPEWUHA7x}+nU2{9$Q)~$4;MKjfY?Hf8dN?Mz z7OI<7&Q(o&aDeUyeJsiW&PU4rY!7W>D3yy=)S1$apSWDe|1-a1WMZTDWTKC} z**};2` z5PZj~gXQR&WpC28f;nE?b<4Vk+1n+4J)Nyvq%WKr@Z27PR!lv_=thhZD)2&+!k4nZ z+X|VRe_)GGKRxy+)eG1CE_933;YITpP&#nIlbqkGf6&|KziIYR7f9}$QU(?=ftZKN za`xuVFcq=jkwS5cpc9>#5uGE>RpWtgANO%*gmT09j|v7!WNz;BvcZez&o5QKTG?hj zRq+2+@vSJ zlJASVSP#)Qc`(mo9$-?m`t>hKs4ANE zxe}ac1qGt|=pE4CwqS`BYG%S6weu|Bb4Nc!vCVH}~ ze*__zdjVL)iJ)Shkag+OfyMsQJU82N_3kU@ou1yIYx-%V2xdjDKgRnul2peefz6Vq z=3FrWZX5ylaaDf`y^={jo8upy3O@)yT5FjdaLPiKr z=}7P#Z$C}!#6sp$1Vtv)KzicGqh;i?7L|sW75w>G1HPU>D(V-6-0eYKOnCe-6frkw>| zkOuy7uRD5I0j1S9>K4n0_$HBQ4N(aaa6Py9Qm9>HsNtWpUygxYuhP|Ud-G_Z%s6s6 z?)Rq0eo-}(a4LpV>7`V$TU)yWa?Bo@Y%j8E-(d}u;X6RfJP>L4Z8o>Hy))mJON*V= z>#O2y;yd+muFO#|u4zAXDAg68Tak3)$vEh_Xai&mtR^|x<|>X1vsSNna}i`7H0*1p zG$*z;p5U-&+^uV$D&C{e0`xv_IcvT&IH`|DCGd;s@sgeRu#1wmP~luB5kS&>!#p5U zYEy)n-oDm$>6PJY*nUd{U*vi~(a|Z?K*m~Iqv)~k%46JCeNEme_-8#%oNiGITpNbN zx>o2T_C%6ppv)41)0$>x9BelBNz8e87j?FX7XQM^#>3YE=-hDY1isg`>!aRn99qf+ z(3l-(I8~-d)UxH`<*kE?{|=goXCfddU)|e4`f!T7L!db#CfC>DO!G%>-dyB%fjOn| zGVH&j-YfLA`Od;DG3ss4JzoLHlW>6*&kM?_s~k zlP=gci2cQAdy!_zjC}Jz=tPZ?0({h096{=N%FPM)S&ng=y7`V>Bs3&72Tap(> zxXI)X5X5!rFbilal*TZ3B6Vs$8f88$hDq11gPC^m^Ap7&PqC%QBER11bJTo&cClOM zFTNoB7u?-SF!w%H2`U3i+!X;Kz|G5$ncV6HMSmso<-qM~4KE1&EtK!-UQ4k*mciq~ zf&GIMg+rF<5w_PU3hkv2w*4j|vwWGVqA<=tSwUW-uDpH!(J=jOF>F~G*wC(lOT^;B zeZzcmrew0(&uM4P+JT1i*0JZUFTmlI&iNEA&;@gAKbC~dWGy=F(lFQ;XPk+MU3ID* zfobC3N`ZK?A>D8S*uaqf6jhA+yGMnkG5G1rx3 zX^RPjPU>sx9UpstBV!lR0~2zw!D{8jialzuZ0S-tG6yB=MyUS9@4M#OkLV=wi@m^6 zv>*59{M!5c#S~F>RUi$kjXINJ>SUpUdCIXN^98hI%e+E(9!ZOg{>03>JH$Pwp&Xua zYu=T(bB^Vs#Q%Wti5oqPf(Kg%yxOn1l(i=LU%_5(E>2svW)2KY?W}%N#nl=OJ^$+9 z!8f~#i}oU{e*}zWR;JCG!u>z2n0LY!4)L(pl01J7$1>95mbWnRyP~*E9rdeZP?L-8;?0YB0RQzOKEQ7K_$aKmidlSs4I_r;x z-MlxyHTHG{?V5DVdz+jNSaf9ClG}?T5ks7t6d1KqpOs2})R&c@iy(|njMWG}1ozy# z_%KiG8Za27;&w_7j@QimN!vi#pG?yz2!tb;jo8&=CG5f2nRunJ;)PN{)2A(W4kw^K zb1c6F<_Ho7=7HDL;5 
z1P22{XX!otC(lplyucP*SP6o0)5Mm?hJn{_1^O2x=oF#|&`7@f&HX=ozopmtwkRy6 z&xhzf^)Q-pZofe>(-3|+pNI7&B(3kY)6OEotuPdVB968@5lkCm{{79j&&eumprp6H zKed(F|0$;r*D$1@`b2o21Sp$>k86slNzNEEW@M9Gt$;w^C72j(A)(7->otTd!oBvZ z$_d)){HE9W-SG9VMeGWWcrZgUguEjEM4vJJ6Dn{$-U3WIz5ei02Is&w#L|#)ycI1! zQc5O@;V7@Zj2tt#XEbo&Z2&NIHFKEv001g;EU!iQ2k&8~6}u|7nSp_Uw?9j(w$o$= z+rf{W$h1VF#nbh>jcC$*=9C{Z`(x-Llek14QG$~C2LDt!l(L7NiM+^}2))Q|Fb^oy z%>_U=yw?!2w^8nkX-KZ5mtJ&w&Y9b{Z;$QOK=J;aajF4t7jS&kRa}nLOH6k>kRmQn z1RPE4A03g$EQX(ku~vc)ww*cQz>IT)^SIy}R&W~jUf_R?JX@K4F12S;cjj>zAk1TM zR)7e>#6MJ88qHa?(kdCQR_R7q$OzuS*(rnbh7KJ%LS{kKF;l)n5AXYfpM=go`;iiB z1Q1mzBbs7;w7XzTX3CHAppor0`oit}$5hn7m5!C7ijsLqmF*YOl=5KlB3C~5*nR}O!=?S*A*C@g z##0^))6VQm9UPF=2I(|jT7M?B<3oa_7GxVEq!rZ}Qrw4X3N-3+D;kS0SGGa3_ogL? zn#B#DFL0W2WAUZuB2LD?DVY>{vwgg{ySC?%Qtn9;Br#3MDZ2eGuM6SH zh%i%rQEOYn?^ye)CsQuPN{m|Z7=Bvx47W--h8L>6Kvb}VKnhtNDpHTB`UFUF0IdDa@_@p;DZ{S9mm?l-N98Y)wC@vbl^bHbCbG$gcm_=1 zCTpfFUG}9mfjlfyL=hn}RS?x@rdU5U!XPWd?=2%AWhczA07w}m6LdSOpU%NKd*(~n zRK+Njd<^C!@@{MECj80pwvyDW86hbE1-JO|*>nn-Il#sPFnZO~%86j4@1=a+evsqJ zv30aTTJ#z5GH3_v!2%Z{7}K>!Y9;9`Yb%z)dtJ7(eT}hzV<2~-pN1cV{Y!FDL+nL! 
zTS!YnqMa zhb2Frtdxz##nTcZ2ElhLkB)kNI|B}1mX=xxhMSrI5|qoLvw)mw{||rYpb`15yxOdj z#y~tJ9^PQQu*@r3Q)KK&slT#?)Z1^*sNNyMKhe?&e}9`MQqXtNr9)X~wDryNSG6T4 zx8GwEr>~SYrnxtw4N)D7lNXP*H6D!!E{Njy+eOlmGUBfy=8JJ=w(Mrc2f5lki7+IU zI~x??Y>IZtVp=s54&|FDFW?mHFI6B#$yq_FH1`iBP2 zcgZ(=`ZLHQ{tM#PB$F)QHLcuOYay=8k_l_Ulx{!)=VV!a`$fqB;X#pT8LJ*5W!cW4 z9_T0e=!p^G$rq9|gA&Kc!)PNd)ua3D6Cp&A7FNIe=?;QE(IKP8Sp*YcVCgOMRExY3 zA%<|8Q>d`p%zk)*e>Iwa_uKQWUWLwx-66I{;(tOPSGbsR<0am7th2(!V&)b$7o6AI zPo8X;gRGdCw=f?nH7$-O`LI$lN{r^n6Iv{CUnTAnR3+WDY()b~3MG;QWB!Znobc?# ziUzGl0!AN(frtTKLxAken!|d4QdQiL*r_nNe#1WF6QldHN>ndJyU%2*=f`eIR0 zvM+QyAO$?>(@^tqB-Pvttj}X*wS2g|(hJr6c5#Xo_k_h$)IPo{5lrqigp~}e4?T+v z@1kfq^eiQgnpX)CqSujmmF&>Q$M(V}iGQZ(G6lT0#gLpn-T0pXA=#MnbX&b1paYP0 zM%sS;rz#~HwOme`4xq&SAfm0jIpis(6YDfFR=(%&Tp^N&;8b|d?=52VIphzRqUL2x zdy2SC2U@e;eT)APKEU|8t9%Sn39lmCY6Q$-KuZAN`VR^|xL=BcKOw~owd`q=3Co|X z+(j}`N6a?K?3jw?et8=*k6Jmbtzg))io^*SG8nSsn9xBYej~-5@_YxB^N;wt!NjxZ z&$k6abKsXIezAi+Y^PoF&x5gSJr5u+ikbf~FK5zhHw#ew6QT`3-Ia44)q)gJu}-sO z$;-icJq!&QeOSKyaRf`lo#B*7jacYh(gy3|>91ciw=GPkF$fkMEuSyS0V4h$zORVb z@$-m&@1sF2AVUlA&*PB@XbPReWDju0^Bxw7W%V5PSNb`y$_m=}@xz8)biB}*)xr#y zB#+PJrf2it>}qO0T;_Y_L36Za7d%qpLeS_0w-kt3AQO!9U+$pn`k*4d;q*O6 z_=$q}@y=^_Hi-QI6!&RwV1!Ag2xvVI4g94f|Q5ZqeOOx3**AvYKHtX_->=(Q25N^>BNzR=J>1t+S$fV1FD#m?jLORVz7zOnUMaA**Gg(0 zku(aeNE9*q`4=$6HBCDNWAH^e`NOh#|FC$tjOv<6b5@UKg!PP%8m^+Z10IX#p=jKQ zzh51z!wU5(0}G2t-5ww<3?cCqogFg@=_12F?`$h0p;Eqz4`xv(+!5a9)Lq9xW-M`H z3wn%+VbV1n1535Za%gk2MxNt$5z1PL{b&*eR((PhdHX5VF z2f2UB0IJMmoU4-pT6r`V1{<#K$SZcHsCPI9L0$zLWZ+OR4iwZHEer7w)?SV9&YOCq zJ6Nd|0CgZ5Aw-xSw9{f44tWOEBxN&~sr}gKIUga2z$p0BaRG zDj6U|NCoa3kk2}CP!qipIGEG_Fm6Zi#zFz~rgaj@z1V%;cjvN#@BS(u9gO5dCMsz0 zuaNu92TbGLL~0Fkh2yZv!dW0a6e3S}RbI!$NR7epA0}}+2eGjG?#$F@ zIwz*rB=23y%GzfX$HPru`K&p#wg`U&)5L8u-DZi^yXqe7Kqx~_ZeP)N*2RBO;agc_ zFzOe6BIIuP>H8`a5;m841sO}8>R;L#EN;YA!?ZD3@7AND>wN0nkHXt%;sBa2q!}ouECq zo)#1&Q-=t^j$D{IbLL?tuw+1yfdk#5y7_x#n2&Hm6=qcR79^>NUZCdLnHG@cqp%FW zf=>PJ2cVaanS16F<;64dXeFaaz&bLbYP}>$4#cE&{$|YfFb!XjzVV5e>B|8$yCE!f 
zVteleB7v8n5`0_64?rA_vQYm4r7?y~5x?{IFS(9o6A=kLeue755%i(jQTlLJ^gp_4 z($_5shfD7G?`q!BGM4eS?=jqnfoQ}E|HFHPO_wj~mrJ%nHD3`idHxk5!laVd^4CRL zin|@BblHBQL73{nfgbqw?VHU&XLxuxvkn^Z_@VJ}#&j@gz*@b^*Td<H?yl~g;7X){$9FD8 z*$q`iuXMBL7rguMCTame-t@$Sg?Vvc*PxB51+q)_4aw6QtqP@~7xK>r2a3-(w+?4dZwyTPcHz~x8f7bm$mDza1@qqGkqPwbXtDnkV>fe=)m~)>dvF4Z`f}(x7b$>{ENeRr z6mu=2y#O-orfZ-$SZF6v?TNqJdz~@5V5-_BZnI}6AOXAyrK!5za$$0y5x_od8WTb? zLz2?WhR-njBV1%w6g%Qo_JCX-Cd7m2lOGp3)8B@XbLP3Cn3l|Vjb~iphJ{nxMR7kzCt+5 zJF#TJC$u?eE4t4*8_67r8VjIagb88MDXX^?T z-+Z)+ycl@`7wR;WXRg<5@CIlzu;&mWMYnNd8=@HroH zT-|i!=*n7pGf`I2ds0mbe2T(i9MVAZ)OUAQimNWzlB15f-6xRg^8Q9MWKtMS7Mo41 zT|UaILr45xPe#Zx8-TC%XWIIsf63SJ?n1ocb>lXXP@|Qmow+wN|6NbR#ezn!Y7PBe zKqtjJMbr#B^uLliJSpyQ)I>@lF`(!v<5m}aZ8@?-4j$??1gh{wQ<^0uQk?O4oyeTx zC^tHRC-_sPiDQCBD>%TZ?P^#}Y6v{dp?(RZ01~das#c0;oJ3o*k$QSSF? z?cW%&YUP+4G0GwE)@+w%`e9oOxh>4!^*zM^GoPOn`C1TCKLWP%Xigk^$Ec!->LH0w$fv>^F52 zbfm*DozCadaY2r;S8~R)$9%v`>IZ96bFR*{A)tbT!kDaB0&+9Sc2un^O+vR)ZlNgu zS__BScIKP(1uhY*dx^r3NTZ^zMEB-FB-2(a>8J$zKy|b-p;%m}#Fm61Jh^?xvY{9z zOG=Y*MLF1qUg^~Rh(|JWLWe}0lk>I)bm=MIM$Y(fP)Hjo1})n6y?Dg)d_1D%O5!ey z>1u7+ZgQJcb#i2z6d#D)hU*AsBFfu0IHv%%hJd-ZYzF5zlq}SYKR}|1YnQ3|FFwy7WJVb?=i3PDVc|L>X8l)Z3G9LF zQ2FmRi)#%*liSqt$PBfjQz7&vS^81YA2~1VZUsel)-@!aJlSjTCUeu0f!eeGppOeB zF~|(6ZikGmV<~aWGYgK<0+k>Ec`{hkyqf~t`z)HkC#f%op9QpS^e=UWmn9K!ZXs$5 zu)O{6aQRz1jWbUhG!$PXtrW$}e})R2CE)@!>JBpJr4&=)QtGjTmZLS~h96ldc$@fg z%0W}eeh$wYTlwEMZ0|oec-4#!wu(_OX-a#w8J^MS7SfilV?)RRD7P10Fo-m$WYC-o zmWtfptW!hkQ0Y(@Tiqjm%}D%3oFj2ZQ{=g-O}|!{3~N!sU!m!`PHhs`LZ+0|m#Z9( z=%4@&(Z~5MG=yqezo>4oH?uvZ<>gDxJ%^dp?I6Dub}^)}TMLHwP)b;Urk%tgn{wKm z#(P}lj*CGcS*5kxNO|Yr4dNUpc0J(Fq#`*kNV3S`?6$sF%17}xRDv-0qi48mqtK@W zOSUvS>M~r1sAo~0|3DiXjaEdTUtv2|$#Ly9Se^z5Y%QiXAmeg67O{a9DlP~|KUDak)WWwqzxKb?SB!5sZ zAq^JK3M4YAM1A)T2kG+#B$G%;H?oh&!Xgt#Nd}0o5K|80rryI=lAc<9*)>3^Q0Qj^ z;kYUri(SC%n}NwV2E#KfIcLQjDfA5ko!~q3j_yU)b`3$UxEhLn#GwXD-$PmIjr1NU 
z^)+Azh3EJ&1LFmrp~i@0bZXv68f=dJNEX`{+J@fIxHD)G>`)S|qp_Yg2BoRxI4bHz&D07@zplikM8oA}$+@M_cxmOy>G8b^4^VR^R12yIdY&!a#E~Ptk${|yBx#CiZG>BC?Ik!O8Keba zK+RDZ;!EuL-_!}nE=x}zDeKoo1r}UKsI%v6dF%<7EIDehD?WZoNvqst2{5Ij7Iy5hOI=%*gAw;jQzo+4wa!Sn{oYd3 zJmj>Ole6AY#Tshafz2}oy(>5y?vGpu`jDf<9S+x@^kS6YF4*&{lyX|6fOFMt7)Za$ zIu|_iftuokFnST9HWbG~Z!v_x1juO*Hxc+0hC;Y*CmU79-6W7m9?*$i1VEpQ&s5za zzLo#PR=zje^P27A^i!ua3@I}naK{{|*^&*xxrmPgWH6B0Vib2k z4$h+yGv;O{g#G$?yEx%X36E%TV&|v5^TT#Dy=ziKAVkpcRK9eLxbol) zk`gmw#E5HRTn%;R>SH*w;%%0dXdj6k3}^9it{=0xcs+vL{_i7*)blji3XA1vq2XHX zdP+q!o>kFkEpkngPsTAsH1&|@0sdHq$z`7aDcHAb$2+N*Api6$pyWr|KRsPR+Lckb zP(DUEhwu0<5!Q*;6{H9TRV>2TUFDBF__@(e=}`XT<P>lNH`TbO#jsbW-! zmw;+|2Vx|=kK9@sw+xvt;MJVP?`GZn(@%}Omt$A~@!n*L2oQ9u#1xXQVG4gL;wRMu zxds|ziq9=j$s`b;*KrrnU<8iZ`0aN(P8_F%gnU%^eg}KO%G!m>2XEp511zUHE~Rp&b&Eh#J}WWuKC}=T_wb zyg?4`bg1DE^<*@U;)!T7wVc!D3`%D{U@Od^+L}@NGT!`ai_S?5BVTMQV`y?JAjFY< zl&X$$Smfgzp%aHlIp&g*E3dZ|-l7 zHMf4We`L&n!y%UtC*&E8XwWw$#dLgxTUpN5tz`CclWDcX17956Dy~|aY?m)>4w!R` z0gSxR&y;3zgpgK;c*ykmBeXy{2(C700O}WSC}Ss=TzkCtV0K1wU`=Ai!pA}&)48Ob zL~iKFlT1$-?9wFmi`zb#-=vmBdHGz`_Hbj#jCP4|*+2cez&Az3WgUuN4QXz5f1M3= ze9z3@_G4!;KAIQ$xzcRLie~+WxkUJ53v^;=gZhrGci(j`B*{q-n>>6D0V9xB0lo?w|rwx_)CsnobQrK(d3I% z*)4J35l+SO|9fSd-V|mUEi}IGxz^72i52^NW+)zQxt8U+S*Q+D89X(4InBQ`T=JIo zj``qusihGkVwg0@*<}>6myoiiGnpcIOw7I`H;w5lmi5D*N%OSxwZKOQE|euCCMEa~ zk!>r(;+3de=+8(fotG69PNh@FwyM1}CQt|6nam@K{Qs@~t-am64CFooFd@kdz-Rn7YhROK1wi z>(tymdf^a8V;o8sDN2A-Pu9RTpzpK`5%^vG4x%#H740)TBW0vc zo(=BF6UD7uQ$cYX_-sXIjpUvr;n6PRwGM%f#j-5$$r9-#U)7TDpLBn;h^{J+qr- zajVQc!!9ch-|ULzI2fe88?m<+`}XgxA+@t=E#mH=In%1my|n_)Q`Q&vDr7iKp4%qwJu%cg6JsL9Si4`J*<^ zpogqti5O}L{5r(rT{h1@K8K0((pyMK_w3u3g}g5+XZaJ~HLsd0v2)%PrM;UR3UhE0 zZ{NxEqwcL5?FZ!|LQpYF-yQuK6GrWLXYihBCKCvF6~ac6p7q*r)lQg{eL!21>}n` z8g8hits??W1x_6@>_Y>rLFH@W2FtD0#TnW9+~S1wjZB%jb5OTdXTH2*g#7kZ5VrWx zr=mN&j~sta`XT){P|`m+yw{JFrpt4h{0*atZ!Q|d%HTyu|NHC1?_Oh>M#wXra(t0X z8OpaZjW&99lk|j#cG7 zU70rR_}*#b8Y#i?W98;`n0IKb^f&;ArRbKI`Q7;8x1~8uV{G|yv}G3*8zXg;FFiOR 
zeCsu8qqVLt0U*Zc)|yUeknVnk$i6VJTG5)R^wt{z=bEKd|a_IE>*tcZQ94XpbSdpC^gHz$PkGinCf zb*r+CGgG|({>v?5n{9x(f10J+QG1?r+229+FN3OCe{tj1Rn>g0C~HQ!G?{MLaq;!B z66Jd{4|@6 zJ$l3}8dtjccJb@{qrIo62fMP;*>t0H$fSGIXJ-ILR-x3~dE`jH1z4*|qQ{2mTN=SB z7R6Fku6YzRM@*`zUg?Uiz2u2ES^SM0NUW48g=3E75Wd|PUy_4dskUM@5w5be$Ival} zpX+0Xx*=_`;v^GLu#M=Jwue0BLW0lt<|xs(ciW^5s|wUUl#2qq-wMC&cwTM>c4)2) z!PL6K>eoJGqBq6#AkRr201eWtQVsWF;#fbHr*>LhRb(Hy5Z8d`=Olz<8?{AK0ooR7 zdH45@pez^(_%ydH36`srFXEf$=v<1K`{g#LpREjY+r+w`#l)!mHQmdg4r1<5!Gf|V zZm;y+h&kZ5T4}?gF;X+>yNiLd)CX~^#!;-Ky)8AjqUiPoEO*~`IN$T~ZobuoKan!Q$^ePuHI zw?Pd9x`MKf;*~#f=9;B10of`7Dkm2wJW6=}LuZ?m1zVTDxWry6{P!OXVxP|=)A%z) zbMQ=aYq^H-H-pHtKA;Rc+1fxVd+LXPn3`}Xqgz>a%x1ie`k>4689xL@A@;M5KbHLD z&q*a$p*U=~GvUcC2EAH*kH#Q!z)R9Z)|kMm4b)#<$)oQ5isl{!`(;5z&wQoWZbM9H zIyP*$4i({j0%ayJUFzj-Z#+Fcs}|J1Sde-*#3ahnLg|}C!tHA~_<|jE+-4=X@louM zNG*nbzuu&#FfO37dy0d{#$MeR&7Yv#!E(`8hzTyfSr!{K1mwL-(nRrIiT4R7QsDzt zqsg?rOB9W;7lE^fH&?0Eo#fofnkO6{+Mk4>b%~Jl&umuZK=aJr@|!IB2z?;kDY84e zFJH4=H>Q|Yvoi8d6ytfBQG-Sz<$7xU+wwuxYlGf)vmZMlbRkVk>v#|-rSCDamlcox z0sV7=Pa5pD)x+J}bs8SmRGbvn&0IUMUz;h5#@1C2s_#Qp;dN@kubs(dqonZ*-gzLa zUy;VYK5{Nq{It}xOWik5@G}YIK)R~LY440#ObIre!5VNHhdp=ddaVwR%>#2*G*q5# za%POu2^3Y??@5#T171KIP|B48r04jT$2j{l(|59?cX|VCF-C^OO?=2Rn0J zqvU|q;O@e)sik{JoSK=`V!&PMZ#0u8)A}S)nO6WQ%C@5Q>9Tn7Vv2itu;`Fet_as(<6CsEs+)#mt7m}Rp@U)OfU45@}y(OBAtl!c!@`KSf&(BJj*|XN>)waix)SVQx;*>c-z(&veV@)KvPPD@xSLC{JR^`4z>4-F(&`D8RXKpnF&YV|ktd!=3>{SWd~MH=xN)c+ z<@IWZe5>FgFC{mpQofCEb*@Qxyl#hS=8(~>HoKW8*lHc#&D{H-U=wp2G=b{xL_dlv9M`xcqZpIOjrgg=SPR?pHKChCcVR_>N_iz&T6O-rPo&-<=ADMmC3(aP9gP^;Wgd`m9Ivy2JUC zOx#?EIIBAi96&Cgf8?fTVn-}dQ&GlNiFE|wRLS~1CSGZuPIaZw4W8LvSmd zGq!4XZXx(|fzUk&uD%N_xsXSYXwZ((7_ELdn5|qG@zJcNmbWk|K>R)9l)JQ%`oIoT zQTSsQ_2Lp@ckb+d;_;EmX1LTyva2mM586b3XR8Gwm!d(kvDJgv6)9A9_BBq@$MBj#CQfRpM<77e2aY1@DKCXa$Ui*X2jp zOkGLn$D~A;@)Mm-9l`PO4cf)?(j*7V-z7*tUw(t=KVDE6xzUjWf|#Pv-YIrdFD5os z{?PY|eVbBR#0(aFeHl^MQ~3B4(>{dLTm0OLqh+c|hK|<8RP_rb7)*D3w$ePH3*H8P zTYkLzhrm7n)cb5su$qhuNUFETm8VeVZkpVYXgAG!Yd7EI8rN@ 
zd)siiSwm%cqq=f~>dS-$+R)VO*w;kmBoPHWb)mg#HXRG*Y_!QyXot!$hPh0{kJR`> zxB2pMQY=e(F!#W@apg#)25?s?BRu}PuH<^dcpScUZ&sVmldXUF>zAFslR(nluhYiH zu$q%4K}wYxXVftbVf_lp|BAp13j(V*Gxk3y=+&9dKJ+8;AXoyqq)^-O7`ug9##McP zw}l96z}-1uXFZv1Z_b|%ij*?S8LOL~Q1jE{bf@rCGnh$vJb((h* zC7lkFWhgCpG*w6Km9>F1I?{`bjOx3mY9&dyzE=98DGzTUI@srPiqqQrX{Rb4Mv<;Q z+QZUmJ!|*keSg7*9tw`T>Hf4vd(dgQk&{Q$y9;KqW9QDDQDYk@6YX2YPiy#z$G0Gv znH=@;|JDPi~ zPK4e#sf}v=1y`fQUSi;+8-tEso?c$nDAb&unAi2nV?1S}pk?@Gi~wv-Z2cws#7;^d zUco~aA`00>i$xx(k7S{lUrK8tHhCEhKIyoQ7DPa^gxMoWZeIZDH2&7-{zc8Da@&`w z(%zd438%w;=;rL;=ll@6k? zK?4}H$A_%Jo%r=2fSj{TnO-NyJ?r_5SlnHpp>b-1r6|p~O-F8Wt7AI}xYd`Q&CPZ* zl*drp`dE4F6sxJcM>TLF8*A>7!r9HZ+4|S3b#Zg|ejnGsX)M|#5R;lG-_#}kbLOS< z1eb`OAzdS<1^ReF1m#m>mrC1g4Ot1=2qOFe@;9L_1SggK=bv*M)?$=*+&RQZsB}p(@ z9d3}HP4DTMAVF%|vL;IXRi5EqR+CkEw6ASGe{ayB%6&(tN&(eaMeTVy?xBA-^iWg` zVlSD|ar8$@Vsrah>gU96Qv>R$7dK6`DF~O`ff(jMpoD*^ZUp{L=d!l8tSHvDEZoiW zj%?x*`v!9>HGc#-tA8uFv6^i-%y81vG&a4I+lKx@1w+)22>vxrf&ew-7R{ysi#@|P z5TT0DjQRkk-)9P8_}ErbmLBe=|H#C35qZs({l>2lhHODHGeGizKqUN6iif_NB)o3Q zVgE=Mm6SUG9#`;E=}T#j$**kmeiPUYynN?-u0wmYZJO`9F?s;(;FS>^`wz~`${$*R{V62^2=*PimV+{b?jtZ$ZjUngF8 z^+1&|FsU|_(s`kccq`M)r9lykgZzV}sgx`ty{ia)jHvf`^)<4V1cq&EcWoy+eSrW- znPVmSKVlC`PL|^C(7YnQmH9UV-#qTn$C$R{voE_}Uj`8^NEX4EEniA~Jo!m``>{J9 zK&9rmMrb=9qK_DTef5bf7usj(h0nv?HbE4ZbL`u@;roDCk!2L*K4-siX!OiIFEdj{ zUI;aatkuf|V)w|BSQkIVYS5I1hK>zJJ|HivCoXkVONgzVt z8-J|MQeZa>&kK@3=Xr_ZDW+iyn{Ocib`Kt>c-Y92TXMQO=5n-EA8iUSO*sP|SsA2JAeOF8Fg3Sd-Mrg^nAm8srhu8|3pwa4?2A(S{^o5 zZI|g5A_aJr=1FQ|W0@o5<>Y7~TP&fiP%`j5h9Tob4}~#D3Lm)(k8wE^@>F?_P4&p; zoN;LwIQ?^Fs~d@Dfo=yqac#wzpyPvm$uBvj0=h`$EO(;#&7GIM!DI#SkXPS6Qr+AK z0Kbf|N_R5o@*TO8+@jKzZKhu2j;^C1>Vq8JPK1P?cj#w&p`-6esn94B*ssylUygyF zQ0E9}%)$q{*?|_r#3q^ASjt~n^%N65ksHi88>W7$ov+&dh_B4*_7=wM5Pm}tEME$o zo#5%=_g;KO2PsuPxa2xeNvFVI8)&~<)LnT@Jv5sS=B!OQ+v}i(Mg8lRvp~N`#1x_W znTt_7J+o9I+ubO&ctv%VH2Lgk+Pi%|h+XRkNS)RstB8$U&3_)Ry4m8F1`{1YhriZJ z%dzw&Vzr5>fs}j^RzaX)BXlR36(QBSy)p*$FU4}`rsc)SWe;R)(h7Mh%vsIK_S2f6!CQP(f 
z0YTPU1Rc!kH=-)J^XCoI-ET_~#@otqmv`QwusG)Pu4F|Ev&Byk!Bnw~G<(v%R0Y+9 zb_w*s87X^_GQXPeswy%qTH}xTK;`XX=tPr7y9HtN$I>=(BZJQGiUsMT>AhgqRaNOI zrfX!cJpif#eg;_`l>hczbv{kkud5p=jt8%ewwAncw#8Rstq>^%k(M+8Xtx!4-2Tq>3#=?i& z;;!d8N4(YQB$^8sIb@N%yE47CQ`oAY_6^c|q`)<~k#+^9nS3-`_6`R#3nAb#0|0ww zpC6xupa4aTT;}hkBMc&|LAYv3wUVUe)wdggNwM25cB#IX3?uK;D zbLqiT3D27c0CDl!-^9yBXf;P+{Xwjv5yTlWYMhCK-9f3L*wmQ}G(@jxVQZ=2#4eQ0g*4Z)63Qh(2r>W*&Zw>aVzYjc%JNr$fkEhqF0-enq#ph8 zx7FvUvZAC7pZ4ydbK)0YC<#y4zy5)Q9_Oh~X;9R66?`ARz7e-wjJ4=gw$RUYwI6#Q z_BLbv&`yP6`Zg!naiU7JZ8zS0inGo`C*@F@VxTTUYlot^ZQ}1XgS9TpgG$eRAI!!__Aen>t5*B8Jjeacm(>rHr;l`(|6!<{v#BE|ueXQ`ckPN+Qgr ziILV9$-TPi*-;aE$){n6fS?#ASPC(kCV7m)1xjGtrXM;`vNrNqoc{V`?R9Hug(%MR zO(0I06qkQ51EQP!kb_3Z+pI7WAmEr{=ovWJ#EO+UPp&g&E zpSc$HM>ZY1UFwzbXB!5OS%bu;yOt&X?2ode;K6_P)^9qX!o9yx@z96~cb{qS=Ff#} z-+2hSp6$8HFKI53S*s6}p@FE>>c~6&CW3*Eu5rQ(CVi66~_N5qYS*VB5_Mla!Z;c*%dHPTP0XB{Q0zP{(@1%5K+3T+C)xu z>-Aw9tk(aQ3L#JoaBt+f*}i$SVbI&ZThLEDY&o+W`cEESac%Yzk?T16%@Vlm8i3}` zP<>k6`&Y=-LI=fUuUhMO<=*&8VN)8K2Etbb4h4zh>T}7^=IEc z(CypD3xX0mj@qn|7>r#r1vCuHVG5puccSmVi`nD!Pw{B=8dW@df6K2Tp9~y2+=B^hK<$VA8tj7+f;2hQQZ6ezz{DR33geepsf@9wg@Yz~OLrODTBkiSDha`Xhf3^xj->71!Sx_F*eah>Z zxf+326rs8Sgd~BEe{7`wL}{)6iL@3QH0+%$WEWFQ`|5xAR7LXJ--6rQ&~2?vZ!lp% zlx#w^)8k?KztR(3c%lIxGqFahEzb*Qx63!>*#6NXsi~5vLp|+I`DD`f2)RmX`ffe+ zw?ArgoM~pt%`S>I=HL~0_FitPh-zTTzu9jYQ#tk3?uEy|eem#1HzatobuCCEhR zG62_swKoe+=R5S$M+cUl(*%I8$kb)mhYoBQ?07qw(Xosg==;n=b8D|mo88L(z~J~j z#qXXhd4C`5R*WOFd-A4>xp z(ZX@U)mE~5@;1GOSILT&hyUz=7$SvNg7MB@SuTC?rW}=~0}vtT5YC2`{o{iP7 z_g9a^P-BAz1^#6|@YdH}PxhZ_UIAovsz!fR?SIz4HE#0V)ykV<^YnN5rXgK7&rQ+l zOW+Q(Myby+7ziE8=wn$tEIm;)H9P)fK;_;IDr&NV=P9qg3-q0RKc0-VTtYbiZAIm` z;OxJZ-2Hkr*wy$5#PoL*j-R4Ems zpgFW{8mqs5`BECo#+mx}Av6HFqqWmq`ngflgy@fzs5|n2M)fO{Jx;qQ++7SrlNIA( zleX1Lmp_`b4g#ps`Kj-?R`$|qiV$T@v>p@km>=TvJb7NjX9r>yo^O{5ePi-dybVyPy`lt;eBg#X(NB%D@UpHE^XF=a4NS(Aa%0 z=v>mUu?eP`L=>d!{eRSNT~4!B3+BX-wGX#EjMuMZ#jZw$34pAe1YlQR`#S=9zW?|C z!JPl!CZzxTL2e43k52Z2@yO(@P_z`PSqySR+#S8;&yu8586~D6>PwHOi21jOj;VE= 
zjnTo5tP1?%;hC3m1KeElJAC*`eNAQ4^bUCStfTMcUg#$N?w#q-`E=yTY0>FrSUo@cW%a?W?@UFq0gHn1Oq|LKGafhJ;*O|l}_}&8ZGlQJ{*RblpD=DDV6SxPGLRJyNIE) zfYjRed0*5keZ3?Qm57cJr6;lP8s9oT)FpopkYmLdsf7i<#|^^s^2YZ!CU>v;z}4H4 zLZ47@qL{KYqXyh3V!%-bT_>i0`fG*zang$|}8GRI( zq=y#F^M>W0@84fqjbHaecxbd->XfH}_`Dbw;2n9jcX^tqi=;m3A7J_ZuYdgqZ@l7$ z_nBtnBT8FZe~)Tmc6cSw#*#h0?$oW|UdCE!5dP2a@&Gd&-F*JnI4G4tll zqx^Mw>hBU^1s>A}7Kvxfedy;w}Kzj@c6*ymXnMU zZ8+>;s;K+)Xq4&VK$i%SGMaH;RN0dHK3So1mxZ+B9NZ0JOToZ?%@(YB2a3w14;F<1 zuaa-ex)69(M9@*~cWwW$m1hdmI~XU#Y`&Y0`Tjl-{w^^6g0aQ30Y5-Ml=L?GGgIYb zV_m%O{1<|sYx+Tu!cVf)Qbzr1O0MZ4Rn!|Y!u-@Bt!*|RepA!=gQn(xl$Pz^-IEIW z%AgOGZQr3HgZ#|on)x>&ce`CMf9yX#>^&H-#HOlMoy<->*@)KZK5H+NJjOB) zFNp%{XNJzpX5l^H&km^q7uTpfFD&{1khMuO(>@f)9{Iwh+0}trmBA!u3!@;p5XNk@ z-|fW(ZlOVV4Eb=FTdC|G>>Yji)6%BXUJd?|W8TzPaG6SZT!!zLJSR|>)CB?!;tf|+ zn!R~YP+%z9T${Jg&eUFwJRODty&Kq%*UxA5@1W$qy*@!*Yak~AnF?_I7W`}KwzvsZ zbzcNTPVsXQqB?Tw(w?$Q@9+5Vivj8`C0$u)Q&3Irpm#H|Sb}^r1D!kSHdzbj^@a=l ziYPI@JINQLFru1yav>@$dqCxc{Pz({X|K{98-Ui$KA?DvY_#YnH|WPV9rkr=ndBBiE@4|>eTxt#i@~u z8widnjS2;07Q&(RGSflfJBHDk-qfihqb+-3gENcT=(Id2#3TqaVe3Uo5MKOYIqW?y zS9nqQM^g8F5HDhOc+2Z40nd_)nbaC2)gKJRl~(8p)K{#F6eH8g2DPwY>&fTeED^SL zbDwO>ZlW~)`9nO(Jt>b0!_sQRx{}tU@T{U-ob`d20=#4Ox=Z>O@cBA+k=A-tkk>NK`|n?zFx#0iBcm3z;+xK0QQG$W(wX{LQ>f0g~iJp~QR zx+3~+GMfy?Y+}wpbZ7AWr60cS{)cj1UFq>I!qGYA+#BvHV?z!6ZA!4YNcz2AUc{-$mWoTDP;#Em=R!GjhnEk>%` z9@tBMd6UE*r746yW#mht&8=`M5!FIq@5JT~eQT+-VtyIyd%WBx=EFy5xe=G!h;wm; zD~T=h%)%fKS?}wYAiQ7}VrulzU_H+CE;<|Ik^H&AM1i^!Y;X8=d{~vTvQ7qbj!c!V zAg3gp(zxNZbHGoaKPP$a`snC|3|Cn@N&?S&D&j)*KCz~mpN~9wFXr%Y_%2XE=*UQk z!}|t%3ScdY^&O(skp{VEbTq(gN113L`-*Fp*$e*$7~>L7l#_@_m@Xen4SD ze*ip=-eKGfyA~8IHLoH22(%&I=m9gQ>4hCMo}Y~n0uF>F1h9lZc-usGX^CCz1%y^G z%eS$=b*o}LPjf35ot>Mh>y?kbqsu$k3UB-F>IC!PYv*hlPvm) zWu6?1#_RVobvsYW#ShS97oj zpxci8N1tMISabb(p282G$=+o@injfFYF?^deG( zU%t+Ar>*d*t@@+3j3M>&`Ib1PAU#E~$%6IzBEs7AR?r7ati^mv@oxIW!9{f~@Du8w z%Y1!dNvx_qHi z;PY%#darB)p&?y2X&2JKmgh zQiN3MQaEc)iZHxp4R>5zFCp1ODgxlSm2YctE$OWd-;PI416o6gKv^5ZrR0}MmeOrJ 
z`|PeIbR3+h_=T|%YylD(`E>@^$PzI0#fS!+C<(-I$bVGgkvmKHh8EuIX`}P5+`6@*D4v4e>5~}pede`%@<+#V)T)HV z*Ws^xSvodqt@0~NnopSMtyo~gvCmBRFnt=)l8=u8@)go-c}5&P=v0O!YQ>n_6_}Wj z73S>%Etn{J3G3x~;;{m&j`sFkn(B^vz-jRC?VIGR52WrMExaw}cq{b6OP*kri%Vmj z`0V7sE2ipzmxYR4g~#@6yvrtySceG`2C*3bqDC{+7d;ew_@0;Nsco3Ah+Zy{X2dga z>%0q;R|Hl?GUyx#`$(}AGIanLi>(mLX|ZH93`@0&tqbZb96N%NM|KURH@v_I^T=YX zAa9wYv~yNFEy;k>r{Q~{?8)Cj!x)`Q&|j>Yk6D~9tOVl2Gf5MUgLkhJNgpP9z|1f( zuZ4)2eBzCd2HXjV9jDa4EJlmKC0r;_eWfhI(1yW;us3NG(knqPG4_rQ^Fu2 z8rR{D236-l$@Wmp2|;HfRB7@8zff^<9wF3AkdZIyk_0M7u? zUBUER@0XAYQAjQX(*d}@C&4CZ5Wn>_W_D6Jj2GR;~um8rh_PFkuAMEVy5ii(Hf&W z>Lgfp1d|?Y8!{0Flx$kcvlCA`?g?c$cJ&&2(kpd}g3p zDo*e7=J(~~|9rF)DuKg5L!6?~QVX+CEKswM zkJD-W7x&&dp|3uvm16uP0ws7}GF;sLPzOvHSCv7;ogv16)@AU^Kex*(e{?@zw3LY52 zLSMX}G46tjX>p*o)>(xs=BgpESf~eHz+?aus(0%b4uYf&}1xBa%trc^jF51*BlAT zRnC%kAmK0_f@ZyaTqA-Z%(Kptprsm$-*VCyAf@tmQ+9myU^J;^$_Fh2J%q7-af+-g zHQNVVgd)&c4c2}BuF-~PO)B&*K;F&sK9GpxU0rV$v<=qcFP31qLLF%Qad=5<|m zuxUs!r4nf*s-R-69oO$oJpv3IPkyZjKcjxkx6gl;u9d0cR;#h;{Io&k1fbe!ANLT; zG$G9Qp%2X9nVyRBaff10JaN#*msOa4OXuIm6+iEe6&o4#7e!PfzK3*6T??#6D*nbL z9WZi)9KIEVgI7b+v`ySIyjA#srOujKa$1DkY~WXUjp$8PA;4m;fsSYtjO)}UuVt6V|CJs}PX)k% zdfHHF)y>?{J-x{jBE5~KUj3$L0pyRVk@e*v zs(5&rK?6?P;n>TjVp_zYA9#Iv3`b%Rt8XW&18v;ahJ$07vg0AZ#fBWby`%(o#rJoH zEl<8M$Hllx73Vz;Ovc79cdPe%lOGWH)~+^d*w?isLwzM-XlN}E!6uQy-dR)Sws_`_Sw^XD7&xp zVD+fj^+uXQ%{CFsn$qt8w7CX8urF4??$Sn5M5812Fv-dP6XQUbHYCR#%x=h8V^Wy7 zVasDg8Aw-&l-zIVGEKs&4w5(j;VN)e#A65V%#~a`O;-^7=26&@>T}MS4Kf=jqv)f1 zq@y(907Ssk-+l6?yaTQ&%N23Ao|SG&vQdp{96Hii$3z>PDzhaEe+*0kc``JkuD7Wz z!z0dGTdva499Xmc+DhJbBf-xBOyrMTOO|UB-FQ=cAA;R*%^3osxm<81W z4DUj1Dp=CYIE_j-N<2?u9;R(k>A>0*J;=kRQ*iLHVHI7ZmyzX?$Y`hnl&hfs#~J}0 ziHvvyrY42|GM=Kvs^L?lJ)1g}ypCX!j!(H4gi$M9Xlid$oHM&z>ZvErit+n6Jq5!- zw(>2TR1S@-TIoUsT;3Q~j1BzWey{c2ir^vBF1He(%zhN3!FME6TxjmWK0GT-jH#K2 z80b}Xvk$=_i3}G;e3~pAhmS{B+!d*4aCGF54i@4GNn$LbP6U`H+eGZsG?4S*SQ3eh zH(fl`El#j1(Zd@?$Yjj*lyg(f4Ng(Qwaozo7%+3GY7*cK+)U}(a43eWVbpCu&cL`P 
z9ItX?9Nb*oK>iwki}AA?~^}0~c-fy-&EDS+Io7Dfy5pe~?s`d}@OFh_rX;7vmGSE?7-Cw%_U3M2# zMVd)y9%f`w7uV|0FfVZc_M4B1JE)t-H{4e7oQe8HO3WRgL0E)7Z-B)-Rt#KL;tlZ@-obp~=(j_0cl@G|r2ts^JA zK42l>&INqBS_V^Vv=SWI6|9mp(65YOGQ3u2vR0Tk!u*C^bjg@U^nwz$V1AH4vQhNn zqT8NYk zBShd|!LpvK*6_3XlJE%c;RGS$RNhk;!7>8|F$So59`m_$Q_(O)5FumhN26715l!)= zQlE1(P1>}p^mR^-;ew+h4vL~j`?zTGtHg{9NQ~+E)X*K(TU#a`oX@5XZ??Y8$tw=W zw!+TDCa1qFy1&f4j-4H`E;(&S8K6dbL;R=8zVqj?WiA>uZEtmY+T1)~$pCCYv@@P- z8JMP``P?|2G=?yP2Y&{dz>yi9 zXt*i7_s4G^#41~bL>T`y6XV@Bv%7?b;0vn&dHrs=B4Vcf)ZNRFMQK>pmHUVd99HLk zDaJ}L%QhL=Y|1TQ4L@K{K}KH_6(wu27x%8?ECg82q6JZx);nBhsO%wv`yY@_jSScQ z-*yGwWSojp-%jkSqOOE=j<3*=7rha%38o~RMtYPKkkgv$VVwO@+;y#`XZlyQi)bx@-TsPi% z6+d*@SPv);tdXo6<; diff --git a/sdk/ai/azure-ai-assistants/samples/assistant-WhEPqpcbmXadRJmCzMUeTi_image_file.png b/sdk/ai/azure-ai-assistants/samples/assistant-WhEPqpcbmXadRJmCzMUeTi_image_file.png deleted file mode 100644 index 02f9bcd170a6abbd4ac83d0be8852797baa3899e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 181757 zcmeFZ2UnHX7B)&WcF|Z7P>4!X5m7*-Yp?;*dlwau4$^xx3c&)1NRcK=kuJT1f{K9B z`=*F=HcepD!TT(8#`oPZ?q9fP3{Mh8c=vkOn)7LM`6$R;q2J21m5z>%Uh3*aB|5q- zGIVqsxi@db?+6@`%f&y0>@I28DO(xYIU3wCq?0wUvo^P~GdH>Mr-R`gTN5iwKAy8Y zeB6KDwzIRg6+U&!;{SXBkJTOHQ?;}b8otRE>#Lf!baX6b{rXuOB>aDDEAyDI)%uW*VoqJ=w``*wZ9rcFtIJ=khm_wsI4;4$UfjP{ zKD&>rr-tKK@@R8XlHEu{q@`&|gs8L972ku3c6M0-0RgwtHB>TgNj-S*AVDoftDx75 zS+wh-&Jg$hN4_j#iG3A*)UozVX=&+zw6rwoAnt_Ys)=vpB80bY+csmzCrh0jX(TK4 zU>k$o;y!T<6Yd7*+?V{~I3vE=h zwt4FjXX9`0?h3oizKI^Izrx!5M)CEuYMRA8J@SylDY@Nu@=J1tUZpc`BSGB_*2^lsUUxCyNu6u$S2!oB zr6l5~40pRus$vOevP#BEB*|}HdbGc?r-!EPkfNR2(wwB8()fU;@a0EeMfUCbBPn{t zS~fN|`8E29D#rxO+Y|(Q+%mBUv6r7P4dkU^7wqK^h^3?ZTYYIDSbr8j@i+&^r-a4E z#>Q;hA-VC+eC5V?`BuJK(btw;1^cW|C?4?mx$1c7@@1`f5xe1+OEjwM+e_AJ($Wu= zl$7k&R-DPrCNegtZ3JVb?Nq0wc?fO>I@C;4F!>(KVMy4Tb^tg z!S_hQF4<5ZCx0S&{w!yOA8RZ5+jt-4u>;uMv6H1g^?E#i7Mr%FrqsW_lH|TTx1mXz z&bq6}d4_Cs>g@WRq6)YCnm=0)RPEEvzjJA*CZh5EKbx{|Rql3Q==HXgP^Xl7mg>F= 
zWJ}RV|Lf@G$6WqlKd^cY{)Z)1lGIM-wnjwUtneMdK5FPLcGcCIXi88PwjT}jSR7!G zYVea-8_tOg+4|jWWo2>Z#W|Z-YH51ovqGBY=E?iHHD2PtCCUWzxU4M>*p8o$ESK^_+aQFqUd4%CkG+KEJ$elZ+`2JE==u4ipQP+lsy92X%Y?bPuk{myZ%p zY0oq^o9N6hnD2HKaGAZ0;!_wa7B3&w^8MqZ=2B13+$ME@tTw+z2fzFBcy0l9dSkV2 ziJK-R$D%REg2Io|M1M$F<@FUmo_9enb`JeTM;Ee3@F%6>0@^12~bCdsTdHBs@kS9ELP%ap2cpRDgIh>Z^Jfzo9`bEy82B0_@Rg`5*Qhoib8a*$j7Pw^DeRZPTSFr z?4(amnAQKZ*g&W5KHuf&rKd^`nB^ZX(O+z{Wy8*zG{i_bPi){yN*Bon-YZo78~-Yjn^t|QloX^h+cParaB*EVF)@*rl1g_yDJG`7je(~r zPBv^76)Zuo*u~PMHCW#xp>VQ9`?ueIvrI_)lZ~Byq>?jA<>JMQIWwfz^71P9`1nZ6 z$W%L&+*{*0bF}6G9UU*juS45|N%@5;$aEv_0qK2)xFCV?)Yv-~^%9U{K=ch+i4Ehb1H=q!@h;lDe_Bx}0rX%R=_OFiuiQhr)rOzS5s-g`a#6nFZ@8 z=9p`u?8{=!)y6xrQ+zgT*|J60VJxI2S@X4OlG+F=M2jJhUeTNQNEhk8PfyT2BBnDc z1J1a(+ca%c$5yO~5Q!Iaox9yr;_lVTqhFHDpyzZU)9C9Nw)MVD0#zdhk@FK>NmyRy zDjlC?G~&Sp7l$ogN}oI;{Qdnc{G<%40&NmbRtKLFob_@jrxzT)yNSix#revzt_Qcq z+w-WGCA!WU8X*Oald1S&R4-;rg~|YScEd55pDQzdq^PYbqhy-}V~vx1_Hge1Jhdaw!@Q@Yt5KFK1LcRD5s?b#9IKwx zR(;Pd-Yg@OUXE)aFUc3%?b-HqqN|X;(S2#wW@%#|gT9+0-O1y^(=*+JttjNfOliz#_vmj0c;rXTT9+Wwh|+oMYQ_&5tr?&q{=z>SD=R80 zEIqIhf&Pwhb6>*-Ysst?Pa^AE|LT%fv5S3VheF*@->zM|TyToS%)<|G{>ZeGn;MRu5?m6r)iJ8 zX!Mpn9Jg^UTUyg}@?RN8I$zE-u2p-&Brx6eDXnx(3*G(X+eq`7zT6%UzJu~?2{=?P z=yUsa@BVHb3N-$}+xvv5sLoD_mA5rf5~R+UzQ4EOLER1Dm~fjbJF)j3kRFP&=as>1 znRfx--ywu+;3^8^K|MN3k#nX|ZRB`P&N^#dKouGaVxms|9k1yp%z~re-v7g5`h5A2 zghwauSO2FIeF^gpt4p*c92a^+tndp_=L}S%=N7L5k7k=TH!S?d6|>C`|+T-LW#kOmUO>LoBMv8uh{vp$ok{ITfu~klUzXmPtKo zsEJ*ZZHliikCZG=*oJUDj|j6YkV-3_dl7U>KNX!fzc4gl+C$u9q=9{PNaANYvzHU< zvdb#1^$>@2`_>&hT27=Dy&8&itx<>!3X1!=G9=-3U)a$Ml%-_h`?f-sH#j?%t<*rq zHjV>9Ji1!zH|^m6a+6v8(tA=A2g9tGTnuP4GmRpXMX&KvJ{i0UOsT6=Af41f>?S)K z+i+~KUX=~k_}IeYBBRUTBR{`Z;4hujcwRTW9j}oBd+KPPpG3`B#j2;%1J!0z-`;B# zIi+vhe&E~SelU$ZuUp6H=oI?V6Quc!zVHT;2B92xP4V{aH!}8yJjSk1-LP(6yfiuC`7Bm{$0Bi`%|ro0O}khHA`k2WC*&c zw*Q55>R)?DkDITU$HfY6JYY;l;OL27q^mN>B2mz|Vr>m)zXlBbXxh(9ym8 z^C|ZRI=Y8NBaQJV*x5fS%;6~p7t5~UDQ5AnOjY2nRez}ey~U2&@^xaPf+EwA(?UY+ 
zyyMb->pq8ac2lfMSJg;W3#4a0dkw$7ICkL>69_n?i!(IQ6f46}m{BxWl6m9f)^dMW z!A|RcNNtG}b7dN|b^Km|2c{p|Qs_9@GWa#DOU+XaR8ioXph?5UwY9Yk89a_Q18$M0j1Ocf4t6Q(zpk>-oG`jjV$=_ ziJ5=~NoU1TT~pu}g2JN5TC}ZHOFdSnb%q|DD!JYIs3Tm+7Cf3gn99U^{cnSXNz|`K znx4m4+i8&BuL}EFSSF%dAR^>>=X+|RHT9F1NZOyI=FH&NW&i-UW(u~#>O!AiN4{-* z_82-=J?Ku~9V=DM*qE5xAY!Ds6CMxFr}{?W0!m!9da*Bl^yQ~dpFVu_sPW??234?j zvY%&rboB|NVC}PEu8YFE++#T$rU2 zs!`|LeGh8!gYY)_KN#8B*+q^*EFg@={`-e5*ws=cB_+r8ON!7qCUk;}oqoKU1JY<~ z)A(3Xk>vE_`*H1@_}^Ef3}{`Be71wdsGPhNvA z4C8U{j4n^0JqV-ypopJhWo3PSN?#94cfx-%V`0IGn$CUfm=th2v<>A)`%iW@XnS+| zIX|!&t4#Xh&IeS>z@y{;@rGi1hM{V#^gaQLj;52#gQS+#O8guKE|tD^Ef9b1vzvYI z?%l7j;?z&fR{Yza+ZN~9jRZJMcBee!ls_&cq>19J_VMAq=2w><9ch;|zz&%kYk$pW zTr-T~Kw4{}O2Vr!tCIEn0&S+~|8e#9iF4;k{5yGk<>kiBFF&0WR+8>#z;eNGI5INFe+BBB?E+FZ`;yznc_VI z07y2&54CsKZ`wZNd*JjD`JrbbgpqByC={30{$LZkH4UeWSJ-z zUd&kmgsdam3(Ox|h>GXhx^=4(peMMx#X~A5$WI5DH>97}d3onz#nz$Z@G@{@A&^pSr!gZhpyXrPid+4=tOH;RPQ=#7ncIHRGrYi2mh&id{J-7rZxH! z5)Bz;7%$bK8hmaP?W+ZfP%;DG%#p)~FW_u9CaNacegEKNK9r=DowzhNp0qqsC=YTc z*IaBjqEx&%7}hcZ3O4dYu-l*Q&^hb#=t*@@iwx$SRZ?}@4KkW~%e`sTw9-+$lP1Uy z7P&fPn>1SFcE#ZUXq?o_`kb9&RrJHF>+ADtRwavZ1|sH$U8%_#%jTK3V5?O8rEIxV zni&^?73)EFvY^O(4duIHToZ1SXO8bBU^lEl8>a}$3+~>%XODpG;I)&w1u78eb_|(7 zrot(V15=W!8je0mwGJxxqHn}Y*VS`0czcMWvHq+j@bT0K7o3jd;_`@dHh<~6{dmxl zs+*Lgp4M?PtnpH*fLY7!?jq-oT&t+AXSt!|{LPJ1%-Y@>5KQh%31}=ievA@7k!dL( z2R*cGDgx(-zi_qXtZybzx2CTJPV=3+;&&|v=g*!xYuR~Qu;?;q%S`y#bA3W9v7u8? 
zYL5vBH1~=U)eabopVasS<%*F4+oiPy;7am6$GZw!doxh_8_^<1-g52+JBadJ(*!oF z$*J%M`5b=0pjQVmtzSjkcFFSGo&JC0u7$n=$rlpk*syV9BJ>%T=}J!31SR(Pmf(ax z{hT%T&81fys_?}+>&^_-s)IE9E*k;b(mw2w9|q~MPFN%52`iulbxkfxJfg2iTwI(8 z$6C3T5XW5heO3rgEio2~@I*5*0k6UDN0hsx_{pPgwGP+6-o~xi?GRNx3a(OV(@w}o zEVIS({A6pENn;iiE$=OF@g|jXExY*d^j{_TZp6i6->mouz4^0PP$Zqyn}p}-1-rSq zZ5NZKjkP!Wa}P{>@Wb{pL%nEeBQ=l+kKPqYD{o~>NF~*#p5m$r_lu8>HNleohhlTP z9JEVXJpnJ@phI|d$DpY`pS+md`TK?qjBzs=Rt#(u+n!{}$}am>U7|WQG&HaovgH9H z>RT>~iV|Y#P;AoWLQNrMsf0Swk`gAq*uSUTx*Ot^y^yAKz|!oL;e z1~$mC?jYSkI4Qx@I|&I19a$#NBAvc(DF!NWDCf~T^}Vl8;Kql| zvLU>ODPhe@pDtB`(+s1c<}wR&3zE(8?(TX49CPzMu`!Q-&oz#6R|6ZdU9#odMp>{P z*z%KXeUykOyu*R1d-@JHZrtEUF?e!FM9p<>tnzH=m8z@0@GuhaobjQ2#=^xrtW;Hc zv?+bhKq!fhHavb6wU-b?yi60E#rE!n;4=ZzWG9gd{({%w9?eLL$XM=>l9|L@>~egR zls?fvO}|+&3UZijHjnL1gJRTwE{k9U+VE>XQ*>Zw%cQ`kD*R4U*HiRM#VL~?AZMoN zz z7>)k7j~KFv6f76D=Qsvp2k0?uQO1!1o}>8RIx-M>?%P%hGYue3?c&Azlu^eIhn5{K z*aTh=v1i?}Ws4#fl)k!u_WFBTS^awL3F+^fH*c2Qv7GpTMcf)HbJ0XqB9PLgogt5VT+GoqY-&x zrt|g9xFS8l-{{_%|MGG!scC7w=%Q}}951vOe)0F;bcK460C?1fuYF|Y(f=LFPoP`~ z9V{rnk$xK_gGs`psBw$({zsR<#Ol%QEL$J>1ls&l0)K5sE5n{WuZc~D#jw5e=rJLZ zsK;OZJHPey5t(+m@S zPSXf@T~1+}I&j#*sPhs4&MfY;_2zrTGad~!HL2i{WTC;d*yon+4|=uIJ<*X}&FkLx zL@?33J>x-#7vQbf+Cra$k9H^?bYv>TdAvj5q)r~Mz7$`S`&=m8nVVeOHg6_IS^44+ zyc$-Ig$qiTl^Sq9#(iZkT)42qN;)n+p88x}uO71KbW;mz<1n7;EZgNT9JLqd0Ac*91{5{v>B*D~*EsIv!k z*o{wITu)w%7TC4uD7W=y)a-uLoOIXoADSbjK>thyn_OD-P=P-9wWMe>S9L}#{6J?s zP$P*_IymcCnrZIJC55AvlveVhFTH$wxDacu*xXe{v9-KMuM5m;OFNH&TJqd2eCyg( z=`+6OIrCMJl3jb&7vIjfpgUb>;I@RVv6W_S=bcB&R6D#+G2*6B?MzJ*O#(8R`EOr$ z+;RHmr|sk2#f9%$Bd|!|-g00CHcrJUI2r9MGmemgv8KXB8v`QpZHG*uYP9cQebKlS zQ^wdWcJicbTG8~wlBLlT-aa}YTD5aJ9u>ZaT%a@Cqg{)ph@|~$7e{?{K#D&TgRpB) z51Nu4Rha3l`R!rZQPeD9kCnW}@;$`Y03#2GdHhIT(rddSz)CWM0O@I6+fD09&blZ`hj;BZjQtiD5 z!7@I}ec53Z=YT+i>=gn)iBAAoq0^uk50}yHq=E9;`$-+ls~N(rIYmXGyy-EsV2#Vs zQfw?HQKz?-h2v+bPadGPn?P+Nd+)~0n<>?#lB2;nrV8!{I+>07xl%E4!x5l~G*Wnm ze_@e>!JD8+!Q@H-8sGv)0?)`QRJ^GMJnv8(Z8&cDrwBM5sU9(pnX4dXH=%2$0L8u0 
zNWTGt@1d`64)r7l76lJq)b+6y`<@!@&7y=QCk{8bE$PINPm@a4mdwiQ_+)~(6+vNV z(S$m(Pf$yaO9936&2FDZ%|CarJ0AXfzGdc7*%$ov@M_o%**u|~cTT++rw>fdC_E6R zerde;>nMaGhvK%p<%Pg7&?eGOy?3&n4Qx%;REEc>TIevYf`cX1fSs1*Fs}9M-S*%6 z8YW16?I+#<3GI7B!Ow-hov6tAN}uX1u}kU?Id&QxDj*sZF@E@+r*DA^y)Wp*K};p2 z4m~)N$FosZaHQ9pwFUIRNHza8DCo5 z6_^bI=#>Ii-8v9Vq~+yXVOi49H%9QCb83Q7?Q^?bWMDSfFNME+>FX?XZ1|?UUO4nK zn8{g(gVwKKZ%=pzF@0WNev&NZMu;7XIKNf5X!(;$Si=dhZH0Ceui(j;S`>?%Y-rS0 zdGruf?4s@Ee{jNXf{a5b+F7ga@FBM8GV6sY17Z%~CBg!1`un%_c4YGcD^JsC&p+O% zCl85rVDc)JM(N12>Fex1(L>1BEBGW1hN(OFN??Lht+h*ZNUB{vOD?Ddmjr*C@crl~E~1CJ0X+p8xVArBRD z27BD+6YxOLdNZ_J26$>B{U{u#mzS3d`M+3MqLv(mV^gJ$v?9<*J$xVfo@yvm z4ckr?y#%BE6uW8tv`g3Tbaa`&Je|FozL5cm+PSIPt-WtbU~CDwKNXxAtSO%FP72m@ zshoJ_LXL3TO+T{GOlVrs+vaM;m#FF^a1pRa4<6UN*{*PuXqN~u2AZ0 zBpM)`y_kT&4&*6AK4QsPlx~9LcUXC4{bxP|2L52YB^zYgo)Jqa7;yXuagI&uf24X` z?-Ya$m-&elie;fJ(OVDSy#-exzx(YPC=Gq}!otGsv9@%FEslghYG%fH?PN_wz~T0X ze|J6f@kyUZGRie=KF-e0&RBIuf*O8{BNaev1XU$N`x9sq5L^p*#`XaKu`K46Y1G07 ztbk#j!Wgg=Sp+0k$a@2~Gt{sI5i{*7$VR;Bdh1-W1u<>6S7%%GbSI44so&111Pha9=fLpggvy(j=sXES zI3Y*Kf#Ev3bcOoJ5oZRH#_f4A-T%3KsEDmc{X}mR=fiP)iPT7=%d^3!S6J$az%7_1 z4~2)IH!S>!=g;M+Y&X1GM2}Qpl|)#Yp?AbQ4*QMR7hMWI9v&VABXMDNsE}xFaxT3O z7?K5z5XhlHwlPHw5b8d6eE6oG+}5SGzC-{zVsM;hi*15Cw}<1J0(8&u!JUwmkf~~l zmfYkHiQ3CH63$;%#fiKiYR9j+k8dwSz*uek6=EKs;Np0(+U6!zK+L zZj&}TqKofHgS*hd?!L6J@CJa~WhlZ?1qPd>-#(&e1CJ1UFgH6MtJd-pg0w~6g*KuV z&ldzZ?`VZGEgODL5e}_j(ZtYDJj6lGXB@H($Oy!u(rM{yBM1e-Apf8%?F6Z1;plZe zZZl7!lYy%pBs~W>s6$+aoga8KGh({j@E%T0eekPbalM8vm_C=~v@~l%Bx$B|)-Sb3 z(-flbEzM7+0XWwvctB~BjTF^{cBzKFj^@9o3oA=fO`|O-)TvxO1DiRw&bfIP{ApK5 znEsFNA5}ArWF>Fp@kyo*e#GOAc{rl#QRwRjkNNK2UT8ORLr_2c#z#A>foX5qgO&_@ zjb3Y|QchHUVQoGdb|RGTa+@P*1RDA*0$XK88sC~zIA>6H-z`_56t&n$Fxh}eb*N6r zRFQBVaUz)nEt9dG697mS<*tSC8TxZh8+(@mV+QUov^3Y&` zj_&-^=h>U?Uz=e0{<;1^6nwjUl`Hu0v*?9G3$r%RVTd?TZcTJw2ZpZ}gjIWO)12?| zEG4g6=TxNk{rkg^9?pu!7D1jUWVw2$51WHj+drl(8D$`z}eyzfxA ziNdby3?v+p+6P){BYa^H<=bJ>4ABAF7rxs5kORA5fE`ngJK%DIU<-A9_W?VRM+L9e 
zx`l=1Zq0pHIe_IG_j(6&`9Zf+-;;w@O2ii=Kcs8JYSHXPb5RxW&1&sq@HmIjU75g2y}ZFzaBRAg|jK=K^*Z1vKcagRaL07r;29-3?flP zh;~s?u+1!Lb|Vog!I#IqHn57j<#jz72yf#Mk@j`{{PfIj3B;#NxiV$5s?q@PsDST$ zfdmygv&6MLMS|ma1lrzGFv@lvz5xr(Qx9+PeILjtjI#_Q^yHNl-7bBU@oh3e~NO9^0?F$L3YCvGut9pY5+z)eB|o_fDP z@Zmhkzy3PNA#FH-Eeo-%exh&zEkVJ}t*F|kC3*T) zqF&RCDj=0&(Up7!UHQ&m8^K2j*3O9xMIrwi&WOU5qUiV11X{mUMTHG~8F;Gyi zj3|$#%8>!~1pQJE9@@93`ozpPZ~+T>kTbDDjIcl5N_xgK7xAyv_Tu{b z`p`}(3PxUG>8UQG3GsIhmT@S<()#6JYs!h;-!~}kT&$gcC)Q)3cT-_0K-*;!|AEUE zgFV%L=n?jcWw7gYBDeL5C4-&$7I(ID6zsqFy#kMCz7T#B-4lLQVagiPBBXB0&3m9& zsgm#~(HqE4A$~`gpp}ERYewJbV&3?of1ziRNMhsjo>8zQ7vH&YLfv7vxb#20LX<-Y z79ZYrqBNu;TQgL+I`Iwhk@(-IP~9lBwXq4d^0FIYTB_8BsD{z@;&bB_^A& z1%iY?+HTDwJS3X~f)Vb)`uf$YU!g;I1PY|WA+2*R;@T2E!GRldg#Q2=gjW`;Qe zQFx62$}_pd-MI_|J&R}3u(~?dnco;YFj+jW7d%*-#%xtoU$F$w|DG ziH0YCi%jH6V#A-_PG9IeV-)W?cns!mWBHtFidHl4{eVE5yWQ=aIaUQD9OZU{f25}l zBlDx|b1M{jVX^hW7rS{e4K(sZMw-I`Ao5ccSWv;8C2XK=NAkJy%sB zr!^suf79_<_uR%Fn3U6PrZY%C*^f9Gg>^cvEZEGSee?J8W6Ga;6_nadT9QLcR_C;- zc5)Q$kmY_F;b%NwSPicaYH>C~*t#AR) zBf*voET~ysvN*_$!dYwK!w3tIq-L7&CV`NuExD4z55oN;r+OlZSnp<~Y(B{bxJJ0kt;@hg%KoAlsW9P_u>? zM3N4^=|-1*UCtc;*Vd9E%;$Zp#t-L!ikMxl+)w;J3CnfaQ1+ntBmy+55^Na6t@#Qv z%em}M#EEA~91X<<3X5B6=^^tpllHmqLyr;8X|(QaLs9{m;ah?|$lPo%_66*!ANjqx zEDP^ZQlvWYSf7f#cIa^ce=Npk=W?&a`t-Yha;LewW#$&KnHoU8^Ggx#ls|{V! 
zVayow6`8O$aR+=OTEce0i#TRfUd^hr=?PFEcv57l@}AnD!f9CXunOI|sGh20lPKs9 z)i+OcMNbZDBNqn_V#{ikh zrouPXss?$u`Qko{{131B99~~}q4e<$Rn7CQzyZicoMBTeXvkE7FTP#!Mn7QV7k}>O zG$~i(ps&XZC#%BqT$16S76Xu__{W%<+^yh8~;)S^XT%B);xeG&K6!Dy| z_94pZgu#F^HUS=CVnGIVf6(m0v**yE6v%JYz4n{r#K`No6z&;5Po>vGdjzU24I{f{h5PVBa zZ$zV&^HOe_0XSf0#WZ5ajvWprpZl-nR8Yn|GTT^Y@c5=L?pGw6Q+ncLnSuDD{C3fjzmxc=8)_lV>mZoJ#)%)r29DKrLT4t>4eNl8m* zFmJsMDGhZmt(scx^4l_A15!D*rp#SWx$Kd_1Q@wSghuicg1t|sYj7jA@VW78NZFvd zk~{~c_FerS>ya{V2EBuJ+mDNui^eO8@k^M<34%(r51j| zLKapA6VJMD;gymQE<&^4rvJ#wk-RHl+H@6iO_hBP1BfK#4 zcKcv%RgIy}oQmC!G(9{jmqmNV$E7YZmSWLWpbDd{PPD8!(xu_s|?bCg1GKy$275$KQNAK7ZZ8CpJf{Z-D)D5^r0i za$cXK4yIY;!p}ux9}N{GC;hS)L`dNN{L_6*Os4AlSy?Ar>~D>a98rF-bzit%cQdn= zZ;(xs7ZNVz57?!a07P~4f{#MS%A#tPJw=V|Hxom($P`~5z5UpEBBm-^@XAog=0(Fc(vp z0stJJQ>lj86na5UTgp#S`!6TnKv@3p@grj#W~J06twf!l>zWgMLBjJi07|9~Ue7f* z5<>zm#z2%W84m*;9}d#Ctenr6HL#}feYJY*q~$T+8=jND?=oX6&VqM# zZX$m5RyNh!)_{&q$6)QpRg}o-&-EXlplsyZAMzc|EWR7Q zhZ0YTFmSbLsK4~&UeT7mv8m0A&Zdr_ev} zI2B41WtEt;ND#Gvzo$X@Q-+0KJ0rVAea~m}o7`!WetbX1EQ)$TnB4Bf8po}?E#CEw zN)3yjG^5N5iixcjP@jHce31UrdHTOT%Jt^{r?DDsgvJDbk<7iLugS}+L&_$DQzVLz zS~$@F3&l+E+N1W65x(N?eRJZPO}``YgEmhTQKOo0A|WyA^<*0R$?UXU!DNQl%@E8H z0R9ZlPUjSl+~V-aQ^x9$9Ghf&*@JBuN)*aXtzZP4X@ZN3EXG0a;)5iz6}yBnuFg5P zVI;R z?1$?vV5_UZ0VH!e&yTB0BWgQ6FBsVX#h}Ye??{m@U?zQ|bC+Por(J)q90O)2c}L`8 zG^$C*+?lgEvh4Ka&%I6gGIB#FQ8gNg6bAsT?Yc<_%Tn&#*wBFd%w>ztJVj(o>)|H5 z=+aVuYjwU6#a#ufUNJ(AZtgJUqTP z_>d6Bf^-kICaKq+v^OT=;>S;)P(-C#GT^O9l2Qz*e|(N6RDBj>Tf-K@5@K>N0Gh^6 zE7zf07(0C)N2p8{7>44qcE@hVWXZ;rXh@MnTEQ^^b`;Wh`P zdDz)6VUwqe?AY5ca1!aIYiu%vP$q$Qn|9`<8+`T#XC!&;kzSTN82G*MMmqpf-VMj2 ze4IpAtmMv5cVE|0Z!O|mb=IOI=H0vV!7T$@05b)y;p51&y3amsl-zGLeuH&R{^DTv z5QC$HvuAPUlhcmHX|3h{Pd9wk?xqkP?aysLJFJ-7omc5Q?pOx2Y=_y%;3(-FchA+8 z7156Mx)+WOuln8W{+WKLxV?EAVyM~)n^o9>r_R@yag&kzXkLMdkOTw9EvD4_D> zHbvRLDZ7RmU@g-XO0tz0EYW1WP9g)yn2spP8yJ1VBck7qz&H{Jj1qNj!05|d=OXgT z4d^o4Vsw4If-+_K`5zHEi41y~IIgN0IEm5yom=3cREvc9^h-byuMx~JGp=^+iN}Aww(0mTKfY-?E?CiSy1jA~@GirLi 
zmvVZJLLNIFQ0|G@z5WD-1IE712WK9e&woX#+Ar%UG*$eqW(4TpkO=@N+{Ep>KA#V* zLK%RGc9mK2+?Ndb{p?`MNR4GhbI-(i%8@Y?IO@6Npq6)$^=wYnr3go0)&%sRY@BLy z3AAynN_2I33bsy;4BMTde18jFXlJnOLMfxQRhTx#t1OD<=HZcpw|~N)HW2r6_jYz& z$BGibz7E>E{?t`&oDIu2<>aJ8oUoBu4TZCpNR*W-#E%|PGvrIrNy*Z`M5xba~%>-wo+gZ5R(Y<_Imq|3s1~Lf;)p;10t|bHUp7$r^ zck!Sb@Qy?7gpHS{QUPE4kX~MmMQAdN6f;4za6=UkQLLgy^^v1TN$xbbMYYZMpwNwl zWSrUW4SU)!0I=cC(E%4CPm@U_Oft4)U}q-6(35L@`r?J@fdV>4hW9ve$;)B?ndi+J zhBCpULckmb1vqRFt!Py^v%oz`@J+n7{Un3vkd4>YjVEyYmzD;A$__$C`Q5;Lg~R7v zS1Mk;`V*cll9UP{AK`k)SyWnb30^5rxBkMH2=YSw-U#2vk7uu&t;vH^|MI>aC%Yj{ z+@G85v2wRS9dGTjzGxk(385N}Vh$Q>|Q-L{{* z4iZL6Ben?f9j8^)9f*~I%qxliwPfHhwbMUP4>?HV9UIZ}CRQA&_3=?289|t)(jg)| zrea^D72t{n%lWrgAXjb8;!J_=MFzS%-WvRqZH}aF0fuVFcc0$+!s4C*CWBZ!SAK{# z+fRMFa8j?RO}H4dC{S;|7cj2l!A?d#B8klOoLolCn9MkIS(noj>vR1Yt?tUfoK|8j{b3Z@7|IihQC&B6Vo|#0X zn`gujVRF(sBG=6K9K^(%;-AYm&>Xk#{HTKw9LwS6vOwBlrel4gAC$|>4eg0=kz!`* zsPsaG891JdEK|f5Lyy>&teYdrnF>$;yvsz@WWN8nRVn}!$+X7`BTjD?Vu{a~Q(1-E z672u$=T?YzILP~hB`V@^oD(O}9Z2LlIvJrhm+wBDI;m#h+dTG@#UMT99|M8)jYR_g zwg_kgizeVSn4%G4F2PBO=eI?6@OCRDm=EDZlMo(hNhU)SpfCd$)JQpryCH}HrIjot z{`tbbEA!BxEsOy|1skg|$y-}+XW*fI2OJnU@prVBr>mc(D>n5|t`UJ0muFaUAr5mn zGV+g{eay^Bj@`}`y=D-AG35|V!W+8tIDF$?y_qSQT$&iwlM$35E<*V%>lskF)&2 znJ>vO*9h6NU&2R-qg;Un~cF#FdOE86pz)V2i-fa;7u5|ip-$2U`ilMv{eXc zL1G>#v;eg=Lqe;y7bcFMDSBwMIkLkKmfVNnB}j;w!ESA%&8i-dyi%x)#U2kZAW;oB z_jpgtEDC$B6-ApPSh7KQb{{*1cuxwDm7o2aZK`%|9I%Aonc{nK+pn zW0LG6Z_EPzbK9-Yl`E0kps9SStAQF+~hh@RoX{K_nDsDr||M~5_Szi zxRRPWI^iU`alq>{BPNsTP$I{89*RK=a2_oqN<498lo{MG^2P5sp(CjY?wfH)JQ^F# zD?U^icPdn!NXuJk(p1}EZ*W|>g;^(y#rJb%cNDFuDY;B@ zwHSXV!zDgCu zCLwKXH$=~=$_Xxi_3k-KDIAqJ;0eYm1l5PE^~jd_($mvp`EMfX#31!RV3)Iup6k}P zt1MY!-sfvjQqWUo6lB7cLzZK5ZJG+3paqRtF2vzt3Jg~Wkn$;@PfbPU;f0`;CD(~J zAN)X=bPru}ozipmJqh0co7m2d?$3=illdg=(u|P_ z$#(eN7!I6lnkE(UaJ$5xfBte?o5 zNCDN21*Gew8&>bfSeJOkHXZc!dXhI9zEOOuGQgp$l0q-!~|Q-teAEFFVnqf>fWyb zR#N&hihF|*^ufl0@o1Y%17fRi5ResAwesHQ_V=hPvRQOs2HJlamsFt}8r)$UH 
zwV4fDa3JOi2zzI7r4QV%88aJkWZI{fim1)6;cB21Azfo|LTKVQxXd6{%8XAt_0X+7vcA=ydh!zqlMhtgttMY!PBk$%%b?ibxju z5IoTolzzC2k$?U5S7&?wL6`+?mIrD{JV3!q;MBjrOCIZc{|_L7c06Q?ahLehh0yIr z@$&=%W07aazmW8BI8zM|s2*_qD`L7yI^*gkavx2hp5(SNq&9Cm4IdU4FHs1H#Yj;W zrsqfofegMuW*t#lHwDPV4>!AT-Wm0_oky9p*d8g247I?#)nVE%2v$t)1j4rpxv5nD z@2;cUTgAx08*(s(n+|6@3lAp;n!+A$G~jC+jhhjg>LkD8cX0j9@Ils?tLW3@rkNg( z<*y3j*Wdm9*Q&?w7vV9~f2C6~k^hf}hM8r`IQgm%Gr=dld66CO( zjy5!MrHqD#hA%A_d1;a)5?`5;G;0UmB?d9M9RdQq3NfP@DA4=}FeUdZbih%=t#?-TkvEIW zRp96|v)8k+8$pcDlUD_X0m^txim@jbZ>{lHW4JYjU~qy&bl_eq5I1H2DLr$ipR1U_#iyopt$bZ_F<_nfk_ts)37I?^V);}YDctAM z@ATs%1DS*+^KH--6yT;LR~*Jap4?-BDG_1k=@=sYfStGaUnBhVxAhzMPKhXGWg-8`n>C3=BDry5dmvSM zw)0<%ebsNh*_Ir>m((AyGBU|En%%0G`HOj#u1r6&-)ur*YsTDIzG2ib0&zs6F}ruc z==+rmY<~cGsX(=`U-~U#>;oVJG9aYVB0)^JDtSPRS}4MENa@j5Pt$8g<;D*`OJ@CA zZeCU+11P%kxFQW-T#Oby_KMy~QcoMU?QdiE$kV>SeqbYR-$TbGSIA|X<0;~#3v4e#i6cYD zl787_utv-^ha6}S`8NQzVGD`b=+wm_9HHb z7)ILuWFV~__bWAG^aerUGl)N^B6OyVE!l)n!2>TZrrn>&{4&bF37Rt@s$G+T(5nUu zc9Lthh^GZvA;l1rBMnfb?NE@R)v6I59$i6$*E0+S8dre%6NL;lM_9n_4t|1w!~YfpBEaGl`@BA#H?R~)*k(KJ z;^oVUu;rM9?cR{qQ>^9$p$}%+CDfQ4=8ZW(=MLo`*U^2m`0v8M*XaKwg-vdKDUAQ~ zCoUo*BccIkcskK^PLQjyx(^9EP9&q)@lgz>!hhh}1(HrKz~nXNI^RIFm#)e?;EcsE z_tqw{5TsKln}osF#6cy^kT+`?$EA(+I3*;WDWwF3nOu;7aI_HZqTvNHyhD5ta#3DO z2GAop-k2_c#1b1A9&X-R1F#O`#3nzQ|{_-C(D z-}moX*1dmWoUBgR3Y+se=o(&mzf*XI`yHVZxVnLS5M0I9^7QDH03YN)_<_mOmSXGsCv7Y!9;sz#fzy(=Tv0h1aJ%EpxNzAXtHM5zb_EKjd^@v+t#fC zh|#0_X1aHHBE0U^>-)5jj9=p|H$#R?HY7nQ1m}mW*9iXUt5j0zoqX2aPg*JFc8S1h z)=tKe5Jpi(H}$%;r&3IcRq3+)<(w_O8$uUOLlZ*ZYCpOB5wi34QeAdUKL(7^A{A0) zBZkZOK07#R$Wt+TaPOagXrMg@T%HwJ_ueB09@X)Z(QX;J%AF{OB>H6szly>9b!L5$ zvn{zp2v@b`2K~DA4j^ZH`NRA$i+jCbK_S%qe=~^>E}4P5)9z!yDgpGMFm|o%41hOm z)Z0M#UNcFZ!7B3q(8uW9;|!D$*+yHIJ67;ovMan z-c<<|Ng#J*rt>T9y(Hov1c_nC3g}ckf)Td*cKc+@@98LXob!bZYHI5Wl+vm0m*G|cSW(cMJ% zjds9`+-dY2_nh7)5meAW6?i$`GZ0v0;c!>YZM1_@h72z~nMl^hl<(!VKgf_XJU99Q z(6${`Ys(7@!@#1gWW-6SR-DW=0r{z9o2q)J>4O2{(g7j8&D@ZtkEmS-#k_72@! 
z)nhgr8UG*l-UO`1we25X88fr7O_gY&$y_wcY)MITX)qMcqCusUd8}9z8Whblr9mWf zlLnel6sc6wKoY9&cP^oxZFrvdd*1Imj{ouR*!!s6rnS27>%Ok@{7vV=DL1~fC_p`Z z_eK4ad)DB=s9Yp}g2P+Cm;f@J!Q{94Mp}JKgkV~ddl@bsp+B&W*PGWsuYsLsll!35 zh0knGftT~Hw_1w`2^2aSiLy(v%B5sxk=^E|D;FKmvWeTqgkYO^jRSDLjL^2rS`-m; zXyGctw}{Wg)0358iB2_UAk@eC5K|YSd^!}n^?W4y3`3tW6*V&-nxMECcA4Z-gZqm?}1<(8=r8Oig3BjT^gq&vCT)Y z3jPor`DSpV@ImBk_*ox8Vf0`96K`OPe>!b^%4X;%K;0%uH?D_3wzO{c8vKrrAd!fK zxJRdDMVF0&ha9wFgHs!d&p{p)kmp?vvDcC>bFJW>^ILl8GO9$Es-ox zH~ZZP899DDwQ}{S=){L^^ixNaVq%TuIdrbIYCG~_*KUPN+h|wL98_^AMjzNsvfkxU zwtdzsFe&I!)}np0eSD1KW1#4!x|Xb*_%&x5SO7l}4A@G|tS9RH3%F&*P|xXH-gE#v z^A4w}4^SDPOjC!ev@mex2&DcdN6&;|ppb}ZG%HI8ibe(WrW!gzbv(CJR+7kLZT;1T z#8Rr_0gJ5$enUn54(FFK*vToc(FOu;l2x~~&Gs!w$J(a2O>ar-5R`tt4fmQL*x*Fd z!Mx)g`-&I+1vygIp<^WA6Bl|daDEh$$~$)KI9bPg6YmpZ7!3g0QO{oZ!=49PhmJGs zVI0#7dOcGJ&03d~L)ka9)v&>lZ^Af_G+YSUXRRB8;wlBHDg<6oy+=Y8iE`|i(I{q(DIgH4 zMUW@#`ZjIcW?Q&T>TV63{NO|NgdI4(C2>C#i6g-6Ahe%q141{`cP-3OTnznE6w;+= zog&49Aut}BRqou{Amp89O^Ok$U2H`G``b=VzEsGk4>>4G60+P?(20xU9 zLqupoj517iwoxd+T1ZYbapMZXRMteeM(0mR<;U6|nm*#W(SlA=3<=urs=^&~iT>Jn-% z&;Xt=(^Zer)dp8iYf9$4@uSeSlgUQh^i2B%#V?_@s|Dm&{4NwJPj^n#NYKAh&nrJg$=6uk^iWaW(=|H_VryQg z+E}Ggx|-D#L=U$Ugfwuyp(XXifFvl5m}0Haw|$GX!Wn+Kyn5fl8%cOEiVnRP7-$RS zeK2xT5$J^HRRxMJ()A@xo1>2g6$XaxU@QLa5H9@M8d%K3Qbootr`%ZdzEK5OD*5nC zY&<&yeth~nu!h52Lt&*91^o0C9xD>C@-l5nx;1N#qwt$%G_;M_%=*MW?c1sGMff#^F5{&_JJf*2*a{EEft9!#$@=!k7zd6L*vh!60A^C# zL#E|0Io$T|-_KJ66LJ(GbExj0D?bO$frci9-)GRbzpA|9i|jMbe;n8f^RZ(?LWx?G z)?C_Ku_>!!2c(V&33GyA!vVF>5pWvP9<2pHQK%nwa(1Yu_s{vz_2>NVjO)rsR>0{< z)Ii}ExFk5XI9%Jm&F_5svy8zbJ@)s-f9YR@&zQV43fU-+BCwZz0y95`k56BICjEcT zhak^;$`NGPAHjBSinm7FG`fdi!Ylw9uLJJ^!p8>dgv3Mik|?*s2t}eKShOG2wISbs zTJiy!hG3sEs{5yZ{iK}FsbGJ6&sCG3B4U4h{CB?5)MnGS!jGta)I||c2@=8(>ht~G z(d*5Yj~gTN5_BCL`X6lOyDmXU#}pR!=yQkG_px8NY1Et%FGtN8h|l!zx39ZDTa7sQ zqSsHW82k0UeE&@<#?r4kcpWm;LVXzWcc*Tcepg__aD1kJ`MSUE{#?N)_xG`TFKbua z{JbBTrczQms0Rro{MYw1(#vhgIBrPW(%2U7T2xEe=->V z#y2V&YX>k~7ovu4wi(ShjJLJ_H*fJy`licTGS9f}{hOMs0ECLb=ZUJP$p+IsetcB) 
z|LIy9OnDUmNl}sCtRHpE)nTNEtK`UIoPWQ4{q6Iv6Ik&F1hG});5u(PprZBg@|!Y; zSKj2oXZrWs*WW(N{^KV955B?K4w|OpA!_;GzZj5LLb4+J?_7+JyHdCq6^b~nWMTB5 z`Zw47DSTYk{C{))eaHXx>u;aiIQ9CEyTQ=k!`UEzeCOZz#;x*#VGPDL70BJ!F645> z1z+=ri|@Jw5soiwr!z)g<~}>9nfq*Ce5QW~c7NUdIe8%Yw|aau4aV*U6>nu_?cer? zUfdC2vI;^MzOz!ez7Nmc-#4SCPMx>uviAH?cnOZ9Y4J_{~Pme_I4jPO>*Fev7>7smw-5e+G=^Whezmo zM)Ks95H}9^6uZobxB++xrKhDzuBpCC49u zw}^J#Cb-D?(-kV~0BVo}nXqTHNezh>y&l!Ue`Tsj3SvMG#4nL3D}+&y14O3eCdz6> zIf-HkkdqvZ)saI^nr{6NRG_G>to>qvohl_X0wm4{yt~OS2ABrTcmZTU0lsLCPcx|r zB4Gv)AOV*y-$gBCpMn)n6PanuaBAI36QiM-i46-Aq&^Yg53GenAlHZ$Kp8Jtr2l>0 z;ES85R(Zue>~40!o)n=hdTn){cwKyzk6Sd2MNJo3{LocZY3(E~7cT9jE7kz?4=xJ_ zAY6w`Z9>OE9ss4lLZIdz&UJlClYl7R$HEU)HNnE->nQAkXc>}*`cX|uh(BFdS~L_0 z1VMd;mqo9QYA}22efRm7$t6L73Jcb)W|J)tw?-Fgo6BA~e$q*Eytj(){$gL>1>IsX zwA-euPP2n>H(!GkNTT6Lig*i8iY1Y4k5M%1*r=x!oxvC!TqeY#T#xtobJS$9gTN)4 zVNJF_=j(Z)0s>BodaeX=P7VlLM-bldcvDsbC>%C*8x#leB-cm08QjBSx@33SL7Ebf zQ~#cVXn=c4_$I*tKq1ezL`B+dAHry~eX%59mM-^us%FLkrMfAPg01iGcJYHRvJat^ zU*ENM%^J}wtuZ;eTXGOuZEZhnX;hWF^H*Pn5Qkl{^rDZq^c&2aRiu zFBr-rk?*Zb(Llsrf`+hhSkKrNzhSX69b`eDT8}KhDf+v!?6tP=WC%VCJJPT%97udl zNk5-lz%1|ZO9R_b+y!l93`FnO91zE3-mwTV_rPByi=P1gHp^3mmxhzG*wFL(GGvaN z4{i}e^+>rlN^O7$N{oo8$J<=KY#H?z_!)q4ebFa`5CZ8PaJm+G2*q7K2$87&0^lUE z(;vJASc(Wpt=c2vaXV?)6*eCuJv1gCuFe{0f32y>nDxk=cO#$Wpo(V~1923I9{6ArWA(1IvHDXZ)8fYKIaUx|P%6~2!r?AY->C*-NZlB9# z&-eS&Nb)hzFhHU{l``maA3={EQ&Caj6fJ@SfI=`x@eJYJYHV9}5**~N0I0K45wcR2 zo5iUiv(UX2-rrlbQu8Gv3MPQUDnL-@4&P9a3Jwu8y6pQ$#!5(3XaJA&^fkDUl&Xa{1PKj{raCWvhStf=c^~=w6TXDO2 z{iT`Kntr(w=tM2AR(bK%XahRc19pTQXr>8Rb4l~{cpRr0y?^a_ZljMn_8%H`!a92N z=#H~ZgaDXnSfWy-7?5f>Yz{(2!ejxy7@l+ieK1lY)ZjTs9m$(pS!*#-?n_s7duz!= zDMztaU1y}9hx8v(SC8TZ#Y5DdzP#HC-q%hjMf~6O6uX4-kRjvxBy3oGf5Cx6E7$MR z%YsRCWVH6F%Zhp_%fjs}K3hD!k;+a84cXIIKs_<4qsGw7!nE-9^;vqtmVUDamom?_ zS-+ItYmsH?GjH_a`K1P95;OlUy}LkfcB$bckL4SEj=rq9yI^dGMV8v>#BjmgO6IB$ z%R?SiZbIhy$tNLeU_9RBbA7Xqd7kRA+Y6p-@_8ImztIQS_30BQHfWZ#URPY+{Pj=F z8yjpPeAIt*soMJ_A+v!(%Z+^St-K=^!e=eAbno-7^2%BaTu96?VSUfmUNj8DK 
zQW)z5dZ9<)53qlJG~czCbdn^Au`*7D`!Wpc036hr>Ri7_P^MX%E)#}A=pwg+}cZpNt1MZ4+E>L2hIDK+Y5?Dt9ybW3* zHKX%_&BWLZ6CB5OOJ9NoT@QrfiW02acf-#IfcOsv&&py$)7E7|tY1leVR&Fr5R*mk zzX_i2VaQr$jP0z+_N*|%}k;A z8i6sQ?p7H}TSBf$&(G(2HSBlKoUB6CViai8m3+hCpMb|-yI4Pj{Carf7?MHra-e+z zsvd`t4YbSRh|ZhiVKG7-8=ulN1*3-c%yqLhA?__U6xzD$NLIAcRv;Vfq5We)IUc5l z`@Ur$m;K+rdlQ)|?M-mq5rL9|34c)HO-6#X2ChzywKIM+Vb}@K&9NSlrDx$<;Elry zfLkd!PXVY{{3$K49a<*J%;%&C`kHRDwC!^#X^SGKnK9%&HNmw8h2t-q&yS8NTqW^1 z)57pr7uN#$thttprJ~*CaqL+9E?KDB0kT|H0*@eEA_dtN$sFo!utmM84%fSu*xH3{ zUb?6lOC|!@ML#~~jAjS72${Wn1d^deuN?SYRM#2It9c=#J^B=78Jqc`ph@U#1muX* zr|McRYAT9sc+tX$0~UEY#pXy0=P*SX8z%Ut!<1eOv*JFbNguqJ?HvZoT(#ACs7Kd# zyP)A0Kmuu!G~QjK-&u$d(v) zSb~{C{QUC3?B_nP227^A%Q%Y=W*TT$OtViSKt>F&0q=bky46`M(w2k2j*H%j&73So zOd*U->^oqnl>~)ty+^90R_G#9;pDALBvl3Onoj7Up%d5*pL*5B@%-At$0(3i*71El zNCDv0RpvF)09yGRUr*s^aNk9Z3jLat(VYim-Z!x*Xjt9+*OJN#G%~7OW>JHparSkkkO; z`115U06TTa4wk6&u)7A24){>b6O`Ox1H0|+1-y_F7*J`yqj_6lHl)pjA0e>uW^b%{#HB?ev>jk@u1P6@@dU^nmVyy{YhOG%OF?;)rX zLAi#uhsH6!sWyXUkkroP*nT>g|IUX7PZ@wTY+-faH1XZo3}H3j@R@I)+nO#L+KukV zdW1TOteKD-DZrHR2*YAXMZ}trxeh=S5E!>YTurb_s@VCS6hi?_6TKi_G$!>QBulsj zHZN}XhjK@|9&(g(6r_gYo$CErN$t=WK$!8a`_WqgEFIQSw$2sf3y`dZp>CX2;(%v_ zVl4=YC9N_zTOn+5q0cm z%V&}$YD*E%i zd~%SXKTB9x;BVx|$&`*{06F~YS9Hfu(YGGg;wv2Y`)_pP)tt_MG-K7^q0B#Sg&2Xb zc443AGtf=Ltp#wc0Dc&mXFRJALu@mI(N@Vl@W*RraKa#e{5J&V$o>cziADxf=bmyN z9S%N*UjI*zj_Z=bIHTh_iN`sV3#I9w9slRLs>f@Syq%IqdVKsl-*BPvK7n&dY@DNPN)SA_`|Cn z`cL8bp?7*bc_<*i!tur*o4AKf=6k_`;~|?FH3}QbfImB&Mt)hj!s=jI7zre#5aLf zG~}pi!JqMa-K{VOQrrn-L9YRYk)jaNOpLN~ChQ(qf(BXw-I#H`eupvaU~?3OE7CeX z_}nQ0pf?8Z0KGWD!;}a{KpE_Am?d7LltSGM61Z6q-OoLZRCghO6TpR&NQnt#i!&sm zpn;}~h;M(n3;_!hvpjgEL!dILrIb&aLxSPW&~Zqs*O0S={tjn2AUR)13yX4@B(4;l zOMqBPbkFzdIs*{zJ}?|)9j7Sa9dI5I;o18FoIJoM7I2TihAc(ge z79u@=p$e03X8vp@hS!XNSpdrDILkH?Q+CEDlwir-2A)cBx8r-a_8>($UDiy;E)T07FVi_l^d0?rl# zaUct0(C~U5a1^#@Z-BbLHsy0%-IMrGur@PUbINl~l4p<82P5EXE_0Dl zE2U(PP9%jK#9lN5(e53&dC=-!8s4!G48XJP>AX)8i$<}79~?3i8nc||MUUI0-A-7F 
z2;|tD6ya2OBM!YICBC;HiHa~&N}^QCz`0eu{nHjFCeVkB#9R;$>qiHZM+U-k!}T>w zN5~pBO!+zUP%#I535D^Zo6#bouim_5#LY1D3TBNe44D}ir}^o9e@5Qa-w@{L-Y1h2 zyJOMZxes5+&UYS?iT!oXQma|RHo2PQ}%o*x^A6Br4l&kf0as&ixzCLH6k1m!N1oX12Z|@@gN!E( zdjwoPMI^$H4`Rf!0tuz`mwP7l4F2tF4iJ1^1crhUb}zz#b6+hkMPPY-7fqqQ<3KqB zR-JDQ^|rxe3{=&-QHo+C;5(Jj?L{HY5kMFpG9ZJXCN{xxPpOZ&hTU)`i)5=D6KJ~! zVmUe%p|eD4$<-Kv`V)>2caRDEMJu4XdMgKZC112TIdJC~1_a&>iL%Hs(Mo&sbaii8 z%Y$5;v~%J?wS^;BRWcV7!)K6N7qn*Lt_IR%b}e1H)K6zCbb@c{GQ3}(PEgXUiw5v3 z-V8ukE=qU<1cN)brTD5mZBaT^Jb&?J+OO@aDRwMZ0}ai5KqeKcN&*akSUWh9nFW`6twmXYcXitfFO>>9K6!w)9@#&@22M#`XQO#vPAiY~~1g@Z_sd|Fz?KADyPM+;2bL^ekP1){NIr;fAI730N z>p%q%bbRP_poKKV?_fI}2?%yr$+p34CU}yKt`_^@K>_fl6$)8(I8=Q)p-6s-26(L# zil^kI+FE6D2cQOGvbsK(caaz4PCYsN5J%250bB_q+U6Kr*m16T&oes83XMq>7zrm@3+3P8Np79N3RCx&W+bb}zJ;y%o4g_A!M1CFH(AU%)n zk}iG6ZgA4BKS85)ueJNJys~s+EqKovoy$~lFo_J5@_SKVuP|@S_0?EwY(rkt;jUW^ z?Phu>d^TpB|0Y=bnRx95&>-DBa#GQ2vuPr%F(9ymB|z@YIu5$$9?sO{iax%2%UP4l*&it0WrXsNE=k zjfx*7G~)cbQNISRMoKlB1^dvCkF%pdfs7eFdUHp)onQ(A#^ArXEtLv3LbtvYNN8&p z1p5-cp+L|bV&d^_gdkj^Ljb~>3n+G~wQ7I>l2TY`vvmB2OZ|WRXwX&k!;x(h3K~-< zl(#j8g39Z=z*%KnzClhjFag8QM?AqLzdnZt^pw**%>|g9l)6Gq|#}C_( zKWgzrp4%_yjkX$R+pquf31@C?KR01{$5r?9>)fUHiqyU48hg+68voHrbGS_>W-v@| zGPwHi@(1lXae&eKS>V+9{ZSi&>!)>e=#DE1uiIT8T_bL-I;Q4=u(ULsHmw@b)}4)A zow10D=V!GzrlR`U=~hX$8<6O$+4yz%PEaLwAu1_19?7`-+hHj|BEuT1 zE=(5SmXoYg2K|Jm#(Qkp5K6vn^vRQq(cX5=QQ;xX~$`IHvAddU& z92qA%JPPi2#8J4Nhk&K zYoe}KDI}}p*$f+ z#q&C}BaVoBnMTJOg(;(a22n91J5EhmIe@qgI6%X8lwGYk9|?v2I{Yc|-1Qn7AtEu! 
zOQ5+KzT58hpc$`&PARFFn1qu9v8t-7e_CX0c=&CaA`0V1zcyJu4<%3uWX=aOL6ejB zmHXK}HF3m@LDT5Vw;?GCgm6X&gFnwODQS=_{4o5$>Jb3_d&EnZd3+u#G4<|bdS!T0 z@zRH)Mtt7E`D3;p=piOnTM@4|4Npi3XoZ4`W_W%c(WGHiI!f?LM_RwZL&q`A(A%Eu>1L$vizhi=-^U@H)P^oNnr@QhY(K>x(rp4G5=-vVFjs z;TJ^B9ZGEDeBVPCwXckKpwi#6D#RItyVnqm@s%g#)>3RS};5gCSaz>Y`w$Un&JXkRXOV*{guM9p+O&@ zLmUwBYqYQ05>Y;MY@vBI1_w+XSW8RG7_g4y+Cz&1>aN$ ze|it%WQ0tUYoK)^=%$6w?E@)|b7f7hEwSvc7k0fEJ;unvc;TMACijqkeB-1g(0~_A zY3W>qhpnPK>udcOU5kHtHx3$=CN|iO;$Ea5tGPSP0P9W8Kd)g2e`Z`+48saB%RgRE z-sn0RN`HLFYXv}#zeI56%#|i4l9p$<_NV&#@>IRTeed@Qnkwy{_x;;4*qRR&^_0TY zUGmSa4dm=ed*tQ!$lq+3$2ct0Cxri4&&{ZNZ>kvT-5bo@-~T`V#(`ByLl~}B?2{)e z>;^HI)0lXhV}AWMwa~d+j@#2z+3Hx;_PMIr{?)tovRMP!qS+0D88y8h!TV;I1JvV# z^Y-}nuW-wH6t+&Ab77*QM2(D2{`SHwOVML6_MiIr@vkHWkUK&9MR7U4?#;AS?*plg zt0Vb!!Ab0!@yE6BkHztiZ+u-XhMixS^pZkCM}GT~cj7Ec?XGd}pB!5vl2$(3BVE8h zuf|UDgrT;jCPTtQqzBF$Fi*cXM8_}6x$k1TokH$=3zPfblU%Tx))gY12}=6pe2HGH<%x7gcxrj*&WnbKZNmi>aTAzeqrtl z>jLA%Ox3(fjTjqei-;&_YhzpG`ns+D>{b48pZ^=*@IrH2x}5)u{f{G7E-8BtGkeFi zJ~th7djc$;&HtkA=iE2$3$I?evQPEpJP-2x0rLD6cF=0@ajE{|pa?4M&ybgxut0Pr z8k%#=_n|a$ z(6a7xV&1&J%4T8l?(g^SEZ#Mj;ZlEwk;9nzaRD4T(Av*BrG)RDi zg@q$*A0QrwV(}<6XPEbV0fD82F!_m-WET~)D%{Bv#cD3}coxb><#27~-Li<+O08mW zkFG52_zVSgwx6F~^g3bWr~zP5K~NeA_m3YRN^*Mk6~oNG^z2_8aD>vnXhacU~ba9qDc|K4vB>G6HIbqBg*X|O`&Qm2%w}NsvV&pVFy92 z51dyE?Pf8s(klpxpyVz|NlA*gqm(0PwIUa{_EN=LzRXndqzF>7$l6k!jG@yhuh!>TA6h;922s!zj@*@ zD^SlmNBk2|*J+*$Pm#9%=y93rH>A&_PdG0+b|MCav)GRcRCQyRGFAtcTExX=#h+y_ z`I{^0B&QVTRA}8NQ};pTI|@bT^jgeFM6?SnB-=ts~Hjkc1%>*6*)f+*1s)5cxeA>ye+%aifb6M7 z`>2M^gUOjOjR=LlB7JTzb#gj_Kn?GL_DO32~&g3|Ok~EjDiVtX3m)JcgBL(uGPOEiJybh9}P;xh-6mK

`YBf zQ!5coGNvpyC#MKaPNW;sS+`qB#F^oy z``D$s(W+P2LCHJO<^cqc2?!;j;X!!ZRMB$Vz~4ZrQAp%izOPyfH`og4mCsw<+w5g1 zhYI}+pUjONfdwZYXwQwg*c{Q)-1CjQ9&z{H5{`#%H5+?W?T%?xX!;^$6e%WZnABVB zT}$2k)9>G}gVMI7yz|3CBspHlm5hsz-&YEj!y?~d^ytz1(7$`cUcxF8@Q%Mt*`5KL zU9A?n>}KOA0@}aqZ&Vb&T;SW)#1tBy#1pS}+DKb{qD|R* zdE%$SSrx{YEcNJMJTYe6xEgdDo!!OzPb!DwD@?p-Mx$AWCHi37-3Hp)+UHP05bukM zD>F_M=|VTd=Xq#>?0_bGQdt@RR`rW%?VbxicA3!2Ee9rRc&GbD~T%?SHI{VJ@h}Te+oJk`pK0!145|*`CS5J64}q zB#hm^|LoOxY4MmTK-K^?2@46W)6mdx+gzq!@UgjzcfQMRl}q7UzkKvAIeq^8#l3sS z-%CGZTo~qEg8Ht?Y_-yG-uS*+G1~nym=$x+`PQq`_ZEv=o?$b zjSQI?H6jKD=&g==#CBS5QhafB`fiU6icv?zFHbMN>#a4~Y||h9BP7s8acsK8g>~M+ z=N;>Ww7t|DtT7Ad+&S%fmrI{82wfLv@@LPY4k`RXDY)YMK9AG0IXMeq@=_j|85zoi zKRG`~eh5VlfG&PllUG841#I?T{rhlH$_5Pwj^MR^-8CLl2q&uTL|}`V-%>VMZ8M(r zXR?NE63-bFkTQDh1I?0{U3=Y@Gs;f-`_E<(0+j8^pr%OpZNX?v0*mR#y=`pC@;L5f zk42$ffy-s>IT=NpYI>-8Ys!wsrE(Ev!t0T&7Io$_d$48Y#moavbntLb6Qs< zmIhoQ7HQPH;0~F#MX0j;b04puRu?8cHQ)r+zO9e^A?{VA} z?^@HJ(=f&&QO*R3Vz%y8;!YRVEp-u?xcFWHTll=w{`*y5#tv6pe$OrS6z^+>`_Nyv zHf}A3;n?t_pKc}Vf{}I|V&rzWT`Mn=K4hd#PEM2$kOSCc356DZyb__DnVnq#SZJS{ z3e>UQ81lZ^O@%E65R0_VSFc`W2T@42PP`gonIhgSe1AmAGaZ3paZlSk7fG`vM?^W5E%iz#Z1va40 zGp3tCfbVOxI~@cDbfNLvtV&ZynJsVwS?z-N!u`RZp8Mok=2W#AG{t%M=hE`JiJ7BEKN_(P$)9QIekIOb2ByuF$4>CfY|Y3(oirO;fczk+_(mF z7lvDwd@#KRDu|LE*g;?kiljr!8)I9>gps*HVk<}~Ar|uMqO5yspqV%mrJka0qa3wQ z@M!_5?R*R8UrFW+|FW=}=&~@iQnOAPJBsXad8+oZAG!+^y(`|W=D%+H41ejTicV~}_0fG@v5&kYI43yh_i)bsH{wn!PoeBWzDKJLLd%&# z*ea=*Lg|tpR>LXGPM#c9wpLLwdV3jcZevHS>kQ1!&GpKwiL`!>VHp)4w72@HSDtXqGd@8R6#v?E$ZA7@gaXu|F` zGqv+2COFXZF)EN&M4pteSB%Nwih5&t;(Hg z_bJPj>mn@u=BSmVXTX~39 z9HnPlq@Ixb8Ij1q7!{3-jMzbozPuPgu7V;U#}#LxfH6a~9X!7$nDk3vT7pWnwU?)7 z?eY1!`G}I+n}E2i$U3NytRBF^Xp1cBdF^Cu!EV3C09N{oq7did&D=Tttemr>XGu1y z*jj9|B}g7C52(c44XuXwy?gtR8VUvkuk^}?gJ$>!z|2QBOq|>>!Z0f z@HCBkT}pc^4U%O&y4H9j67`M>#u%y~s+Q(Q%`~_nBSC90e^F<%hg8k+qema%dPQMQ zix!z0!03?&jYb0l+l9NP2oB?Y!*swRQ8@LE$8N}@|MB5EiKk&lczvtATYH^MC>;S$ zFlsp!9`o{wN;6ZhhzE^r*^O-_pPW&=zr}o7%vK9oTr&ytUd&62`;L$axOj0Y&Ob5; 
zKTW2j)N@&{V>FUJ6!4QlzrE`J0V7i(pRo z+x@AQhL?#$gH%;r9T66`3YI7p^nkXR)6Tp`HzOCN$&cA425?*)AlVyi>&tL);cVE0 zfHIP-z+Y{E_$4(k2qW4S_2^y^K{xTazeXn74U)mFtXZR^qycgkbKq)UU-v{kRtInV zh#Ty@K^UfmSlCK2%QA`d>V7{iID=94$~aza`OKA8B=MCRIVDJUb3Z`x>c4SUdm!mjR(QJww zrJQT=`zBmB?SUS1z_B$$wfn94HTAM0kYHI;uYdOhi_#Xx@E$KCXf);2LVK+t*MJk^ z3N$C4nYp>e*kZ0=b{t$dBp)CwN=d4gi8S+P4K2Mcwb-bbue}$QTogNsLnFF)JIFp7 zFdB2UdLpI!?jPIhiW{3Q>6~~2Xb|e`S_)7{Ph(yh(|V@7v4o_oXxTDr-W^C$>-AEK zt5*4owqg$ZdQIK!2TDIRgq1F9uHN{bRyHK5oc0hI$=f7bRZj}QQ2!~ z&z~1?gun*Gao83pE;Qi)NC5h1h~-10GkPKLyQ@!1!!|z zK*V~yvga^{dsePH@o{X}qX5j^boq6gy75V(#i5tW03W0vlhRPz%U!a1Jf&pQS(chk zZx=wuRZzwJ*>PefkC;#c%r8$_g8HG!quKAtNY)hE=WBDm&;dnUdq;V z-^5^MIQAAw0ygXjJ+&FLl-yK$1Jofi4Rcg*HlbmbsyeM&hzoQ_>I01U#X_OJQ%e(( zgAlPIVkBdtfsT$lx-<@g11v2o^UnwlEEP-^u*@%`cn!Ae-jiWii2=C9_}qV=C&=&) zhJJv_!U|bw^97QLhzC6ga@@y50(k#$h@G}51-9AFc_kFxexkk1oAPJrCS!>ShkxPq z#ye`>$om;(sxbhWI8hOaUfd^x#~qIx0h6~HfQ{-Qxx{v#`}$<73mBBETq)CY^)T|4 zPvVjT4vvAW#thmo8ahmkxu0#ZHiV+5yAxabr(+>)gvod!mf}B_*_F z%AP|R?%5futY`uK^%u70!M{b_8f)&H%ctFm*qxx&+xvMR&)0j=+y= zf?8ID9W}Uj?_O43o+cmy&%9~1)Ei>ViF{yh>P6u1xRNGyz&iVA2O#w_bsCg8iyKrZ zVF_(@3d9){kEwiS5W{U#VVEdO#nJI7?PfDYB4v&V2|-0L>DRUBlxtfkR8j zVykIr{2ig%YYUWD&d|TJvo1W8*O?If!1V)8-bLD(-kieWC z3Ha!_bHDGA@ZhWiuxWJ-x>axwG;=TjOPVI;k+K~Ny3Q{>PL!g(-L!>!z8}}=g18|V z_W+dOR>x7CHMGXzehhpri34O_cOAE6UmDxQsWSacSNR$J9ZBw18%x1vb3V#O#%&P_ zzy73l)s0^JiK~GKelpaJ!-~Rtz9J*Q8+!sZL2Ca}HJFO}w3XlL{}YV`zs%kAGppF3 z8NZ?QNIQ=oKl;asJ?xhA=E}Zod8*L5F#?F24sD~q1G467A^;tIrl(9$JwyD@seq8p8% z0Ak?i*2C~x9G|99^B6Y;zldzSIrK~RkngniO<9B##<=yAY~&s`KF)&Vh3527c5qg9 z_9=_f2>Vildv-XwDZ~N)C-TyzOLMarjLPE^J%hn+b50E4h*?=!%-_XnyH?!f(VfH* z5`E(*9t!5DQ1U+JF5}=a($;8tcl4kT3zc!AMLqhX0+Kt~%Ph;%MgI3rNgKFXwsjN*vuN6%U7DnRq z5(Fnx6d_YcK;TYoua=C#pV7P}9M4-uoG4LBU=3~Ep@oY?`45O!Ny)vSw>U=r1c89B z_CuNt!BYX2N6d`#_7|TwlhE$@?#~t!k|-@*nyIp3&Nn5;Xs3x2%8WQVj=U$FhkxYr z+ib*lF-zW>0AU2%n| z;9BpEGbogfX%Tn{2B@BQo3b5sCB?r>bqFrN2QrNGaAVXvIc0Tq3| 
zUHNVe&D}|}9(D`+eD635cu&NWN-xE|_9y+@N3)OKdubBQi`xfuklc+?6$s^azw;@7%~qK85H;=0`Z-kbZw?9O9m6bTLf#gZYg3=+$f1MD2WwH(g7qPiC>HAxq1hlI;?% zb}i)`Ov6d+M0!mmPVKB3PEDHmkfL`WEu=JvI-p(qsvl%zr%*(NpgTp_@9(&dx4j zMK}yIz)cn4EP0x`D{0=md3)FH*NHPcG7EOCRHWS_( zkOw*4lf-D3&UgM;q=lHnroJupf0y!puG6dkPI@S+zK;U`sW&3N8eq3(`&)po#B`$L z41M`>6XYt-oZjWokUvxBqGyn218xUj{0U%k4DOHyzP=?%hN_Or$zA!VfoZ7?fDt?s zi`}ffyu36VRu@uuF%snD9t*0^tpREkyQr%@?^Yg}h_Efj7VG%izZ?V+EsOnoDtsw) z#V44kluZzgs7}2XFgZtfFi3sO!cZ4KjIGzM_wD(XU4`&yFyU-Gqi1)HaOcLy*13`^ zLw$Fww(26VZ+jWqxfno9t2m~t3AqjsPe(wE&&k&-k*YzIk2peQDmt;^)(5vo z#Ql@*mhHmuhJB@Epv3hti3cVv#2l!~vzS;S zn(Pb-ir|6;iN$31BRxt#Vw4Idrjr>;%DyRpieGKS*@VUC!iUm7sV2uqv+^wISimrg z^Qo5mQrJBjmO7ph0lIc}zH#xDoZ^6p$53V;t8v|h|Ic=isn0;YZqe4T`V0bJ+`S#( z+rCnOr4c)GX*xH?ld>t#G5NWqs;cTFv8@N7$sk)S<~rzl&h^X#*eB|du3F??)P7BN}j3%1}G7k7_!ORrpqcGOI!b5)Iujjp0~uj+sL1?@U+K$jX0 z6APfbiF5H! zf-~JpH2PR(XJ)<_8|OOZTchxk^K@~1;V2Tt?~8q|fa#4Gs2kyie0y$73_8$(DWx@c z5`yMnbnf5QcaOYhBSzRjs{dD6?3cS?DQbK>UxP+|ijgQQtIZE%umvfuZ9DsTvwLf? zRysBGsSy3mz6l-G&4+>*xra>5VQ=AheFk1HW`QLx-K_>pvKx>$8Horm37J~2Qb8Y( zaCw+td`QY}#c0oTULDP)sOc1Gg;}qu>M9u`kW14u%lt>Dt-KWSwD`2Yzd>+k$fb~_Ah~U> zMAhcYEI1%8e$Rijndv99i2T#Vg=1WlEMvo4oHr+zn~4>BE6L1VWN)sjt=eDJMd=IQ zEpOBH1s7bDEC!oNjR8Zx@{TviXmv~^w1^3GP*iNLx$Av^-FWq)8O4PXW)Twi2gzvH&4GHi4*b&aw*~-LQ!rv3q&8;Im$~EEZIbj} z*tJdI-i5A8X`8eQ?~27k)e?%cHrK0MT^to6+3SQk^;KxS{mNt1-Ybrezku7vVk=3! 
z=tnka^vkwxmDKangl=}0fp!w|*$4Q+S}yk#iRa8(VwfFj{KkEm1+W7tt~;z;zH`>k z|BIY;1H>@DlJ#V~D z+5C&M{-u4PP8vvEize^nQKK$^dyhmJN3Lfq(?D3;wWoaYNdfrk#wRQFSP+_%Ac&?4 zpF)3y-F>L#{H~9t*S+S)(FD*ZFNwH`G}}* zOnxzdx;gjUU18YYD5r~N(gn4&iGh??1qy4+Hx8cYvgh$zJy4OL&{wUQi<1ha$I!Q$ zy9Zt(kII1Z20<3QYTNQB3VTS~GxtvUAM_K27yM)ATe;9F?pp}L(051S$lZC{aBDc(p-^2@;>{#a~maiLI0)}7t# z9^UpC0ITMca-^H4#y-f-$%(dhKbi-9AFprx6ydz}1-|Hg!2895XFPa)@JQEJ?;J=7 znKdbf>+GIx0`F&~IoN8PuU!(lKV9bP>-+{FXH@wFt`IQsYfWk!;$kPtz!*g=Y4nHNEetW*)*}y z7?pq&X6a0_aROR_q{#p*0M;Yu59=+2k$+`mRa)6x_|ZEJRQmv*VC=u@+jw`UrDdrG zVCCra=|Q=y5#u*5k=Y|8t4VGn;@Hd z6xtdqRvg5@HYrapuR4h7)S*S9BDEu}tF4O$BRxYn#MfRMRm6}5BPA~c1uY{^3P>P- z_?On5pjA^q#J#%Xy%k8_tp=*UpEo03VvywsJ{g?6A|A0h^eC00kCnpIqB`IC($dn8 zD*&LE$KraGtQT>O=>dnkcPt|&91&(>lUI)D+;b5`2hSR3?z`=f2SKwI|M;Am1ov0cLAbQ=c zmHynW)PmS563mjivi&XOnkt|-n6*G5J{Q^YAr*nfpV9OzmI^{!$v_1cQp4fuNkHc? zu2GbQC+iHTuX8%-7=Nb?IRTqVgP1T_Mclm}a9J*x$%;WD}`1j8BTm4)EKCqX+kx9B^pT$8h(nitA&5HU71 zD4c@#Azj5{3WBOF91>X4f!koBT6SF2fIQ@^xH#jw(_umbxQS-OcqI&P(j@QpqAn*C z0V>#?WTJGqR=um4aaFGm!*gLrpE-Sh)84>J(V*%HvsRDww=HVg&Bgx$Mc2;xRO=eJ!kA-Jk+-k7F-mb* z+i}iH*dL06c~tV@vtJw@V30if@)$%l!@FD2_Ndd~g^TPdir zW=JowL_U*LShR?VM(Jwz?>cFut&{4QKoi91GNP!f!;oRLj_SI)QMhQPm`}iwD~7*( zq1n~g)v<`ALRr^A8F5?sgQ~xvGn~P55}X3EZ&C}>t5&-L$k{q9E%gDujHuykHf@-6b)oX=1!?&j*d6xGcjb zIH*OlB^9+aZOMR%u0Rljlu%$-s~~$;4 zPwRQb&=T)pDyC@s9rlfML~z(tfpw;&5J z3SfTasJzATV_XA^AQff@ozbtW+%ZkwK08Y|ZE=C3SlFl6ed6vrrCu2m)pe|P;oKr; zJjRF~QugbWTqA!LTNw?kgdP~SXNsgCxs6(AFCPG7`Tn|P2*Jj~4wU0!Z^BVNLmQs7 z3li&45D^6m>&xrQ>3l=(32poi`}Z<%&KBdiefedq$`6Me+_i%9o8DH{?=w0M_2g`DuX|{`7v&hG z6KI$m#7Ra0)j&N*nDJ%D8TTIfWAzw+5~{#e;;-iSTsjOZ9mp2!CyrsP`l(##XHQ9X z^+P4AJ^#YkM!ff06Oj4UR3&lf^9nI>{8f6Tk>~&Uw3B_7drEA{?AVev&rd zCNsO|A7F^>56n?Ps#&;bHk*la8h54mnm3Ah;wK%b`YMdGa|jJdMmM` zHrHGa!qUkOLWz~4JBYz^_4x6A95?wCgo}iH^LGF2eMA37OQN{(%WfCHa!oizrlSXx zw61=Qb^yv33V~BK>LZ`g?!-qwF|!hKqVGzFpFI9V4u4%j5g)Evl#+x7J;2Vp)&|f9 zr-g|1`dv3hJk-Clwe-VAXZ^|9aWrt3Br9LUjksJfv>Ot8h`BiA)jw@>ZO2_D|AmPU 
zgpmY|4fMT8?!cWOEeFBMsn%LvG~5a8VmJc-NS}>S^=0F$@N0qjPe$E``S$4j`v0cz z1N~@<_OytjHe}Xe)j&K(pK^H)YkqW}w*p8_9 zTQRw~7CiPacWn@}XlR2GDZ(ODWH<00fI%d+(A0MbrlGo6DhDGBd)+O?RX7%&BjyKO z5-JMWFHLt+D)tT;Giu$t+Vz-0K%|W~EH-qsVV5NPC_I8TVOhQIZ2nz3QNdt(l2Ifk zt01UuJ_>c&rkj0nT+v|%bZJuLvfeMPg_j2N0*^9kKH7!3;N` z`}a87CDtFFXgbi*yyK#!Jyi`BUbqsT;Dq}qwjsL+W@a|FE!02B$j|tq{toqeTbTWS3Ws+C0^Apy04j0>YC8(jQozk71uG7S}#+x|j~rQejAKY8>? zT!>~L;zCZho3~b~HmKqQq;JZVm!CX&0>JHrkv8V+8s8P>3fS)U9|~UR3P7y2N_fOJ zATmwwMk`4y2B?JFh%hKOWMp2%XoP-g9;h@|A*w2{eM;Yo`{Q@=b!ca4X9a`D@ftU2LO4LN8xPH7F+L(LF7EWX za}&PMO7A}@ky>P2%mFzhrv<0mbSc+`tkF<84&zG$FP{T&89840z){t8bcB%_5?d0f zFgsNZ`8u*u?-U-PBlpAMBy}g9!owa2>jhpSy&_{Ay~28}oV{y2;aU%_;vD zF|%Z`hioBjbv!(@fmcAfaXeR8kHL^4u-Fh$Qe+_wGE)Jp9at<$`JmvdVbZoNd?$ga zDMDnx+~=k{mS*V@7m3X_<@Lp5?;mgW2#2C#n71`d`QCFj?uF|h(&;>m`d|*70X==f zfcJ0$uR!zdG@se?d6M4@$m=u{=v8QOBe|(zTLLen{|L_9eTphxMg(=Gz>f=m#)%cN z+p!kiiUlEn5Ycn24Q_QeL*2fwdKV10$Y*+0u^W1f9%)jyc6#mxVh-m^K5k7+Mn;Ou zE4s?C?}y@{RbZourqC>C-t?h*Ae9JUHspnpA-WdAX=F;7D5v&(!>&GPdDQ(0`=Sjh z(fr9p0MWQ~VW6lX>(=oH*&d zQX&-ODil^VNN!XwMfRL?Mw&9W2eE^IyM;$ZtpS>>F6RP`B(eG|6^uLHl6U@DKs)6z zK&iEwP4n~swB??Z%9C!CLCLj*O$mmnO}B@LyYGT`-WHIWg0M?m5grUv3%OkoGPc5V zu$t{I`$3DqoTF}T(F-{FXK*o1a%wH7WGOrZ9hd6bdW2gXvK5_ps7LIFt936Sl@L^* zDWD2$khD;HaFQkWf+uGwM*%{xHSyrKHUz3+!UC){@EoJ0(m)a6%M6EHu6TMZ>}$Ac z>u+^nXHUJqLWso)$=dNMp<;{dT>*YFj)9GruZ6Ah{G6S7E@@JyvIYc`#xLIZdvY z#&P!pY6n{06^z&5xSy-DXyZZyiYt$_f(X?G+U1a*SxJkFCp>RPw>sEztD#}ol`B`C zl8b9#QmTF|j@f$Oc;u(vnz0tt7lhD(&!rfv?i{RwEtrSnPJ!BO#|}uJf7h@OAPX*+ zSQ&^0!jw#W@j#eCDwXx%K_NC9O611wz(Je9WTg&UJ(dib6#&sw>v@T%@1HaRk$b?g z$R+zx-Fn6H;E=JSG0ww9_=aoGl97YmOTT?Xi08c-n^->(~E756ovb7b??-g z4RO$2H$tMKiMEySJfsN7sD;X!_Upq%QYNY`BgAI$MqmtH)^5T*#atlci13Y!aX^Rq zI>UF^cBGd>C>ES+(DVJfj$ZP?kRzvYY0oxz5Bptx@%n8=h&T}2aMM-U@3}gq%25>F z{9K8pGyH7W;0>ZOGFCxMyA-f+0phc1SUpSF<#qw7Ajv{Ss+a{O(2GTW^_w5esvPcD znV;wD9X|!jLZh*3INa>(8{)gY<`K1iQLv07aqXm9=dxJ1Jxw=*f`@gK8UE#2jJUYd zG$s?k^(rzxkHKI9{8>Wc83eHq_-EDHqUEwJ?)m3G<2y3PRxCDLsMA+V 
z)jleGiK>q3|3loHz*D)d@8gR!sWfUpX;2|4MTUf(q=jV6R-{OSp%ltYwVM=b8Ip($ znJOD4WynsYWGhoD63JXqkuiSPy=WiWo%Z>j-}m?b`@QH=-Sga+{d3#N}Sq`PR3@8w2lG zlhA(sY++`h<7l+L@5nXzy`Znov*nDrcK!N!Pfxx835HiRNZXi%s#HXnICiWzVg|!2 z{<>ciCKq~xxdZ25<`HEP3G+NAL6D1NB@g8O+Or87wDVBf}OBL)y?zKP*I({18)2XbN65+IF<92NQJ@UT z`iLi|<{jkP$8~yuAU|Q&-$J|G^@Z{KBajOoZ;Ow;T@$eRu-68f=|v!o_mXWLtv}8+ zT(@SQ)T)}ePdhdLd@+HzaSN?Vz6{B4Y*r%`k9>I{T|I-+uR7LA$7#GSu72Vqqqyu>wR5? zW~RZKHSaU5NQGDYPS1WOd#(iMYm@iN56Ic|Z1PZQM2?*0Fgw*9+b#y86ksJ1&VuIb zGqCQj!zh~GhcNJXfzK%C8=>bqdc`5k{IA;?yl-uG?-7O;IDkJ=Quz^mBfz*IcFJ6k z4GM7UuCR$jqM;Q@V(=`EeJO$GnL-*+M7e;}3Z+I7&txEa(D}7jG?YYcu#L_Uvt~8Q zYO5R7G@#@tf3dO`qCJu}m06W;Ai@>L8!W*+(M0e1C6ZS}mM!m%0My?Bzvh@zlM9&9 zFvIk#%*1dv8bNX7ykT@p2dccbEosOd*3;9NJFn@3PdB2g9gWJslbH>!(eN^k%~H&* zql_u6e3FMgEx0-Q&&st1_3N5{_c{K#UxxDN7q*y|q;{3a;Y3VvP=$#GTJj*ou8>I&-6v*f%fdz(%?3~k!)fbY2nX;UoA%+hI;csvk-AIb zF>VSKyMZ}NG-M4h??LLneLYY5BrUEI=e5ZyAda{tCFmShW?Gja9b-MI(*>o4+T-9n zAv03KiGyiHe`wc`WKIZp5A3`teQm-etVwnQ0MbCkU|(Yr>oQOe)Poz)elOmQ&fjy} z+PbD=zX4#OBP&Gk=#;SUj!%2aw_o&UT$f%litjvzNk80HJ4(n5! 
ze&f+2kte1)lMK1a*Rmy9CAr$YynLJ=JbiO6Ex`R) zU>U6UW@46u$ie`aN>1npK%b^r;5_D_h-OZ^8El5Chrv<}V3y5tsN_(svjR(mL}p(( zRC{^nkNGVPq)b-Fsl8d&kR6x=A={q~-AtSqH<=~_*s{|kZF8K^9Kj_Rse*gv@kK)h zK_>8^<3Vxhagx|qi16yu*nD?KYN`yA#N!374XNXlWB`QgVb)&gQT0cGf{y{9dRdhn zxH4ptn^8p=3mB-ZNEVAVPtFa;6RWrTH<7{Njv*sJ!(u3tsr1Jdo%JO##hRq;(H_H8 zlm;8w!L}v&SH0f4NsJvo1E+BSQ@M=+_q70siV*^ev8+Z2u7tV<+%I%7hmBKWVmz0} zgPPAXU&OEDkZD?HGG=TRk|egU+7A)O7nU8={b$x}I7v&I&m|aR%&+@;4^mVn$bm0H ze#k|wH^K*@(ilZ-Zi+P*@bEF+&^N>m*J zaJP&{O!z{Ojlar2Bm>(Ms(d4fe?bWoj-zX+Z1q6RXTLjLdX7BcL->fNp)(!l;+7|o zQxA&RIP+VK%G4i<*8T|dUw0wuwp~LwX)U9%o`J%PP00dtfTh>#xr#~rm&4Ua{h;U) zS>Fd)^kU>WS^-1Sqp;DVaU(@ZMwtYg30Z)K#>15No7~B;XYvA#r=*T(<}JIR_`Am4 z>ebK_;KouSp>ca~ON23m2uTTqMLN%cn!wapHDrIT2)V|igYaSP$srFUXu3gA)Zv9b zr)hMuw8=hs$Z}?E%POHBoAdyu1AyW`VEf2;|4|=vXa{PcL8ur=yTYMQqJd3`fc6TC zAH_5j@Z=JX@qZe`-xDx|$rECi}F5hn#qxMy`1->ryCqWCsg zP6uMthvY|3f1~CD&Hfr6d^Nu;V0!RK zhWEz*wCj8tphE)cK^BRM%`EuHo1>5EdesQ*c!;;9Qh33G*3*HkSuYt z_hSn@2fqPBU_H?bXow)`3MDcWnJQq#K#X{)S!QJ^Lc0MD2g&R+&8YibOr2kPkKN_1 zLtbIM8WoUzD-=uGUz|NXq!ZwA`_}^}cQ+1A18}BwOXsnYp z{c7Fc2q7wyhggU+q*YSc+U0M5BJljf)yZx)j^huQ#{}jrb8rmDT_a6R$EiS*%NKJo z9J$8+GL>WbkLa*}lX$Mo^^JlZ06?n+;F}DHk)Vm8i<}*t>dLjuAvY)}6+44w7)XnZ z#v-xPH|1a%tp_$)2rVqK7}oM1Qgw!%v~1lGi+qqOoTnHuT*!o~Om2ICxqhb=A}gvw z-wMaNnKao(b{GJTpFa>Ea;KCgPb&!Ybq8`U*ZnS;I5YZ|tT?UGIx1QqFBSzd(eDzE zxjfE@{6tIaM>j+)EoD!X3EA-6t~$mKo5DXTiy|Ph8H;>99-RgNYuY$!N>JaDAThXP zIvY0z(2yaCk)oy_Hw((8%ZYs^hXHH;2))#-rnX$nLRNo#SoissF%MW7WxR}h%i#X` z;EO}Bk684-k!ILI`i~gc*ymKVLKw;JKCeLxz`?dzCLl`^N z?Bi*5}FH8s;!dX&s z*q?6LXwuhpPAC>DmCEy(=Rb#^5+{`dAtpHM2LJ*M{;InFafb+``X48~ZF7R^N6kZG z1;b$m&j%Qr{SW?mEUuQ#eU13MKuBmE3sA+(kRb|EN)9LNobG~HK^-dGWP7^`a88&k zvkmc_4C(DVbBN5!1oZw5`ePf-{D7`e<&dlgm>NCz7!pUWyBe5)7eaNyxYT-o8k2p% zQLXfshSCp0$k-f{?1ShxA~(jFsR1L2v$A8jxnb5onmH>!$fyog+nTUNU~hmN(DM0A z*shdfPVU{qVDc9P;cy94uKM(Hf%B6YOTx^D`pPa<{0yv`MLK0#{$7O(*v z4qXTbllG=C1YofOAQMdDOK29g5>N%yh30022!*0G+X@6E4r#^T-s35VJS;oZn8a5u 
z4ejgMNi#sAd)*+6PTq!)I1J|ViQjdGba|f884mC$Gf{0whB;vb9|-B9d+jK1BWU$BrE>r6hVy0_g)?T-PI=yB9Z(fbs_M^y9mXGt@8}li{GxFOK!HP-t>>u1}nN}QZ3ul)3Yh372?b|nbU7$ zwxDv;qR)dm%&L+jgZs5Q`aM@F1wQZ>6eB618?EF3mHOk)Z&(_Hc zSn9lvgds%Q-=_YlbEE#ptb$nuM)W~esKM8F7l!}&Buv+|bmN3*0QOXhO)t!JqJ}?Q zQK1b1pD;**#!0Gu?R?_?jIwe(l#zNa>r66v=`Pxl#G5G z?v7dFCj?G8dB5d5!b+f^@(a09;b-~~+Rrt))1EbKJXbq4c?l1at0Q$JENdp4vw+Fc zt7=G%zSc_LDL{UU361$on)$&N(b_pwXLIzf0J$}I9WdYOpSxhIVX*ovdsd%H?W<^SM>q0iaSRbI$%;hV1Nt{<5B4Ey(+tny zZUo0PU@Mja#AmU1`|#`^Y)OId8y1r4Qc0MW7=BaLi*NqpB#W6Bz{Ddkn5`nRp)%#k&ZR?sK3gK}2O5M5Cd^`met7{PycaL{4s7KoLcK$%Lq{FQDb zl%F$}bcHhY_Yz!{27?UM`LRdeyZbW zRsX}iC1D%qzJ!a5%nAG`fro`01*6IUk@4QkCA52-DAApx6Crm<@NLr?R;K0F4xz8 zQj8YgQXci_O6=iS#f2n$f8%Py)9we~gaIV4>_(kOecc<*4+f3(ql?0d1g!#Z`xhC( zU30yr=ALCF9^j^VC3fF4y?v)A&lR{Zx zw}wC9PZ>V>wqjjECWU~i>jRXy)*eW{i&PE^HTdaj7GRuOrNA;FrTSR` z-w*RUj5C>s0mAWnnsHqIl0yxhXR;PA_tvN5agW(!kgk}N;yC_l)w;SN(y$tr3?w&n+Y*vbkYa*&7d~(H}CxpGS7sq8&NHV}q3>5wGH8-I%KK5Czl;rhi zK|?F3Z!efOcW!RqWN7e#tyk)t2WsJwGNe6M_G#2SAcgpu&w|M`WdHt82^VeA;w1k| z%O8e4(0&?A6GG8e5Mm;tpDk^=`Z2X;FwvYLT&(OKl0{ku%i)l=Vu87;GfhTSWEUBUngVL;nwK16KO<9ljNktuAaKm{; zL8X0*H}8Px*VyQ6hS3sem(G|mL-RPiw!jSBk4{(}W+h@sA&|`QAo#U^V#_!zc*BUb zUacU{zUc0OaqUy|<~Leh017}e-DFnXegmmMlBKqPDu9(utC{ z6`84Hs1FFj=z zn1${yt7seJ8i|ib3gMG-bb`ld#FdvN3r#}{_9tCBvj2N7^PV>_G$gH_FGUJ#ngBNX z>uv|M8el%L;p9G~(wXpxP)qMcu1SrBO$bS_tRc~=CQ#=~K&^-ay&NT+8XW&B-sgDn zN5j1)WZ8pRtDNd-fsxA$vyjBh&_!QoeA`ZY-MSN0qu_2k-fZsg;vUldkc_QB?0=HU zhvDpQG&gBr7J84D5Lp`mI+S7`ngbvfnLBqi*wia)6oxDfU5Ju-f{3{uP_)SBuSh9q zu6zC#MaLb{b7>=kS$dWHvO?W^Zwc)Nfgc6C<;nLXSaGMPW=W0b=e_RT7r+g@1Y-h9 zli@6M?}|PPOhE>5X}Nh6zUEMuvI&iso3oti=b$qp0!J>OQ{suQ=vGiYQe3-qaDtY# zMpC_#39Xm52mE_?VZEUkF9G$%kHred3XBKo3xAy>n+T*)=bS?W%o|A2fmFG`F{Aqc zkNS;!_FTK0olOlqNH@gqd~%W36HTQ|8oY<}l@STZ1NSThB8q{~d9-(-6T}vN!>b## z_6_OXWJyS>`0eJj+zmia${J=@DiAW~Adi79Ezm}bs^A?cJCmc*S&uAOvkuDa zT@zLn%w~8n4s!388+`j1jFyoY+sg6@5104!{3J7TM5Pmo-&x~tjSBQ|f!e1yPTOE8 
z)RFWezIUYmdzqMxWprpqYl<`$i`;Km$|zL_NUX;FAN}J;K4AbW1DVKQNTpO9?$rI?+ek)FpBXyf-P~Kne8}_b6nkN_M4|pe%vx-@!ODVY?hBxyC(Gi znFvy;TR@+mXUSJ_SXXfofv9o*z>+^`ca04INPa|hi7#(#gC zk`7p>7R`Z~)GSSLOw|2 zVJgR&)gD8`X&%is6(^LkW#_R!-iLe3T!ws3_Qz_8sLGb!)zLDJ)=&k}wYJf=`lqcn z_=WzP)$pB*tIhDAMUF!&ixWZ@Gbs$oOgb2L_mbF)7e{YR=Tj5s)SSm&Lc-(o`4S#r^U4|*_E z55h>ic^Zt+iA$l9C4D#q9Mtm24WU9*nH-blA5{sR7(Aq@Q){heGjJ;={JWK$_kDy7 zWP7^`AH|t*XysVJEgj7l_R8@e-~2oUmoe~FDfXjJ{_Usqx6SZ?s$t)sUB!?!W&c42 zM}Olp@qA(*Bpqz`aJvaMp+Gntt*!3Or>!QPCPRiXkO$-h{ynH|1@Q-;47 z_pos;`|bYSwu%v^BaC*{MO5-R%Ho_0=%%s%;4Jw52C@%YoLwp*a#8F<59yT}bIvr5 z$+Yv?e{j3Mj{^S-%V@$oJIH=47(IHM=5e^-{`zYm;&aCm${OiUBks_586?{~TW(b9 z2C(EIEQ5TJtO4z0V4kh1%IKKI%aipYY{lFO+WZ{B<1BE9*$*7`rZa|)^IpqZ=Hj*# z*^JlBl_{&uC?z5z2%|FS9QX{RO%&WFO$x{O)dnI1hW&D+7#JCMPP%LN0a8;UrzI+w zkd)FR6BSh>X=r4T+EuQcXK+5Ugj_$bA9H*jNBLrr*rn4pU&peI#z*_N_cTvT|L9EF z{cDH_OZrZf#v&C5g))G2OfbZi0nbz#=mwE)@Kr-r~ z20AkOLR9*T8XkIN@yEXlgD-5i~DgS0&j##eVX^3`-U5UOy$bmFeGa;Y8qPjr8 z-IcQQ_8X_fZ?B`y0H7mMSs~pvB(swjY7zk&f^4=gL%T7?yBia&6?IirRg`A0UcH*= z1vn4YS=45L*(*v7XKtQa#s1S`U@Hash)+{&0xJ|0pBKp$Pr$Lx@Y1<-=~9rqV-j$! 
zO4q)QkJapHVRYcsSu`97W@H=?XVhS*b;OqWCGb;h_nn4rb*u^x@odzhdB_ zpxXhhDR6UFCt{L}Miu&tsyld4z(H45n+-{xdXDah6@|3xM!-pOLyveA-KKIyy@*Zwu zVOe_2^8HI2cI7EAZ&X!<(8T$ZO-N;&mIMwcqRzHEf*GQ@vKEr}#XgF?!c1Biy&$am zKd&WrZ;QgIE8y;9c#?bcXxaBNS?}MxnHx;W&52Qz+)yq}f9!vhSNny|RB&MzQD~t` z(|?lzEs7IBpHXOCboeE&VUol)H#fHsZ+?p%$UYap?A!vKf{Gk~&P(hskYCAdS3}L7 zTL5IBdR>fWtINxMgYO%a8lRHsu9N*pY%|%Bkevt#5$q_^!^^irQ`2h-AD`39P{%hc zCu}nk;pJOlRqy&9EjJ9>o@sXk+^?DT1UNw)^v3Atk#~123X1Y5I(72oIU#^5531HN z4xaK^l6QZFH4d)b>|IBv!rxB>2!D)tf$jc?YHZ zo|CS@&&BNDC@e~PjeqNWAqsIKSE_b9WU^4oDb%ch|FLX@>#zlJCW>VZ6BsczhMO7v zCjp%ZKRgXS`69ZIJuQ)?LB*a%?m!i5FF_g25_`HLswK0tmLEGx4P)%yfV@55*MaI& zkyCSCKkoW_&GO?ypafmq-~H)b`&(B(15A2g(#<8;Iuk?DlM;ybud~e;BjGx+iM;kj zPV%!5H~gM25ECO0=QA)so$vp$g?HAh5610MnA9nLQv|maR5wf(jC!;hB@jN=A`ax{ z=YKiS9dtk=`*`2p3&>xEB_!V6NjT@G@wxT}tWRUu$yBFPlJsja`O8Qu09HIQLn_x+ zv6!E?1{K?UACWn8G(e_t(8zFJ2-?A0iL1u^Fb2G9=6UkulD0QVB00q=?|+%8V3gf0 zKfxRLv+bLMTseBrBzUE|r`{JFm1DQe`;l~xUWr??{{7cAXhn(Hm&LhgEO6IUiOp^v z<=ybsOmxz^oGB+leT#DJ-Z+}+cV-ww#`TCVi94mOebGbq^4!aAnmyi?fw^**eV_0T zs|wV7AhAzFzbX(x)eahOle*(_>Gvn+y!HO}T9b38bl}ytFF3ScLxr=!_D!H@U7+WK z4y6BXHkiVt(VaEbzCtQ%*NQG%MoxEyh?P{mGYcU%)&|VC9yPKdqiU=dbBHNa&hxcnNm-@Eu4fxO!9IiPXN=hbo4T5 zid%$9Jbu`_o+|nv4ZH$q%_jwu>$}dWjfJ- zw&}eSMcyKl`g2GuXPP#RE)Ov8=*dPRlr_`y!|BDhAM^U3;y#`<#r}48(y*C_j=!uI zEMD6ez9L+#c&*>a8;PH^r+qzj`-JYL@6yCT!`#^jrDRNwjV;V1K{U9%pOPAbef79I zH9c82B>mON?lX}z!TUT@{!g8SAK2?xOKZZPT7z#!%b1|3u|?<&>nCh62AIZ*M3k9g zi9L2Fv;8Ac%x^rwl}&SYd!wM{>;}%Ospa@{^QELpx>M!LhMfrz+g(Kkdvj18BUqIx zboyib_iFvAn0X)pH^8fCqEiSU5U;c1vSo$c9SA!$LBlODY+I4DZQC}B=f7`DZ0DD< zh;(XeY=FQuTRCyc=9C0U1Zs*#?VSxNBek$a@(>12EHKFu`bqp*d!~pZEE2euqhM(I z_AOAnJbaj2aRnw7&{r(_uoswEMGp7@;`SFNUkGu-isOEt3~5A+=hp1b8GNr8n&F1I zOF`h4+yC}NyQ71HLt?wKvNEAd&7T2}bzbi_y0t&5eIG_Z)L+ptB46!#0J1N0=FEwn zIZO$XZ%2-ee_cr>G4E@_ zfty5+^$5cAaOTVeWH3y$W zVk_!S4hCLQlV47A=?{;MrAv$~make>T(z$J5QWIa&inu>!mSV=!&Y=nyD`!H(u#2T z7%zXH+w~^JD#6@;2QmeIN%nJ0t@lz|b99)^iWsj^Yl_!0b{=_oLP^<}-4M$PIULNc 
zx81&4HHX|~1Y>Q!&*0{WLgZzMQ$OPP1c4GU3q;_I|&Fk7F#AH>tEYEwP#5kv}CYpEu_urT9 z9L9l@U@RN6{lRIlbf}QQ{{TQ|2BqGk>Lx2$nSbV-?eYvtmfy`)xTo%b}py3exAxnmo;1C;yOnW;>Z;gw`tlV6|V5OARpQV zaSI5X9Q0MmEtWl!;U?+QF@gZp6kC}|S+7u^QlA;1JhyG4@yh)ZZ6?v8>pW4kEkL?& zHd1WE3LDX2@K1ngz25qx;5g_lesHh2Ru0I~nJ0=x&$BsRl|7~pXxYwcn=xevcvRo;=qKJidy1BV0sD5ePWx1F zC(cxXHTZiY_ZQrGh*Pk$-b7zQ@R15#R=jBK+@6Ii)0PS;4ynKd7lDyDN`DO}5NG(B-I9ZEM=;+iqr{jB@iJ z1-OCxPVY3R#aMd+lGiTsoug#DjJRBm42i8-Gm?LF1NEl? z;&UhH-#7AmsLC}9m!PVjr@DXQp%~m$1_0!VCt|z~o%&JBs#0|j{lVv74Y5Ier>)+9 zwF4@h5QwGwVe6uWOq`{%)T{j$qCU8%W(zz)MC7CkSMXYE+`I2rDq+w|i#}H8v_b!* z0emQZ7ngJ%=oTSwj770Slp6AT4f|zSboLNRP_EYw5x?Kfa(GCD9C{AoUz~t+KvM-& z_Gr>rCxJl0-jM3G@31MS@zrcipv^dx)+_w(#a-wsRiJTA{`y|~80B))G4gk2+MGXZeF!Z$k zAbX3ePoH2q;a~0KdEG!dweo?i}SYxDlqK^oMG3CzZrR`nb~HcbB- z=L0JeaFyu7g{nGLdD@_QDb{=e@`TI%q@b8sz^(KGnF+Q2so63M&C3B!z~BazKz}=B z@N43QoCV48hnt5-;e8;i3G%ufaAW-^4H*%tTqLOuY8}SzT+6|9-0!u!F!E%(#L(e@ zrLP*I#~)qC?-*ZvBM=E?i+gq=eb&xkF?faXK@6P>w&A zs=fDKQ@~_mY;0^}`K`|)H#?z_rJfEghn38u>o=jYiF)wt@_K$I9vN7ylw^Pj#!rIKkdK z)&N2--MjaDU*?uB2>_&P5xjv@ZkCXU5`-Go)LFVnuhHLlD-jia96}>NW@o+6p6h2<5t<_0`R>+pjR7m3{Vx6k9X9Q* znwkQ-XO-j@_$o#h7UnnD-kt2!m zxIkl6uo)|_H#jb6vhBw;hk9i@y92!iM6PVha2lS^zSy`pt1@G{o7WFme{>o9n^_!p zsILD0zONs?2KP8>T)K2rQCLXG#yd97Ra5_ulkV}i+ic_BnTd9-lv=O=-^TUn>g&C9 zGz)Xb4K8|A=XJJLStx?ku zb8*|Bz4ol*=g3w#dValo=cd}{+6zbfpiA4vZ(|P*ps#o-`?PZ>ES# zdD_h>GYt1%xM3Lu=r>0~;wY}xQ&n zh}pIwPt-J_He!NnHnk)8q2X zXxC7W)T#Z0>srO{GM*0p$1RtB!|+K1j`OF~p(BA3L}fV)S0E9I2S#Q>HXStkgSNQL zOi`_xd(^8%+pyY3$Rq9SUsRwW3GuhJghMkbPCMclz)kRr-`fc*DZy&-z(3!fiEqIM z82nW^{(p||3>5BAz958s4UhsH=4>|Hpv?LkAC+!ROGJNh|E>NbHrC+W{@dE7B4E&0 zKN(LE3-<_mQfzekpYO|{-)HeU3K0L5O z5B{yR`e(l3-#*YkRuKP8Kl?@XxV)PemYJ(1*1?K+ zpYS9GNB|W%0p=7_dDad~HL$X>N~~L0yMTd~T0c+m9RlL>&u9tDO&RTh0OKsUIUouF zS9bci$}K3w1dMcti#|xQM%g}!VS`>s5ER#&KVyJvCH}=8l}_j?i9H4>^*BJNZ#1GA znmsH1YsY384nS(`7eQgf$ePf4jgz4zaD#|D3zDoa+|lP4cIG&{%_I9zCCueloR?#> z1jRjOAaD|j*?WpecLT(`KY*3Y0L{AT+Yb?=IQ27F0Q(NQQGoZ-lcVTb&tOD 
z(tEcgQoX>V!pUHFlw?ycbL)*Anqf1H<~#%(IQT0|@NZn)e|+Yve=p)-DCtk+=WjX} zIvJtyT<9=8bF>GpwuWvq48X3{Yfu~+lZ!FYhI66RUcA#I5GESY{}>p;@TA@2-@6=U zJ`}O>PZ*2a%R#+MKUQ(rD6Zazs!aH0AB^f7%YLzM z^snWT=!X@$k8qFXtkJ#r^4FK8dQo1ojw>H0K~h5OF`j-j0nvX*>1T~4o*b`ML2qIB zUp~BwhuBC18e6PLpg|Zqj>Z3{Bo(@zm*EvY(3D{jhYJ)a z`q%z^iW7u$SNeaZG~Ftkocfsck^ct=vjabPpJ$I9ixQrs1o+bB2+tzZXKN)3Q6vL zZtQ{{gc-0`+MX(p-woCf4eg(__QA)cS0VWXpa-x>7?}ai8I5AjJRvYj9@7Iw#Qf=K z+17^a?1}v6(=_o^`-O%yFft#D86UVtXf$WEUzCGDan6ZDS&){GC8r`x#Oh&`qK&NV zi0)HcjZ=abLywpw-E3%RNFpcH^P(2`p@)aElnIPSz8HuR@SCy`kYJ$sm1J2n2)C2P z7@}qCy~!KuFFJqNC+9MIE9`ny-MhFqz18)8l- zZsXM;!MP(i4bWhLa{4@QOrhq_CU9+vyO&~3eLC>GOnM`U zK!cl5gnu|Dw%H-zEbKOY~A}W_5$(P!GL@Xu4l$KF4E_ z%YVAagbuV?D8h88qQA~in*A$2GMQW8^g&ftA#8v-W2s$Fm5A;GUd?qhok=3lUjV-f zA@-|yBQnDv$)a1C9xU$Y*bte0;mC>?AHzw&ibijk6FEOnyA}2xUNHkr=A18^H3+qSjD#x3<&X`)MhF#4PA{FlRy(Sp5*2Ek%bPu z@m0@H80&01{h%5BrVeg25J-@Md!$686#w2 zACt8ld_xU(y#us(1I>Nh*_M_THO~n)#}Q9Sr2ww!bX6MXR( z53H>|^?>*N$gJ}aNj;A@1Wf?&mewVH5GS+QEfs&OXRzk4ph)B&et(R_uh#Uv2HJgUwE zd6k6KX80oX5^hBZPzGxqJmLqq%`-?{1Wywsdp!Z3PoG1$Q3XWIbLNK7U{RT`GAdOS z*ynuS3JoWWr8)0xRt&j*U6KG`0B!fb46Do=y0Cy8r~8;VR1IH`m;#}QXaQfuJOdL~ zeQ;#O>=haLc@Tb}{m+EJ>6M=(OCudoV)1CQwABq1irW|DQvw}aR)`7)HOuA`L4ve1MM(|NG1u#AwukP!|j z&abN?7@XN8wt%DA@v}CIIAZkcm{Y58(a_XKS5&-wj^IH`x%cQIG~fnLABa{`kYGf{{2>&4m8jbgfL_Sza}8 zI?2;}-yhEvv^TSJEp*q4KwL{t@ZTu7p+}AwXXJWY0?WTU_eOEaVPWxHi{q4BPSw0gv5Bc-*Z`GBb{M?mZ&eU6vuj>+&AuKzQK z21G$B(?)1P(zFOYAu3;SGgZuyR#Dav_1SFmVO8iVHM)%kObmj@=L9{icdo8tEh^DQF#!nL>@Jp z3##P7H4Wn$dmdxLC@74~Co|)X*b0l}-N5x@o5zaC$-O*xiR0RGtSY7sJSNF4Z=*vG z@yJLA2Sp_0Wiolj+jy~04>SbqpsK6q(1Y_AI`m>>*Ac%&d6?1h{$h<0mWugZrfB-C zO?nF#nF3!z><{R#Z{9YwU>tKLExE~r3r!e>K34=}C>x=(BNy?JWIO{U%vzC2chbR& zx-=RvU;hcT7d%0_xo}o+5tvaaPGcF^Hf>lM!37wzyHP{qw9mX@swEqoleCtCROQ(hnlG^zU-2 zlSXVa*JChxkKZO8p~)W)iZU4YQBq)BNvV^~;K>^8sVOA(c)!df2HM8`2{%K_$jvQN zNJJ*kkzh|P49#+QaDe77%8WxG%|6Rmr>^cxX+s&2$R9(JTj=O-zk%|=7zp{wnA#PJ zMM&pS<=l!hiiK39XdvD66GTC~)q|}a4e`__Lh&dLVrChV*i>~DAtOk^PzIBz!UEo0 
zHqrwxn}a7S&csZ`wd6w#@tatx6bn(4VPV+;qbd38Eo^-`m4-v7C@HU9e%2F_*NWQs zM3~W`Di6yKq1qU<{7ml2Hb+HtFZ*>?Am!Vi+rB~1&xZzSC>|RIfG7Sge(50(13T%x zyK(tW?0;~I2$7_5jEqBF{MJ2#ZJgHzZrq`TU?Q5hw)wmDd{o7cksZsyh_7`VE_g|7KzVo>m&KFTQbnwN@Mc6Rm+IBf5D z%E&!AkP{{oP?xV{=z4kQOLq&y007V`5mRhNxP!`0&<=Agg)HAbZ)p_eOLdtPJH+^so2~5KQP>6E$z3+4m>5t~tXf0kQ4`(ky#fxi2 z!Z`8y9Z{sn5Ihv(mP?Q^+t)v>I^Fz|Q67hrfBq+wHmmUJ z=(LKL8lPNPXNuz!Li~2_u{w6C!#WV>}jNZ}(BtY%IE{ixHR>oq z_E4hDVc;Pm5Y^Ucc)W*GI5d1EZxdQp*F+*X0@+10cpzpt_gFN^ACDO#9@Sx$RxZdR z*dk)P^8zLC9X(^bGeK)T6zi8NL1~hlz_3pUzZw>%PF z-IT%M7m$;&e?bMX3B^4dNe^cVYbc_G6^T)#{4|Z5+TX!-A05bj*f5~RPM85=AdPnC zvx`#H^O+MyjNMkYqRz`B`&;BNp;~zGmxtp2KS=c75LPxvNpC(0EBSq`t*zad>vl+K zDps3F_n#25^7{&)@h(Bv~`OWb%1Bs8vXH<9|+;OKTJBv?c@RnZRtqV>{ zKgqQJ@TslbtCls^U+Tg*56^E2s~7=woCHM@kZOsfgFT`N!R^0BG^x%&?+&DrEcf;2 zm))na0$jy!C}Tg1UHGzk@FQi2Px#;H?)-B%;cw6I|HV>bH<{@2lxogHhEOOLxXkU^ zAWKnsH%MrpN9QX<@KAK(z`FLh^KCI4YD9_*A8k_U@k1qu;3_wBlE-AL@>FeT_7tEk zNSYePuyX;&WWSu)?AbjH8{7-e%uyzfla({ME1eKT$jD+Kb|Jlj6jKB#0wV!KR_Z@t z4nCPy3Z_ys(7U{Ut;z@ynAC9flXt9-&K?$*et!AR#+0%>xC-F^nwSKGRvm`O-vBf`6AtNUKz3Fq9$ZG9e~>D`^VT#PkBnG-w-A z#V+!ZMpbH`geKsS)OWg~G@Eb+fN;eLCW>T-0~x-kqB$Jb`r(W@OfoIOA(f_cb2B=~ zD5DcFLf>o&dy6hWrk5aC(lvi$SezNAhg&Y87B|Ev+6b1HP=npq7{DuqAX{N1W(T}d z3}`R}iY`??T8BJtID9lE7{5YxcvZv_Uk<B19T-Sl&SwhN@P*VcYQ1Bta z-^6acNoMK0^>*&0`C+Un$P7lUP{%`EPva+=nwp6(Q^9%za%$Ywey8s$LEg))!qndI zb2D1bQt4U$^pc!gPbw@!kzK-RmPb(G3W!3>atP7D`<4=b1Zb}9!>ckM?O})N;GGa4 z1dMcjsqsPMlkHF*PM>yt@naIVCBq#w185Wss=aisQ&0bfMM-W#;;=(+X)*;t3EhB2 zvA@4C#r6X>ejZA(VN^jOnVtK@L$ngGCF|a#g&yPCq&CREL+982p^vs`2iXBlxn$XF?jdxjJcML}&NwzH3;r zdo_!n|U~7>^m|arKd8@ zSg+Bl8=+4d+zfn!SO*x;E$H5Y;zSDDfmw+9axK>TRE4hkd1M(wW)W~~$&hxhgLV@P za!HJnPkPT4$k*v&i5BAwY_yHZA))>F+!7=?G^I!S^3+;D&!-R( z`38n_NKlZlG>F_b*3}=1I~u=~Q8^56rVYtMd@{o6@KI9A&G@mpfLdumBDxD;&msx6 zteJ#u8E!nEToUZC^MOh4r*o#$s>u=qX%kK3Z9?cZKhDi1kDM4xEgJYC30>e!aBqyq z-g3No5+eIhEV}6Oi8*fiQY9>19n{|;WB6j;pdf$jvKRcoXqUbVz{x+GURXT@m5okT8t`euJjUCO|}i`0N?( 
z32M~BTI)0GDt@xjqQ_%18$w1%QFEm=RFTuy-1qg4X*2bpFsAzyQ4`qQvD`6}Y);bM zAmb{sHgpAVG-&xwZ_I^mMnAvbWzhPi~T>m&KlMe<>QlP~|dIHzWZ;e!^J%lu?p0GmN=}crR}51p-GoAqG`fy*f~4&6d!{3ml&} zk~15Mu=9EI#q@*1U)(uz_^=&pe5N?#L*(m1T?0gtYU4x0lwPpMy*U2>dXIAj#3;|@ zn}(&v#8zWfvw;x^88f4n+&Tl7YoK{>s{-7}B#$@QiXBipJ(8%ENc_ZQi!i_|`ObipwwY!6DOKQUN^Id3VNo^<-FcBdG@}v4$k{KDrf*jZs-|z{mwp8{tD`+C#u#)P#A} z7bSI&5fccU8md>y;MvZv*qpq>3z0t^=u9vrXUJlpS-Tc-g&rYxy+7D5Who>Bjag0- zUM4$HDG2D{y-vKi?4f%C-PwhyC^^y_N$K25qJPfc~O8{bj{T$j?e0ca6sLBg`Mc=fdS(3ICHB)e4 zJ|izd(C%=&9pF(Nr?(16ICRP{BR4R5L&8Ov+FTc4?s)F?O2dNRc%r1{4iok!e%2v? zf~1g#F=jO4Vqzn%m}fKmUJsvU6vpn(JiDsv=H{08A|kV^aS=|~4{GvYvI)haZujRG z_Amv+&&$Ubhl`ogeK)Q?94*DPR%jnK0+}tKCMNc}s7zqdjLPl0mo-G#cT*@9IvKP{ zdyt&>5kQ9JUu(RA&to~@EMWw3bS<$YqgR`uDvtofV~KO93m3gRfHPkQ9eaOB?jg%- zK<07(+I<&vAu4jrjk7<3wcjh9J;fsz)*;r^?Ln$Z>$?q|`YCBM{ zs*f;VVb2hb1;S6_75Erx={Ip&;X*&f?Ye&&=e)kAhZ18OWvGYHszAd6>~aGK_%@O} z2M#W5f0NM#93Kk7C}HY=!C2wcw79v6PA}NLf?~QF0VlEeBj_db1vwNSJ$_t-6ZjH6 zD|{;mL_%a&O(FRii1vIkHeuwv)@-TjEl_}o-bfs}Mb6VCJajE_m*CwLhx4Aj!jLin zYeO+)D>o&p1o--VCQ%xm>~evYhEQzrc^iga9y&pbnP7}RgB%O=grSX2XfieIVeF37 z>^6i1)Mxg=`Y9LqA-cP6p3`k^LNPSs%S%g0*b=OJ1OAr)8!~o)L3YaY33?F`=m;B= z>U+_pVfqgs9}U^csj*my4kk;`9mdTSdX$##ynE8*$@8giwPn64x1MXc=QxJK(h|s%7S=;E8#D(_U1plG#%_theIeikoC^{36mk5WlT+zN z=`ett;F-xySXIdbj%Cm)=M)7DVK?ifs4QQHxB-N7*TXY&2(!vaR$Xk8X+(YyP-vh6 z1ty#5uvh}h@t!$SOCNt{J4ZJ^N*3pA3B`NvM$h48+Q9_U&;jaqzu^qS0N&hwkvix0 zEb$@zfwJ))%co7A908Dx7UHH^+L|0NK@I=6Blibb^~i~Ej1t*NZd7s_s_HB>SAiRfhKQ&+!y3k zH2yT7i2&SU@KDMBKKtZC+3HWsWVCV`>7S5$2u=!ZI^}HhVD;1rdmvJo#u@_KrI@;> zoe~zaZC0TtgB<7L7S}d?)Jl#f#D5@iTchzbq;ZJ2=ky%z79b10bAJq164i@XM6@;! 
zGLMFjc}@8YL`o@n#sL)&e=>qReWBgz)=@PwK!GDyL`LRUB*Et5zl_OJBpMjTXEe}G zFNQvoq+Puhs>;ozD}vf064kF0|KSbM*a7|mizlR#jXaKUbx@z2;0_EQf!6(X6$5;M z=9ym^`4uQ@qlxQ-AQVSi5EXwM38#W{F$?&rsdT{t9dH{N(QuqX}PV~TS2b6`WEk$W}lZZJBF4ba_$wp&a2P> z1U!&RS(Z)?vZ*KC9axZY#nTTdZGE8va}%45NY*J2X|{Vul2rNz#!&#DNPQL+9_=z1 z@Q}g|xks_y8b9tP1{mQy@?5hk-`3SxVmXjx14P2Z@lgD3y+@oUijq2!>#w4QC@|T^ z$os&~ow z3MAUpjaZnTQro92UZSTB3;}t6=GbvI{%B+BgS`S0|6!f3AXvMEg;3a1wb+>egceRzFWkEGFSv;DUaxt<5Ju@dL?3)^k!1HqDskK^9 z&nJxPg_d#y!b{a^el1NZk&)jMc!A~ixWR@|Dwnw8V!5RL#A3zDcO>ykK8J}$>5xPs zF)KWa7<8V0fC3FZ?mZF5#tU9U z810l$1FDvtVlB11a19mD(3ROBo3>e{=T2XHpB;oBDw2s#VJdB=r) zf|GFT=^ZT2N@r^Ad@+#W`#7fZx+82MWuoI-Dc>?j`E zR!+FI5FQY@=Usuf3MC5=!qrT(MrWq=;XI!hq&{vZ#@ZZ*)_e^7j%m@7${qkxqb3M{ z>!N#1iw#{bxtY+zpg|l4W0Wwga>>qvJKAP1)_iOk`IZxCgQ3#)8(?dEtrqOlLh1H( z(jqj8lYpa95{P-5+g*+EZGh8^kq~MgAI|WYc;MahOJom%PB!WHU{p^ZIU}-vEfj($ zL85cps6K&1*cincMR#B&w5-^h51m(p;171w8NE#;MviKQA`&36i1KQ}Q6Q@sgYy>U zpw|4gqqs(}N5VO3u)v>T0pKzHAI{!9EXVeH`@Tv@p+U*4E@dc`IWxJ^U@T=!kuhTt zk|~5HQc8wQDJnx{%v@2K-NKDbB_U%-LU=wa_wRY$_uZbqUVnVIFL%Rr9_MkawXc2O z*E$%=JZ8pG!JBe1O!12suP>edgc;(sTz7rp&#$>(OYY?EnpjOS+-u+9tx5;EbY1RN zthz9pM_GWi>MeM96t52WJ@)W281a1_2UJ&N=pjGGHPrIX5C{*HWB8Y~lq8(X7*o0g zXW%vMJXEdj4}e?e<&}Ow14_G8n@Nkf{2I(HNDwhtnqJLY*&Fw(V))gsQu=>f=JxM+ zBe_V9QisdBP33q9!$JHweD-Xw-j$^nDcDaD>E4HLR8u6nz>t3a_pFk)_TP-|HRW)t zirvW;Kk^c(AlUN_Jv6J;6`$-e!iO{kkvl|_7aCa{st$R?%0c5bnv_=!-g~zA)egf( zJy|rdd__talNoSTx`aj)zSKdH!d3nM;-oS*F zcmKncJ+XCbc_(m8EPD4I&orN*>(tYqf)`C4*p@S>3D@Lwo%!!z{vy=~d&;6kFXP?v z2BIfGMGE6qg_I@ucB{JTPh)ghre+ zTv54j?=3Z7MokeP#Gx$L62oNNOzb|1(-dvcw4g3bfeZvs7>yfQ$dX?$9Af_Yw47&L zz=iBG2}{OIR>S)vEgjO=B$N7lTL-)qnjQ<`ePGZbL=frgm3+&3y!D+Pa3Gc$OBg(a z{XrM;K8FX7&Bu((*I_wW5VE-%MdteB8d2AwW-k~Z?mTK{zmXnqgnHdU*mrc^y68Do z`mM?!;c0YGHJ80kS@u~thX9<=a=W6G={sja?N`E}&S3H}xa3yX~0j^q1q zN{p<4R-HQS?w_YpvgqVuyj%2mu`?|i*jRD1JDwqFpnF$k+hmMkxKdr+NS>{Q5H+A& z(;eSqGTY8pk3EN=T?g!w*;-$(?c>wPxc;a6EELmPa+xoSFj64)acD5< z5oXVi^0wNqRN#HZ*nQ~a(`2rI)PJ>Jc#6Mv{g=PCpP0TqJJd+v$qxvj>02nHMXjc` 
zlQYNcMzMg)nd^cx&+|Uss*wbAF645XHMR6kT3h-93)t@Ep2n&IL@efte zeQ_ROStEi{ecn#zy?Csxu3>t9k3!LWG#t!dvjVsr`(8rd%0ivf_^iI$G$@skW{B{2 zmekA%Mg+sQU_zxie)-4AK&;KCa;t)Kk%sycxe$fHjBFO@o!7Wf<$l_3xl97yOXtCo z7}t&QGY_8{C@Ujm2@2oGh0^ZF$?CkMe3nqAk@151v9bR^2WT4HRMUWLT=x4=4|1yf z^U!q~>0`aR?dWLHp}<4k#T(krM;r2)chiOI>h*X?EieG8b zeXMb+7>c-#Dvw|q4S>XpIWPPP!oq(BZ6Qm-^fhZ&8PgBaWDrT`^?0e?r9Qc+r}kk1 z7Do$&0ZMb}vKa!f8!$RK_6O%?FbD;+T@CS0&GhY+b3_Zus%r(vaZ| zJ<2AwNI4rhuuaCqZcltS5rd2!L_L-PY6q78`oW&v3^ai&fCT&IynoLPCJwXPTJjID zzJ2ZLYHSuxIQgF^!xZMvc&S~cTs2ts`AVz(Zu7dDnyOAd^*=x*)&E278cX9}x7-fG zdcAqv9NgBlbiC$ji#$AMAD+Nn%tu>}s<3$g9f|wTFkgqHrBEN@w8_?K$7F}W8Gq)S zDz483%!8Pvb{jpes?9|@H%LX-nrzy+^RO%@c#PTzx%{x)7~OYZ9hV_hJK`?MfJ5kl z(oz-enRPJ~gd=0)=sFZnt>KbqN80wLPEOeHieO#aI;v2$yj4=LCjnX1ADcw zhk@eHS6&m1iZT%IvX`a*v}x03-i+-obuHLFzVV#&%wbOIE9DAuav@m^GL!%PK|Oq& z?x5y&HPVN9E!b!osHT{6j{&Ei>n|x{@fF9ds4QyE5X5v-uPc#q`}hGyAMb{7d`pfQ zMQV)(6Tqo@@tAUpDJ)dUlU#zs_zwx4)Vhi3b!h>nsk^ZKr>56w;K@po4!6M5xV-yH zDD2I|>(kFeEF7D5qsQW7(SgIKlbt7NsE_B=9tWS!ew=>0WVph9>}lU5xvB#)=wnPK zz)A*vvKjUfBddD2xm{cMUgDsG+zrwNhn5%+*wrzc)+t;TR}IP(h=02x&d2trn)lSv zJ#-WS&%vR^m`sNLgGMhOM(9v82QI7^VhL(y{F`#y`->qm<%Y%Wb538Go2Kr%b;|r} zwFyP=8KdMzR!UqZf$ztYwts-STMCH%+0rlT`Oyim`iXz*_Z3^Oy@iqRBK&a3htqr_ zN9;$p3C0y9K28Kx~IHVqV2_X&Q5^C(%{c+PNd?<-3Z8TKfB zHbT`rEnWl9Uq*MyylG1B`T}PVt;Lrm@5H>l+q`L2|t;(9&9X&T8x{li-xc4oFvJhxW-9>HlVA27M!1c8!EjhU^AnSV9tsPl}w zt+DPsHVsxVs_s2DOPoAg&s(NN$^@Sg7H^V?!a9hf$fF61*@)dWJQj#`TsLu3q34zLB;(d zV|M@tFMZSdMcYVSewn(w%ilXH->j_J>>tV+znp6nQM#?g`zyT=kyY%^$7dcgmiA`% zS54Mo@12AhL2j-w#H=qaWg|E5{-<)~4pZp@plB66R{ZzTMy+)VM2%h7%pV~l3B$Ii zFuc~GJ=@c&PnW1%jW=bC_~IA98viiyq;xJK+uuefH2pQV%^qb!mq8AB)pUAQX-F_x zoQ-H)$Gt9GqIs9DKzWjW$Tg-{h-vV9vf((5GWo~vg6@g72i$&QO+I{mxSFCgl;<*9 z;_hqbZ7ZOoM(vEPB`|_uo4YXiADbKbSV}`PZ1)4eIh6R~@8f!4{F&ke zJPo7GRqi7_f3hJ<*rWpNqIT|V?;v`Tj6#n(9oSN#aJ~IM%^16|)ZTmZ^xPSte+7kK zpDGLD#hpULQX$)LBvm%-TA+n;`=x#G)|i;wMMjE%fU0-ZPspETYHno*j>maueOY5w zumtr+-okN)#DBc946ic}H)>W-#4Rc{2gcxxM4;NCX{6b}_ITQ{mPsM8tD+W6Y|`&L 
z6O7-$&xvq}gqW=rzM`UV9YDG*_UG0nCVTX08k>aER%}H4toMf^&>lzN0un9aztF@a z?0;dLFk4#>R=$7uM1aKD4y9<$xy-_pg@#zhZ%#ZJG@6mv8$vbZOGNhqGy8xQFaOk)8Noz^H6!uDdbhjqz7q?~&7|CrbIs z6xvRWJUX}U-1%Bve&u>{?-)ayn#D{a=+=UW|ym~+J zHsuQ!N>z0l;r))I0DMDYI{3nVWyW#teyf_WNX;qJ)Ct_Kg=v)K>SKE*K$8PhK9iOfTqtQpiciP$C zupjhq-4X$YvmX%*^XRFNe8S-p7`}hjW+8SMcN|UBX8t=}oKXWHq|Sv8o0HM^)48Ml zC-wFA2TpPHkI)=&V@J#x(v?{F z*OZ_o!@uZy0ElmLsaP|Dno+9X?uK1;z3D}hc%D|*kPwNGfJ?=ef?dlGz z5k31+KF4uNU`<89GqhAE!Oee9WV02OKI74kwn6htKa+GDO_|b$Dj8KEkC%-g)|cch z*PXM^=ILzlINFBR_k#404TtpA3?^G} zSO`s0ylA-qq-MCfW^Ias7*lphX;@Axx#d;3kg7$fs_kCBpbRnZ)rTSRkz2BBBnFJ= zG60dQqGqS6clsG1=^&F|<`2A99ddhdq-l6=(H#!Dw+K)?jGIW zWQ4B|q|`$61?MU2G!+3JONrM{dzPcsPoY=uchn;`^H|y2QnAWAmxC-g13de))db@J z;SA)0zY9(lN)`$>nO0gHLWe%CuI6THVv^bOs3+*gyw`codSpB?wxUO8W8?P^kG=|f zTCHH5n53gg4gDo=O}!Z_V*x}f{lP`&l!*nt)B9zZkL_9rrs+-V?1&4^doH|AOjQ&l z@=?sWn*+}B%g|TLPydl61;jX$u_3=o%5c^e)H^ULWwR+4z9dpg7dN{P11-EOYHIVP zr7IWF(&q2(+?E#(PB|~3?Qwg|T+%Qo-J0`ncF&c5wsw$KGSgt*z@ko_I+^`S@zTn8NJCH_ zo;Yc5p26`H5{r8DT9<@lQ^pJ~s#{?XLm=IFQhLjJOD`{IjxMzOXNC}C7hExmNyO(a zE+?6tGvQLdd!43Sjq<#e8`f~d%O%r_F8xwZ+yeK=ts?rJ9CuVypZk|&cK3WI&W_-E zt)w*G)@|-inRNZIP+hGZRu&-#ASnbl3;#!_-ghq%wuT8yLtq28R~d+I*w5US6)2VZ)9HJ1vhW)Ibnp<=mS?l1#qto7!;l!=9D^5dw9EBWL z_5dhg;nZuFN|x9(4g8UNCyqF=`Jd&$wj>isl^6~B6Q0JK!x zOlEJ*etP!Q-7{foE|2BL&sc7_54eOrJd&0ecJ3Tm}-n3u%hT9(<$fH69m+SR4kW|3Cck&>7EnEQ@&fW95jda@ZKeg9qjF&=H6{EWOW{@=Xhofu$%hEz|4 zB3I9-eg!hR0=SCe4##v@<`7UfY@(Io1nIT7i&`yC@(X`{pIL~4BmQ@w(ef}xo3J+iKyBX_m-Tx8D!U{71mecO zvw;ysA*49VYex*4H5`nD|Je%QD*aKQmpU?(-(66;~)?dvlJy$UUa4T0ZeSP3|O_ z_pyj;(`gFsZtEYu0^S&2EE%kHxAM*RM8tn0w^X z!Go7~L~9Qjai?=$lc0d~?C5I4HTqv%Ic^^`OU)O?k1zDC^f_;w7}n`U?ENpFNl49S%hCvu>?dsa%p$ zM~4L2*CO8L1`2N|?)roDT(NT+$}K;B{{GLY?Y$Q) z$fm^BFfuZ_Ts$w^XV}hO{pZc=$Y6*xGJhr7TgbBcuWq`wJ=9NCjDfA8;#N$jt&Ay2 z%GWe0W%rm^SZGr(TmqTg2A?YTRtCSm2XDI$*T0(ISLgAOrW46gsmG38qRs7|VR%rG zeG%QDVZ6lWh{+qTOt$LMrMluQpwQdfyPvObXDahjcJ>5RsgQ;jZ|&$=Q%6U~KJwDT zpkS9vH*ePEBwwThxb*ey7TjZR+@eLbmMvR?c50BylS+bAV`t(RV6aJzIZw|9b7NuI 
z+xt4evTSydVea5qYdt_n`n+PT7xPYedT&_0x|;NOC%sm=*k|_Kn04);zSEV!ON$=_ z1({#JeOr&MGyVSIrdU}2p_dOIWETGEYzsJp8eZ%>iN#DNBBTX~^`A3&HxqAKj z^=t3c)6+|m41D{{qPdzTsdJw6$X0Y!So@c?mK2%pare$0zOc^DKFg9v*BBgWx@F6j z0s>k>v$l3@pN8@=X;!hFKD~87j7~4AZPyQ%0T(e<}czTdOIM4?jU7eEp!0#J`OA6VA^%&O%4P?udG% zsbQN)eVf)N3@eYaDPVWUw6={m4R7AJhJqck|8jY<>z}#v9%b#Vr`^fQsvoG**g>BQ z+4dX>Q|0I6=Gt{wmCB8E)_$eNWXK@7$*n=pg>9+s|4amySbG;J(1Gg|EcS^D4-aoS zsa^@buc;HDM>RDyTaWrlq?W&&bvH}i%?6wV^}!&s*%1xbs%2cc;?QA9l&QnzYu5%i zI%-kC8$M#*ZYu0XeMddLh<^d>H3)XW==ZzfO|p0Hs=quZpQ}Fn7B8Miz1cQOnU@OT z0Pi>f@lquHrSTs6m?vxQaSEUNzpmwwt*u*@3^I3QudS!K~Zl3f)@$) za7cT;?rJwbmZD?ki*N3~EU6=lbg2pEjsvkb{;Sn<&9(B5P>!#?(8|Di4PRA#k=1pPJ zRG_ZZ4+a*Tm(dJP+nIb2@-yV3d&UOSHAM>+Ell=l<`%|Jpk`nmg~-G}z%KB8p|Z%^~68H5Hd9cf7a8 zG_NB9^J-mnPKF&85ckCg_wH>05?)ErG6u*FsYQq|Ux%wRjzznC>z3Wvv5m^g$|g*i z@-?rRogH+{iTKq)bpKrAys_2S3|SL%qV3s~vrU>dE&TAoJz~15$I5JHUhF?)7?|0b zKr@lNI$+&#KKbP2+~yRP)l4)v(+9JnRHr8bLlrx3O+5-u^z3wE>y|AUD1`NH2l+Zu z^hIph62!qXeE5_JOWCi8lAdhP?EIFkTD9oat4YR}I18ouj#b5$9$oj|4aLR~EDwUH zza}ZjevFo-#Sqn-J^T09Y}~jppEvogSp7aU(l+4y$+J?CzX_~-2Vs$4o? 
z(^;jfjs#ve=7ee9ZUAQy2RMECuO4{c4Z6K19Snqv-xw7Y$eG(cY*!z$jq9;%*RKbz zTUV3neYE{xmw|*8U7INXF=IxI7-8E}>Hh^LU#;T8v-Wy=PE&M^jK1aDJd1QUh`IYR zyGUzl(FBtYPT2>?ovhnpU|2ucfX+?V*zY&Ee$ZpfjvYpW zmu?hp5*IQq-CHt1?L~_gy~{tY&3@muK^1?;Z+Q!6ottfKRq>_(flNlnd4GJ`+deXo z`lkpRFqWqlEePvK2-+;R>#;WJ1z!K`R$Kn(*P`_=TIO|y%NsawV2x>4jPoRd?jdyB z4;{KTCB>4xZ1@v7E09lS?*1d^(j_??X%`AAt?BUbrSaqaLpdjyQ{E>0RJLg0aq2=U z(daS3r`52XDq>}zZm(|ouK%D$`2aQ82sP>HYet~Gs8lHpdjO1QhIgO$LX)?nIOSHi zXV0GL>FK*6sZx(0ckMM|$l$>Q4Gr~b)#&=yW^y?IC5~ciMn!#9z9s2y+RfNZtR1CT zJoZotvXyKp4EF5LJ=HDfZub5AMj2neHBrTnwej>)>P6)BT3VsotXVT_pXcN8J3Gk)Akey^cm0um1H(RVG_AFE z?OMr0@7}++h>frxb*|#4fz59Anj|V5%i+nc-Oq2a-+|n^Qhu(D=JnX_^$4=dia%Oe zTIyh(%f>5i;-HTYLbd@DwDbCW`0zoqR;~WUqTCAyl+qhoNx$2i#{s(P)LZ`V zKiO!u#W&E{_^q>|)^{4N7kv|iGU##3 zz(;HI*s^{5Uf``+GoxnBHsBI6k&}A} zVsT5~6@C26-z9B6sETeHnm{VEw;(3hy-@iX=YAGKj};w95Vu;$BkOS1?kfU!wAqnt zS3d7lpSX}VZ=JUi_Gn+uqIN4NDq79HY;(fstZvsXU9x#0q`wW6^fOwUMJ%LMO=r&I z^p&_&3VAV>^MBsz^RxEKl`EO$*hX}YRe>ts-<69sG&E$2jbGX82+t*%i}Nxv6nM2) zZr|RR)@JQx%a#qXHOte2VrbCYzjL52ZB^zBTR-EO>vm>g9bDJ_hYqdl-u7$a*yH9> zL*ZLUvgqiPkS*vznP(2=W$}51L*&&i>K>X-lV5O&{q5~HFJ2r_yyD`uYlDhcthOJ= zR5yKv6`tn7TdewD^a@HeWpw+46SEuRIS(b8wLW3=cm!^^+5qh}2M$=YZP%`g;U@c; zlwY+wZmrER9DUZ$XX~RC*!ShzuHU%f^Yx9Zp{Z${It<}OD6@i(AH!iU3P=SZ%c{S^ z9yLg1XE)Pa88LG7vFUnLHUI)vWdkwSpyx|Lf5bL2gsB7e%|>icgQE!;NvaX`R?<>- zx~QJ3dE3oOrGGV*t!*LaP!Yfz_Ohq*X zj2T)pXU>$D%)zzgw03T~Nl=Zi+rouid0=gld@QB?K4}44I)k3)HwKgvAI)r zShL=Zc!ieZ_Yctm_ zO#^}wJGR-9wvVkm`oi+w($thtb_Pn$Z^@DY!-j=}wH!0XddGDeM82S%{gtQXHpl0$ zUkC9GQYp=_My-go7a8uBadg;=sCo#ZXq?C*_Eu`Y&HTu_)I33-;zgiAQ zuLEn|cKm4ragq7|QMoKfpWC*9i~5MEwGm4i<3|{@U}w)Id)Ht5&Y;A2B9O`|xb0Abx_~pU7mNif4a*uOlqQ&2a6ru$6fo!m%79sG_R> z0_~EKpa0|ddrYNPjM<}&V1T9?U(_YuOsPm;@V36Xy1K5eZUE1w7~Zz}K$T(4wryI9 zL{>WZCplV8;qUM7?c>wtND6oYLB1NxcB|Mc*m32e*(%i(m(mIEAQ>tk=sQfz(qL9X zW7lMt+uj|O%D8UV2&{$0#jcaLQ-Y%nD#nc)_aAzp=4Oj#toZW_7=PDqEmjh%n`6Wqe!bZO*qvGV$7dzBerE1?$-$J|;{QP(!S7;wxj}P+mvmQKX0G7u`nn98&efO}F 
z;?|1oyzWeTirjnrtf3d}F>+*mhV5ytSg}H*X3Z7=oqo5~lPK9PW@lGFeE6{C_>N&g zR{@_Igjq(hWtY=9OKsECMl<{K63dRl6TwnNzckIOA;HkRCa^^)AairAj4zHKXcL%( zNq~5>f9ge2**P^ja>DqunkEw07z0s${HRs_qen!Erd3Y@&XJG7B~Llvt$R`1Si9ql zX7~jDc+Oc>xSuoV8PmzcS%=brFgJ-cO!xZ~N9y{R!h4n^#ZdB4jMYXn|6m9yKuhaC ze7G*=VNHc*?b?&m44m4)lIp^swqgSZL-gB3n!b4XQrWU)4QNu^4ege?A3Jd(Cof@8 z;k$PSKU`LgWeR2URzqX^_3NjA&)c|TN1ffT=Q1$1Hh?XVgv5}89m+v>^_{YrlvVHK z+-Er;`;hlNeVzwC|8gxPVP~(!>II8$8|s~FBx(O@=D+{eUs3s~COp?_g4qDgmALa| zq|}x^%X%~q(mR()AgAi4&fC{uN`KW=qb5yiE3CS9l@B%OoZF$DJDUMS!>GWG(aCS? zP(^m#(tUnQfJ=>n??<#?ItIA8HGyxdWuhT4<@)u(V7&qm*5w;FU_c<~8d!T9dkoZT z<1~770|d|wEWOc^h6h2JQ0y}GciqvW^OFuw0YBcRyC$~RVtsI&`Hi9Rv#GDVpI@pa ze8huO^IH+Z-N#zFA%sBe)SzN%@AKV6X?{lAsC~w>XU(aeZK>e{;;K8tLE2LN6})*9 z!rr(@n}z@u76YwfKjJ_a6>Cy>9IA()pt>C{a`pFpMdRr3p zC#4zaIdw02`*sq1%In9cI>`(K+QUrPJItPdK+_P5#?#b#=hQH5r_FxU6>6-20B5^z zR~JYuqyX5_Zs}*v?Crn0Mx()w@#8-{yP#vD0m7}JtsUC&_;L6>VvG-rIYpI+tXjE&n;2m5#i&Q#X1w%rIMe4Z&Ri1|8@F z!OH9m5}p&oKdr5;cax{vHf!m81e6zTeZWhosh2O;!uNd{Uo&Ex;9O*;8VZ1w8aBN; zFbt`zMYi#F7|*>FEZO-}r%fy3wCLK%&zdoGsLOXDt`k?exw-uV<1v_7LZ1SDB09uyXKiP%&C6lJ1Ogyrs_C?SjH zooWKYHXrp9b+Ppi6QE&@^XJcp+~RGm+p*(`MK}dRF4JDLh#+B&x@h6Ju_V?}Q`c~{ zGs~L+M4umFltp@b{p3tHmZK(V5&k~<%*T*pW^sS+K>2mXCwiv?v`yyXG~Ox|sh~x{ zD|PlO^Ln$v?Fbh>g;~{!Q4=_Pw&mI;R-j$|N6p~)_=uzg%tUp(g3Y@KWF zT`J?&bIh>-TH&>n5mFaRf_#VZHfZkJAS`;$NBOE{WIq9+@?VH8$=y3dlr#OJ zk0R>lH{7lyYb?=u9f^isdEdXhQtP!sP5RrNPcFf@m&RBI_ydaYLVE3zL+s-_nL5~T znsHCmj_tjqK7ny9$8t^R4(V>L`0>LNz8UX~N)_)va^%Pg{1$28D57~0oVVv5gNPcm zK-7YlaXx#db;W*8H%+=~AV{+l7E9~=AVMZuvB4xk{4kQ|Pz|}xdv}qqR ziOp)g;&(6V@0^Dng6{T^EFn#& zd3vf-ae6Ry=xDWOt#m?}@me-kycew*ZtJ#S~NHl{7*KR-lbo}X!svZ+y zQ3R^0$_7r{V{m)B-LPRgERpxZh3)Ts;u~V0L?wp#mA6njuS6q}aZUJ>8&UYwt%8>{ z+O5LuGVyMrQ=_5w@#iWrU+wZ3+1fRd4X9Aa;0N2jWP0(Co)q7qG>8k5dh;BJ$p9B8Jocw zO7kwv(vKNs92auR|EsxTH~A}}eU!NsML{JpYWkv&JL8%w&6`7bC^C>|Qwc;&h^`GM zXvbDJ9OT)kabx*;n9S3?3)R8QIu4N&rcV#TYfwnMOQ-YK;!j=ejlnzz4U6p$dtBJS 
zel&deo1bsdhkUaeqmgWp8*-Be^k8j&cwY}}am*-D_@Mq&a4S0{I9<5b+0cs#_;SgC-e(Y0w^BRTq5iqi2-OpxP9s}?I( zW>pWh{BS_CZbvi!E+(I5Yv?&8{eRTDT`@81u*)f_(!|Z|+AGPe7{y^`=Fbbp{7L^`$5exn+5NUF)+c%8KQy6MeP% z?$@(1?%MUlrDcYnJTItRnoN_8z1Q3|IpfZqdH}a9z~RP-2sMYujf`w5nXVc;i7;ag z15F1H(>UwYthvtlfCfmn*o;Y!$f?hTeHtycNtNe)@CWv_7qNPcSdslMexzH!0#7TdEOhqu}k^P%3|Q1%NOdMnuUr`SIh&j9bGvuFX=m9U8c` z(?eaSlCv}VG`_iDKzy^q9?%3zJ7M0 zZ`gXc*=YOyPttv=HSRh~Xk|gIy?gh@sG#5{iEJWv7)1Bzorrq9diRDLJ$0%(-5xs3 z;3zu&9*b!QhU@XCpXN$p_5^ZVvW{^5ssOQOj%72dYP!{{N~ZVa z`?ZV)sr93L=4YnCsw!glZMGi+8l@L<1M8Iq$YZRJ5Qoz2{O$o9MaB2;6VVz`PA_5R zP6ga-*|u$lad+bS417?cPL+Q7auK*1!BDFQqlQ~gfd-bUQ689~GtT=~FptZBMEdE| z%C>E5+h|te^z)uK&wRlnj<_cu9o$MFp^P3i(V3UWA7{hAT+Y&^y%5W;*A|!h*b6_u zeY7J_Dnw~^d>dnLWa!?4q8@u0xq*cZ>3DJ%wIiprI-eTueh}f|*mdZa(8>#Vt1O?4 z(p&S>OxDMLNAkA1AOuzXtd!*HmR`+g<_5cbwz~Yiz3&j|+zj1+y8-n59cOIh&VkeYk2NP~VxbeMnl9 zvOf;gT*+?G|K@rr^q0Oo^0~Ty+3y1Y*cm<4^$h2M$t!7iL7s27Xrhw;diMY&O~so7 zDZtx8_fnRu~37Swfk_eYXFW~ZPuc5pmI-5hdtqL(n zC@I{22{SALGYBNEu^S;3x{Cn8mQR2}7r^vq7srJjs5(&9zZ7l__l5;0mVv<1=$he) z3(=7wzSilkvP=tdOe#H|sH+a4YWCqkkkL=GxFbF!!3h(L54)cWf#+gxHcObZ)_tCS~RUE3=4lzk8qk+WDl z31+iq&yJ1?vF}+ccvvst-h%9}NbKh|S!lIQc1c=Ash{=qX)|{B0D5VgpOgy0HQQ9F z5XKwZf-v5E4gfsL$JGl|n%p7M01YTUX^Sd)6;*v)#&FG*R5qSJ&Ew*i2PeK08xqsm z>wptW%gYCncsw4pgvt?+2O}vZolIdeX~X&T7lEu#pFW0-T3>wY5*IQeXW1mRdiClF zvCS#Sz&3`Y6Bo5S_*nI-m(3HD2#zMbz-ijq+4205d$$HXwSw7_D8R7?M%LXuePt9{ zI1~dBel50!sHDp%cBIcXePUeNCe&V9%htt59+KHHft2C99iz>q)Fbd<;`exA=FHn? 
ztrWChR{Z=abP}m>)&BjhShl##xorLTUc8`e=p;kiKj5}Cp`qFc_qD*ZVECBRmN$P4 zbLnvP^xU??hxpeF0ruW$I_6@iM7h?$gUmlrtjhDTWg|9jRPcN*{a5bg(*m}kGv}kPBu9}DIH;woKNx*;4Ls6( zcfFFxEK47*9|}j*%^qZGqJii!an`Ib>J->|i<_Hp8zI$j(EQ&&E10*H+_Y7SEK1I` zy6WxQw;z4lw@vLjbpW0At>oIAPu-CSiFQ7{XUv|xp6+R>%1Q5dHEakn0k7`ws!^kc zR68K_wLs${J}}vHYh2%I)v7_1R)=mI^)W{2KR^r#lCVHlX+UW%564(UW}wSs9i*-I zr9Mv8g^j16CNbndsdR3-&VE0AdMyYX!pzPq4uP4?Rl|tGtIKAy496fk8?eKXHTdb0sYH00MymAB{_Fr2UyLv-1rg^Q0lm*Fd==hMtp( zSuIQSM5l;Zvt5yqtMG^Fqq#YS)jx6kcs9DjzMBtJW2Z4k&wAmz1~+&14yDs$ z^tf>|d+0~pqz%m+4qXi{sqEvk<|6Up)7S)J5mt=k=+#3jVkmyiJH8sqAAc}eL_43g z43s`WeSN)>eTaX?JQ;VeS3McNvRVo`5d?=Z`%bX6Ra9TN7&d?g%tI0aULDbSKUNIIz$eaGjDoi*_SvhCTa?T`PJqd?L&p> zVa1w1-KuxQLS35z3==lg zQ9cD3Y9zWMGydIbL%Po2$28?zg|7+lSc9%B2=(ns3%e? zM-c-M&n@z6nNWNuKDcF85oDgjexx$)$A^(toGNPwXIjBJ;n$0~R&4vD&Go!IeFX}~ z(Ruq?;vmsjydh!9Z=|@W5`bf{IM7WC!zkeKf{?dkhOP8aH z_F&H!6M4VF4oJeFy z#}T&l-KY@38{kD5!Z45Gz^RF@A4*kMm*l?i4}| z*q#`5XsjWl461|c>-O$FG~FRyivn2EiLlm+1oMslPE|E3K+Y@Bt$i*I)WTP<)?sxw zYS_-rd&9BR)C>kf6+-euOHDU7r}l5m@CQ9My9!m}eC6jspobB6B7&a!aXo2D@0K96 zr%#{aHG%5uhbvHQaO|+CAB*d1HwZTWKYDxy(s7&iE$6ekV4rvZ@x?U z^U34K?EUWCVAzDh*=eC~oa4GGBOuKD8qA?QHA2%Ko-|=j<9hX0Q%KurR{4Eobc5C= z{Svv&bny1Bb%1sujh8q?mQs9XVk(jzZ*L5>$7pg?>J(j_|JI~R#*2Ic@h?UwP-p}! 
zA~Fjx_`-=Dy6QMNT?W`Fp8%)py?b<0=lAd5)=VIY_%b=Oi`<&V@@S13HA*;ZG!XM0 zxCh$wx#0KPoQm4d-8AnYM(Ntk`!oR(J~%o}N5(*w*sJDt?%DH9$2t=>nFd#T8q`@N z#0qe;;h>xT_+1fvQ)Zt8u?|M-1~KW{Oki3n1kI>-T>0LgpiG^$hT#tr@J%OIHc~}b zMd7Fkgj=jD1F6;Y^!1%zmWwrk`bqRini8sP)pP+`iCzXWs8(^obN#CA%W$^|GRHA= z$3>+)BvTut2QB2!r~@PGNVP*$nCJzW?wTBRT6zTzD3uX6Pr+M}MO_bjJ=Sg(^(C`g z#0*n2Gbt%pB9#7ZAOjV9bz7w483~I0gS4U07Y2SaUFQu0z?>#N@wTYCuki_Qua*jTeRO zar&MYlU1iJU=MNQXF?Xj=x>=lXU+zARH0Zz(nBCaG+a+pNnK>b4D{Ctvu7VFe4)xO zBzUTqR`!3$fss2jr9QScjQBurdJfjxjW|1!$2`@#b;(|pJPIVrjj4u0oS`okaON^H z`T<(ZHw2>%VKG_$qoh^#=M#7kQhKcWR5>udDQ#jKUca7>bTIOc6UR-4o|5~_tHT*c zE!}`&y&s;Q(^hEe=-9y`CglTlno~`gJGZW`V0Vo$7y@P*p0qXC+@i|rs6h_yI&c2` z4%B5*vJsIWWj)zjR-HRvLJJYA@=44!MdG1n2ArWzCQ4oq(%&8uj3R)1Ewuv#afneC zkvA|V#zkJ9hQahq}?u)2C05_+!*(#kutV zP8)6?Pq<*RMHY%|m!|6ggkB`tKuTy(>+ZX=;muYOM1A^MJGJqdTIG!fZm<#&n>LL& zTwXlePL)mbS$#(A_n$MTJ-UssH*|YvT)epV@^o?iNE%_IWkS$?Dk~Xb+N#wg;2e`z z)#cKH1`QfaFWs)|I;ZO7?nX8h^STvf3`J~zO+hsWN2jSPB*tZTSmRL+ZQl$BT`u=w ztS0(S;^u}52xM&QH5b^m%3I9h+u@IpS{bcHY4EuB<|*PlbZRmo4Nrn^90IF@u1UL!UDK2>UZ|aMd8+ zAz=O$Jh}*(MsZ8K654Zovbbc|f_%gcOM8!;0HXU!6a@3V`g%Jpb-41d4TYXKZfRcO zuDcxJIk@noS_S=`7@D}qGXdP7QkmaQ_nCx`)%^D^h{&yjYhZEiv)3fq_}u;GnEz7l zkaiGx7^$Re(`jr8-k+Y^Vk4ptP<;IDsJfte`2CBg>~UF1Wj6`$oY-l^;tlkA zpWmg6sac*oB>{M;xV>phWif}7l8iXoZiLTylzZWkdDq5dL?lp=c3`lxn1m!RF0664 zy2L>!vp`b^Y#$sOalgQ1?k!2=5gN@$2xpX~CA!Tv&qquZ>5RG(WWTI!+qT0A5a7f% zYE{(hDv|N2O)=44M05HC#*R7to~;)V6H^nf39~E2{hkR$#@z#}4jh=dH6KO&B5W-K zP;#V5p>aT-x(+--4wy?1e&)1ll9hly|!(^|AtDi1ktp3-3#l_yhe;O0p& z^QVxGK4CF9VA@3gRRMjYQ`*v+b_VU%J zHzBw1`&qV|ad1>hGerS?1m=$R@#BEdsW7#;Ckt5IkoRk};}IV<$>^k-`WAxA^XgM# zVWt03VHC4>_U*Xp_aeb%+1HnG0~E%J%_+Za<70>)am z_M_fO3zE1uP?6-UK-=Qp_-k6QcaV`nQXn%3z$j2RC^YmUeiDy|`wAvzpnC_^s@F2H zMN7(#u?EYnr=suC06Kvt0g3ty5w3%|%(-`b(V*6GAxmh{EX7L(G@hr62K91Ut8MU5s|!z%XhCf9kw?@xmD924{uX zzi1r`7XcrtW?Rr;my*(2wTZ;-MyDzmYLtoD5ybF4f4dHWZ7I511#{=`MhV~9+h)Lk z(6jx_{kq43L}*coO4d0asC3>ym@XtJP%QXIyeDI<+q$(O?24H2aA0-@;9Tw?DQ(`o 
zj)A(Mp5fDGDG!-C=tCvQ2>0|;rzWDqwN;ts1&~9ta&y;Nt`F|~Zc3-^`VpUfa$3b* z6=fBVE>X(w{nr_pzlGKy5we1pg#DO2IetT;Dtg;SKRbI>yf3%4h<}FuvX$6_dM*3m zl6LFzWld>Yg5ef67@cN@6&f6Wsw?OeYbPDQ315E_;3g3fs7MKb{we5YlKl8DAt{m* zb>x1xWtVbuFVf^dOM~N3yZD|`KT`-{^{3t$&aEN;SuKefU@Xp%@-{l^!>kmSdU50ZhS;=;zn)^kjj;OrKzpG+SK9f zP_yI!#4%|D2r+U%VOS%6^+^(_^-S2WFhbbOC|*j-$}XX~yLbP2-qhj2F|T^kddL=_ zC6X}BrJ*C2Wd@s@w7+Plim%Z&QkmEG-?jCUnrp`fJ#9jjj7zvbKP7F?^0!E1b^5x5jrR97=G!KtQ=oCB>Zoun1dKxKGaX6E4&eiyjzJ@5ltNY&l z^ey-bf*@}xmmR+`Hnuja3}et-N*;jCy)9ApCyCP9tQL{d&e>T%;Q5+AK;65l&dNid zDn3mDQQ$sn(6FIbkGgt>`i&ch0uN07FH!FQ&(5)8D2PNV{E@f&IV#^z zoHAt%`TnN!fU+79|Hs;8UGuzO`|A{slP8+Bt%uvYQmvfs^C#g8FRN&_}rNr<8SxkPW1dju<3xw<&# z(U}rkm`4m18wV{U-k)Dw@w{-wC^Vs2FTVg$)X}}!A#`G#@3B|kD}nwI^?s+ce`@C& zmJRsN{p&2V|DGS!jk|2HdCmWbR$iMZyG$_EWB;`uG-$xz<3|~XY+cj$N$>|KAKb8ON+>%kC_`e=i z$2RTT*R|2Kk92vLvwMPvZPZ;X91&f`%*{0&(h+&CYzs>wz2wri1;=`u`2$^An&!;` zTi8%4h|7~Z)iZA1917Lr>xjyLzAd|u&!^tn-B_d*wqVwc8+9<@6`nJR&^pDA#1@f> z1;7dinlv%V_io;`t2yVDehexSG^{`w!2iIfWy_XAgK-E1Ctl3Wtw~d4Q$$gb%80oW zk&zo!)KKMLgu|iBCiM9qXwZQ;La@E$#QHDv`}H}Mx&w$pdrcT$rbNK%+k&9_wxu^WLLb`w{ z&#{i_)~~--$O9}4_q^vR-;YXgG!*l#_~B4Ar2`>*@LW~AJCy^r42vl9D~y*arKKb0 zfsS7Ecp?NZO;aHiIv&_hltrExXQ8&ASUjTVq%rMriVRo!Rir6Y5kPpBi*!gcZC%e0 z`h^lBvsP1BckiCmwnfB=g6lYW5`00;^WL`r>AFh)Bh<7(r%wNopReyG)gvz%Tx&z3 zltx+cPgj-2S$y4uNt0HSMQ8k&sq5-57<=8!NK+FOez=$l9_;r1Va-7ndzchS0=@6; ztbAXJBU#crVW#GA-+zWsT>pbDS-y(yL#Z5HIDIdRi@)YiqPAHB|26@JT)O@cy8>aH zGPAPc?jAj}*w4?UGas!GoVXZBxmH#da%*)92Yo>WY~|tr5uC>@?hLqLc2IJM?h)mu zQ0pL!h@QH3f!nxrbcb^=WIISN%u9}FBS&VbI`uIF9 zTe>Xtf$7nx@84vE0qk-%`F@Y!)OuOQidrLyd1ZH1 zLPk})80z0?eqRfm#E9k~OIl`9W{-f`79H$AqI=_}O{+O5V&s@%KREszA}5{Q!^P_i zb(Dj8DIGFg&}7>o7fd7dLjfVBKh>n@XOi zcf&Bp&U`T-{_5RwHbE98xA3*N{G}KQChb>sON$)OiVNp1i3JiOq~8sJJ+I#Gef3@IGUd9Tjg304M)$+z zf7?j;c%$ZZR6Zer)>GX2XmqVgKTIB3Jad|IN<(BE2Tv_?Aa%hrBL*~Wx}MXku8=t? 
ztoi}-mJ$D2_^s{4zG4c4Pqt6T8> zqC)B18$5xte*&@9dj8AWBzTz=lig7Z64Y$yx1p+R8Dd_+e$#8xBus?Jse{_4*QfSW z0nwzJm9pMw&YU^2v}Iqvc8Qh#qjTs-8d&Q!s#l+QHL4AMx0)y^-|`pI+9qT-lpSG9 zo1=gE=FPe~&HG$pI>^<>UFlq>@+5ZQgf+LfQ{i}cO=GFtfmqU7HMYvd>bQC1#sE^D z64=k{_51vy7KdAkTtX&fwus@7h~P3HEXK>7w+ophe&cx zp{kz92b8c$(I#Vj2qn?!VZ&$5YRht_Qt3{fI@J?`a5Q%W{TNWsYpc*gSk=W+OFpe> zXP3*M2x=8Nj|{&>f;j=A>3ECb6xOe&c8KfND5ek-btL%FlZzn5@K3#d?eWfQ%LOJi z?BUxQv@CNPkZ~aOH=mwOuND3j>06Qm%EOosc2rq1kag{a3w`Or3ZfNGQE=Qm;s~2L zgVK_AV*zFn%PeoM<+P34Sq=Mf9?jJ*NgG0gf&ySeVs)5cK<9`yJ%&=cLMgJmik-VG z>eQ*jO+iZ(w{GnbCF+Keah@7ur&au3<^qS4icnnor7<~nb74>X0&-UgVZ%{}-3mK3iF2+>`i>IVh0qlS<6cJ(Hg*1CP$g|2fhgU=zTu;HKFP*x z5A8#|gdlY_iVQLksfPMZn)861ijejmaGXjH7`xnJpw_M)3$&oK-h>Q^A3>{$WHHvu?ZWc$Y3E8O ztzy??P!eJ7NpwGVx}~SSms}JfNuR1zdfhj|>nd#BE4567frVQ4WTX~i^?es6B$%~n zcZ zHW{xb{#;zzgrXwJQR2aAwz*-01_C&xPZg|)Uu+d%leIEt2cR>qV~2;;=ElAh=Hz-z zuNbH66+?jdA2oHw@3YSf7Z%!81EH_O!X<+jXnmA^bqB7J4xnZr^snJb4Ihs3?$ZVE zOIGWm7oOXqM43is3S|}s_-E^&3Y|*rgN|#-B?Z#_X%!ezz z$bbOx#38D9A*vhdlW;g@(g|z3+2r|vxp8^j7rZf`pG4b4L)wpcw-?GDFgCC3{UamN zdg~*#w-7DqD&M$m+t|sqtbmBP@SNK{Tg`iULgZV=QWjAkUcP$O{KPY+SwUl5L}gzq zLOzDobG1Q-jwxktZOH3`DBi0nM3{bPOcsIp7*xx_gBf(ZOAH_%95gil6h=} zqQOupiLgkE5Xl%J)1qW3Wge2ELdlQ@C9JY!nG!`&rWGY)A>;mB<$3q>9PfVjKK43} z`##p~M*sis_x)bOd7jsKDGIMunk+gdiZFUj3$Snm*P|Cs4&)0CMep;7n?ZLY1eY*p zG;5r?+Cd%QI^d?=n-QG3bf|;i$?-BZ00W2|2+3_uYf36|O)!5<|0fhMa^$)Zoq+C* zk-IeIa|;xJyV%laFUMefL62wi$Kv@57RZ#S=>0HB8hN>6RDswf0K^L51|89%TQ^7A zT$6P&agp^tCURMId3CPm@_+zkpt(r`6o(LVGCB7F@JEA_I~2JcoMsalY5facEn)W? 
zc?E$`?M971am`LQ5!fmmagiosxJv*L81rn{@O+a&ZA2DRvXR3(m_}>DfgoEc2{3t$)Yst+XBvN!F32(09n|%>f9$3P{K=< z`mg$Nbop)S^bOaoe);WV=HF-0iH?2U>PYG4fLpl0>gqn5A-{%Hy$ ztOUYFN1|WxdL;>x!ULy~-Mb%e8*S+{a~R)8KTj8+RKd&Vd+rnNYrWE%W6m+g-OQHc zbli5uWarim%CC$mq5zP{s91$X7bLR^{gSY<{Iye9YY|*B0JMO4U>Tm>+kaua%lY4^ zlf|v)6O^Nv}L3_g?Go=NPi0}&)?CfGSM>?aS3$L&|UX6IuHkr zMlIlx$7}K!X@!T>A}F%*^I7>3G&wta&1wmfrzj|S*P_hp4yVhv-YmSH%oTsxYlvg= z+f<;B#LBb3=a{IetqE2);2cqy3q;$n19zPsP#^;uRu$3n1_d?3v12RFT~uFU-$sKA zmx}UKm>wvhUf+6cFxf}?MDif+C(8Fk$E4D;pQ2V*GJz(_LJ2yOLebBFK)B6Xr0+vk zZP;Ng*WyLb+OKR!-G6x0K8XeRBJm|E zDMkgyWd`))X1a*80boZ;Kuve3MA}B`svq|kF9T|M|3g$402>vzxz9F4NusAD2JirA z<)UU9SzIn+U-Eof=WOB_WU%A%j=lFLu!gfy<^vGfr9 zL5f?xJG-(R&a`W+nemWTU6gjh5_e{S&GK3(+hil}K_A^On$yCQUK*%zJYMePuQavX zvva3Rek4|6Gx!}pIZRL;XHL?ec$!i!yG&dWkwYZ%>S(~^CukS@{gh`+`HYyEvVYy2;^>OuFjzmciAvx*`F z(zRjHo4|6k4s;%|Gcw$g{}%DOl0`r4-o+-m9-&9XgxrTXy}c2qt5k<~Zh;xwV~PPj zw2C5Lq#~i9HVpnIr87TUyX-72?e@N3T&76fPsm)`TwBk;%G!$it%zj?Lp`3m?(@zs zGNezYV#xQL$zx6L7!I_I6$dZNgdLs=2E%2%Z(7v4ZQDZ-J|}h{VqNx0}C6uG8Z2j&>X!R}S?;efZn2 zBB*mRruWxW9FcqphHkt(+52{VpsFn7(pV72gbdn~FBw*3 zb-dB`f^Ao(!Gm8W3N_c(Rxhn-=-I5<7B{nXlD<35EKaeYNJKc*0KZ3MfKx84R+kY$ zC~7^+fLNpdBsj-3#*6V(!G~0)sI$xR{uyFpo%jJB_$Oo2rp@-a% za)9Mx4gG1Lzy%l#|Fe9YyZaQxZz2pynp{DAC|0b$DwoGtXP6&7frycT}2WifpQ3a~D7mLa@LC%I^r) zZSH%dEodb3{amLcmIKJ_D2e^9bpXwZ-vp-g8BB@q+e|zQX_dkZ1~)vo(-rQq^^Cme zE2dwNU$qWczFhl9nZ}f;V4y8|-Vj2rV)=-$B)s)T+gXPAN6-$z>d}2&0F5-wFX6vs zoF1r5!1I)qKP&-Yjz^!CfFDz5KAOdpf)f5cYUhUh#NVe`>}aEJvX3HOxiNja^WoLs zk;Wg(UlFtbc4NIV@VB$FWiWPpb@h|OEF%zCF-R;;LXbR4(M(|mK#y-i9VNr>eEHyI z=Ts^gMn?66VA=jLt8LR|o1V`LH2b74@l?DLGFs8pmhz5BvdY$No!*38E z7-PB&th$9L|Ady8Ai`%`g6;M1fBZ2veg&p!0_3YL$&Bu*=~|2-g0KM$;WA$wB_f_V zUpSrsEsXQ=4C7_aNOVtH2T&<-VGzZNWt+_pQJmPHmp7ABTca5*StY9yfpJ_^Xm1gG zt1F{gk94u8{NO(Zqv7|3k`KjN~U~k zDFc_%hj;6?m9EFWiWem^f4JhIQ>W_V+DdJ=o+_|qzfoZZ*;#uji-s_b{mZ-{lR-Qm z5<*mQ_W?&`M~i z;2YF@E4&ZBp_*OClXwJ@7_VU$?J0;N%x@{|X0Q+Hj})yc)Kub3`t<43NH{9Qy<4Cn znF9>LA4?2$1ziCP`U`5^0>$ux%KOzZJ~HV*e<=PavA9X_`^G&@+rDAaE1GbQ48hF4 
z@+=@7_>>I`EuCf7&)Kl938x-=%e6p6GV4@18L%4I`;TrA><{Vj7a^g zJ>+*3G}@bnNB@G`_Zl%X?a)Dm?$HWL&!o7FkgKVqgMf_td3-GbvTF#)M6-#6KeUpw z6MO^h`*QCjmkTY<tku@EXk|_X#W3=qYAKtPd_g|nYv8GcL*f$~icqy9&(C59I`;kO>W9Aag*LnM_kp9fHoEKP{qX?HK%`QLy4rLJuS zR2bX6)lRo zB-5MCpNubqY+zdjpn}! zS}vjm+C*2kY5K>tAV5lMTI?izb)EkOdR38O`NVt=vdivT1b}W-t2X#tsMtQ@SN&=h zX>5yHV!2u!e&ux9A}aT`SV&%=+Tb4Z;R2E1XCbc~HA>KA%qu>hBL zZn%g3@d$)F9qQpEkbCDLqmT;5H8EIQ^G~!V*uBHvr1R%+CJ4W)!#hZ;Q`Vf$WvES2 z2z8{(JHBo7AAp){1bz@`SeQ_Jt42eqPG=r1p}l7BK6 z9Do~=s+I36V?DvEHxo4?5u=LEz0I~3-ifv8rPc-G>a_0454WB&u8aYJ$mSU5>!mB0 z-Jv=*aeNG=2D6&2As7-#=>3mCYzs0(9kBU4@U@Tvk>kd{wozZ_tl*5X4khI8Y{g^T z#&Nu0W?{Gs$h6Qmai8^>#xz0F8mJ$G*ADSi{F8=t*n z*RD+jOGi6l|6mcWgyKKkTopLad5S$B?%BKQ>aOp8Fp!e5V6u(YE8ISkIcfNSM0LmE z{4#Ce8VWOy%z)OY=aX*mP>I*#B4#nCxG&kOR<_vD;QAR6I@0|Yp7ZYkofR4u<~(~P zLK?9o6B{_7qEq!-a0m#0Ph>$wXs9OeQp$)%0=q)=hzF7F|AHQ2c~Ns{0PF)9h2HhA zdafzCgeWO(FHQe&N+?A^O2Ak*Qgqr$oBQA#Q)<<~vM_+typEE&Q>H2=-@8DYjJJE~ z{C7JKrf~N$>V?$#Em^!HIFi*BhZ!yR4zI4N>bBzR6860}lfyJ?Y*cP=ZP^tL@_;<< zG0G)O`#vnbpdLb+6~2FUQ+-2!LKG){tq5@c-r4~#`{S5)3N(&YxxhN2km0CXGar`Z z$>6roAHOs8DShyr_`Tt~ZQXIKfAr!726q73Z`d&;%6zsgHWTS;hk7!)`}FBFs0&Z& z#JCCIL~EOiYd%11)QiNy+@_h_WCSxU)pZ`aX&P#`WoLuySiZh^+E`ovWm?iB)3)I% zCR~57n+I_9KsnJ|Xq!^t%GoX{#k*DnLY(HCc)SV@ z{x;JBRTn${B;!ckki{dTg2{Vx_SdEFYg3Dz*in%MjLjyAtX2f)R9|A${Wt}@Nwj`~ zB2fovHM@w1co6X9OcTq{yGBd;Zs(PA;En$HXU4;G{iJ{6nC@8WvaWmk`@CCk_>g+C zT?SMs+BP&C=!Fn;Xvxh*KfW)AkyQr3CZcbfAFh3VF~4M79E2u}FBU~XEWI)F!uwY+?kvR~ zV>Odr3JZ4urWY|F)F1$zbk-Dmbbg0leX<_!&}rKg4qo3CU;FYkwAKY@y)W?w+X}pbZ$|V7Q;}B6FJas<%jaJfP@hcAs7;elI zATn5JmzugSE2s|28X8GZC2mtpz7M1SZ&@nCv!Q|UBwon8sE~ExdCl4f{Gu6}EOtDC zKQ-*&hy%!M9HfA1m;0o1vyHZ&p_^cI+>%!lA^-fP7M@N><8>m3U$CId{G!#1Py>rVWiVSa z>i7@(=T*&BiQeXJ#D}6+CecnQ3?0`;>o3l6Z4HR=()HDi zZ(LpQtsmPewL78qb8g1O7;4@ppwM6`-EOj%+&(C7!|7TZf^hjukORv=!GCk`l6xqY z3-aZ(Mn!jI0bsOj~@pt*z6h(bh{nOp7U849Mkf(eunQB*Q1U4H+VwqJ{Nm~e5P*f(Y9?rfLi7iU6bX?mGAbO4z~J{gDSfWMSY4O8+8ZVy^4%C4uJr%VE91zCh?7`%luMo@*6}3mdI5GpHL+KwUfGQU>)~vuc3vFAnnSa0R4LZ=XiHHLz|^}j 
z_P=36&vFW&mR&FX&~n)9pB9+>vbniAwxP~Vg4nIb&;aza;K*WA4>};O7W<&g%texe z^_tkH$>^!bN1ktL&fyi4zV5q5B#mt7^fKjQj2=?yaVMuWf`mA{J>=(dh4AN`noq#V z^C&N}lM5!EllVi+IRG8N9TsTmFyG#g!6bMnzqmg_O<0g^WoH;ZU4y>wPK-zPg3{^%bwsOgdw59vR@Q^lzC__z#;;i3v(e~WGsSCb|_FV2ddlj>+Q_u*+r|!^cEe6{~Y40$pz|=6MQHUx)4#! z2VF;rfr4V)`t=R)R*TG8YaP+Hmfr6I41*Dz{Oyza?HNfzdrR-`$wH&O@AlbV5ps{o zUwpr3QMY7)tY|sxQb7XCR*^WzeM^u(WNK8OFPURKj%pQz#u|SHN1!f_0UNT+2d-G87GR^(_ zZ!4?|p*QLV6uY2Piv;|ZDSQefTB(CCaJ8zn6?1tmir9@hb>qF+*&0V8IyrCR?|dwt z)(qm_KP_YWQ|8QRTBqvNS+;8rqBcMt!2`t)3F*NJw(aG|yE?2XSsnWhUfOitGP@qB z5yLKMn1V=jVc-rtJ>HQQ|L$bTq3#q=&#UQ%>#<3vZeA&DWLue`R}@5-u^==j5G%{9GGQKFnMLLEsE(LMkV z+?er1H@!qGe0kEs#EC%y-GgijU@ehD+^NUSU+i8z^)O1aN5rwZl9H=A1E)_aZ8hw< zL?mcz+(;xL95=iHSFKVJ=o708CV9lIOtg9XZzylrGA0I7B$$GyZn!U?IPRspYJ-As zjmc#vi;dpy+NYm5Pr>=k`RxjKjnDq)ZC&QcSK{Up2M>Zn@K3boe?b(bDtj2LpgSL1 zzKGv)nJ6wR6%k)efq@q=EFMkUAR>dTzT>_R0qGZ=1r?%fOvKG6d-F>AMzKcXDC^V8 zpnCp9MXPPT_R__R-KcAY2m@r5saXU82MStRQ?F$h%YHFj0??9VcvQ|UP9cg@fG0>6 zUw8HRFB$!$EfGHiam#039-(~{ZX5FXJO2Fq&uW2oEoLhQmiK|7{!8;4L*7H^wtSiR zVGfugGFFxDRRpIVDZVNs<4e4D&gR>^C&+Wfuu{ye>8N--qKA{wAOt&+V4DKg5TwOh zC?w=R()Dm)$38hr2sSwn;2=A(WSAC{2M{a!2fabY3G4xD#hl61?i=X zvaUOGrk9v1)Cpdq)qZ{nL-f2#u(lW1$H;Q}v}rPuV3>TCE)hs+9>eRk6TVE5$dn(W zHk3E-WpO2n0Ga%PZc`(FU6v=rw^4x7F2m5QX6cKHLaS8kI}_hiBrN}*T4UMs{-+Ry zxwxXe$(4tBk-tloJ1KL$SP-@9KCL0gMeNv}&HcTRCxxW&``a-0>3ZKh3EFd?@S~sS+`nRYcF_`(}YF@UeWK&T##$NA%Od) zsb9qSK~$nLmse{)?GZCkUob5YI6}Fs@8XbwME|s4^NH$6<9tpg`ttZ33VI zy_Dg1zWI>1v#SrXpMr+Lh+#^}`>^J$$gjOXKG_Y=oUTXm?% z5E&r+)xo_%iCzHRy(4HEGG6m}R3O)8~RgG0Rm2r&h8^ z-0hcFg-$(jO`;-0LeyUamwot;*xrMGSk^rKW#kGpv$aE};idGQNY&Oq&N+{k0yI`c zs=~lfMK$5j>H@fDqe4YhwGnKK{G_2cGB8oa2p4H^C_j=)>Mdwe6iWj@Cx)^40w=iR z&!?kyuiZd!p>E=@FR^l0k$qV73}~w}Q@?05j$`cKBs|o14`w6A!&+p1>K@P7-wCTV zu~oSP0uM(oY8Nup#gZ17>SxK_v-lKRdr#liG0*E+YG*M!aOKg|E*hZ@)aR{It!4vz zH$+RnBe{`=#w7;ZgbgC02m`c)o=8QJ=>tr5AVdJ?zy?<{FDNmo{Ik9F+%6s7#%X9e z)y}#8KiV3y{#%4~vXBP;MqtS7OvolrzAL%U+{fIm1?5s;WMt&j9-}e+V%f4DYD*Lh 
z2vl<51sooHU^?h8s3dgifDVjGPn`CH>2cmvqXrEc2xM^ON{~rsl%UzuJJfsp__2Ml zo2K~(2kY#0fBuOW*l1QxUwt1mLr09>Y5z~a6i-wx;3h_-9tr-3QW9xKj`PUBw^D73 zu{dv9@HsR#24^-M8!jt-X*`{#e!<|PE$u!^$M|1S>a`oRR7(*ZfE+%t>D|Bop+k{4v5>Jp zu`5x#h_M;>r>%empnQE(M?~rD9keXjPOB-K+cRK+-&`ou=@VX4q8&TD>Q1aY?cEQ>$^Yq3Dn@JyxtavGX53g@|nPUG}A;w1|!VIOn|?Z^?fqZNZJXV8b>J#$EzaT40vd`?w(lZ zT4tgJLLi0{*7dm6+_eRQJTcn`aXc)uGd?_dSzQ3v#OdSel5EGsCrf!s9~11eB{%bLXQ}({ta9X3bX^>eS5qLN|haEmtD@P~2%QP9Vb17GroOpd!kyNc{OILv~2c0^n zJt=C-hDonE$hEffh;MPY;++UN%M>d=-MY%27g3lpucK%fD7LZ}!Qh}E0KC3%@1Fab zMtq5Ia~c*Ucj8T#n+cHzE;jBI5bGR6zsWAl+H;}GQ3c$sFU%2;uPl12Yx;zul%|iP zI~Y_Q(nM5-GK$b?+XdJ=p1}-u>dJU6<+lh{ZYKqqoRLacZa5DV#G;O55u#Kq4QVXf z*xFjF3&cy4gkzQPKR>-#$ZXVEwTnmDGkx1alC*4tpkz=K!JIhdD_BoTn z{bd6t^R&A+*hk+MV@?@9g>pz7l+4w@ioqIlvpZWuI88OO!vFxvs-cQRI%@8S%U}@& zg_+9-1U@cET6eZl2N52LLzT%@q3K3}DrH0F;WZ&7A7h9I^L=y}#zx-)W{iaLcWPdn z56w_AgQdwDA4>Jam(OX;>KutEcIAL&F+3`D7{H|X$59xG{mgu3US!vbjKWjeT9Lqo zIV?|~1< z@z{0A>#r9$)qJIcABd}v@D$cLAOo(tGP3=mal&X7(aUh}cF(r6?+Uh=B_IeP1VFTp^i^AZl0|@nu-K_N9L$fPZ zO=54VT?CZ5v7P8~2_pn~!a#hdflh{(YU8{3d1k%WXaW1fQ^6fdw5`&&p}y1V^C|HO zs8wz(PDU0Mno>IecZuli+~;RwF3gpkjbhP+NW)!d;)wB}3v6#ZYV>D3p;OKxMc+lV zW>^eJWskTDBu!=DZ|(K^`PBn8xb3@C|AE+gg4Ni*9dM_hU^qRQtOgWy6{r>ZTXdB; z#PyZM-zmx3LrO)yLip+Rwd;n{4isauY%(RKFMNl|iuo}Xb!c&(o|FZ=7(yKAD_!XO zJWq`qCNXreTp`b_Koy(K{tOWThKRX1t{~WNg1NIxg3uIq4biZSI4@bx!Cx(AMgLoP z2d#ZfDy8xbcGsa!*i5CR^I|?V?9Lw3o33q6p?Z<3y4Tw~8y*_fMHxI4r)E2<>lk`z z>*yRgHqN@gNb05KloIf)+ir)w7jGvcRsKcLIT1Sj`4DbB4yK7gr@XG*c$o^Lk_0j2-pLZcg8(I{vXLI}yR!Xk;K2K$ z1TMdO{AfZUuh-I2Pq0Y7D3b8p;zRPw2$%Ed5g>GHk;P7!ef-dF|Rv z`ACL(=$z~X#AQy6>EGZB$N0n%6B)S|jXGzyrT_Z3-Pe_@6K7oTOWL~?p`Pr44JKAA zSyX3}`;E~KO(?D1iSFz?Wte+~)73E^HqjNJ-)pfKM`UE0Y!|jxlnT5BC``@&sT3ET zF`ALk$A3|raz{i}2NRguDYG#{O?Ap#ZeMVhEj9hQ%1%>E^ef4)Dy21HrzkqQBsn%- zVyQomHb$Nsm;X}Rp0pJ z>)h_iF8Q5@+>Lg&i?OUeXt~_kBqpPaVYt3YQ0TtK_omKXlN4eU-g-^zfByJT$@a0i z*9)fYX`8kVik=)&DK`F6VX;2-M`|qH z?=k6#>KLV(-R2tVjRpZuP5QDmH~#y#*DH(6`nzwBA&fsqkL1FcPaJ<9Tb<<$%#A-e 
zEhxkyxrTAOIK*c<7kVfjlgatA#HXwKNo(Yx#x5N#7Qa-h*_PhKj+$J%d2`?FB-$~w z8WEIW@#uZDorXkCn*eTNaOLvl;f!+4Sv9l7zfAWVdR%)uyIW-3PycD`u?98$pqi)K z@Ps=xXwju6ePYQs@s0;q+ebV$ouBp4&n@`f$B*Y9rxgFY((WBD)aWb{`esrajL%ql z1|^JtWd{`{<@&%kS!!+i`bGkr91$vc;6M*@xGUzoG=fiGzfPn%h~x03efv=K4D9yh z-&RQ%JO8GfP|J+1zt@wY>HdCCH-uQ2j5!>_mXBCwW}c?YkJ;VbEBFnqYXaB6avMOu})qk7*3u5Wy4YUKYZ4~uw%QWM;tTk;WLu>jxNV_w(K~@sAnpcPu z#iV)qyP{_xys^NtpXs+|=+{rUC39G~TekLeVX$>MI*zm_n+NRG)$sLBHPx8xV*@bVSx~}~m=l3i(?0QMGni41i;@i55i!Xd}%~F}MT~$%C?=r7}lU}`CsprBhhT%)q^_p{5uV7}80UC&5)0Z7{iE|cJZHs$|@UorltOvWa(}ee2saaSmCIKjj5Ydran4}ZW8<8{X86`ixCK~hxNqfL)|-3WIXq{N%M-Smwhfn>0bNzwRoqR;;RjRw8J36c+4<(`IMQs`CWX{SY1(!wlkwP zrD%?kk&!L>pYw-nWS4sP!sk!sx`^Vp^ z@w(44^;VQp6QGGBG-&0dvvu*m&Hp#V;L42~H})q5wkX5Gt9wo?@4u(LicK`f6xmGy zOrQCHPG)AK5DlC>ZW(dl^)$b^*WXrGpBe3_D0^R0F@#0aHRlf5hT>F06ojg@u=OzASe!5V=tT1-yCupv;)Eh(7{ z$VJt2Jf)&hN_~#swgulez9OgJ_qxu1=dw)FTCQnmJ6fZV-2xoFP( zMeX=m31E!(GnSoY#ZYxl+qusfxVXN!lC&8S9&X?ng1YK)%2umJl4?m6CdS5B?l}dv zc!FWXT{d7F*il~Gr5<(74Dc_i;nB>*7s7vRMA~>Y7YfqvU*C`BC$fHdK$iiEViN}~ z7rb%fZDQJ9U3Fjoo(?L3`)q7%ym12oiVbCLmDzynw{MTZ_UG2h@9*2I*Q=*BWk^;p z$z);CRO|5!kuTbHo8Q7DpYynJJAGCt_+W19Xcyq!q%UI|8mDn|Il6h{mMv#>%gU}V z_Y8q|NHA>U7&V9xvKwbPNs&$tPXPdRF)C zM0>quFO!a5Po-`t`b2{?;JY1;_w~{iRmPLPc*d9RIVn4apVCqaZ*jD%VHe9m9Fh$W znRiCC#-_J5EQ>q{TvoAv*(>9y@W5HNBSwT<{uhtnU>vVXBW&4+n65J`o0?U_OQnCv21=8aT zN{S>Tf(=YWY%q+OlK@D}RuF_Ebj|vm30? z<;N6oogzX*53%_?FyEghZCF9I&f zyc$SrmxZs2@9i3`Vl>uo$HD6N?@v{nOMRbYsOIZK!i)w}Iu&E+_*cT+6V0+x_pki! 
z&z{;DB!libMuP@TM3;AJ1>eqL^Z7R=Rb1-o;`ukUU0t%g(7BAncqE@jH|_`NlpKZ2 z@;-^#{xtG;86mCW_Zs)^{UPe(@$Fu5w9dQ9wytGz*?h8Zx1EzRnBch3_76n~9?t`= z!UU$Agm4$?&&1q3F`$>Gno(cRw3 z<#6r#^}G1zMicmjKM9K_oR2pi(T%jh98@y&Rb0lJx?qOOCWQi>IR zBhNmZfhyYVt_xp3oCp?|@pN(AkDj!p$MKTMtLggv`}+ew`u6TgP%!wo{N+int5r}l zxMfcO(8V?)BOfounlE3zUAkzu)B#V@3wURt`U z!D)O~TYKV6*_UUL|8|v%4wSHwXUL`9F(DRQ&eKI)m8xrfO62)EaqlGz&=lOb7pf>J zbH;6tA?wconYw>|<#|Bfqg1Okq;j?>p&3mAjLfG{OTJLAvC+r(Q&(s$I_mB@BD0O2 zilPL5{ap5C+{2@)n=kbapcKRQD%2n~J$)8{e9g@8ut(^vh~WR$Pchd0xy$iNY_90kF%J!FZ zy_!dV-3x~!)oYg~k_#21Sop}X(@23lA zvBZ%ad9Zwb1<%Cu#yG1W*0M3FU@-l%;@c>7q*Kq~@;Asoffeet1ua#!6Vdx%|4cqH z)GQ?yqJ?%_oBB@d`MXk(NYoiHfTGC8%)}(#_WLOIFo%9+++Y52vzEXrOY*9*P%!G> ze>}6=XY`g=-3ln+-A+IR8*VX-XEgtJ`>Zo8F1>J1lRp+X5NK-K^a}Uk-Exfd^)DV= zDP;iz*fRvgItqBE<>0CE zX9F`ZA<1p^B3gSst*F@ZI*sQ*&cImJf7!AD-8$6%$WS>W z1N<3@rabCyS9bo~9MqvwJy8XlBC3ULu^&0o)x{-Tyl>jIJH|!3a z=gJM94oPR6P14D511VM?tyJ@MrEWc@4kn4$T)HQP!<_U+f`nKUXI0YnP8Wf#Xa~md4)s7~Eg)E6^^#WG*+y7;yPq=e(1TO*d%~(fvyXzF>}GzQ!*nDhBqU(s;&X2rWJN^yEm9O;L!ReS z1V&Z)V@AIKnkgV@YwPLlKZ4D{rJ+)9I67_>dXg%ppM`~@tXupjn>^1EHS>Qp{_Ffn z{Fu`5LfKP{(p#t}^va2aa2ZdVBqIn7sOc3eyFS?RWw7)lxdiFUv=AqHuMmsI4I4W0 zFJN7>M>^%S@?W|%p3>xca1*ToQYER0=76A_{B8fvow0R+34Y^7&=o~Fae%_EXlC$_ z;?=!B7N1GUKgZ|DIwt;ZZ71V>{aG0uZcfq~<23rV^cQVcuVH>=VsZ`f$ZarnTc4 z$lpZSx?_Hf;XaMDo;@9G_?B6>%+7tu?B-Wt=3&#$>QSNRfUBH#()z`w)bhTWG>oHw zRJ-RqM-MHT#=NNcOWF&{@0sLh>B>SZK9{a()^pzDzw}2n(Heb>Aje5@^m{$kvqDiOws!7*oXRBAztnPh{w>+8QO{831fHW zZ9bx-@aCJ=>^?>9;PIdOFG~sAF02cpT{=iXfDZpzdT)9=u*a)0YYJKGye=>FjBnX~-PKAeEdmO&i3VRLW;#VV9ycB{by-%CyW^~@)xov!@kSI}aU zMOtNY(KPq%kT@?>wgzsDtgQpYk|HR`i4LQF({|$`@3XnYs6&Ur#Prk4vl{S$6LdIS zVFri+J9kzI-hv5|eHaBv6;E%{!gCfmcY7|3YflxKPa)TCcUz-IP!`kEe_1Jg2+23I z9Q-Su_hJ_YI;(NIYvVwNWeC~4+_wWs#G&{wrSgmKrPJG-aH!$I1i~ zSN?*cw-IeZxZMWEB}Ex(hOKMU8G@{>3%*~o7s-x`Z1paB_>_G^53h5FmB3l=Ij;_3 z6jXcL)-&qVw7umq={s^-p{*L(=fQ$8ndOZ9t+;o9mft>10CeW`FNMBU+2Mw^au7h$ z$u16bdu=SHuHC=dZIFT5b*xmsHOD-D@3T8 
zj~o@ih=N&8Smxv0FF!n-OwKyR6EJPGTPw^-w(L&@%&=f$d8ppi|>7=y0igsO|`}pc!q2}fM90So9nbV3r=$G$>22K@E+2kjLtwOW;%(2~0 zzB8Au{OE)^so7@lRSoa7oqO&eDbSl#6BZ32I)hOigg4G;G@d7k++B}lEnWUmzj(GJ~H4C#}&UFjy5);!xmahj+RGh2*tHb+&^TzAF~wRM4^mP@IUSV!=+S30UeCT6xoS?@ zi%z?;=~W+SiQXS3`k}3`#Lt~xpmL+s7mp2=P2QX z;hb5P_NHVujC0lB9k&!3b}TLTJ6k%7hJlOcbJm~FlZbz$?!YBC=ge0%+=a+VnAZk+ zORP}76;QT?@87@T*0Ql6V=SYX#FaQ=q2}8h21X1PrR&ALH6G20;519A1rTSN2|qp;obQQYDhcXdN4?T_97RJr z8nNVxbA5_^*BD?BeLaEeIFS@Z`t9sBZJNNfsP4wr!L=@_eqI1)es}f%4Wd@YZ10q0&pP(U4Y@)@ktr|N1w_MR8bQzmum-po19lOt4(T=ec6h# zC;$E{o_%qm!riZHG^e(7ZrHHlO+;5`Sg(u{qiB1^KU>I&MS2%`Je2*B)A}v@a141v z93l3e-qmkezH!=pKtZbW9L$YLmhhMP)PNhin3e_$?9DrUc-Lql6I-)T;t6g&dnDq4 zhf6EEvT4C;yO%eb`SROGZfKv?R@+)YmHz(g9QW;vt914ZA;TQenY83l{QOp)PnknT z4sO;|ZJjLbv2$M6SUaJAGOornw%(clc7@K~U!zj!_98Fl4ikK|NcN}c(CFQVlO@i> z^Y=ncSk$jgjG`El4Nc9K$v)|s_fE!SetN6{T|3!7!JkzF&VpX}AFuZ=s5btPpxKXO zj80Sf6&Q=XukWK4k>r`v%B;THB0t^%;nIPF_4h}UHpK5X-KT9}yF?<6NtMN^W2h(n ze4Vov@=-1?5@VALN_My7h^z_-`YzL03EK@PPcFH-zrpXn z@z-(fUHtssxn54j)TiWH>(rtdEf;-R3`(+w11S}ewDJrIcDj1f$n*y9cK`|%wSxay#8%EV`Q6Kqbr*U-d$=b%!=Mqbqun~B9bYjMb@vCND z4r%RKw7d8D#5l!lE)RNlw<)V%o&T|7`fwHdwZ-PK6tJ@j{BP{-*U@;TDaJdFd(U7~ zBU&Bm2Xu)sXcYb=SSsIT^%s-e>wo};9nx}aSR7nmS%e>>TJ`f=L#e~Y&|6W|#4?64 z3;#ogVajcujNaEjhCafZ84&vDtro-Ro)%}Govir!3x-8@pbeRmUfElG*T{Hb3tm)y zG+S_cWS=;}!Z&Ze3tGQX->mfHbv~kJ2;;!!qetIRy@QA*o{UM0d0@8ApDJ1pxm2`1Ar`~PnFUq6bH(vwEzs}|D-MYffJ&~z zhW!ay8iigpmhT1Dwf!e9$pMsn5;nVNjf5cyqdlIenaS`O_{!++q=X5FTvqhLXPC&7 zM`CNI+04j-f@%SIY)ImY&&&e?h(SpcA)fKq?A zIz%Pgznh_BcU{YXaR@1#mn=!~IeKvK-n~cfg5$0H{#D<$q1Id;zYFLz#czV(bDZF% zP-q&1O29ym@+fnUK5c?76zUT;o$x{`1f#in zvI;O(T5PG8PaUZ5*abhhV!?onddt9#FrhiEGT_bC<1^mow3WJt!WNecN#_hx|}+sYIfyX<=2b5YEqvsGe+MSR`^*X zWh_I6z^$hi;4d?X8Rgym<{zd^S$-!CZ008=NeM0{050|5$3z<_g&~FNVby|v=NwQi zE$0ozNO2{3zRT4-@a1@Xlm>k$^i zV!UQCLeY!qE}?JfBUtP1DE_>Rxa*H-%CUC)NP%Y`#uAUl0l#gg zKrdNpg^^wwoqbiI=av_#W&3Y?ZW(kKkH9U%jI&nxxg`dLgmiKFM#E^ZZ?bVyt4CB2 z-h9A#oV0t-E{r4qdlLcOj@<))VD$btX*mu+VmFf~#DI-P7Nz&4??GTPf#Z@}Fm`Ba 
z;pcird!=?GM}$R_-Q``8g0~2}jdIoW?jL+e_B_k4F~WS5-vUISr4`K^)3FK2in+FL zfE&bbSTvt&CZEP_HgN4qOVQ`t!NA?ldFj$LshQVL2Y;}-auSf)&Y9XuBVg!RE;*gD z!PS#mEnEH@xFB%W6fa_YsWUWFyyQ!;LTRswQ12(EV;sg&9+A`P!P1K3O-D3J$s~>0 zhY2iCb{K}QLIKei53gRpea<{eX;8O+P4sKU_DP{>CHlBs--hKGiBU?oZhmSO82K5S_e z#}%%|NH>-%%4p4b13N+@cO`;iQGftGVJ=Wp@&h-a^gcalM(b9Ty!N`!*mGR?D*j~Q zSSNMIB}BI!1_0*IS#s6Yv2wucbBCmB0s}xPvyUY3ERY6JF^WSgIxWPw>SH&k-?4Cb zMA$#drUh0ASgfRkr@u;bK+EJde*A9PvgtO--*ECm`Gyo0KByQTK5)`p)uO}VS=%&d zS)pm3BiYs2bR(nH><}j+4>(0=;`yPcw0sxRA|I;b=HcRD7L-- z+MsDaV#I=JBeD!B+(*&d9Y+in$+F^mA3nuQy869l9EYseyvNqdp*q1>7lnV>H;Vp4 zgXms8AaP^X86w?Ypc2XLFM>MnH*%~bcL)sjU{R3|sPsblzQNmR?G21AEi(m!T<4Rl zYTsh)cGW;tG=Q5zEJRoV8nF1@JPjSQe7Z22S;BN;|JSg76yMy`Dl7ntdUSVu9B$%j zFU+N0z?;DmcPPLmFD+!T%QN?-i#XR(=5Ue-V80pjp28U2kD-w-bO$22u-zc5S{4ri z%!2Syy5Cs5Z_l1RGTtMMl=M-)zTIG&hDc5mj9pYFq%8>hF#zm<1Ng#p;o3ydmx&aH zZy>HJ9H;&RdY6X%pT!B@b!b(n#V~?>7spC2c@@MliajSJU5*SPw+nBf1V)WP9Fl$c zvT2tt$K+|zx}ThElw~8FXj5^%pyBzg1R&|Jb=jY*IPXQ%lYiXjH^+ZE_CwYy$Vqz(EIqL zsZcLjy#%e_?_;i_l_7K_WRb8xy-fdgqgw>b0kj4VZ@06B5zfbjzuKALJ_SkT9C@ER zG?5W)SDVITbb%R zoAVkLQbHGPrV{l@S9vv)YU2>S=wv@VWruJdimyBO@isI+SJEVdERj}`kS=5;C(wav z#D#!gec;E};S#}oeNW(YQT7sTaN)#Mzh~sVCQpW-{y44Mp5f1v)u}~IKt=ogs_W<{ z`!VFtbD6{U?W=s**D9zwORd@81pF`|Dg`_LFn)1AhJQX|ZF(Fe);^3{;W*hZ>yf*P z+hSTbJ8BK9gXIXo4ebOCozMx5#|vJp8?H%)cGTTzm<*nwJuFr!;3)Juxq24K3TD3(K*UW2eZok+qWO) z@375HZ=oHydOi!KM~&J{3)>MUYx-ps`(spQ5ZUuTBZlU33(bq+TrAO+h!lZP{aIz@ zaptO!6c_`S@#|9mgHu=x1O5v*xMzU!^+2()Vf;>pWsah|ifm%JuhD(}KGrjUiZ5XGlh_=K4?e3wZURGOQ;Z zv^*diZEas~Fr@;jM-aO)v7f=H>>i)drkdag9pRVDT4R-^O#I6JnDeuacQ+s+gh2LQ=iGJ z2PuW2S!E0)x|9@JAn337VM!U#x1NXC&&G3fz-?f<7?OSV7J|x$n#Cl|POGu~xtZ*^e59 zxZsr8fZ_O_1ygD6ewA9L$E?ywNdQz~ma>jy!<=Rp;6<;B2p5(hKX(=ENc?>^_adHnvWPtDM1WXl(O2z4N~e% zUhXO)bsFCo##{==RwRPDOPZxliKEcJzFDJ|4}@+z2?myajTLtj(yyt9TMM4U)!KW+ zE?)Es=*{r@nSHG-ne8S~l@=ILQVhIOMpd3eE}%BH275pKM=5X=kY!Jw(YQ6}_&7OB z&4bok)?4y5%;RA&aUT;}G}r|EKr$n$Q?XsigEKsWKjT&86VJcSnh33tkDOE>k-c=s 
zvuMV`q6L#f-c#FPk3S0#CvZ`6iFbJeCFLZwXgr7O*rz6i!jLFgF;x`>H_R4)X(J@w zl=aqDSE_`>+tlabiC*?xx8aGIpW|PpJZXxb$;6(PojVVgx{ZF|sGMK$H>v_q=rwcB zz0J`d)huN{E8#pocyQ(fWoKCf$haMs6 z0>~~9sSYsR^x;r|#xh{fZD5+ji}L0UYNuX!%19x`!OfWQ6^=>tNYW(ZnG8(ANf}Gj z-M3YbGn!rZ1)q8N95q)q_u)5kO0Tp!gnQL<%C69}XFqyfPESpJK`-tC&8)Zdc?bA< zS0aWKZgega7F<}>;utN_%m-;_PI)S3rhWU4tBoH*7)VT7@%8O+fu032C5gHMqXb;b z*3G7Sd;9+iu=vg1hS>q~yS;NS;63l`C1KyX<{|z3SaEtdn`!Zd7I&I|M zo%M}U>0fqDS9iI361?v)kMUW};-$TETJ4SrR274Uh~2wQ?8<3*54KsuyhSy|gX!`Md!RnAcIY!;PpS)S4Xn8=Yh#ro0=F22@N|gqYKfgoklwmw{SDF#o*(9)j zRyBN0q!d1U&2Pl>rnU{k+oisq0^964Yg#?WDiT{|_htv$uKGFwvGsoce_NydWtA&6 ziatb{7W#ynAgCLNeB#u9=z1mF}lTKq5lE3r10$iJJKT3kPk_GApM#FH5p0m zjN$~QY5c-I<8V%^3ENy&EOmZgV%%_F$vc0p@;Xs5C9e1^DkjZo97h@O`y!2_Uu zA_L#u{_1km=F7OJ&3r9^P2}W&&D}u^c?TiO!k2}&?j;3IJqmKl`9nHCL{vl!I?fRl zk!Y$Jm&ipED$-ORDBSCtm2e}0VmPW)dqngFSG%6|xE;XHcjJEvon!*sNXvahv$gM5 z9>MM?MKRk{u{!X*)Z6L|ZoPcjP-!R8K|u9IuZrWq$pI@p*3j3EIsfB(Izz@XKdkEG zLvI?#s|AP}@G9lfD&tjQ2bp}AQ^d8bd})NPco(Pp-sbkpURtF8QzP&Q2`bz`dc)cr z7MWG3*DS?Sz)*E@UqM1u`g@;}fKx+;4rN$Pk5>|WX1(50Tc{9c^wF}FDdSzPX-&d6 z8YJzuZB4lfgQ{*01y>0F52#HZckMKzR1{Y8c33Kr)_;E+;U>I`&;5L}`MvQ}rSZE< zRUyzq`ag}h>0w^TXL)p1wV&u|X!=(ls+eutl62N33)q(oAAQXcE1P_2M50Gl&7eLc8VHMDODwZh8c)ps$V0= za}q6y;Y(k{>=5dzoTz#WW%9y;iHqZS_~>icgS3MpMh+>*OOPwX?R!~JT3zzvyBmKN z-Aje`kEU10s=1ZHCrMdFbr&lyniN`&VnbT(X(96#4UDudLL?|s4e$q{Pf)A2VL8ev zPZhHc{Lj3ah?tkgi!*VeO24iVE~bu>Ls5tO-AN)Wwi&eaG=Y6=y(@&K(%_JY^Z>q@ zRU^p^qASU11;qE~Gv8IS&@k>i(x~G{&&v^Qdc5oL?~t5U)K(rtp|9|Z3pX%E$SQs0 zqD9FDao`Y0&qGbd9*gbnmFuxk&+|Vc+Xe3H0*V2ocN0k4d)Yst5YuYide`)qkYj=Z z(oerY5q?|Bk9-;cP@Or>#6#gBq=TOG9#U!JXPzp=RUFiIA(~QyJ(P}g??*t?o_!39 zSigS#q-B-81b1vYN;DX5f8~R-K8c{#xPlx4K_8lB9V0zlk?ZkaNka^{eEHD_jQsp9>@&9a?V%SyN|o&0HYA_#?%rCR)k0~Xe0Xd z@8tN>HP&%3<1u{Le-{-^0E|J8 zC%Hr9S$6WBA1-;)gn89Cik|BMoHE7ePvH_3vlx($bqUwLIVZrxv>CYMM25bKVT+Q;cDT;*#?E_92Jyy*g zPnt#p?KA7xi5+!f59Tj!$)}blM$%{E;7cB5W`(GvkSv%_ppnp%10v{;m-|x6?*;V6 zd07p)!Kyz2wBkyK#_8R!F*tHi?0jL}7mcbP-etCDBah#}c}j_F212Ef9Jei4(e@vZ 
zV`qV_42TvJliW?1)Oh&tp;S4r;e7jd2l>kVS-l4oS;a6*JoDKF1IxI$+O1n7cLeU7 zxjx+LN^fRG*akoxq;#A`w`ejktx9_q=ln49rsWt5-hl;;K)@z4A~+|rsC-57*fT5j z(SY+vbV&-IN>qY-kQC@YugpCJd=*1uc9ho@iwImW+&LN5V;dY%QzB=ey3Ycyx7|%W z%~luMOOzoqW`*ZAqxlpTNHPEkmJ-`nmwqFkd}3(FoY`HbRF__x&++MZ=h z?ZX)6sBEp_>y9bIQYfNoE-q4_;ZoD1#Bh%NBU^CU8-qUXML587!&uzsM<*4nNgO(Z zIIfl`<79%u%|KDwZEl;ns$xyue2jeG1T%`4u%}O+%pX~eb?l<6^-|J0?_V;6yS2k& z7$XQv=~Sz*u`%h{v*+Y$c?%+==d_se2T}82IEtuZv9Dj`w7-{+>@xclk7mezfBx#g zuTup;M_JiITRWocV_;z5)&1!$b*^hDR`XmB4pA579-S%UKZ{=OG>D_q`4FO>(vv#u z8k$}pm7AnAE}5YJBsddR8Pd(%?0U91iU%o^YNRVLR*8gh3@jDB`h4R-rx<9U-Hc01 zk5_YA*&(||MXW~enVXyI9Oqnfei~|ctDSE|p0*sDLTO1g%+L8| zcb(pY0Ua@n<1Y%6AqM(KZP#fWya{Ah_F~=zYh_RcEXP+{Zb9JGBgE#WKiVGqL(OBN zl~tCP+m^IEB%}J@Tyroe$P^*#-vSK`YggNAK*G5BXJ+A z-;w3e*9eqL{iZ#5d{#9);`85r)NI+mwA}bOwmjIo+y(NL1sUYMq7+V!F^5bBU1UCh zikk_zoS-Wz1q&%TpL6r>viP{GXNYKwWRD-2Pa3%Y4f?`f6RA(g@q2IiGh;#D_kD8# z+6Wffn1Q;R{}*R(0+(anzki>`Ff$D9F^n|^SEQO6OO%qv+~KljsYI4RN?B55BGrsB zO9+=1QY5kzMb>OJV=pb1NjF@`%FzOOW4ugJ!kiwr!F%?ZWr`fL4 zNDvgch`_Dz1cgTmMgViLhz_IDFY5OLFmZb;pB>a#hA|6XC=Ndgz%JG^ta5Mw9_Vl> zC8_hJvn^Vd3;rgfL5MU|VriP++7A6kBYRr>;+o<*8kF@tkVX+N=wBFGLm?ZuTAzADq{2J5G5-h?NJ%3n7T#y$a zbbwXSzZ&>)%Jp`hU?n>2@sxZ!P8PY=RVNN0|-T%HMneRwLMJ zcD4s}tUM)TJEaX}sHoiG3|2mVp2%00);pe|aAq5a%2E>6dcuJQkxNKW%VDjB2BgYg zdZiXfvl663O$?%@??PImR@}{KsVAdlawnPIG@Nql>+oe~M$t!rV^(#|`SjS&NmwxP%r!;WfcFUONFoZ(t(kQH8R zyP^Dqfzk+OMY#`P@)*$!amd`z&|U~%Tt`lOO|)%)te-H;n$DfK+N|ZUjH1wc_wG3Y zqK?RiUztpOaZOYzZCV=r&9Fi&QPR?y!5(hJr?PHwjuF{4<*;Gtyme#-_OQq?pPYI* z^~knHkA4?qhWyX?<94benaR7!p)p)tlL-q}j|JCXakYWr3Me>XucD%&%AYsw&ZxYF%zk4h?@<`BGP7P{-^MpHtmk$qK+c>wqrDWY z+r=1Glcr6tpy4xS^Q+bbcmfhvmY% zzX)ARO>Nw#&-knH)9!)Xh_EBZ#(x~WV6MzUkmTN#531ibrwMZbR5t(I^h%zC!4Ywn z(cjp!5{Z;nUgA_RHL%u=B>ynZ7baq@oDd>o+yKH4i;gCv-bdgtYxRLO2Tey1=&%Nt zRfeL}`^vc)N~3Dp;==Z(qCx@FaAlNp9Pbrv(9CF|RI#8YLeGPpZ6y90YDL_cR##&# zAhXON$(SP*kATF|a1cLRb?i8IbNT5vPki&ov%G^>$&ni5=tXSwqL+P~y&7j5L~hxV 
zBvie4)RSNLkzKzU>q)VCrtNl;lr%`v6SG?v({*QDxNt$1p3K@>+96^h`}&;sKq6$n?qA-`RO9=<&tTHE{E~To;OmMW!q70@d0ZM?5f0~T7Vmi;;Z}it zh^TSMX49-}29;z8fLQb#sm)Gee!(Aef_3`@p<6ILV@%*01cqdJVG(yPh zE9mV`-|q-&dj$w>m2vOj5x5DhlQHC*m$WpX(bxlc;$3#6pl`+7CB29rX)`{ z*)oorR)8dmx)b6IL-gK%vz5A$gAHJ#E75Y~0i%HUPTg%Ky!3(mrW za#T{%hOLGUa>)BMLLJ2|vV+l1=t-px6d^QgqdJ;r6Gd>?MZ?iGr_t`FR5DHkBda3! zk4F1F$nByXht?;i!^cYSV9vqFORImkfUI8Rv_X2uEdf=R0+!dO{7)kcZMR-Z?2S!K z0nj_iw4LaAiO&abm&5=~Q=NDs#ZAD|Oms;^JQ)r_?0xJjYLN8NR9|^V7ffUORV9oV zI^y{Kme<UrkLA|7Skx5tWRr)*bf6{mJuX%GvqoY3#UKHqcGjk4iP6|9i6WrSy}#C=dkqhQG5{o3Nb111zrn5ObBRW9~VVI8l}h<6sHbKYh)P z@?=o+IPxu3XRvEmCG!5)OfmPxmiCkGpD5{ao-(ZU*HDkcU|}UMNR@ix3s;>`itYN$ z_7HU!<(6ol=^Cv2Tb6WP9lq7%HBk(e840xjNvKB4JLF2GM6rl6WYm$zH9UsJR1Jh| znRC}=r?vY~scLB)yz?0u2$ygpCvP&$P8&_WUw&od{{7z;wOMe!twaDir~7W@=;tSJ z(^96Js!`4BMQiPb2&Aj(xGc9^J%<1YB5{cQ4RRByyqZMO>riQ+hQ zX8^(SFtP9fzr+m|fgH=W=|Dl&5wjaHdW+n44#F_Mkxe+3YF_c_DU_=PkdUIvD!P}t z)o@KOurPEW(vl)-h-uy0DE~%DO9q=+w1Tg!?j`;uQju1$i=2p~b7u3*F8u7z$cF1ZS9C9rz zhBJfe&e*|rDrfLt=yo$}!$wMattvf+GP;SAgh;jsMwy%n|Dy2QqhlFO7f5&Qz!w|& z`tv7ObhfKgMZaPqHbVEsw;yjs=LM zixy6wUgB7uHKh?=!_RMi^pWAM?^_JiQ2L=s^P+#w4sFON*})4lp3p*|Y)qitjDz)X z5IY5QZjaY7w-e|4LmwIV`ytYb47{*tApjr-8A{30agd=k9BC$ODlM-ChlyZ*;(WD! 
zpG`E!KnYna4@QQE{%UG2YZdpQRxO;ZAgAzroI}B%u z{MheSbcAX&5Di#%a^uB@JeI=-%l!SPEWUHA7x}+nU2{9$Q)~$4;MKjfY?Hf8dN?Mz z7OI<7&Q(o&aDeUyeJsiW&PU4rY!7W>D3yy=)S1$apSWDe|1-a1WMZTDWTKC} z**};2` z5PZj~gXQR&WpC28f;nE?b<4Vk+1n+4J)Nyvq%WKr@Z27PR!lv_=thhZD)2&+!k4nZ z+X|VRe_)GGKRxy+)eG1CE_933;YITpP&#nIlbqkGf6&|KziIYR7f9}$QU(?=ftZKN za`xuVFcq=jkwS5cpc9>#5uGE>RpWtgANO%*gmT09j|v7!WNz;BvcZez&o5QKTG?hj zRq+2+@vSJ zlJASVSP#)Qc`(mo9$-?m`t>hKs4ANE zxe}ac1qGt|=pE4CwqS`BYG%S6weu|Bb4Nc!vCVH}~ ze*__zdjVL)iJ)Shkag+OfyMsQJU82N_3kU@ou1yIYx-%V2xdjDKgRnul2peefz6Vq z=3FrWZX5ylaaDf`y^={jo8upy3O@)yT5FjdaLPiKr z=}7P#Z$C}!#6sp$1Vtv)KzicGqh;i?7L|sW75w>G1HPU>D(V-6-0eYKOnCe-6frkw>| zkOuy7uRD5I0j1S9>K4n0_$HBQ4N(aaa6Py9Qm9>HsNtWpUygxYuhP|Ud-G_Z%s6s6 z?)Rq0eo-}(a4LpV>7`V$TU)yWa?Bo@Y%j8E-(d}u;X6RfJP>L4Z8o>Hy))mJON*V= z>#O2y;yd+muFO#|u4zAXDAg68Tak3)$vEh_Xai&mtR^|x<|>X1vsSNna}i`7H0*1p zG$*z;p5U-&+^uV$D&C{e0`xv_IcvT&IH`|DCGd;s@sgeRu#1wmP~luB5kS&>!#p5U zYEy)n-oDm$>6PJY*nUd{U*vi~(a|Z?K*m~Iqv)~k%46JCeNEme_-8#%oNiGITpNbN zx>o2T_C%6ppv)41)0$>x9BelBNz8e87j?FX7XQM^#>3YE=-hDY1isg`>!aRn99qf+ z(3l-(I8~-d)UxH`<*kE?{|=goXCfddU)|e4`f!T7L!db#CfC>DO!G%>-dyB%fjOn| zGVH&j-YfLA`Od;DG3ss4JzoLHlW>6*&kM?_s~k zlP=gci2cQAdy!_zjC}Jz=tPZ?0({h096{=N%FPM)S&ng=y7`V>Bs3&72Tap(> zxXI)X5X5!rFbilal*TZ3B6Vs$8f88$hDq11gPC^m^Ap7&PqC%QBER11bJTo&cClOM zFTNoB7u?-SF!w%H2`U3i+!X;Kz|G5$ncV6HMSmso<-qM~4KE1&EtK!-UQ4k*mciq~ zf&GIMg+rF<5w_PU3hkv2w*4j|vwWGVqA<=tSwUW-uDpH!(J=jOF>F~G*wC(lOT^;B zeZzcmrew0(&uM4P+JT1i*0JZUFTmlI&iNEA&;@gAKbC~dWGy=F(lFQ;XPk+MU3ID* zfobC3N`ZK?A>D8S*uaqf6jhA+yGMnkG5G1rx3 zX^RPjPU>sx9UpstBV!lR0~2zw!D{8jialzuZ0S-tG6yB=MyUS9@4M#OkLV=wi@m^6 zv>*59{M!5c#S~F>RUi$kjXINJ>SUpUdCIXN^98hI%e+E(9!ZOg{>03>JH$Pwp&Xua zYu=T(bB^Vs#Q%Wti5oqPf(Kg%yxOn1l(i=LU%_5(E>2svW)2KY?W}%N#nl=OJ^$+9 z!8f~#i}oU{e*}zWR;JCG!u>z2n0LY!4)L(pl01J7$1>95mbWnRyP~*E9rdeZP?L-8;?0YB0RQzOKEQ7K_$aKmidlSs4I_r;x z-MlxyHTHG{?V5DVdz+jNSaf9ClG}?T5ks7t6d1KqpOs2})R&c@iy(|njMWG}1ozy# z_%KiG8Za27;&w_7j@QimN!vi#pG?yz2!tb;jo8&=CG5f2nRunJ;)PN{)2A(W4kw^K zb1c6F<_Ho7=7HDL;5 
z1P22{XX!otC(lplyucP*SP6o0)5Mm?hJn{_1^O2x=oF#|&`7@f&HX=ozopmtwkRy6 z&xhzf^)Q-pZofe>(-3|+pNI7&B(3kY)6OEotuPdVB968@5lkCm{{79j&&eumprp6H zKed(F|0$;r*D$1@`b2o21Sp$>k86slNzNEEW@M9Gt$;w^C72j(A)(7->otTd!oBvZ z$_d)){HE9W-SG9VMeGWWcrZgUguEjEM4vJJ6Dn{$-U3WIz5ei02Is&w#L|#)ycI1! zQc5O@;V7@Zj2tt#XEbo&Z2&NIHFKEv001g;EU!iQ2k&8~6}u|7nSp_Uw?9j(w$o$= z+rf{W$h1VF#nbh>jcC$*=9C{Z`(x-Llek14QG$~C2LDt!l(L7NiM+^}2))Q|Fb^oy z%>_U=yw?!2w^8nkX-KZ5mtJ&w&Y9b{Z;$QOK=J;aajF4t7jS&kRa}nLOH6k>kRmQn z1RPE4A03g$EQX(ku~vc)ww*cQz>IT)^SIy}R&W~jUf_R?JX@K4F12S;cjj>zAk1TM zR)7e>#6MJ88qHa?(kdCQR_R7q$OzuS*(rnbh7KJ%LS{kKF;l)n5AXYfpM=go`;iiB z1Q1mzBbs7;w7XzTX3CHAppor0`oit}$5hn7m5!C7ijsLqmF*YOl=5KlB3C~5*nR}O!=?S*A*C@g z##0^))6VQm9UPF=2I(|jT7M?B<3oa_7GxVEq!rZ}Qrw4X3N-3+D;kS0SGGa3_ogL? zn#B#DFL0W2WAUZuB2LD?DVY>{vwgg{ySC?%Qtn9;Br#3MDZ2eGuM6SH zh%i%rQEOYn?^ye)CsQuPN{m|Z7=Bvx47W--h8L>6Kvb}VKnhtNDpHTB`UFUF0IdDa@_@p;DZ{S9mm?l-N98Y)wC@vbl^bHbCbG$gcm_=1 zCTpfFUG}9mfjlfyL=hn}RS?x@rdU5U!XPWd?=2%AWhczA07w}m6LdSOpU%NKd*(~n zRK+Njd<^C!@@{MECj80pwvyDW86hbE1-JO|*>nn-Il#sPFnZO~%86j4@1=a+evsqJ zv30aTTJ#z5GH3_v!2%Z{7}K>!Y9;9`Yb%z)dtJ7(eT}hzV<2~-pN1cV{Y!FDL+nL! 
zTS!YnqMa zhb2Frtdxz##nTcZ2ElhLkB)kNI|B}1mX=xxhMSrI5|qoLvw)mw{||rYpb`15yxOdj z#y~tJ9^PQQu*@r3Q)KK&slT#?)Z1^*sNNyMKhe?&e}9`MQqXtNr9)X~wDryNSG6T4 zx8GwEr>~SYrnxtw4N)D7lNXP*H6D!!E{Njy+eOlmGUBfy=8JJ=w(Mrc2f5lki7+IU zI~x??Y>IZtVp=s54&|FDFW?mHFI6B#$yq_FH1`iBP2 zcgZ(=`ZLHQ{tM#PB$F)QHLcuOYay=8k_l_Ulx{!)=VV!a`$fqB;X#pT8LJ*5W!cW4 z9_T0e=!p^G$rq9|gA&Kc!)PNd)ua3D6Cp&A7FNIe=?;QE(IKP8Sp*YcVCgOMRExY3 zA%<|8Q>d`p%zk)*e>Iwa_uKQWUWLwx-66I{;(tOPSGbsR<0am7th2(!V&)b$7o6AI zPo8X;gRGdCw=f?nH7$-O`LI$lN{r^n6Iv{CUnTAnR3+WDY()b~3MG;QWB!Znobc?# ziUzGl0!AN(frtTKLxAken!|d4QdQiL*r_nNe#1WF6QldHN>ndJyU%2*=f`eIR0 zvM+QyAO$?>(@^tqB-Pvttj}X*wS2g|(hJr6c5#Xo_k_h$)IPo{5lrqigp~}e4?T+v z@1kfq^eiQgnpX)CqSujmmF&>Q$M(V}iGQZ(G6lT0#gLpn-T0pXA=#MnbX&b1paYP0 zM%sS;rz#~HwOme`4xq&SAfm0jIpis(6YDfFR=(%&Tp^N&;8b|d?=52VIphzRqUL2x zdy2SC2U@e;eT)APKEU|8t9%Sn39lmCY6Q$-KuZAN`VR^|xL=BcKOw~owd`q=3Co|X z+(j}`N6a?K?3jw?et8=*k6Jmbtzg))io^*SG8nSsn9xBYej~-5@_YxB^N;wt!NjxZ z&$k6abKsXIezAi+Y^PoF&x5gSJr5u+ikbf~FK5zhHw#ew6QT`3-Ia44)q)gJu}-sO z$;-icJq!&QeOSKyaRf`lo#B*7jacYh(gy3|>91ciw=GPkF$fkMEuSyS0V4h$zORVb z@$-m&@1sF2AVUlA&*PB@XbPReWDju0^Bxw7W%V5PSNb`y$_m=}@xz8)biB}*)xr#y zB#+PJrf2it>}qO0T;_Y_L36Za7d%qpLeS_0w-kt3AQO!9U+$pn`k*4d;q*O6 z_=$q}@y=^_Hi-QI6!&RwV1!Ag2xvVI4g94f|Q5ZqeOOx3**AvYKHtX_->=(Q25N^>BNzR=J>1t+S$fV1FD#m?jLORVz7zOnUMaA**Gg(0 zku(aeNE9*q`4=$6HBCDNWAH^e`NOh#|FC$tjOv<6b5@UKg!PP%8m^+Z10IX#p=jKQ zzh51z!wU5(0}G2t-5ww<3?cCqogFg@=_12F?`$h0p;Eqz4`xv(+!5a9)Lq9xW-M`H z3wn%+VbV1n1535Za%gk2MxNt$5z1PL{b&*eR((PhdHX5VF z2f2UB0IJMmoU4-pT6r`V1{<#K$SZcHsCPI9L0$zLWZ+OR4iwZHEer7w)?SV9&YOCq zJ6Nd|0CgZ5Aw-xSw9{f44tWOEBxN&~sr}gKIUga2z$p0BaRG zDj6U|NCoa3kk2}CP!qipIGEG_Fm6Zi#zFz~rgaj@z1V%;cjvN#@BS(u9gO5dCMsz0 zuaNu92TbGLL~0Fkh2yZv!dW0a6e3S}RbI!$NR7epA0}}+2eGjG?#$F@ zIwz*rB=23y%GzfX$HPru`K&p#wg`U&)5L8u-DZi^yXqe7Kqx~_ZeP)N*2RBO;agc_ zFzOe6BIIuP>H8`a5;m841sO}8>R;L#EN;YA!?ZD3@7AND>wN0nkHXt%;sBa2q!}ouECq zo)#1&Q-=t^j$D{IbLL?tuw+1yfdk#5y7_x#n2&Hm6=qcR79^>NUZCdLnHG@cqp%FW zf=>PJ2cVaanS16F<;64dXeFaaz&bLbYP}>$4#cE&{$|YfFb!XjzVV5e>B|8$yCE!f 
zVteleB7v8n5`0_64?rA_vQYm4r7?y~5x?{IFS(9o6A=kLeue755%i(jQTlLJ^gp_4 z($_5shfD7G?`q!BGM4eS?=jqnfoQ}E|HFHPO_wj~mrJ%nHD3`idHxk5!laVd^4CRL zin|@BblHBQL73{nfgbqw?VHU&XLxuxvkn^Z_@VJ}#&j@gz*@b^*Td<H?yl~g;7X){$9FD8 z*$q`iuXMBL7rguMCTame-t@$Sg?Vvc*PxB51+q)_4aw6QtqP@~7xK>r2a3-(w+?4dZwyTPcHz~x8f7bm$mDza1@qqGkqPwbXtDnkV>fe=)m~)>dvF4Z`f}(x7b$>{ENeRr z6mu=2y#O-orfZ-$SZF6v?TNqJdz~@5V5-_BZnI}6AOXAyrK!5za$$0y5x_od8WTb? zLz2?WhR-njBV1%w6g%Qo_JCX-Cd7m2lOGp3)8B@XbLP3Cn3l|Vjb~iphJ{nxMR7kzCt+5 zJF#TJC$u?eE4t4*8_67r8VjIagb88MDXX^?T z-+Z)+ycl@`7wR;WXRg<5@CIlzu;&mWMYnNd8=@HroH zT-|i!=*n7pGf`I2ds0mbe2T(i9MVAZ)OUAQimNWzlB15f-6xRg^8Q9MWKtMS7Mo41 zT|UaILr45xPe#Zx8-TC%XWIIsf63SJ?n1ocb>lXXP@|Qmow+wN|6NbR#ezn!Y7PBe zKqtjJMbr#B^uLliJSpyQ)I>@lF`(!v<5m}aZ8@?-4j$??1gh{wQ<^0uQk?O4oyeTx zC^tHRC-_sPiDQCBD>%TZ?P^#}Y6v{dp?(RZ01~das#c0;oJ3o*k$QSSF? z?cW%&YUP+4G0GwE)@+w%`e9oOxh>4!^*zM^GoPOn`C1TCKLWP%Xigk^$Ec!->LH0w$fv>^F52 zbfm*DozCadaY2r;S8~R)$9%v`>IZ96bFR*{A)tbT!kDaB0&+9Sc2un^O+vR)ZlNgu zS__BScIKP(1uhY*dx^r3NTZ^zMEB-FB-2(a>8J$zKy|b-p;%m}#Fm61Jh^?xvY{9z zOG=Y*MLF1qUg^~Rh(|JWLWe}0lk>I)bm=MIM$Y(fP)Hjo1})n6y?Dg)d_1D%O5!ey z>1u7+ZgQJcb#i2z6d#D)hU*AsBFfu0IHv%%hJd-ZYzF5zlq}SYKR}|1YnQ3|FFwy7WJVb?=i3PDVc|L>X8l)Z3G9LF zQ2FmRi)#%*liSqt$PBfjQz7&vS^81YA2~1VZUsel)-@!aJlSjTCUeu0f!eeGppOeB zF~|(6ZikGmV<~aWGYgK<0+k>Ec`{hkyqf~t`z)HkC#f%op9QpS^e=UWmn9K!ZXs$5 zu)O{6aQRz1jWbUhG!$PXtrW$}e})R2CE)@!>JBpJr4&=)QtGjTmZLS~h96ldc$@fg z%0W}eeh$wYTlwEMZ0|oec-4#!wu(_OX-a#w8J^MS7SfilV?)RRD7P10Fo-m$WYC-o zmWtfptW!hkQ0Y(@Tiqjm%}D%3oFj2ZQ{=g-O}|!{3~N!sU!m!`PHhs`LZ+0|m#Z9( z=%4@&(Z~5MG=yqezo>4oH?uvZ<>gDxJ%^dp?I6Dub}^)}TMLHwP)b;Urk%tgn{wKm z#(P}lj*CGcS*5kxNO|Yr4dNUpc0J(Fq#`*kNV3S`?6$sF%17}xRDv-0qi48mqtK@W zOSUvS>M~r1sAo~0|3DiXjaEdTUtv2|$#Ly9Se^z5Y%QiXAmeg67O{a9DlP~|KUDak)WWwqzxKb?SB!5sZ zAq^JK3M4YAM1A)T2kG+#B$G%;H?oh&!Xgt#Nd}0o5K|80rryI=lAc<9*)>3^Q0Qj^ z;kYUri(SC%n}NwV2E#KfIcLQjDfA5ko!~q3j_yU)b`3$UxEhLn#GwXD-$PmIjr1NU 
z^)+Azh3EJ&1LFmrp~i@0bZXv68f=dJNEX`{+J@fIxHD)G>`)S|qp_Yg2BoRxI4bHz&D07@zplikM8oA}$+@M_cxmOy>G8b^4^VR^R12yIdY&!a#E~Ptk${|yBx#CiZG>BC?Ik!O8Keba zK+RDZ;!EuL-_!}nE=x}zDeKoo1r}UKsI%v6dF%<7EIDehD?WZoNvqst2{5Ij7Iy5hOI=%*gAw;jQzo+4wa!Sn{oYd3 zJmj>Ole6AY#Tshafz2}oy(>5y?vGpu`jDf<9S+x@^kS6YF4*&{lyX|6fOFMt7)Za$ zIu|_iftuokFnST9HWbG~Z!v_x1juO*Hxc+0hC;Y*CmU79-6W7m9?*$i1VEpQ&s5za zzLo#PR=zje^P27A^i!ua3@I}naK{{|*^&*xxrmPgWH6B0Vib2k z4$h+yGv;O{g#G$?yEx%X36E%TV&|v5^TT#Dy=ziKAVkpcRK9eLxbol) zk`gmw#E5HRTn%;R>SH*w;%%0dXdj6k3}^9it{=0xcs+vL{_i7*)blji3XA1vq2XHX zdP+q!o>kFkEpkngPsTAsH1&|@0sdHq$z`7aDcHAb$2+N*Api6$pyWr|KRsPR+Lckb zP(DUEhwu0<5!Q*;6{H9TRV>2TUFDBF__@(e=}`XT<P>lNH`TbO#jsbW-! zmw;+|2Vx|=kK9@sw+xvt;MJVP?`GZn(@%}Omt$A~@!n*L2oQ9u#1xXQVG4gL;wRMu zxds|ziq9=j$s`b;*KrrnU<8iZ`0aN(P8_F%gnU%^eg}KO%G!m>2XEp511zUHE~Rp&b&Eh#J}WWuKC}=T_wb zyg?4`bg1DE^<*@U;)!T7wVc!D3`%D{U@Od^+L}@NGT!`ai_S?5BVTMQV`y?JAjFY< zl&X$$Smfgzp%aHlIp&g*E3dZ|-l7 zHMf4We`L&n!y%UtC*&E8XwWw$#dLgxTUpN5tz`CclWDcX17956Dy~|aY?m)>4w!R` z0gSxR&y;3zgpgK;c*ykmBeXy{2(C700O}WSC}Ss=TzkCtV0K1wU`=Ai!pA}&)48Ob zL~iKFlT1$-?9wFmi`zb#-=vmBdHGz`_Hbj#jCP4|*+2cez&Az3WgUuN4QXz5f1M3= ze9z3@_G4!;KAIQ$xzcRLie~+WxkUJ53v^;=gZhrGci(j`B*{q-n>>6D0V9xB0lo?w|rwx_)CsnobQrK(d3I% z*)4J35l+SO|9fSd-V|mUEi}IGxz^72i52^NW+)zQxt8U+S*Q+D89X(4InBQ`T=JIo zj``qusihGkVwg0@*<}>6myoiiGnpcIOw7I`H;w5lmi5D*N%OSxwZKOQE|euCCMEa~ zk!>r(;+3de=+8(fotG69PNh@FwyM1}CQt|6nam@K{Qs@~t-am64CFooFd@kdz-Rn7YhROK1wi z>(tymdf^a8V;o8sDN2A-Pu9RTpzpK`5%^vG4x%#H740)TBW0vc zo(=BF6UD7uQ$cYX_-sXIjpUvr;n6PRwGM%f#j-5$$r9-#U)7TDpLBn;h^{J+qr- zajVQc!!9ch-|ULzI2fe88?m<+`}XgxA+@t=E#mH=In%1my|n_)Q`Q&vDr7iKp4%qwJu%cg6JsL9Si4`J*<^ zpogqti5O}L{5r(rT{h1@K8K0((pyMK_w3u3g}g5+XZaJ~HLsd0v2)%PrM;UR3UhE0 zZ{NxEqwcL5?FZ!|LQpYF-yQuK6GrWLXYihBCKCvF6~ac6p7q*r)lQg{eL!21>}n` z8g8hits??W1x_6@>_Y>rLFH@W2FtD0#TnW9+~S1wjZB%jb5OTdXTH2*g#7kZ5VrWx zr=mN&j~sta`XT){P|`m+yw{JFrpt4h{0*atZ!Q|d%HTyu|NHC1?_Oh>M#wXra(t0X z8OpaZjW&99lk|j#cG7 zU70rR_}*#b8Y#i?W98;`n0IKb^f&;ArRbKI`Q7;8x1~8uV{G|yv}G3*8zXg;FFiOR 
zeCsu8qqVLt0U*Zc)|yUeknVnk$i6VJTG5)R^wt{z=bEKd|a_IE>*tcZQ94XpbSdpC^gHz$PkGinCf zb*r+CGgG|({>v?5n{9x(f10J+QG1?r+229+FN3OCe{tj1Rn>g0C~HQ!G?{MLaq;!B z66Jd{4|@6 zJ$l3}8dtjccJb@{qrIo62fMP;*>t0H$fSGIXJ-ILR-x3~dE`jH1z4*|qQ{2mTN=SB z7R6Fku6YzRM@*`zUg?Uiz2u2ES^SM0NUW48g=3E75Wd|PUy_4dskUM@5w5be$Ival} zpX+0Xx*=_`;v^GLu#M=Jwue0BLW0lt<|xs(ciW^5s|wUUl#2qq-wMC&cwTM>c4)2) z!PL6K>eoJGqBq6#AkRr201eWtQVsWF;#fbHr*>LhRb(Hy5Z8d`=Olz<8?{AK0ooR7 zdH45@pez^(_%ydH36`srFXEf$=v<1K`{g#LpREjY+r+w`#l)!mHQmdg4r1<5!Gf|V zZm;y+h&kZ5T4}?gF;X+>yNiLd)CX~^#!;-Ky)8AjqUiPoEO*~`IN$T~ZobuoKan!Q$^ePuHI zw?Pd9x`MKf;*~#f=9;B10of`7Dkm2wJW6=}LuZ?m1zVTDxWry6{P!OXVxP|=)A%z) zbMQ=aYq^H-H-pHtKA;Rc+1fxVd+LXPn3`}Xqgz>a%x1ie`k>4689xL@A@;M5KbHLD z&q*a$p*U=~GvUcC2EAH*kH#Q!z)R9Z)|kMm4b)#<$)oQ5isl{!`(;5z&wQoWZbM9H zIyP*$4i({j0%ayJUFzj-Z#+Fcs}|J1Sde-*#3ahnLg|}C!tHA~_<|jE+-4=X@louM zNG*nbzuu&#FfO37dy0d{#$MeR&7Yv#!E(`8hzTyfSr!{K1mwL-(nRrIiT4R7QsDzt zqsg?rOB9W;7lE^fH&?0Eo#fofnkO6{+Mk4>b%~Jl&umuZK=aJr@|!IB2z?;kDY84e zFJH4=H>Q|Yvoi8d6ytfBQG-Sz<$7xU+wwuxYlGf)vmZMlbRkVk>v#|-rSCDamlcox z0sV7=Pa5pD)x+J}bs8SmRGbvn&0IUMUz;h5#@1C2s_#Qp;dN@kubs(dqonZ*-gzLa zUy;VYK5{Nq{It}xOWik5@G}YIK)R~LY440#ObIre!5VNHhdp=ddaVwR%>#2*G*q5# za%POu2^3Y??@5#T171KIP|B48r04jT$2j{l(|59?cX|VCF-C^OO?=2Rn0J zqvU|q;O@e)sik{JoSK=`V!&PMZ#0u8)A}S)nO6WQ%C@5Q>9Tn7Vv2itu;`Fet_as(<6CsEs+)#mt7m}Rp@U)OfU45@}y(OBAtl!c!@`KSf&(BJj*|XN>)waix)SVQx;*>c-z(&veV@)KvPPD@xSLC{JR^`4z>4-F(&`D8RXKpnF&YV|ktd!=3>{SWd~MH=xN)c+ z<@IWZe5>FgFC{mpQofCEb*@Qxyl#hS=8(~>HoKW8*lHc#&D{H-U=wp2G=b{xL_dlv9M`xcqZpIOjrgg=SPR?pHKChCcVR_>N_iz&T6O-rPo&-<=ADMmC3(aP9gP^;Wgd`m9Ivy2JUC zOx#?EIIBAi96&Cgf8?fTVn-}dQ&GlNiFE|wRLS~1CSGZuPIaZw4W8LvSmd zGq!4XZXx(|fzUk&uD%N_xsXSYXwZ((7_ELdn5|qG@zJcNmbWk|K>R)9l)JQ%`oIoT zQTSsQ_2Lp@ckb+d;_;EmX1LTyva2mM586b3XR8Gwm!d(kvDJgv6)9A9_BBq@$MBj#CQfRpM<77e2aY1@DKCXa$Ui*X2jp zOkGLn$D~A;@)Mm-9l`PO4cf)?(j*7V-z7*tUw(t=KVDE6xzUjWf|#Pv-YIrdFD5os z{?PY|eVbBR#0(aFeHl^MQ~3B4(>{dLTm0OLqh+c|hK|<8RP_rb7)*D3w$ePH3*H8P zTYkLzhrm7n)cb5su$qhuNUFETm8VeVZkpVYXgAG!Yd7EI8rN@ 
zd)siiSwm%cqq=f~>dS-$+R)VO*w;kmBoPHWb)mg#HXRG*Y_!QyXot!$hPh0{kJR`> zxB2pMQY=e(F!#W@apg#)25?s?BRu}PuH<^dcpScUZ&sVmldXUF>zAFslR(nluhYiH zu$q%4K}wYxXVftbVf_lp|BAp13j(V*Gxk3y=+&9dKJ+8;AXoyqq)^-O7`ug9##McP zw}l96z}-1uXFZv1Z_b|%ij*?S8LOL~Q1jE{bf@rCGnh$vJb((h* zC7lkFWhgCpG*w6Km9>F1I?{`bjOx3mY9&dyzE=98DGzTUI@srPiqqQrX{Rb4Mv<;Q z+QZUmJ!|*keSg7*9tw`T>Hf4vd(dgQk&{Q$y9;KqW9QDDQDYk@6YX2YPiy#z$G0Gv znH=@;|JDPi~ zPK4e#sf}v=1y`fQUSi;+8-tEso?c$nDAb&unAi2nV?1S}pk?@Gi~wv-Z2cws#7;^d zUco~aA`00>i$xx(k7S{lUrK8tHhCEhKIyoQ7DPa^gxMoWZeIZDH2&7-{zc8Da@&`w z(%zd438%w;=;rL;=ll@6k? zK?4}H$A_%Jo%r=2fSj{TnO-NyJ?r_5SlnHpp>b-1r6|p~O-F8Wt7AI}xYd`Q&CPZ* zl*drp`dE4F6sxJcM>TLF8*A>7!r9HZ+4|S3b#Zg|ejnGsX)M|#5R;lG-_#}kbLOS< z1eb`OAzdS<1^ReF1m#m>mrC1g4Ot1=2qOFe@;9L_1SggK=bv*M)?$=*+&RQZsB}p(@ z9d3}HP4DTMAVF%|vL;IXRi5EqR+CkEw6ASGe{ayB%6&(tN&(eaMeTVy?xBA-^iWg` zVlSD|ar8$@Vsrah>gU96Qv>R$7dK6`DF~O`ff(jMpoD*^ZUp{L=d!l8tSHvDEZoiW zj%?x*`v!9>HGc#-tA8uFv6^i-%y81vG&a4I+lKx@1w+)22>vxrf&ew-7R{ysi#@|P z5TT0DjQRkk-)9P8_}ErbmLBe=|H#C35qZs({l>2lhHODHGeGizKqUN6iif_NB)o3Q zVgE=Mm6SUG9#`;E=}T#j$**kmeiPUYynN?-u0wmYZJO`9F?s;(;FS>^`wz~`${$*R{V62^2=*PimV+{b?jtZ$ZjUngF8 z^+1&|FsU|_(s`kccq`M)r9lykgZzV}sgx`ty{ia)jHvf`^)<4V1cq&EcWoy+eSrW- znPVmSKVlC`PL|^C(7YnQmH9UV-#qTn$C$R{voE_}Uj`8^NEX4EEniA~Jo!m``>{J9 zK&9rmMrb=9qK_DTef5bf7usj(h0nv?HbE4ZbL`u@;roDCk!2L*K4-siX!OiIFEdj{ zUI;aatkuf|V)w|BSQkIVYS5I1hK>zJJ|HivCoXkVONgzVt z8-J|MQeZa>&kK@3=Xr_ZDW+iyn{Ocib`Kt>c-Y92TXMQO=5n-EA8iUSO*sP|SsA2JAeOF8Fg3Sd-Mrg^nAm8srhu8|3pwa4?2A(S{^o5 zZI|g5A_aJr=1FQ|W0@o5<>Y7~TP&fiP%`j5h9Tob4}~#D3Lm)(k8wE^@>F?_P4&p; zoN;LwIQ?^Fs~d@Dfo=yqac#wzpyPvm$uBvj0=h`$EO(;#&7GIM!DI#SkXPS6Qr+AK z0Kbf|N_R5o@*TO8+@jKzZKhu2j;^C1>Vq8JPK1P?cj#w&p`-6esn94B*ssylUygyF zQ0E9}%)$q{*?|_r#3q^ASjt~n^%N65ksHi88>W7$ov+&dh_B4*_7=wM5Pm}tEME$o zo#5%=_g;KO2PsuPxa2xeNvFVI8)&~<)LnT@Jv5sS=B!OQ+v}i(Mg8lRvp~N`#1x_W znTt_7J+o9I+ubO&ctv%VH2Lgk+Pi%|h+XRkNS)RstB8$U&3_)Ry4m8F1`{1YhriZJ z%dzw&Vzr5>fs}j^RzaX)BXlR36(QBSy)p*$FU4}`rsc)SWe;R)(h7Mh%vsIK_S2f6!CQP(f 
z0YTPU1Rc!kH=-)J^XCoI-ET_~#@otqmv`QwusG)Pu4F|Ev&Byk!Bnw~G<(v%R0Y+9 zb_w*s87X^_GQXPeswy%qTH}xTK;`XX=tPr7y9HtN$I>=(BZJQGiUsMT>AhgqRaNOI zrfX!cJpif#eg;_`l>hczbv{kkud5p=jt8%ewwAncw#8Rstq>^%k(M+8Xtx!4-2Tq>3#=?i& z;;!d8N4(YQB$^8sIb@N%yE47CQ`oAY_6^c|q`)<~k#+^9nS3-`_6`R#3nAb#0|0ww zpC6xupa4aTT;}hkBMc&|LAYv3wUVUe)wdggNwM25cB#IX3?uK;D zbLqiT3D27c0CDl!-^9yBXf;P+{Xwjv5yTlWYMhCK-9f3L*wmQ}G(@jxVQZ=2#4eQ0g*4Z)63Qh(2r>W*&Zw>aVzYjc%JNr$fkEhqF0-enq#ph8 zx7FvUvZAC7pZ4ydbK)0YC<#y4zy5)Q9_Oh~X;9R66?`ARz7e-wjJ4=gw$RUYwI6#Q z_BLbv&`yP6`Zg!naiU7JZ8zS0inGo`C*@F@VxTTUYlot^ZQ}1XgS9TpgG$eRAI!!__Aen>t5*B8Jjeacm(>rHr;l`(|6!<{v#BE|ueXQ`ckPN+Qgr ziILV9$-TPi*-;aE$){n6fS?#ASPC(kCV7m)1xjGtrXM;`vNrNqoc{V`?R9Hug(%MR zO(0I06qkQ51EQP!kb_3Z+pI7WAmEr{=ovWJ#EO+UPp&g&E zpSc$HM>ZY1UFwzbXB!5OS%bu;yOt&X?2ode;K6_P)^9qX!o9yx@z96~cb{qS=Ff#} z-+2hSp6$8HFKI53S*s6}p@FE>>c~6&CW3*Eu5rQ(CVi66~_N5qYS*VB5_Mla!Z;c*%dHPTP0XB{Q0zP{(@1%5K+3T+C)xu z>-Aw9tk(aQ3L#JoaBt+f*}i$SVbI&ZThLEDY&o+W`cEESac%Yzk?T16%@Vlm8i3}` zP<>k6`&Y=-LI=fUuUhMO<=*&8VN)8K2Etbb4h4zh>T}7^=IEc z(CypD3xX0mj@qn|7>r#r1vCuHVG5puccSmVi`nD!Pw{B=8dW@df6K2Tp9~y2+=B^hK<$VA8tj7+f;2hQQZ6ezz{DR33geepsf@9wg@Yz~OLrODTBkiSDha`Xhf3^xj->71!Sx_F*eah>Z zxf+326rs8Sgd~BEe{7`wL}{)6iL@3QH0+%$WEWFQ`|5xAR7LXJ--6rQ&~2?vZ!lp% zlx#w^)8k?KztR(3c%lIxGqFahEzb*Qx63!>*#6NXsi~5vLp|+I`DD`f2)RmX`ffe+ zw?ArgoM~pt%`S>I=HL~0_FitPh-zTTzu9jYQ#tk3?uEy|eem#1HzatobuCCEhR zG62_swKoe+=R5S$M+cUl(*%I8$kb)mhYoBQ?07qw(Xosg==;n=b8D|mo88L(z~J~j z#qXXhd4C`5R*WOFd-A4>xp z(ZX@U)mE~5@;1GOSILT&hyUz=7$SvNg7MB@SuTC?rW}=~0}vtT5YC2`{o{iP7 z_g9a^P-BAz1^#6|@YdH}PxhZ_UIAovsz!fR?SIz4HE#0V)ykV<^YnN5rXgK7&rQ+l zOW+Q(Myby+7ziE8=wn$tEIm;)H9P)fK;_;IDr&NV=P9qg3-q0RKc0-VTtYbiZAIm` z;OxJZ-2Hkr*wy$5#PoL*j-R4Ems zpgFW{8mqs5`BECo#+mx}Av6HFqqWmq`ngflgy@fzs5|n2M)fO{Jx;qQ++7SrlNIA( zleX1Lmp_`b4g#ps`Kj-?R`$|qiV$T@v>p@km>=TvJb7NjX9r>yo^O{5ePi-dybVyPy`lt;eBg#X(NB%D@UpHE^XF=a4NS(Aa%0 z=v>mUu?eP`L=>d!{eRSNT~4!B3+BX-wGX#EjMuMZ#jZw$34pAe1YlQR`#S=9zW?|C z!JPl!CZzxTL2e43k52Z2@yO(@P_z`PSqySR+#S8;&yu8586~D6>PwHOi21jOj;VE= 
zjnTo5tP1?%;hC3m1KeElJAC*`eNAQ4^bUCStfTMcUg#$N?w#q-`E=yTY0>FrSUo@cW%a?W?@UFq0gHn1Oq|LKGafhJ;*O|l}_}&8ZGlQJ{*RblpD=DDV6SxPGLRJyNIE) zfYjRed0*5keZ3?Qm57cJr6;lP8s9oT)FpopkYmLdsf7i<#|^^s^2YZ!CU>v;z}4H4 zLZ47@qL{KYqXyh3V!%-bT_>i0`fG*zang$|}8GRI( zq=y#F^M>W0@84fqjbHaecxbd->XfH}_`Dbw;2n9jcX^tqi=;m3A7J_ZuYdgqZ@l7$ z_nBtnBT8FZe~)Tmc6cSw#*#h0?$oW|UdCE!5dP2a@&Gd&-F*JnI4G4tll zqx^Mw>hBU^1s>A}7Kvxfedy;w}Kzj@c6*ymXnMU zZ8+>;s;K+)Xq4&VK$i%SGMaH;RN0dHK3So1mxZ+B9NZ0JOToZ?%@(YB2a3w14;F<1 zuaa-ex)69(M9@*~cWwW$m1hdmI~XU#Y`&Y0`Tjl-{w^^6g0aQ30Y5-Ml=L?GGgIYb zV_m%O{1<|sYx+Tu!cVf)Qbzr1O0MZ4Rn!|Y!u-@Bt!*|RepA!=gQn(xl$Pz^-IEIW z%AgOGZQr3HgZ#|on)x>&ce`CMf9yX#>^&H-#HOlMoy<->*@)KZK5H+NJjOB) zFNp%{XNJzpX5l^H&km^q7uTpfFD&{1khMuO(>@f)9{Iwh+0}trmBA!u3!@;p5XNk@ z-|fW(ZlOVV4Eb=FTdC|G>>Yji)6%BXUJd?|W8TzPaG6SZT!!zLJSR|>)CB?!;tf|+ zn!R~YP+%z9T${Jg&eUFwJRODty&Kq%*UxA5@1W$qy*@!*Yak~AnF?_I7W`}KwzvsZ zbzcNTPVsXQqB?Tw(w?$Q@9+5Vivj8`C0$u)Q&3Irpm#H|Sb}^r1D!kSHdzbj^@a=l ziYPI@JINQLFru1yav>@$dqCxc{Pz({X|K{98-Ui$KA?DvY_#YnH|WPV9rkr=ndBBiE@4|>eTxt#i@~u z8widnjS2;07Q&(RGSflfJBHDk-qfihqb+-3gENcT=(Id2#3TqaVe3Uo5MKOYIqW?y zS9nqQM^g8F5HDhOc+2Z40nd_)nbaC2)gKJRl~(8p)K{#F6eH8g2DPwY>&fTeED^SL zbDwO>ZlW~)`9nO(Jt>b0!_sQRx{}tU@T{U-ob`d20=#4Ox=Z>O@cBA+k=A-tkk>NK`|n?zFx#0iBcm3z;+xK0QQG$W(wX{LQ>f0g~iJp~QR zx+3~+GMfy?Y+}wpbZ7AWr60cS{)cj1UFq>I!qGYA+#BvHV?z!6ZA!4YNcz2AUc{-$mWoTDP;#Em=R!GjhnEk>%` z9@tBMd6UE*r746yW#mht&8=`M5!FIq@5JT~eQT+-VtyIyd%WBx=EFy5xe=G!h;wm; zD~T=h%)%fKS?}wYAiQ7}VrulzU_H+CE;<|Ik^H&AM1i^!Y;X8=d{~vTvQ7qbj!c!V zAg3gp(zxNZbHGoaKPP$a`snC|3|Cn@N&?S&D&j)*KCz~mpN~9wFXr%Y_%2XE=*UQk z!}|t%3ScdY^&O(skp{VEbTq(gN113L`-*Fp*$e*$7~>L7l#_@_m@Xen4SD ze*ip=-eKGfyA~8IHLoH22(%&I=m9gQ>4hCMo}Y~n0uF>F1h9lZc-usGX^CCz1%y^G z%eS$=b*o}LPjf35ot>Mh>y?kbqsu$k3UB-F>IC!PYv*hlPvm) zWu6?1#_RVobvsYW#ShS97oj zpxci8N1tMISabb(p282G$=+o@injfFYF?^deG( zU%t+Ar>*d*t@@+3j3M>&`Ib1PAU#E~$%6IzBEs7AR?r7ati^mv@oxIW!9{f~@Du8w z%Y1!dNvx_qHi z;PY%#darB)p&?y2X&2JKmgh zQiN3MQaEc)iZHxp4R>5zFCp1ODgxlSm2YctE$OWd-;PI416o6gKv^5ZrR0}MmeOrJ 
z`|PeIbR3+h_=T|%YylD(`E>@^$PzI0#fS!+C<(-I$bVGgkvmKHh8EuIX`}P5+`6@*D4v4e>5~}pede`%@<+#V)T)HV z*Ws^xSvodqt@0~NnopSMtyo~gvCmBRFnt=)l8=u8@)go-c}5&P=v0O!YQ>n_6_}Wj z73S>%Etn{J3G3x~;;{m&j`sFkn(B^vz-jRC?VIGR52WrMExaw}cq{b6OP*kri%Vmj z`0V7sE2ipzmxYR4g~#@6yvrtySceG`2C*3bqDC{+7d;ew_@0;Nsco3Ah+Zy{X2dga z>%0q;R|Hl?GUyx#`$(}AGIanLi>(mLX|ZH93`@0&tqbZb96N%NM|KURH@v_I^T=YX zAa9wYv~yNFEy;k>r{Q~{?8)Cj!x)`Q&|j>Yk6D~9tOVl2Gf5MUgLkhJNgpP9z|1f( zuZ4)2eBzCd2HXjV9jDa4EJlmKC0r;_eWfhI(1yW;us3NG(knqPG4_rQ^Fu2 z8rR{D236-l$@Wmp2|;HfRB7@8zff^<9wF3AkdZIyk_0M7u? zUBUER@0XAYQAjQX(*d}@C&4CZ5Wn>_W_D6Jj2GR;~um8rh_PFkuAMEVy5ii(Hf&W z>Lgfp1d|?Y8!{0Flx$kcvlCA`?g?c$cJ&&2(kpd}g3p zDo*e7=J(~~|9rF)DuKg5L!6?~QVX+CEKswM zkJD-W7x&&dp|3uvm16uP0ws7}GF;sLPzOvHSCv7;ogv16)@AU^Kex*(e{?@zw3LY52 zLSMX}G46tjX>p*o)>(xs=BgpESf~eHz+?aus(0%b4uYf&}1xBa%trc^jF51*BlAT zRnC%kAmK0_f@ZyaTqA-Z%(Kptprsm$-*VCyAf@tmQ+9myU^J;^$_Fh2J%q7-af+-g zHQNVVgd)&c4c2}BuF-~PO)B&*K;F&sK9GpxU0rV$v<=qcFP31qLLF%Qad=5<|m zuxUs!r4nf*s-R-69oO$oJpv3IPkyZjKcjxkx6gl;u9d0cR;#h;{Io&k1fbe!ANLT; zG$G9Qp%2X9nVyRBaff10JaN#*msOa4OXuIm6+iEe6&o4#7e!PfzK3*6T??#6D*nbL z9WZi)9KIEVgI7b+v`ySIyjA#srOujKa$1DkY~WXUjp$8PA;4m;fsSYtjO)}UuVt6V|CJs}PX)k% zdfHHF)y>?{J-x{jBE5~KUj3$L0pyRVk@e*v zs(5&rK?6?P;n>TjVp_zYA9#Iv3`b%Rt8XW&18v;ahJ$07vg0AZ#fBWby`%(o#rJoH zEl<8M$Hllx73Vz;Ovc79cdPe%lOGWH)~+^d*w?isLwzM-XlN}E!6uQy-dR)Sws_`_Sw^XD7&xp zVD+fj^+uXQ%{CFsn$qt8w7CX8urF4??$Sn5M5812Fv-dP6XQUbHYCR#%x=h8V^Wy7 zVasDg8Aw-&l-zIVGEKs&4w5(j;VN)e#A65V%#~a`O;-^7=26&@>T}MS4Kf=jqv)f1 zq@y(907Ssk-+l6?yaTQ&%N23Ao|SG&vQdp{96Hii$3z>PDzhaEe+*0kc``JkuD7Wz z!z0dGTdva499Xmc+DhJbBf-xBOyrMTOO|UB-FQ=cAA;R*%^3osxm<81W z4DUj1Dp=CYIE_j-N<2?u9;R(k>A>0*J;=kRQ*iLHVHI7ZmyzX?$Y`hnl&hfs#~J}0 ziHvvyrY42|GM=Kvs^L?lJ)1g}ypCX!j!(H4gi$M9Xlid$oHM&z>ZvErit+n6Jq5!- zw(>2TR1S@-TIoUsT;3Q~j1BzWey{c2ir^vBF1He(%zhN3!FME6TxjmWK0GT-jH#K2 z80b}Xvk$=_i3}G;e3~pAhmS{B+!d*4aCGF54i@4GNn$LbP6U`H+eGZsG?4S*SQ3eh zH(fl`El#j1(Zd@?$Yjj*lyg(f4Ng(Qwaozo7%+3GY7*cK+)U}(a43eWVbpCu&cL`P 
z9ItX?9Nb*oK>iwki}AA?~^}0~c-fy-&EDS+Io7Dfy5pe~?s`d}@OFh_rX;7vmGSE?7-Cw%_U3M2# zMVd)y9%f`w7uV|0FfVZc_M4B1JE)t-H{4e7oQe8HO3WRgL0E)7Z-B)-Rt#KL;tlZ@-obp~=(j_0cl@G|r2ts^JA zK42l>&INqBS_V^Vv=SWI6|9mp(65YOGQ3u2vR0Tk!u*C^bjg@U^nwz$V1AH4vQhNn zqT8NYk zBShd|!LpvK*6_3XlJE%c;RGS$RNhk;!7>8|F$So59`m_$Q_(O)5FumhN26715l!)= zQlE1(P1>}p^mR^-;ew+h4vL~j`?zTGtHg{9NQ~+E)X*K(TU#a`oX@5XZ??Y8$tw=W zw!+TDCa1qFy1&f4j-4H`E;(&S8K6dbL;R=8zVqj?WiA>uZEtmY+T1)~$pCCYv@@P- z8JMP``P?|2G=?yP2Y&{dz>yi9 zXt*i7_s4G^#41~bL>T`y6XV@Bv%7?b;0vn&dHrs=B4Vcf)ZNRFMQK>pmHUVd99HLk zDaJ}L%QhL=Y|1TQ4L@K{K}KH_6(wu27x%8?ECg82q6JZx);nBhsO%wv`yY@_jSScQ z-*yGwWSojp-%jkSqOOE=j<3*=7rha%38o~RMtYPKkkgv$VVwO@+;y#`XZlyQi)bx@-TsPi% z6+d*@SPv);tdXo6<; diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py index 526502bca468..44045e154c0f 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py @@ -18,7 +18,7 @@ pip install azure-ai-projects azure-identity Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. 
Please see Getting Started with Azure Functions page for more information on Azure Functions: https://learn.microsoft.com/azure/azure-functions/functions-get-started @@ -39,9 +39,9 @@ async def main(): async with DefaultAzureCredential( exclude_managed_identity_credential=True, exclude_environment_credential=True ) as creds: - async with AssistantsClient.from_connection_string( + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds, - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as assistants_client: storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPONT"] diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py index cb4b6a2aab4a..77a0803cb416 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py @@ -16,13 +16,16 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio import time from azure.ai.assistants.aio import AssistantsClient -from azure.ai.assistants.models import ListSortOrder +from azure.ai.assistants.models import ( + MessageTextContent, + ListSortOrder +) from azure.identity.aio import DefaultAzureCredential import os @@ -31,8 +34,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - assistant_client = AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + assistant_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) async with assistant_client: @@ -65,7 +69,10 @@ async def main() -> None: print("Deleted assistant") messages = await assistant_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) - print(f"Messages: {messages}") + for data_point in messages.data: + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py index 3f6ea212157d..2ce29e6db236 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry aiohttp Set these environment variables with your own values: - * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Foundry project. + * PROJECT_ENDPOINT - the Azure AI Assistants endpoint. * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat messages, which may contain personal data. False by default. """ @@ -38,8 +38,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - assistants_client = AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) # Enable Azure Monitor tracing diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py index 538407082a9d..605391bc6ad7 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py @@ -22,7 +22,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Foundry project. + * PROJECT_ENDPOINT - the Azure AI Assistants endpoint. * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. 
""" @@ -44,9 +44,7 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] - ) as assistant_client: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistant_client: # Enable console tracing # or, if you have local OTLP endpoint running, change it to diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py index e5873c4115c0..4bab1ba8aa61 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio @@ -33,9 +33,7 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] - ) as assistants_client: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( file_path="../nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py index ff6cee650009..0fabad0a180a 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio import os @@ -29,7 +29,7 @@ async def main(): async with DefaultAzureCredential() as creds: async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + credential=creds, conn_str=os.environ["PROJECT_ENDPOINT"] ) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py index 7a95a1df6352..78256571f64b 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio import os @@ -33,8 +33,8 @@ async def main(): async with DefaultAzureCredential() as credential: - async with AssistantsClient.from_connection_string( - credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential ) as assistants_client: code_interpreter = CodeInterpreterTool() @@ -52,7 +52,7 @@ async def main(): print(f"Created thread, thread ID: {thread.id}") # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("../product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) # Create a message with the attachment diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py index a4791dc9c8c7..399e5610e771 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py @@ -18,7 +18,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio import time @@ -31,9 +31,7 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] - ) as assistants_client: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: # Initialize assistant functions functions = AsyncFunctionTool(functions=user_async_functions) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py index e56e1311aae5..54d16fcb845c 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_base64_async.py @@ -16,8 +16,7 @@ pip install azure-ai-projects azure-identity Set these environment variables with 
your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ @@ -57,57 +56,55 @@ def image_to_base64(image_path: str) -> str: async def main(): async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, - conn_str=os.environ["PROJECT_CONNECTION_STRING"], - ) as assistants_client: - - assistant = await assistants_client.create_assistant( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-assistant", - instructions="You are helpful assistant", - ) - print(f"Created assistant, assistant ID: {assistant.id}") - - thread = await assistants_client.create_thread() - print(f"Created thread, thread ID: {thread.id}") - - input_message = "Hello, what is in the image ?" 
- image_base64 = image_to_base64("../image_file.png") - img_url = f"data:image/png;base64,{image_base64}" - url_param = MessageImageUrlParam(url=img_url, detail="high") - content_blocks: List[MessageInputContentBlock] = [ - MessageInputTextBlock(text=input_message), - MessageInputImageUrlBlock(image_url=url_param), - ] - message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) - print(f"Created message, message ID: {message.id}") - - run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) - - # Poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - await assistants_client.delete_assistant(assistant.id) - print("Deleted assistant") - - messages = await assistants_client.list_messages(thread_id=thread.id) - - # The messages are following in the reverse order, - # we will iterate them and output only text contents. - for data_point in reversed(messages.data): - last_message_content = data_point.content[-1] - if isinstance(last_message_content, MessageTextContent): - print(f"{data_point.role}: {last_message_content.text.value}") - - print(f"Messages: {messages}") + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: + + assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created assistant, assistant ID: {assistant.id}") + + thread = await assistants_client.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + input_message = "Hello, what is in the image ?" 
+ image_base64 = image_to_base64("../image_file.png") + img_url = f"data:image/png;base64,{image_base64}" + url_param = MessageImageUrlParam(url=img_url, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageUrlBlock(image_url=url_param), + ] + message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + await assistants_client.delete_assistant(assistant.id) + print("Deleted assistant") + + messages = await assistants_client.list_messages(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. 
+ for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") + + print(f"Messages: {messages}") + if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py index 979861978d7f..cc5ad1669d98 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_file_async.py @@ -16,8 +16,7 @@ pip install azure-ai-projects azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -37,24 +36,23 @@ async def main(): async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, - conn_str=os.environ["PROJECT_CONNECTION_STRING"], - ) as assistants_client: - + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: + assistant = await assistants_client.create_assistant( model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant", ) print(f"Created assistant, assistant ID: {assistant.id}") - + thread = await assistants_client.create_thread() print(f"Created thread, thread ID: {thread.id}") - - image_file = await assistants_client.upload_file_and_poll(file_path="../image_file.png", purpose="assistants") + + image_file = await assistants_client.upload_file_and_poll( + file_path="../image_file.png", purpose="assistants" + ) print(f"Uploaded file, file ID: {image_file.id}") - + input_message = "Hello, what is in the image ?" 
file_param = MessageImageFileParam(file_id=image_file.id, detail="high") content_blocks: List[MessageInputContentBlock] = [ @@ -63,32 +61,33 @@ async def main(): ] message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) print(f"Created message, message ID: {message.id}") - + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) - + # Poll the run as long as run status is queued or in progress while run.status in ["queued", "in_progress", "requires_action"]: # Wait for a second time.sleep(1) run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) print(f"Run status: {run.status}") - + if run.status == "failed": print(f"Run failed: {run.last_error}") - + await assistants_client.delete_assistant(assistant.id) print("Deleted assistant") - + messages = await assistants_client.list_messages(thread_id=thread.id) - + # The messages are following in the reverse order, # we will iterate them and output only text contents. 
for data_point in reversed(messages.data): last_message_content = data_point.content[-1] if isinstance(last_message_content, MessageTextContent): print(f"{data_point.role}: {last_message_content.text.value}") - + print(f"Messages: {messages}") + if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py index 25e1a3e5dd62..626c5af8c3c3 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_image_input_url_async.py @@ -17,8 +17,7 @@ pip install azure-ai-projects azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -38,10 +37,7 @@ async def main(): async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, - conn_str=os.environ["PROJECT_CONNECTION_STRING"], - ) as assistants_client: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: assistant = await assistants_client.create_assistant( model=os.environ["MODEL_DEPLOYMENT_NAME"], @@ -49,10 +45,10 @@ async def main(): instructions="You are helpful assistant", ) print(f"Created assistant, assistant ID: {assistant.id}") - + thread = await assistants_client.create_thread() print(f"Created thread, thread ID: {thread.id}") - + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" input_message = "Hello, what is in the image ?" url_param = MessageImageUrlParam(url=image_url, detail="high") @@ -62,32 +58,33 @@ async def main(): ] message = await assistants_client.create_message(thread_id=thread.id, role="user", content=content_blocks) print(f"Created message, message ID: {message.id}") - + run = await assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) - + # Poll the run as long as run status is queued or in progress while run.status in ["queued", "in_progress", "requires_action"]: # Wait for a second time.sleep(1) run = await assistants_client.get_run(thread_id=thread.id, run_id=run.id) print(f"Run status: {run.status}") - + if run.status == "failed": print(f"Run failed: {run.last_error}") - + await assistants_client.delete_assistant(assistant.id) print("Deleted assistant") - + messages = await assistants_client.list_messages(thread_id=thread.id) - + # The messages are following in the reverse order, # we will iterate them and output only text contents. 
for data_point in reversed(messages.data): last_message_content = data_point.content[-1] if isinstance(last_message_content, MessageTextContent): print(f"{data_point.role}: {last_message_content.text.value}") - + print(f"Messages: {messages}") + if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py index 34e62f9ca963..51b1528405c8 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity pydantic Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio @@ -48,9 +48,9 @@ class Planet(BaseModel): async def main(): async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds, - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as assistants_client: # [START create_assistant] diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py index 2473637752ac..49ea7ef96fed 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project 
connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import os, asyncio @@ -29,9 +29,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds, - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as assistants_client: # Initialize assistant toolset with user functions and code interpreter diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py index ec2b570f0978..5a1d47d1f167 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio from typing import Any, Optional @@ -61,8 +61,9 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: assistant = await assistants_client.create_assistant( model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py index d17575f2d6fa..e62fe546a05a 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio from typing import Any @@ -96,8 +96,9 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: # [START create_assistant_with_function_tool] diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py index a00f401c1ac6..d1b6276fe284 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio from typing import Any @@ -61,8 +61,9 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: # Initialize toolset with user functions diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py index 5d7e0398ce4e..c6a3c7f8933d 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio @@ -30,8 +30,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: assistant = await assistants_client.create_assistant( model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py index dc6b5d0fd2c8..edc8c7bab614 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py @@ -20,7 +20,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set these environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio import json @@ -76,8 +76,9 @@ async def get_stream_chunks(self) -> AsyncGenerator[str, None]: async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: assistant = await assistants_client.create_assistant( model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py index c7fef5c65435..e4ebf2ddd616 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity azure-ai-ml aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio import os @@ -33,11 +33,12 @@ async def main(): async with DefaultAzureCredential() as credential: - async with AssistantsClient.from_connection_string( - credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("../product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] ds = VectorStoreDataSource( asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET, diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py index 40b606917baa..252a62cee11b 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio @@ -29,8 +29,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py index c506935c77c8..103825f2492a 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity azure-ai-ml aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio import os @@ -27,11 +27,12 @@ async def main(): async with DefaultAzureCredential() as credential: - async with AssistantsClient.from_connection_string( - credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=credential, ) as assistants_client: # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("../product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) vector_store = await assistants_client.create_vector_store_and_poll( data_sources=[ds], name="sample_vector_store" diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py index a6ac7c88b062..b9c0050348b4 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio import os @@ -27,8 +27,9 @@ async def main(): async with DefaultAzureCredential() as credential: - async with AssistantsClient.from_connection_string( - credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=credential, ) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py index 294980d31c04..54ea32b05291 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio @@ -31,8 +31,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( diff --git a/sdk/ai/azure-ai-assistants/samples/fix_sample.sh b/sdk/ai/azure-ai-assistants/samples/fix_sample.sh deleted file mode 100644 index 067c4b55317b..000000000000 --- a/sdk/ai/azure-ai-assistants/samples/fix_sample.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/bash -fix_samples(){ - for fle in `ls $1/*.py | grep agent`; - do - new_name="`echo "$fle" | sed "s/agent/assistant/g"`" - echo "$fle - > $new_name" - sed "s/gent/ssistant/g" "$fle" \ - | sed "s/azure-ai-projects/azure-ai-assistants/g" \ - | sed "s/ai.projects/ai.assistants/g" \ - | sed "s/AIProjectClient/AssistantsClient/g" \ - | sed "s/project_client.assistants/project_client/g" \ - | sed "s/project_client/assistants_client/g" > $new_name - rm -f "$fle" - done -} - -#fix_samples async_samples -#fix_samples . -#fix_samples multiagent -fix_samples ../tests \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py index 66981207c671..d7f7ee874227 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. 
+ PROJECT_ENDPOINT - the Azure AI Assistants endpoint. MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. """ @@ -25,9 +25,9 @@ from assistant_team import AssistantTeam from assistant_trace_configurator import AssistantTraceConfigurator -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py index 4fcda33d7269..8c819f483d73 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
""" @@ -28,9 +28,9 @@ from assistant_trace_configurator import AssistantTraceConfigurator from azure.ai.assistants.models import FunctionTool, ToolSet -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py index bf6142d2468c..0e442868652d 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
""" @@ -28,9 +28,9 @@ from assistant_team import AssistantTeam from assistant_trace_configurator import AssistantTraceConfigurator -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) user_function_set_1: Set = {fetch_current_datetime, fetch_weather} diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py index cc4500c248c5..54f4485098f3 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py @@ -39,9 +39,9 @@ from azure.identity import DefaultAzureCredential from azure.ai.assistants.models import AzureAISearchQueryType, AzureAISearchTool, ListSortOrder, MessageRole -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START create_assistant_with_azure_ai_search_tool] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py index ea0ca27d79d0..089cd4e5679b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. @@ -31,9 +30,9 @@ from azure.ai.assistants.models import AzureFunctionStorageQueue, AzureFunctionTool, MessageRole from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(exclude_managed_identity_credential=True, exclude_environment_credential=True), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py index d0387632b874..b9d19c473807 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -28,9 +27,9 @@ from azure.ai.assistants.models import ListSortOrder, MessageTextContent # [START create_project_client] -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [END create_project_client] @@ -74,7 +73,7 @@ # The messages are following in the reverse order, # we will iterate them and output only text contents. - for data_point in reversed(messages.data): + for data_point in messages.data: last_message_content = data_point.content[-1] if isinstance(last_message_content, MessageTextContent): print(f"{data_point.role}: {last_message_content.text.value}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py index 6a54c6d444ad..19ead3c9bd9b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity azure-monitor-opentelemetry Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat @@ -30,9 +29,9 @@ from azure.ai.assistants.telemetry import enable_telemetry from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START enable_tracing] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py index 62677e999a77..daa5f8cf0f02 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py @@ -22,8 +22,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat @@ -36,9 +35,9 @@ from azure.identity import DefaultAzureCredential from opentelemetry import trace -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Enable console tracing diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py index 6f897aae649e..3f0f8be8055b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py @@ -23,8 +23,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat @@ -60,9 +59,9 @@ def on_end(self, span: ReadableSpan): pass -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Enable console tracing diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py index 3443cd4da0a6..b70e5db51aaa 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py @@ -16,11 +16,10 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) BING_CONNECTION_NAME - The connection name of the Bing connection, as found in the + 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, as found in the "Connected resources" tab in your Azure AI Foundry project. 
""" @@ -30,13 +29,13 @@ from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START create_assistant_with_bing_grounding_tool] -conn_id = os.environ["AZURE_BING_CONECTION_ID"] +conn_id = os.environ["AZURE_BING_CONNECTION_ID"] print(conn_id) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py index 5d4e5b4c2cb5..fc5fc10c7441 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -30,8 +29,9 @@ from azure.identity import DefaultAzureCredential from pathlib import Path -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py index 0ca5b3b4637c..846541c92dfd 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ @@ -32,8 +31,9 @@ ) from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: @@ -56,7 +56,7 @@ # [START upload_file_and_create_message_with_code_interpreter] # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("./product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) # Create a message with the attachment diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py index b61aec56d587..262de38b8d1c 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py @@ -14,8 +14,7 @@ pip install azure-ai-assistants azure-identity azure-ai-ml Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ @@ -25,15 +24,16 @@ from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: # [START upload_file_and_create_assistant_with_file_search] # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("./product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] # Create a vector store with no file and wait for it to be processed ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py index 603fe0b38114..7f00ceda4050 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py @@ -18,7 +18,7 @@ pip install azure-ai-assistants azure-identity Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import os @@ -26,9 +26,9 @@ from azure.identity import DefaultAzureCredential from azure.ai.assistants.models import FabricTool -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START create_assistant_with_fabric_tool] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py index a12f4e0ee2d6..206b2893f9a2 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ @@ -29,8 +28,9 @@ ) from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py index fcd0d98408b6..39f548dc7af6 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -27,8 +26,9 @@ from azure.ai.assistants.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput from user_functions import user_functions -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) # Initialize function tool with user functions diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py index 367c9be610de..161636bd3a12 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat @@ -34,8 +33,9 @@ from opentelemetry import trace from azure.monitor.opentelemetry import configure_azure_monitor -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) # Enable Azure Monitor tracing diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py index 90232ce20c8a..04398187aa35 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py @@ -22,8 +22,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat @@ -38,8 +37,9 @@ from azure.ai.assistants.telemetry import trace_function, enable_telemetry from opentelemetry import trace -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) # Enable console tracing diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py index 5ca326c96938..36291d015ef8 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_base64.py @@ -16,8 +16,7 @@ pip install azure-ai-projects azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -55,9 +54,9 @@ def image_to_base64(image_path: str) -> str: raise OSError(f"Error reading file '{image_path}'") from exc -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py index f0a80971bcff..35662aa3d4b3 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_file.py @@ -16,8 +16,7 @@ pip install azure-ai-projects azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -35,9 +34,9 @@ ) -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py index e685b049358e..e8f16cf4aff4 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_image_input_url.py @@ -17,8 +17,7 @@ pip install azure-ai-projects azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ @@ -36,9 +35,9 @@ ) -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py index e1a89c18e5da..f44455a7b45e 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py @@ -15,8 +15,7 @@ pip install azure-ai-assistants azure-identity pydantic Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. 
+ 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ @@ -36,9 +35,9 @@ ) # [START create_assistants_client] -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [END create_assistants_client] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py index 62528ebacf5f..7ce16ce374f4 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py @@ -23,8 +23,7 @@ pip install azure-ai-assistants azure-identity Set this environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
@@ -37,7 +36,6 @@ import os -import requests from typing import Set from azure.ai.assistants import AssistantsClient @@ -53,14 +51,14 @@ # [START register_logic_app] # Create the project client -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Extract subscription and resource group from the project scope -subscription_id = assistants_client.scope["subscription_id"] -resource_group = assistants_client.scope["resource_group_name"] +subscription_id = os.environ["SUBSCRIPTION_ID"] +resource_group = os.environ["RESOURCE_GROUP_NAME"] # Logic App details logic_app_name = "" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py index 55918213dca6..9344ca64e2dd 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py @@ -18,8 +18,7 @@ pip install azure-ai-assistants azure-identity jsonref Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -31,9 +30,9 @@ from azure.ai.assistants.models import OpenApiTool, OpenApiAnonymousAuthDetails -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START create_assistant_with_openapi] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py index d0428e0383f9..b460379733d0 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py @@ -28,8 +28,8 @@ pip install azure-ai-assistants azure-identity jsonref Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your Foundry Project. - PROJECT_OPENAPI_CONNECTION_NAME - the connection name for the OpenAPI connection authentication + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + OPENAPI_CONNECTION_ID - the connection ID for the OpenAPI connection, taken from Azure AI Foundry. 
MODEL_DEPLOYMENT_NAME - name of the model deployment in the project to use Assistants against """ @@ -40,12 +40,11 @@ from azure.ai.assistants.models import OpenApiTool, OpenApiConnectionAuthDetails, OpenApiConnectionSecurityScheme -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) -connection_name = os.environ["PROJECT_OPENAPI_CONNECTION_NAME"] model_name = os.environ["MODEL_DEPLOYMENT_NAME"] connection_id = os.environ["OPENAPI_CONNECTION_ID"] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py index 3edf52a10b9e..bd958ae0f360 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -28,9 +27,9 @@ from azure.ai.assistants.models import FunctionTool, ToolSet, CodeInterpreterTool from user_functions import user_functions -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Create assistant with toolset and process assistant run diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py index 495c15a770f1..c8cadc69cd73 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py @@ -20,7 +20,7 @@ pip install azure-ai-assistants azure-identity Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import os @@ -33,9 +33,9 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Initialize Sharepoint tool with connection id diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py index b69e2baba867..a8aac8f644ea 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -36,9 +35,9 @@ from typing import Any, Optional -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py index bee37da98664..24cf30480aef 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat @@ -41,9 +40,9 @@ from opentelemetry import trace from azure.monitor.opentelemetry import configure_azure_monitor -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py index 8ce8e8836fda..a495b46dd07b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py @@ -17,11 +17,10 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) BING_CONNECTION_NAME - The connection name of the Bing connection, as found in the "Connected resources" tab + 3) AZURE_BING_CONNECTION_ID - The connection id of the Bing connection, as found in the "Connected resources" tab in your Azure AI Foundry project. 
""" @@ -79,13 +78,14 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: - bing_connection_id = os.environ["AZURE_BING_CONECTION_ID"] + bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] print(f"Bing Connection ID: {bing_connection_id}") # Initialize assistant bing tool and add the connection id diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py index ad1bb8214ec0..16a77a2e7ccc 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py @@ -22,8 +22,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat @@ -44,9 +43,9 @@ from typing import Any from opentelemetry import trace -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py index c76f9d316cf0..8803da00427d 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -40,8 +39,9 @@ from azure.identity import DefaultAzureCredential from user_functions import user_functions -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py index 756ba2bd2e17..4f9bb8fc9e59 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -38,8 +37,9 @@ from typing import Any from user_functions import user_functions -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py index 39e213b65649..7f55066d9c97 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -33,9 +32,9 @@ RunStep, ) -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py index 187300a775d4..b90c016a58fe 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py @@ -17,11 +17,10 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) BING_CONNECTION_NAME - The connection name of the Bing connection, as found in the "Connected resources" tab + 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, as found in the "Connected resources" tab in your Azure AI Foundry project. 
""" @@ -40,12 +39,13 @@ ) from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: - bing_connection_id = os.environ["AZURE_BING_CONECTION_ID"] + bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] bing = BingGroundingTool(connection_id=bing_connection_id) print(f"Bing Connection ID: {bing_connection_id}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py index 948b6496b489..5c2d76b09573 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -29,8 +28,9 @@ from azure.ai.assistants.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py index d689e165a3b2..fcd5300a27f0 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -35,8 +34,9 @@ from azure.identity import DefaultAzureCredential from user_functions import user_functions -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) functions = FunctionTool(user_functions) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py index 4b4325ccdcb1..cda531f4cf2a 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py @@ -19,8 +19,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -73,8 +72,9 @@ def get_stream_chunks(self) -> Generator[str, None, None]: yield chunk -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py index 2d9ac0fdee12..6eaf3a02d496 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py @@ -14,8 +14,7 @@ pip install azure-ai-assistants azure-identity azure-ai-ml Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ @@ -25,14 +24,15 @@ from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("./product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] # [START attach_files_to_store] # Create a vector store with no file and wait for it to be processed diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py index 4ca83aaa8384..97e206e1f5bf 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -27,8 +26,9 @@ from azure.ai.assistants.models import FileSearchTool, FilePurpose from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py index b96797c97d5f..bdbee38bc942 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py @@ -14,8 +14,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -25,8 +24,9 @@ from azure.ai.assistants.models import FileSearchTool, FilePurpose from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py index 93e85c3abe3e..3262175d194a 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py @@ -18,8 +18,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -31,8 +30,9 @@ from azure.identity import DefaultAzureCredential from pathlib import Path -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py index 765d696e9fd2..08207bdb0975 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -27,8 +26,9 @@ from azure.ai.assistants.models import FilePurpose, FileSearchTool, MessageAttachment from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py index 765d696e9fd2..08207bdb0975 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -27,8 +26,9 @@ from azure.ai.assistants.models import FilePurpose, FileSearchTool, MessageAttachment from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py index ae67c9fba346..e1afcedff945 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" @@ -27,8 +26,9 @@ from azure.ai.assistants.models import FileSearchTool from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/tests/README.md b/sdk/ai/azure-ai-assistants/tests/README.md index a69b9c40bdeb..c49ab7e61a82 100644 --- a/sdk/ai/azure-ai-assistants/tests/README.md +++ b/sdk/ai/azure-ai-assistants/tests/README.md @@ -1,11 +1,11 @@ -# Azure AI Project client library tests for Python +# Azure AI Assistants client library tests for Python The instructions below are for running tests locally, on a Windows machine, against the live service using a local build of the client library. ## Build and install the client library - Clone or download this sample repository. -- Open a command prompt window in the folder `sdk\ai\azure-ai-projects` +- Open a command prompt window in the folder `sdk\ai\azure-ai-assistants` - Install development dependencies: ```bash pip install -r dev_requirements.txt @@ -17,7 +17,7 @@ The instructions below are for running tests locally, on a Windows machine, agai ``` - Install the resulting wheel (update version `1.0.0b5` to the current one): ```bash - pip install dist\azure_ai_projects-1.0.0b5-py3-none-any.whl --user --force-reinstall + pip install dist\azure_ai_assistants-1.0.0b5-py3-none-any.whl --user --force-reinstall ``` ## Log in to Azure @@ -28,7 +28,7 @@ az login ## Setup up environment variables -Edit the file `azure_ai_projects_tests.env` located in the folder above. Follow the instructions there on how to set up Azure AI Foundry projects to be used for testing, and enter appropriate values for the environment variables used for the tests you want to run. 
+Edit the file `azure_ai_assistants_tests.env` located in the folder above. Follow the instructions there on how to set up Azure AI Foundry projects to be used for testing, and enter appropriate values for the environment variables used for the tests you want to run. ## Configure test proxy @@ -49,12 +49,6 @@ To run all tests, type: pytest ``` -To run tests in a particular folder (`tests\connections` for example): - -```bash -pytest tests\connections -``` - ## Additional information See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. diff --git a/sdk/ai/azure-ai-assistants/tests/conftest.py b/sdk/ai/azure-ai-assistants/tests/conftest.py index 07dbb1f70ef3..e1f9eaa3a08b 100644 --- a/sdk/ai/azure-ai-assistants/tests/conftest.py +++ b/sdk/ai/azure-ai-assistants/tests/conftest.py @@ -3,34 +3,21 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
# ------------------------------------ -import os - import pytest from devtools_testutils import ( + add_general_regex_sanitizer, + add_body_key_sanitizer, remove_batch_sanitizers, get_credential, test_proxy, - add_general_regex_sanitizer, - add_body_key_sanitizer, ) +from azure.ai.assistants import AssistantsClient from dotenv import load_dotenv, find_dotenv if not load_dotenv(find_dotenv(filename="azure_ai_assistants_tests.env"), override=True): print("Failed to apply environment variables for azure-ai-projects tests.") -def pytest_collection_modifyitems(items): - if os.environ.get("AZURE_TEST_RUN_LIVE") == "true": - return - for item in items: - if "tests\\evaluation" in item.fspath.strpath or "tests/evaluation" in item.fspath.strpath: - item.add_marker( - pytest.mark.skip( - reason="Skip running Evaluations tests in PR pipeline until we can sort out the failures related to AI Foundry project settings" - ) - ) - - class SanitizedValues: SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000" RESOURCE_GROUP_NAME = "00000" @@ -148,6 +135,11 @@ def azure_workspace_triad_sanitizer(): json_path="data_source.uri", value="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/00000000000/product_info_1.md", ) + + add_body_key_sanitizer( + json_path="tool_resources.azure_ai_search.indexes[*].index_connection_id", + value="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/someindex" + ) # Sanitize API key from service response (/tests/connections) add_body_key_sanitizer(json_path="properties.credentials.key", value="Sanitized") diff --git a/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py index c221228a7041..6a927c646779 100644 --- a/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py +++ 
b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py @@ -102,9 +102,6 @@ def _get_mock_client() -> AssistantsClient: """Return the fake project client""" client = AssistantsClient( endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", - subscription_id="00000000-0000-0000-0000-000000000000", - resource_group_name="non-existing-rg", - project_name="non-existing-project", credential=MagicMock(), ) client.submit_tool_outputs_to_run = MagicMock() diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py index f39229119349..4b7d0bd340a5 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py @@ -19,10 +19,6 @@ import user_functions from azure.ai.assistants import AssistantsClient -from azure.ai.assistants.models import ( - ThreadMessage, - RunStep, -) from azure.core.exceptions import HttpResponseError from devtools_testutils import ( AzureRecordedTestCase, @@ -57,6 +53,8 @@ RunStepFileSearchToolCallResult, RunStepFileSearchToolCallResults, RunStatus, + RunStep, + ThreadMessage, ThreadMessageOptions, ThreadRun, ToolResources, @@ -85,12 +83,15 @@ assistantClientPreparer = functools.partial( EnvironmentVariableLoader, - "azure_ai.assistants", - azure_ai_assistants_assistants_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", - azure_ai_assistants_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", - azure_ai_assistants_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", - azure_ai_assistants_assistants_tests_search_index_name="sample_index", - 
azure_ai_assistants_assistants_tests_search_connection_name="search_connection_name", + "azure_ai_assistants", + # TODO: uncomment this endpoint when re running with 1DP + #azure_ai_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name", + # TODO: remove this endpoint when re running with 1DP + azure_ai_assistants_tests_project_endpoint="https://Sanitized.api.azureml.ms/agents/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/", + azure_ai_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", + azure_ai_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", + azure_ai_assistants_tests_search_index_name="sample_index", + azure_ai_assistants_tests_search_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/someindex", ) @@ -130,13 +131,13 @@ class TestAssistantClient(AzureRecordedTestCase): # helper function: create client using environment variables def create_client(self, **kwargs): # fetch environment variables - connection_string = kwargs.pop("azure_ai.assistants_assistants_tests_project_connection_string") + endpoint = kwargs.pop("azure_ai_assistants_tests_project_endpoint") credential = self.get_credential(AssistantsClient, is_async=False) # create and return client - client = AssistantsClient.from_connection_string( + client = AssistantsClient( + endpoint=endpoint, credential=credential, - conn_str=connection_string, ) return client @@ -232,9 +233,9 @@ def _do_test_create_assistant(self, client, body, functions): # create assistant if body: - assistant = client.assistants.create_assistant(body=body) + 
assistant = client.create_assistant(body=body) elif functions: - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -244,7 +245,7 @@ def _do_test_create_assistant(self, client, body, functions): assert assistant.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) else: - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -253,7 +254,7 @@ def _do_test_create_assistant(self, client, body, functions): assert assistant.model == "gpt-4o" # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -292,7 +293,7 @@ def _do_test_update_assistant(self, client, use_body, use_io): """helper function for updating assistant with different body inputs""" # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -303,14 +304,14 @@ def _do_test_update_assistant(self, client, use_body, use_io): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - assistant = client.assistants.update_assistant(assistant_id=assistant.id, body=body) + assistant = client.update_assistant(assistant_id=assistant.id, body=body) else: - assistant = client.assistants.update_assistant(assistant_id=assistant.id, name="my-assistant2") + assistant = client.update_assistant(assistant_id=assistant.id, name="my-assistant2") assert assistant.name assert assistant.name == "my-assistant2" # delete assistant and close client - 
client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -320,32 +321,32 @@ def test_assistant_list(self, **kwargs): """test list assistants""" # create client and ensure there are no previous assistants with self.create_client(**kwargs) as client: - list_length = client.assistants.list_assistants().data.__len__() + list_length = client.list_assistants().data.__len__() # create assistant and check that it appears in the list - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) - assert client.assistants.list_assistants().data.__len__() == list_length + 1 - assert client.assistants.list_assistants().data[0].id == assistant.id + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant.id # create second assistant and check that it appears in the list - assistant2 = client.assistants.create_assistant( + assistant2 = client.create_assistant( model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant" ) - assert client.assistants.list_assistants().data.__len__() == list_length + 2 + assert client.list_assistants().data.__len__() == list_length + 2 assert ( - client.assistants.list_assistants().data[0].id == assistant.id - or client.assistants.list_assistants().data[1].id == assistant.id + client.list_assistants().data[0].id == assistant.id + or client.list_assistants().data[1].id == assistant.id ) # delete assistants and check list - client.assistants.delete_assistant(assistant.id) - assert client.assistants.list_assistants().data.__len__() == list_length + 1 - assert client.assistants.list_assistants().data[0].id == assistant2.id + client.delete_assistant(assistant.id) + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == 
assistant2.id - client.assistants.delete_assistant(assistant2.id) - assert client.assistants.list_assistants().data.__len__() == list_length + client.delete_assistant(assistant2.id) + assert client.list_assistants().data.__len__() == list_length print("Deleted assistants") # ********************************************************************************** @@ -364,20 +365,20 @@ def test_create_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -424,9 +425,9 @@ def _do_test_create_thread(self, client, body): """helper function for creating thread with different body inputs""" # create thread if body: - thread = client.assistants.create_thread(body=body) + thread = client.create_thread(body=body) else: - thread = client.assistants.create_thread(metadata={"key1": "value1", "key2": "value2"}) + thread = client.create_thread(metadata={"key1": "value1", "key2": "value2"}) assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) @@ -441,25 +442,25 @@ def test_get_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant 
ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # get thread - thread2 = client.assistants.get_thread(thread.id) + thread2 = client.get_thread(thread.id) assert thread2.id assert thread.id == thread2.id print("Got thread, thread ID", thread2.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -471,23 +472,23 @@ def test_update_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # update thread - thread = client.assistants.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) + thread = client.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) assert thread.metadata == {"key1": "value1", "key2": "value2"} # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -503,7 +504,7 @@ def test_update_thread_with_metadata(self, **kwargs): metadata = {"key1": "value1", "key2": "value2"} # create thread - thread = client.assistants.create_thread(metadata=metadata) + thread = client.create_thread(metadata=metadata) assert thread.id print("Created thread, thread ID", thread.id) @@ -511,7 +512,7 @@ def test_update_thread_with_metadata(self, **kwargs): metadata2 = {"key1": "value1", "key2": "newvalue2"} # update thread - thread 
= client.assistants.update_thread(thread.id, metadata=metadata2) + thread = client.update_thread(thread.id, metadata=metadata2) assert thread.metadata == {"key1": "value1", "key2": "newvalue2"} @assistantClientPreparer() @@ -544,16 +545,16 @@ def test_update_thread_with_iobytes(self, **kwargs): def _do_test_update_thread(self, client, body): """helper function for updating thread with different body inputs""" # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # update thread if body: - thread = client.assistants.update_thread(thread.id, body=body) + thread = client.update_thread(thread.id, body=body) else: metadata = {"key1": "value1", "key2": "value2"} - thread = client.assistants.update_thread(thread.id, metadata=metadata) + thread = client.update_thread(thread.id, metadata=metadata) assert thread.metadata == {"key1": "value1", "key2": "value2"} @assistantClientPreparer() @@ -565,26 +566,26 @@ def test_delete_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) # delete thread - deletion_status = client.assistants.delete_thread(thread.id) + deletion_status = client.delete_thread(thread.id) assert deletion_status.id == thread.id assert deletion_status.deleted == True print("Deleted thread, thread ID", deletion_status.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # # 
********************************************************************************** @@ -631,15 +632,15 @@ def _do_test_create_message(self, client, body): """helper function for creating message with different body inputs""" # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message if body: - message = client.assistants.create_message(thread_id=thread.id, body=body) + message = client.create_message(thread_id=thread.id, body=body) else: - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id @@ -654,36 +655,36 @@ def test_create_multiple_messages(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create messages - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) - message2 = client.assistants.create_message( + message2 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me another joke" ) assert message2.id print("Created message, message ID", message2.id) - message3 = client.assistants.create_message( + message3 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third joke" ) assert message3.id print("Created message, message ID", message3.id) # delete assistant and close client - 
client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -695,47 +696,47 @@ def test_list_messages(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # check that initial message list is empty - messages0 = client.assistants.list_messages(thread_id=thread.id) + messages0 = client.list_messages(thread_id=thread.id) print(messages0.data) assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = client.assistants.create_message( + message1 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message1.id print("Created message, message ID", message1.id) - messages1 = client.assistants.list_messages(thread_id=thread.id) + messages1 = client.list_messages(thread_id=thread.id) assert messages1.data.__len__() == 1 assert messages1.data[0].id == message1.id - message2 = client.assistants.create_message( + message2 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me another joke" ) assert message2.id print("Created message, message ID", message2.id) - messages2 = client.assistants.list_messages(thread_id=thread.id) + messages2 = client.list_messages(thread_id=thread.id) assert messages2.data.__len__() == 2 assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id - message3 = client.assistants.create_message( + message3 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third joke" ) 
assert message3.id print("Created message, message ID", message3.id) - messages3 = client.assistants.list_messages(thread_id=thread.id) + messages3 = client.list_messages(thread_id=thread.id) assert messages3.data.__len__() == 3 assert ( messages3.data[0].id == message3.id @@ -744,7 +745,7 @@ def test_list_messages(self, **kwargs): ) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -756,32 +757,32 @@ def test_get_message(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # get message - message2 = client.assistants.get_message(thread_id=thread.id, message_id=message.id) + message2 = client.get_message(thread_id=thread.id, message_id=message.id) assert message2.id assert message.id == message2.id print("Got message, message ID", message.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -821,20 +822,20 @@ def test_update_message_with_iobytes(self, **kwargs): def _do_test_update_message(self, client, body): """helper function for updating message with different body inputs""" # create thread - thread = client.assistants.create_thread() + thread = 
client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) # update message if body: - message = client.assistants.update_message(thread_id=thread.id, message_id=message.id, body=body) + message = client.update_message(thread_id=thread.id, message_id=message.id, body=body) else: - message = client.assistants.update_message( + message = client.update_message( thread_id=thread.id, message_id=message.id, metadata={"key1": "value1", "key2": "value2"} ) assert message.metadata == {"key1": "value1", "key2": "value2"} @@ -854,24 +855,24 @@ def test_create_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -905,14 +906,14 @@ def _do_test_create_run(self, client, use_body, use_io=False): """helper function for creating run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant( + 
assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -922,9 +923,9 @@ def _do_test_create_run(self, client, use_body, use_io=False): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - run = client.assistants.create_run(thread_id=thread.id, body=body) + run = client.create_run(thread_id=thread.id, body=body) else: - run = client.assistants.create_run( + run = client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -932,7 +933,7 @@ def _do_test_create_run(self, client, use_body, use_io=False): print("Created run, run ID", run.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -944,30 +945,30 @@ def test_get_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # get run - run2 = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run2 = 
client.get_run(thread_id=thread.id, run_id=run.id) assert run2.id assert run.id == run2.id print("Got run, run ID", run2.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -979,26 +980,26 @@ def test_run_status(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1016,14 +1017,14 @@ def test_run_status(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) print("Run status:", run.status) assert run.status in ["cancelled", "failed", "completed", "expired"] print("Run completed with status:", run.status) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1035,19 +1036,19 @@ def test_update_run(self, **kwargs): assert isinstance(client, 
AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1055,14 +1056,14 @@ def test_update_run(self, **kwargs): while run.status in ["queued", "in_progress"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) - run = client.assistants.update_run( + run = client.get_run(thread_id=thread.id, run_id=run.id) + run = client.update_run( thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.metadata == {"key1": "value1", "key2": "value2"} # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1102,19 +1103,19 @@ def test_update_run_with_iobytes(self, **kwargs): def _do_test_update_run(self, client, body): """helper function for updating run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = client.assistants.create_run( + run = 
client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1124,17 +1125,17 @@ def _do_test_update_run(self, client, body): # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) if body: - run = client.assistants.update_run(thread_id=thread.id, run_id=run.id, body=body) + run = client.update_run(thread_id=thread.id, run_id=run.id, body=body) else: - run = client.assistants.update_run( + run = client.update_run( thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "newvalue2"} ) assert run.metadata == {"key1": "value1", "key2": "newvalue2"} # delete assistant - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1176,24 +1177,24 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): # toolset.add(code_interpreter) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert 
run.id print("Created run, run ID", run.id) @@ -1215,7 +1216,7 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1223,7 +1224,7 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run @@ -1235,9 +1236,9 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - client.assistants.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, body=body) + client.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, body=body) else: - client.assistants.submit_tool_outputs_to_run( + client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs ) @@ -1247,7 +1248,7 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): # check that messages used the tool print("Messages: ") - messages = client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] # if user_functions_live is used, the time will be the current time # since user_functions_recording is used, the time will be 12:30 @@ -1255,7 +1256,7 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): print("Used tool_outputs") # delete 
assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1290,7 +1291,7 @@ def _wait_for_run(self, client, run, timeout=1): """Wait while run will get to terminal state.""" while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS, RunStatus.REQUIRES_ACTION]: time.sleep(timeout) - run = client.assistants.get_run(thread_id=run.thread_id, run_id=run.id) + run = client.get_run(thread_id=run.thread_id, run_id=run.id) return run def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_run, **kwargs): @@ -1309,7 +1310,7 @@ def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_ toolset = ToolSet() toolset.add(functions) toolset.add(code_interpreter) - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4", name="my-assistant", instructions="You are helpful assistant", @@ -1323,16 +1324,16 @@ def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_ ) if create_thread_run: - run = client.assistants.create_thread_and_run( + run = client.create_thread_and_run( assistant_id=assistant.id, parallel_tool_calls=use_parallel_runs, ) run = self._wait_for_run(client, run) else: - thread = client.assistants.create_thread(messages=[message]) + thread = client.create_thread(messages=[message]) assert thread.id - run = client.assistants.create_and_process_run( + run = client.create_and_process_run( thread_id=thread.id, assistant_id=assistant.id, parallel_tool_calls=use_parallel_runs, @@ -1341,8 +1342,8 @@ def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_ assert run.status == RunStatus.COMPLETED, run.last_error.message assert run.parallel_tool_calls == use_parallel_runs - assert client.assistants.delete_assistant(assistant.id).deleted, "The assistant was not deleted" - messages = 
client.assistants.list_messages(thread_id=run.thread_id) + assert client.delete_assistant(assistant.id).deleted, "The assistant was not deleted" + messages = client.list_messages(thread_id=run.thread_id) assert len(messages.data), "The data from the assistant was not received." """ @@ -1356,38 +1357,38 @@ def test_cancel_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # check status and cancel assert run.status in ["queued", "in_progress", "requires_action"] - client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) while run.status in ["queued", "cancelling"]: time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) print("Current run status:", run.status) assert run.status == "cancelled" print("Run cancelled") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + 
client.delete_assistant(assistant.id) print("Deleted assistant") client.close() """ @@ -1423,7 +1424,7 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): """helper function for creating thread and run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -1438,10 +1439,10 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - run = client.assistants.create_thread_and_run(body=body) + run = client.create_thread_and_run(body=body) assert run.metadata == {"key1": "value1", "key2": "value2"} else: - run = client.assistants.create_thread_and_run(assistant_id=assistant.id) + run = client.create_thread_and_run(assistant_id=assistant.id) # create thread and run assert run.id @@ -1449,7 +1450,7 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): print("Created run, run ID", run.id) # get thread - thread = client.assistants.get_thread(run.thread_id) + thread = client.get_thread(run.thread_id) assert thread.id print("Created thread, thread ID", thread.id) @@ -1467,7 +1468,7 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) # assert run.status in ["queued", "in_progress", "requires_action", "completed"] print("Run status:", run.status) @@ -1475,7 +1476,7 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): print("Run completed") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") 
@assistantClientPreparer() @@ -1489,28 +1490,28 @@ def test_list_run_step(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) - steps = client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = client.list_run_steps(thread_id=thread.id, run_id=run.id) # commenting assertion out below, do we know exactly when run starts? 
# assert steps['data'].__len__() == 0 @@ -1519,7 +1520,7 @@ def test_list_run_step(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) assert run.status in [ "queued", "in_progress", @@ -1528,7 +1529,7 @@ def test_list_run_step(self, **kwargs): ] print("Run status:", run.status) if run.status != "queued": - steps = client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = client.list_run_steps(thread_id=thread.id, run_id=run.id) print("Steps:", steps) assert steps["data"].__len__() > 0 @@ -1536,7 +1537,7 @@ def test_list_run_step(self, **kwargs): print("Run completed") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") client.close() @@ -1550,26 +1551,26 @@ def test_get_run_step(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
) assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1583,7 +1584,7 @@ def test_get_run_step(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) if run.status == "failed": assert run.last_error print(run.last_error) @@ -1597,14 +1598,14 @@ def test_get_run_step(self, **kwargs): print("Run status:", run.status) # list steps, check that get_run_step works with first step_id - steps = client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = client.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 step = steps["data"][0] - get_step = client.assistants.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + get_step = client.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) assert step == get_step # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # # ********************************************************************************** @@ -1623,26 +1624,26 @@ def test_create_stream(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # 
create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" ) assert message.id print("Created message, message ID", message.id) # create stream - with client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + with client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: for event_type, event_data, _ in stream: assert ( isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) @@ -1650,7 +1651,7 @@ def test_create_stream(self, **kwargs): ) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # TODO create_stream doesn't work with body -- fails on for event_type, event_data : TypeError: 'ThreadRun' object is not an iterator @@ -1664,19 +1665,19 @@ def test_create_stream_with_body(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
) assert message.id @@ -1686,7 +1687,7 @@ def test_create_stream_with_body(self, **kwargs): body = {"assistant_id": assistant.id, "stream": True} # create stream - with client.assistants.create_stream(thread_id=thread.id, body=body, stream=True) as stream: + with client.create_stream(thread_id=thread.id, body=body, stream=True) as stream: for event_type, event_data, _ in stream: print("event type: event data") @@ -1697,7 +1698,7 @@ def test_create_stream_with_body(self, **kwargs): ) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1710,19 +1711,19 @@ def test_create_stream_with_iobytes(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
) assert message.id @@ -1733,7 +1734,7 @@ def test_create_stream_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create stream - with client.assistants.create_stream( + with client.create_stream( thread_id=thread.id, body=io.BytesIO(binary_body), stream=True ) as stream: for event_type, event_data, _ in stream: @@ -1743,7 +1744,7 @@ def test_create_stream_with_iobytes(self, **kwargs): ) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1787,7 +1788,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): # toolset.add(code_interpreter) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -1798,17 +1799,17 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create stream - with client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + with client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: for event_type, event_data, _ in stream: # Check if tools are needed @@ -1821,7 +1822,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): if not tool_calls: print("No tool calls provided - cancelling run") - 
client.assistants.cancel_run(thread_id=thread.id, run_id=event_data.id) + client.cancel_run(thread_id=thread.id, run_id=event_data.id) break # submit tool outputs to stream @@ -1834,7 +1835,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - client.assistants.submit_tool_outputs_to_stream( + client.submit_tool_outputs_to_stream( thread_id=thread.id, run_id=event_data.id, body=body, @@ -1842,7 +1843,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): stream=True, ) else: - client.assistants.submit_tool_outputs_to_stream( + client.submit_tool_outputs_to_stream( thread_id=thread.id, run_id=event_data.id, tool_outputs=tool_outputs, @@ -1859,7 +1860,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): print("Stream processing completed") # check that messages used the tool - messages = client.assistants.list_messages(thread_id=thread.id) + messages = client.list_messages(thread_id=thread.id) print("Messages: ", messages) tool_message = messages["data"][0]["content"][0]["text"]["value"] # TODO if testing live, uncomment these @@ -1874,7 +1875,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): print("Used tool_outputs") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # client.close() @@ -2037,7 +2038,7 @@ def _test_tools_with_different_functions( toolset.add(functions) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -2047,17 +2048,17 @@ def _test_tools_with_different_functions( print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() 
assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content=content) + message = client.create_message(thread_id=thread.id, role="user", content=content) assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -2079,7 +2080,7 @@ def _test_tools_with_different_functions( ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -2087,14 +2088,14 @@ def _test_tools_with_different_functions( tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run tool_outputs = toolset.execute_tool_calls(tool_calls) print("Tool outputs:", tool_outputs) if tool_outputs: - client.assistants.submit_tool_outputs_to_run( + client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs ) @@ -2103,7 +2104,7 @@ def _test_tools_with_different_functions( print("Run completed with status:", run.status) # check that messages used the tool - messages = client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = client.list_messages(thread_id=thread.id, run_id=run.id) print("Messages: ", messages) tool_message = messages["data"][0]["content"][0]["text"]["value"] if expected_values: @@ -2119,7 +2120,7 @@ def 
_test_tools_with_different_functions( print("Used tool_outputs") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # # ********************************************************************************** @@ -2142,7 +2143,7 @@ def test_create_assistant_with_invalid_code_interpreter_tool_resource(self, **kw exception_message = "" try: - client.assistants.create_assistant( + client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -2175,7 +2176,7 @@ def test_create_assistant_with_invalid_file_search_tool_resource(self, **kwargs) exception_message = "" try: - client.assistants.create_assistant( + client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", tools=[], tool_resources=tool_resources ) except: @@ -2202,7 +2203,7 @@ def test_create_assistant_with_invalid_file_search_tool_resource(self, **kwargs) exception_message = "" try: - client.assistants.create_assistant( + client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -2233,10 +2234,10 @@ def test_file_search_add_vector_store(self, **kwargs): # Adjust the file path to be relative to the test file location file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") - openai_file = client.assistants.upload_file_and_poll(file_path=file_path, purpose="assistants") + openai_file = client.upload_file_and_poll(file_path=file_path, purpose="assistants") print(f"Uploaded file, file ID: {openai_file.id}") - openai_vectorstore = client.assistants.create_vector_store_and_poll( + openai_vectorstore = client.create_vector_store_and_poll( file_ids=[openai_file.id], name="my_vectorstore" ) print(f"Created vector store, vector store ID: {openai_vectorstore.id}") @@ -2248,7 +2249,7 @@ def test_file_search_add_vector_store(self, **kwargs): 
print("Created toolset and added file search") # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id @@ -2261,7 +2262,7 @@ def test_file_search_add_vector_store(self, **kwargs): assert assistant.tool_resources["file_search"]["vector_store_ids"][0] == openai_vectorstore.id # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") client.close() @@ -2277,7 +2278,7 @@ def test_create_vector_store_and_poll(self, **kwargs): # Create vector store body = {"name": "test_vector_store", "metadata": {"key1": "value1", "key2": "value2"}} try: - vector_store = client.assistants.create_vector_store_and_poll(body=body, sleep_interval=2) + vector_store = client.create_vector_store_and_poll(body=body, sleep_interval=2) # check correct creation assert isinstance(vector_store, VectorStore) assert vector_store.name == "test_vector_store" @@ -2306,7 +2307,7 @@ def test_create_vector_store(self, **kwargs): # Create vector store body = {"name": "test_vector_store", "metadata": {"key1": "value1", "key2": "value2"}} try: - vector_store = client.assistants.create_vector_store(body=body) + vector_store = client.create_vector_store(body=body) print("here") print(vector_store) # check correct creation @@ -2364,11 +2365,11 @@ def _do_test_create_vector_store(self, streaming, **kwargs): else: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = ai_client.assistants.create_vector_store_and_poll( + vector_store = ai_client.create_vector_store_and_poll( file_ids=file_ids, data_sources=ds, name="my_vectorstore" ) assert vector_store.id @@ -2385,7 
+2386,7 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2398,7 +2399,7 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ] ) file_search = FileSearchTool() - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2407,19 +2408,19 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert assistant.id - thread = ai_client.assistants.create_thread(tool_resources=ToolResources(file_search=fs)) + thread = ai_client.create_thread(tool_resources=ToolResources(file_search=fs)) assert thread.id # create message - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." 
- run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = ai_client.assistants.list_messages(thread.id) + messages = ai_client.list_messages(thread.id) assert len(messages) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) ai_client.close() @assistantClientPreparer() @@ -2459,12 +2460,12 @@ def _do_test_create_vector_store_add_file(self, streaming, **kwargs): ds = None else: ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type="uri_asset", ) - vector_store = ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file = ai_client.assistants.create_vector_store_file( + vector_store_file = ai_client.create_vector_store_file( vector_store_id=vector_store.id, data_source=ds, file_id=file_id ) assert vector_store_file.id @@ -2511,13 +2512,13 @@ def _do_test_create_vector_store_batch(self, streaming, **kwargs): file_ids = None ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file_batch = ai_client.assistants.create_vector_store_file_batch_and_poll( + vector_store_file_batch = 
ai_client.create_vector_store_file_batch_and_poll( vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids ) assert vector_store_file_batch.id @@ -2529,7 +2530,7 @@ def _test_file_search( ) -> None: """Test the file search""" file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2538,18 +2539,18 @@ def _test_file_search( ) assert assistant.id - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id # create message - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." if streaming: thread_run = None - with ai_client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + with ai_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: for _, event_data, _ in stream: if isinstance(event_data, ThreadRun): thread_run = event_data @@ -2562,16 +2563,16 @@ def _test_file_search( event_data.delta.step_details.tool_calls[0].file_search, RunStepFileSearchToolCallResults ) assert thread_run is not None - run = ai_client.assistants.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) + run = ai_client.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) assert run is not None else: - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) - ai_client.assistants.delete_vector_store(vector_store.id) + ai_client.delete_vector_store(vector_store.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = 
ai_client.assistants.list_messages(thread.id) + messages = ai_client.list_messages(thread.id) assert len(messages) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) self._remove_file_maybe(file_id, ai_client) ai_client.close() @@ -2581,7 +2582,7 @@ def _test_file_search( def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_message_attachment(data_source=ds, **kwargs) @@ -2601,14 +2602,14 @@ def _do_test_message_attachment(self, **kwargs): file_id = self._get_file_id_maybe(ai_client, **kwargs) # Create assistant with file search tool - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", ) assert assistant.id, "Assistant was not created" - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id, "The thread was not created." # Create a message with the file search attachment @@ -2621,7 +2622,7 @@ def _do_test_message_attachment(self, **kwargs): CodeInterpreterTool().definitions[0], ], ) - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?", @@ -2629,12 +2630,12 @@ def _do_test_message_attachment(self, **kwargs): ) assert message.id, "The message was not created." - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." 
self._remove_file_maybe(file_id, ai_client) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) - messages = ai_client.assistants.list_messages(thread_id=thread.id) + messages = ai_client.list_messages(thread_id=thread.id) assert len(messages), "No messages were created" ai_client.close() @@ -2644,7 +2645,7 @@ def _do_test_message_attachment(self, **kwargs): def test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @@ -2665,7 +2666,7 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll( + file = ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." @@ -2677,7 +2678,7 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", @@ -2686,20 +2687,20 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): ) assert assistant.id, "Assistant was not created" - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id, "The thread was not created." 
- message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" - ai_client.assistants.delete_assistant(assistant.id) - assert len(ai_client.assistants.list_messages(thread_id=thread.id)), "No messages were created" + ai_client.delete_assistant(assistant.id) + assert len(ai_client.list_messages(thread_id=thread.id)), "No messages were created" ai_client.close() @assistantClientPreparer() @@ -2708,7 +2709,7 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -2729,7 +2730,7 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll( + file = ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -2741,7 +2742,7 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", @@ -2749,20 +2750,20 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): ) assert assistant.id, "Assistant was not created" - thread = ai_client.assistants.create_thread(tool_resources=tr) + thread = ai_client.create_thread(tool_resources=tr) assert thread.id, "The thread was not created." - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." 
self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" - ai_client.assistants.delete_assistant(assistant.id) - messages = ai_client.assistants.list_messages(thread.id) + ai_client.delete_assistant(assistant.id) + messages = ai_client.list_messages(thread.id) assert len(messages) ai_client.close() @@ -2777,7 +2778,7 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2790,7 +2791,7 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ] ) file_search = FileSearchTool() - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2799,19 +2800,19 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ) assert assistant.id - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id # create message - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." 
- run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = ai_client.assistants.list_messages(thread.id) + messages = ai_client.list_messages(thread.id) assert len(messages) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) ai_client.close() @assistantClientPreparer() @@ -2820,7 +2821,7 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_attachment_in_thread_azure(data_source=ds, **kwargs) @@ -2840,7 +2841,7 @@ def _do_test_create_attachment_in_thread_azure(self, **kwargs): file_id = self._get_file_id_maybe(ai_client, **kwargs) file_search = FileSearchTool() - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2862,14 +2863,14 @@ def _do_test_create_attachment_in_thread_azure(self, **kwargs): content="What does the attachment say?", attachments=[attachment], ) - thread = ai_client.assistants.create_thread(messages=[message]) + thread = ai_client.create_thread(messages=[message]) assert thread.id - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: 
{run.last_error}" - messages = ai_client.assistants.list_messages(thread.id) + messages = ai_client.list_messages(thread.id) assert len(messages) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) ai_client.close() @assistantClientPreparer() @@ -2881,12 +2882,10 @@ def test_azure_ai_search_tool(self, **kwargs): assert isinstance(client, AssistantsClient) # Create AzureAISearchTool - connection_name = kwargs.pop( - "azure_ai.assistants_assistants_tests_search_connection_name", "my-search-connection-name" + conn_id = kwargs.pop( + "azure_ai_assistants_tests_search_connection_id", "my-search-connection-ID" ) - connection = client.connections.get(connection_name=connection_name) - conn_id = connection.id - index_name = kwargs.pop("azure_ai.assistants_assistants_tests_search_index_name", "my-search-index") + index_name = kwargs.pop("azure_ai_assistants_tests_search_index_name", "my-search-index") azure_search_tool = AzureAISearchTool( index_connection_id=conn_id, @@ -2894,7 +2893,7 @@ def test_azure_ai_search_tool(self, **kwargs): ) # Create assistant with the search tool - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="search-assistant", instructions="You are a helpful assistant that can search for information using Azure AI Search.", @@ -2905,27 +2904,27 @@ def test_azure_ai_search_tool(self, **kwargs): print(f"Created assistant with ID: {assistant.id}") # Create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print(f"Created thread with ID: {thread.id}") # Create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Search for information about iPhone prices." 
) assert message.id print(f"Created message with ID: {message.id}") # Create and process run - run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, run.last_error.message # List messages to verify tool was used - messages = client.assistants.list_messages(thread_id=thread.id) + messages = client.list_messages(thread_id=thread.id) assert len(messages.data) > 0 # Clean up - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -2949,18 +2948,18 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw with self.create_client(**kwargs) as ai_client: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = ai_client.assistants.create_vector_store_and_poll( + vector_store = ai_client.create_vector_store_and_poll( file_ids=[], data_sources=ds, name="my_vectorstore" ) - # vector_store = await ai_client.assistants.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') + # vector_store = await ai_client.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') assert vector_store.id file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2968,10 +2967,10 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw tool_resources=file_search.resources, ) assert assistant.id - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert 
thread.id # create message - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", # content="What does the attachment say?" @@ -2982,7 +2981,7 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw if use_stream: run = None - with ai_client.assistants.create_stream( + with ai_client.create_stream( thread_id=thread.id, assistant_id=assistant.id, include=include ) as stream: for event_type, event_data, _ in stream: @@ -2992,26 +2991,26 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw print("Stream completed.") break else: - run = ai_client.assistants.create_and_process_run( + run = ai_client.create_and_process_run( thread_id=thread.id, assistant_id=assistant.id, include=include ) assert run.status == RunStatus.COMPLETED assert run is not None - steps = ai_client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) + steps = ai_client.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) # The 1st (not 0th) step is a tool call. 
step_id = steps.data[1].id - one_step = ai_client.assistants.get_run_step( + one_step = ai_client.get_run_step( thread_id=thread.id, run_id=run.id, step_id=step_id, include=include ) self._assert_file_search_valid(one_step.step_details.tool_calls[0], include_content) self._assert_file_search_valid(steps.data[1].step_details.tool_calls[0], include_content) - messages = ai_client.assistants.list_messages(thread_id=thread.id) + messages = ai_client.list_messages(thread_id=thread.id) assert len(messages) - ai_client.assistants.delete_vector_store(vector_store.id) + ai_client.delete_vector_store(vector_store.id) # delete assistant and close client - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) print("Deleted assistant") ai_client.close() @@ -3039,7 +3038,7 @@ def _assert_file_search_valid(self, tool_call: Any, include_content: bool) -> No def test_assistants_with_json_schema(self, **kwargs): """Test structured output from the assistant.""" with self.create_client(**kwargs) as ai_client: - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( # Note only gpt-4o-mini-2024-07-18 and # gpt-4o-2024-08-06 and later support structured output. 
model="gpt-4o-mini", @@ -3067,24 +3066,24 @@ def test_assistants_with_json_schema(self, **kwargs): ) assert assistant.id - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content=("The mass of the Mars is 6.4171E23 kg"), ) assert message.id - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, run.last_error.message - del_assistant = ai_client.assistants.delete_assistant(assistant.id) + del_assistant = ai_client.delete_assistant(assistant.id) assert del_assistant.deleted - messages = ai_client.assistants.list_messages(thread_id=thread.id) + messages = ai_client.list_messages(thread_id=thread.id) planet_info = [] # The messages are following in the reverse order, @@ -3102,7 +3101,7 @@ def test_assistants_with_json_schema(self, **kwargs): def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str: """Return file id if kwargs has file path.""" if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll( + file = ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -3112,7 +3111,7 @@ def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str: def _remove_file_maybe(self, file_id: str, ai_client: AssistantsClient) -> None: """Remove file if we have file ID.""" if file_id: - ai_client.assistants.delete_file(file_id) + ai_client.delete_file(file_id) @assistantClientPreparer() @pytest.mark.skip("File ID issues with sanitization.") @@ -3131,13 +3130,13 @@ def test_code_interpreter_and_save_file(self, **kwargs): with open(test_file_path, "w") as f: f.write("This is a test file") - file: OpenAIFile = client.assistants.upload_file_and_poll( + file: OpenAIFile = client.upload_file_and_poll( file_path=test_file_path, purpose=FilePurpose.ASSISTANTS ) # create assistant code_interpreter = CodeInterpreterTool(file_ids=[file.id]) - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", @@ -3146,11 +3145,11 @@ def test_code_interpreter_and_save_file(self, **kwargs): ) print(f"Created assistant, assistant ID: {assistant.id}") - thread = client.assistants.create_thread() + thread = client.create_thread() print(f"Created thread, thread ID: {thread.id}") # create a message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Create an image file same as the text file and give me file id?", @@ -3158,15 +3157,15 @@ def test_code_interpreter_and_save_file(self, **kwargs): print(f"Created message, message ID: {message.id}") # create run - run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) print(f"Run finished with status: {run.status}") # delete file - client.assistants.delete_file(file.id) + client.delete_file(file.id) print("Deleted file") # get messages - messages = 
client.assistants.list_messages(thread_id=thread.id) + messages = client.list_messages(thread_id=thread.id) print(f"Messages: {messages}") last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) @@ -3177,7 +3176,7 @@ def test_code_interpreter_and_save_file(self, **kwargs): file_id = file_path_annotation.file_path.file_id print(f"Image File ID: {file_path_annotation.file_path.file_id}") temp_file_path = os.path.join(temp_dir, "output.png") - client.assistants.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) + client.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) output_file_exist = os.path.exists(temp_file_path) assert output_file_exist @@ -3189,7 +3188,7 @@ def test_azure_function_call(self, **kwargs): # Note: This test was recorded in westus region as for now # 2025-02-05 it is not supported in test region (East US 2) # create client - storage_queue = kwargs["azure_ai.assistants_assistants_tests_storage_queue"] + storage_queue = kwargs["azure_ai_assistants_tests_storage_queue"] with self.create_client(**kwargs) as client: azure_function_tool = AzureFunctionTool( name="foo", @@ -3210,7 +3209,7 @@ def test_azure_function_call(self, **kwargs): storage_service_endpoint=storage_queue, ), ) - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4", name="azure-function-assistant-foo", instructions=( @@ -3226,29 +3225,29 @@ def test_azure_function_call(self, **kwargs): assert assistant.id, "The assistant was not created" # Create a thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id, "The thread was not created." # Create a message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="What is the most prevalent element in the universe? What would foo say?", ) assert message.id, "The message was not created." 
- run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state." # Get messages from the thread - messages = client.assistants.list_messages(thread_id=thread.id) + messages = client.list_messages(thread_id=thread.id) assert len(messages.text_messages) > 1, "No messages were received from assistant." # Check that we have function response in at least one message. assert any("bar" in msg.text.value.lower() for msg in messages.text_messages) # Delete the assistant once done - result = client.assistants.delete_assistant(assistant.id) + result = client.delete_assistant(assistant.id) assert result.deleted, "The assistant was not deleted." @assistantClientPreparer() @@ -3259,16 +3258,16 @@ def test_client_with_thread_messages(self, **kwargs): with self.create_client(**kwargs) as client: # [START create_assistant] - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are a personal electronics tutor. Write and run code to answer questions.", ) assert assistant.id, "The assistant was not created." - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id, "Thread was not created" - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="What is the equation of light energy?" ) assert message.id, "The message was not created." 
@@ -3277,7 +3276,7 @@ def test_client_with_thread_messages(self, **kwargs): ThreadMessageOptions(role=MessageRole.ASSISTANT, content="E=mc^2"), ThreadMessageOptions(role=MessageRole.USER, content="What is the impedance formula?"), ] - run = client.assistants.create_run( + run = client.create_run( thread_id=thread.id, assistant_id=assistant.id, additional_messages=additional_messages ) @@ -3285,12 +3284,12 @@ def test_client_with_thread_messages(self, **kwargs): while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS]: # wait for a second time.sleep(1) - run = client.assistants.get_run( + run = client.get_run( thread_id=thread.id, run_id=run.id, ) assert run.status in RunStatus.COMPLETED - assert client.assistants.delete_assistant(assistant.id).deleted, "The assistant was not deleted" - messages = client.assistants.list_messages(thread_id=thread.id) + assert client.delete_assistant(assistant.id).deleted, "The assistant was not deleted" + messages = client.list_messages(thread_id=thread.id) assert len(messages.data), "The data from the assistant was not received." 
diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py index 82bb0d5d1a49..9bb7f478b14c 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py @@ -80,10 +80,15 @@ assistantClientPreparer = functools.partial( EnvironmentVariableLoader, - "azure_ai.assistants", - azure_ai_assistants_assistants_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", - azure_ai_assistants_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", - azure_ai_assistants_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", + "azure_ai_assistants", + # TODO: uncomment this endpoint when re running with 1DP + #azure_ai_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name", + # TODO: remove this endpoint when re running with 1DP + azure_ai_assistants_tests_project_endpoint="https://Sanitized.api.azureml.ms/agents/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/", + azure_ai_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", + azure_ai_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", + azure_ai_assistants_tests_search_index_name="sample_index", + 
azure_ai_assistants_tests_search_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/someindex", ) @@ -123,13 +128,13 @@ class TestAssistantClientAsync(AzureRecordedTestCase): # helper function: create client using environment variables def create_client(self, **kwargs): # fetch environment variables - connection_string = kwargs.pop("azure_ai.assistants_assistants_tests_project_connection_string") + endpoint = kwargs.pop("azure_ai_assistants_tests_project_endpoint") credential = self.get_credential(AssistantsClient, is_async=True) # create and return client - client = AssistantsClient.from_connection_string( + client = AssistantsClient( + endpoint=endpoint, credential=credential, - conn_str=connection_string, ) return client @@ -149,10 +154,10 @@ async def test_clear_client(self, **kwargs): print("Created client") # clear assistant list - assistants = await client.assistants.list_assistants().data + assistants = await client.list_assistants().data for assistant in assistants: - await client.assistants.delete_assistant(assistant.id) - assert client.assistants.list_assistants().data.__len__() == 0 + await client.delete_assistant(assistant.id) + assert client.list_assistants().data.__len__() == 0 # close client await client.close() @@ -192,14 +197,14 @@ async def test_create_delete_assistant(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test assistant creation with tools @@ -215,7 +220,7 @@ async def 
test_create_assistant_with_tools(self, **kwargs): functions = FunctionTool(functions=user_functions_recording) # create assistant with tools - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -228,7 +233,7 @@ async def test_create_assistant_with_tools(self, **kwargs): print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test update assistant without body: JSON @@ -244,17 +249,17 @@ async def test_update_assistant(self, **kwargs): body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} # create assistant - assistant = await client.assistants.create_assistant(body=body) + assistant = await client.create_assistant(body=body) assert assistant.id print("Created assistant, assistant ID", assistant.id) # update assistant and confirm changes went through - assistant = await client.assistants.update_assistant(assistant.id, name="my-assistant2") + assistant = await client.update_assistant(assistant.id, name="my-assistant2") assert assistant.name assert assistant.name == "my-assistant2" # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -271,7 +276,7 @@ async def test_update_assistant_with_body(self, **kwargs): body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} # create assistant - assistant = await client.assistants.create_assistant(body=body) + assistant = await client.create_assistant(body=body) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -279,12 +284,12 @@ async def 
test_update_assistant_with_body(self, **kwargs): body2 = {"name": "my-assistant2", "instructions": "You are helpful assistant"} # update assistant and confirm changes went through - assistant = await client.assistants.update_assistant(assistant.id, body=body2) + assistant = await client.update_assistant(assistant.id, body=body2) assert assistant.name assert assistant.name == "my-assistant2" # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -299,7 +304,7 @@ async def test_update_assistant_with_iobytes(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -309,12 +314,12 @@ async def test_update_assistant_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # update assistant and confirm changes went through - assistant = await client.assistants.update_assistant(assistant.id, body=io.BytesIO(binary_body)) + assistant = await client.update_assistant(assistant.id, body=io.BytesIO(binary_body)) assert assistant.name assert assistant.name == "my-assistant2" # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -325,27 +330,27 @@ async def test_update_assistant_with_iobytes(self, **kwargs): async def test_assistant_list(self, **kwargs): # create client and ensure there are no previous assistants client = self.create_client(**kwargs) - list_length = await client.assistants.list_assistants().data.__len__() + list_length = await client.list_assistants().data.__len__() # create assistant and check that it appears in the list - assistant = await 
client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) - assert client.assistants.list_assistants().data.__len__() == list_length + 1 - assert client.assistants.list_assistants().data[0].id == assistant.id + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant.id # create second assistant and check that it appears in the list - assistant2 = await client.assistants.create_assistant(model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant") - assert client.assistants.list_assistants().data.__len__() == list_length + 2 - assert client.assistants.list_assistants().data[0].id == assistant.id or client.assistants.list_assistants().data[1].id == assistant.id + assistant2 = await client.create_assistant(model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant") + assert client.list_assistants().data.__len__() == list_length + 2 + assert client.list_assistants().data[0].id == assistant.id or client.list_assistants().data[1].id == assistant.id # delete assistants and check list - await client.assistants.delete_assistant(assistant.id) - assert client.assistants.list_assistants().data.__len__() == list_length + 1 - assert client.assistants.list_assistants().data[0].id == assistant2.id + await client.delete_assistant(assistant.id) + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant2.id - client.assistants.delete_assistant(assistant2.id) - assert client.assistants.list_assistants().data.__len__() == list_length + client.delete_assistant(assistant2.id) + assert client.list_assistants().data.__len__() == list_length print("Deleted assistants") # close client @@ -367,20 +372,20 @@ async def test_create_thread(self, **kwargs): print("Created client") # create assistant - assistant = await 
client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test creating thread with no body @@ -395,7 +400,7 @@ async def test_create_thread_with_metadata(self, **kwargs): metadata = {"key1": "value1", "key2": "value2"} # create thread - thread = await client.assistants.create_thread(metadata=metadata) + thread = await client.create_thread(metadata=metadata) assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) @@ -419,7 +424,7 @@ async def test_create_thread_with_body(self, **kwargs): } # create thread - thread = await client.assistants.create_thread(body=body) + thread = await client.create_thread(body=body) assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) @@ -444,7 +449,7 @@ async def test_create_thread_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create thread - thread = await client.assistants.create_thread(body=io.BytesIO(binary_body)) + thread = await client.create_thread(body=io.BytesIO(binary_body)) assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) @@ -463,25 +468,25 @@ async def test_get_thread(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are 
helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # get thread - thread2 = await client.assistants.get_thread(thread.id) + thread2 = await client.get_thread(thread.id) assert thread2.id assert thread.id == thread2.id print("Got thread, thread ID", thread2.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test updating thread @@ -493,23 +498,23 @@ async def test_update_thread(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # update thread - thread = await client.assistants.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) + thread = await client.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) assert thread.metadata == {"key1": "value1", "key2": "value2"} # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -525,7 +530,7 @@ async def test_update_thread_with_metadata(self, **kwargs): metadata = {"key1": "value1", "key2": "value2"} # create thread - thread = await client.assistants.create_thread(metadata=metadata) + thread = await client.create_thread(metadata=metadata) assert thread.id print("Created thread, thread ID", 
thread.id) @@ -533,7 +538,7 @@ async def test_update_thread_with_metadata(self, **kwargs): metadata2 = {"key1": "value1", "key2": "newvalue2"} # update thread - thread = await client.assistants.update_thread(thread.id, metadata=metadata2) + thread = await client.update_thread(thread.id, metadata=metadata2) assert thread.metadata == {"key1": "value1", "key2": "newvalue2"} # close client @@ -548,7 +553,7 @@ async def test_update_thread_with_body(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -556,7 +561,7 @@ async def test_update_thread_with_body(self, **kwargs): body = {"metadata": {"key1": "value1", "key2": "value2"}} # update thread - thread = await client.assistants.update_thread(thread.id, body=body) + thread = await client.update_thread(thread.id, body=body) assert thread.metadata == {"key1": "value1", "key2": "value2"} # close client @@ -571,7 +576,7 @@ async def test_update_thread_with_iobytes(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -580,7 +585,7 @@ async def test_update_thread_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # update thread - thread = await client.assistants.update_thread(thread.id, body=io.BytesIO(binary_body)) + thread = await client.update_thread(thread.id, body=io.BytesIO(binary_body)) assert thread.metadata == {"key1": "value1", "key2": "value2"} # close client @@ -595,26 +600,26 @@ async def test_delete_thread(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id 
print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() # assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) # delete thread - deletion_status = await client.assistants.delete_thread(thread.id) + deletion_status = await client.delete_thread(thread.id) assert deletion_status.id == thread.id assert deletion_status.deleted == True print("Deleted thread, thread ID", deletion_status.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -633,26 +638,26 @@ async def test_create_message(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -665,7 +670,7 @@ async def test_create_message_with_body(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -673,7 +678,7 @@ async 
def test_create_message_with_body(self, **kwargs): body = {"role": "user", "content": "Hello, tell me a joke"} # create message - message = await client.assistants.create_message(thread_id=thread.id, body=body) + message = await client.create_message(thread_id=thread.id, body=body) assert message.id print("Created message, message ID", message.id) @@ -689,7 +694,7 @@ async def test_create_message_with_iobytes(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -698,7 +703,7 @@ async def test_create_message_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create message - message = await client.assistants.create_message(thread_id=thread.id, body=io.BytesIO(binary_body)) + message = await client.create_message(thread_id=thread.id, body=io.BytesIO(binary_body)) assert message.id print("Created message, message ID", message.id) @@ -714,36 +719,36 @@ async def test_create_multiple_messages(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create messages - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) - message2 = await client.assistants.create_message( + message2 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me another joke" ) assert 
message2.id print("Created message, message ID", message2.id) - message3 = await client.assistants.create_message( + message3 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third joke" ) assert message3.id print("Created message, message ID", message3.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -756,47 +761,47 @@ async def test_list_messages(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # check that initial message list is empty - messages0 = await client.assistants.list_messages(thread_id=thread.id) + messages0 = await client.list_messages(thread_id=thread.id) print(messages0.data) assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = await client.assistants.create_message( + message1 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message1.id print("Created message, message ID", message1.id) - messages1 = await client.assistants.list_messages(thread_id=thread.id) + messages1 = await client.list_messages(thread_id=thread.id) assert messages1.data.__len__() == 1 assert messages1.data[0].id == message1.id - message2 = await client.assistants.create_message( + message2 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me another joke" ) assert message2.id print("Created message, message ID", message2.id) 
- messages2 = await client.assistants.list_messages(thread_id=thread.id) + messages2 = await client.list_messages(thread_id=thread.id) assert messages2.data.__len__() == 2 assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id - message3 = await client.assistants.create_message( + message3 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third joke" ) assert message3.id print("Created message, message ID", message3.id) - messages3 = await client.assistants.list_messages(thread_id=thread.id) + messages3 = await client.list_messages(thread_id=thread.id) assert messages3.data.__len__() == 3 assert ( messages3.data[0].id == message3.id @@ -805,7 +810,7 @@ async def test_list_messages(self, **kwargs): ) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -818,32 +823,32 @@ async def test_get_message(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # get message - message2 = await client.assistants.get_message(thread_id=thread.id, message_id=message.id) + message2 = await client.get_message(thread_id=thread.id, message_id=message.id) assert message2.id assert message.id == message2.id print("Got 
message, message ID", message.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test updating message in a thread without body @@ -855,19 +860,19 @@ async def test_update_message(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # update message - message = await client.assistants.update_message( + message = await client.update_message( thread_id=thread.id, message_id=message.id, metadata={"key1": "value1", "key2": "value2"} ) assert message.metadata == {"key1": "value1", "key2": "value2"} @@ -884,12 +889,12 @@ async def test_update_message_with_body(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id @@ -899,7 +904,7 @@ async def test_update_message_with_body(self, **kwargs): body = {"metadata": {"key1": "value1", "key2": "value2"}} # update message - message = await client.assistants.update_message(thread_id=thread.id, message_id=message.id, body=body) + message = await client.update_message(thread_id=thread.id, message_id=message.id, body=body) assert message.metadata == {"key1": "value1", "key2": "value2"} # close client @@ -914,12 +919,12 @@ async def test_update_message_with_iobytes(self, 
**kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id @@ -930,7 +935,7 @@ async def test_update_message_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # update message - message = await client.assistants.update_message( + message = await client.update_message( thread_id=thread.id, message_id=message.id, body=io.BytesIO(binary_body) ) assert message.metadata == {"key1": "value1", "key2": "value2"} @@ -953,24 +958,24 @@ async def test_create_run(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -983,19 +988,19 @@ async def test_create_run_with_metadata(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", 
instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1003,7 +1008,7 @@ async def test_create_run_with_metadata(self, **kwargs): print("Created run, run ID", run.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1016,14 +1021,14 @@ async def test_create_run_with_body(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -1031,13 +1036,13 @@ async def test_create_run_with_body(self, **kwargs): body = {"assistant_id": assistant.id, "metadata": {"key1": "value1", "key2": "value2"}} # create run - run = await client.assistants.create_run(thread_id=thread.id, body=body) + run = await client.create_run(thread_id=thread.id, body=body) assert run.id assert run.metadata == {"key1": "value1", "key2": "value2"} print("Created run, run ID", run.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1050,14 +1055,14 @@ async def 
test_create_run_with_iobytes(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -1066,13 +1071,13 @@ async def test_create_run_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create run - run = await client.assistants.create_run(thread_id=thread.id, body=io.BytesIO(binary_body)) + run = await client.create_run(thread_id=thread.id, body=io.BytesIO(binary_body)) assert run.id assert run.metadata == {"key1": "value1", "key2": "value2"} print("Created run, run ID", run.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1085,30 +1090,30 @@ async def test_get_run(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # get run - run2 = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run2 = await 
client.get_run(thread_id=thread.id, run_id=run.id) assert run2.id assert run.id == run2.id print("Got run, run ID", run2.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1121,26 +1126,26 @@ async def test_run_status(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1158,14 +1163,14 @@ async def test_run_status(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) print("Run status:", run.status) assert run.status in ["cancelled", "failed", "completed", "expired"] print("Run completed with status:", run.status) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1181,39 +1186,39 @@ 
async def test_list_runs(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # check list for current runs - runs0 = await client.assistants.list_runs(thread_id=thread.id) + runs0 = await client.list_runs(thread_id=thread.id) assert runs0.data.__len__() == 0 # create run and check list - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) - runs1 = await client.assistants.list_runs(thread_id=thread.id) + runs1 = await client.list_runs(thread_id=thread.id) assert runs1.data.__len__() == 1 assert runs1.data[0].id == run.id # create second run - run2 = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run2 = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run2.id print("Created run, run ID", run2.id) - runs2 = await client.assistants.list_runs(thread_id=thread.id) + runs2 = await client.list_runs(thread_id=thread.id) assert runs2.data.__len__() == 2 assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() """ @@ -1227,33 +1232,33 @@ async def test_update_run(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( 
model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) - run = await client.assistants.update_run( + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run( thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.metadata == {"key1": "value1", "key2": "value2"} # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1266,19 +1271,19 @@ async def test_update_run_with_metadata(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1288,14 +1293,14 @@ async def 
test_update_run_with_metadata(self, **kwargs): # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) - run = await client.assistants.update_run( + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run( thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "newvalue2"} ) assert run.metadata == {"key1": "value1", "key2": "newvalue2"} # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1308,19 +1313,19 @@ async def test_update_run_with_body(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1333,12 +1338,12 @@ async def test_update_run_with_body(self, **kwargs): # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) - run = await client.assistants.update_run(thread_id=thread.id, run_id=run.id, body=body) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run(thread_id=thread.id, run_id=run.id, body=body) assert run.metadata == {"key1": "value1", "key2": "newvalue2"} # delete assistant and close client - await 
client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1351,19 +1356,19 @@ async def test_update_run_with_iobytes(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1377,12 +1382,12 @@ async def test_update_run_with_iobytes(self, **kwargs): # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) - run = await client.assistants.update_run(thread_id=thread.id, run_id=run.id, body=io.BytesIO(binary_body)) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run(thread_id=thread.id, run_id=run.id, body=io.BytesIO(binary_body)) assert run.metadata == {"key1": "value1", "key2": "newvalue2"} # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1404,26 +1409,26 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): # toolset.add(code_interpreter) # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id 
print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, what time is it?" ) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1445,7 +1450,7 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1453,14 +1458,14 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - await client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + await client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run tool_outputs = toolset.execute_tool_calls(tool_calls) print("Tool outputs:", tool_outputs) if tool_outputs: - await client.assistants.submit_tool_outputs_to_run( + await client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs ) @@ -1469,7 +1474,7 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): print("Run completed with status:", run.status) # check that messages used the tool - messages = await 
client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] hour12 = time.strftime("%H") hour24 = time.strftime("%I") @@ -1478,7 +1483,7 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): print("Used tool_outputs") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test submitting tool outputs to run with body: JSON @@ -1496,26 +1501,26 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): toolset.add(functions) # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, what time is it?" 
) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1537,7 +1542,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1545,7 +1550,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - await client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + await client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run @@ -1553,7 +1558,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): print("Tool outputs:", tool_outputs) if tool_outputs: body = {"tool_outputs": tool_outputs} - await client.assistants.submit_tool_outputs_to_run( + await client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, body=body ) @@ -1562,7 +1567,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): print("Run completed with status:", run.status) # check that messages used the tool - messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] # hour12 = time.strftime("%H") # hour24 = time.strftime("%I") @@ -1573,7 +1578,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, 
**kwargs): print("Used tool_outputs") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test submitting tool outputs to run with body: IO[bytes] @@ -1591,26 +1596,26 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): toolset.add(functions) # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, what time is it?" 
) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1632,7 +1637,7 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1640,7 +1645,7 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run @@ -1649,7 +1654,7 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): if tool_outputs: body = {"tool_outputs": tool_outputs} binary_body = json.dumps(body).encode("utf-8") - await client.assistants.submit_tool_outputs_to_run( + await client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, body=io.BytesIO(binary_body) ) @@ -1658,7 +1663,7 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): print("Run completed with status:", run.status) # check that messages used the tool - messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] # hour12 = time.strftime("%H") # hour24 = time.strftime("%I") @@ -1669,7 +1674,7 @@ async def 
test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): print("Used tool_outputs") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") """ @@ -1683,24 +1688,24 @@ async def test_cancel_run(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = await client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1722,7 +1727,7 @@ async def test_cancel_run(self, **kwargs): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1732,14 +1737,14 @@ async def test_cancel_run(self, **kwargs): print( "No tool calls provided - cancelling run" ) # TODO how can i make sure that it wants tools? should i have some kind of error message? 
- await client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + await client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here print("Tool outputs:", tool_outputs) if tool_outputs: - await client.assistants.submit_tool_outputs_to_run( + await client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs ) @@ -1748,7 +1753,7 @@ async def test_cancel_run(self, **kwargs): print("Run completed with status:", run.status) # check that messages used the tool - messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] hour12 = time.strftime("%H") hour24 = time.strftime("%I") @@ -1757,7 +1762,7 @@ async def test_cancel_run(self, **kwargs): print("Used tool_outputs") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() """ @@ -1794,7 +1799,7 @@ async def _wait_for_run(self, client, run, timeout=1): """Wait while run will get to terminal state.""" while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS, RunStatus.REQUIRES_ACTION]: time.sleep(timeout) - run = await client.assistants.get_run(thread_id=run.thread_id, run_id=run.id) + run = await client.get_run(thread_id=run.thread_id, run_id=run.id) return run async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_run, **kwargs): @@ -1813,7 +1818,7 @@ async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_t toolset = ToolSet() toolset.add(functions) toolset.add(code_interpreter) - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4", name="my-assistant", 
instructions="You are helpful assistant", @@ -1827,16 +1832,16 @@ async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_t ) if create_thread_run: - run = await client.assistants.create_thread_and_run( + run = await client.create_thread_and_run( assistant_id=assistant.id, parallel_tool_calls=use_parallel_runs, ) run = await self._wait_for_run(client, run) else: - thread = await client.assistants.create_thread(messages=[message]) + thread = await client.create_thread(messages=[message]) assert thread.id - run = await client.assistants.create_and_process_run( + run = await client.create_and_process_run( thread_id=thread.id, assistant_id=assistant.id, parallel_tool_calls=use_parallel_runs, @@ -1845,8 +1850,8 @@ async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_t assert run.status == RunStatus.COMPLETED, run.last_error.message assert run.parallel_tool_calls == use_parallel_runs - assert (await client.assistants.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" - messages = await client.assistants.list_messages(thread_id=run.thread_id) + assert (await client.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" + messages = await client.list_messages(thread_id=run.thread_id) assert len(messages.data), "The data from the assistant was not received." 
""" @@ -1860,38 +1865,38 @@ async def test_cancel_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # check status and cancel assert run.status in ["queued", "in_progress", "requires_action"] - client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) while run.status in ["queued", "cancelling"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) print("Current run status:", run.status) assert run.status == "cancelled" print("Run cancelled") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() """ @@ -1906,20 +1911,20 @@ async def test_create_thread_and_run(self, **kwargs): print("Created client") # create assistant - assistant = await 
client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread and run - run = await client.assistants.create_thread_and_run(assistant_id=assistant.id) + run = await client.create_thread_and_run(assistant_id=assistant.id) assert run.id assert run.thread_id print("Created run, run ID", run.id) # get thread - thread = await client.assistants.get_thread(run.thread_id) + thread = await client.get_thread(run.thread_id) assert thread.id print("Created thread, thread ID", thread.id) @@ -1937,7 +1942,7 @@ async def test_create_thread_and_run(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # assert run.status in ["queued", "in_progress", "requires_action", "completed"] print("Run status:", run.status) @@ -1945,7 +1950,7 @@ async def test_create_thread_and_run(self, **kwargs): print("Run completed") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test create thread and run with body: JSON @@ -1958,7 +1963,7 @@ async def test_create_thread_and_run_with_body(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -1971,14 +1976,14 @@ async def test_create_thread_and_run_with_body(self, **kwargs): } # create thread and run - run = await client.assistants.create_thread_and_run(body=body) + run = await client.create_thread_and_run(body=body) assert run.id assert 
run.thread_id assert run.metadata == {"key1": "value1", "key2": "value2"} print("Created run, run ID", run.id) # get thread - thread = await client.assistants.get_thread(run.thread_id) + thread = await client.get_thread(run.thread_id) assert thread.id print("Created thread, thread ID", thread.id) @@ -1996,7 +2001,7 @@ async def test_create_thread_and_run_with_body(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # assert run.status in ["queued", "in_progress", "requires_action", "completed"] print("Run status:", run.status) @@ -2004,7 +2009,7 @@ async def test_create_thread_and_run_with_body(self, **kwargs): print("Run completed") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -2018,7 +2023,7 @@ async def test_create_thread_and_run_with_iobytes(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -2032,14 +2037,14 @@ async def test_create_thread_and_run_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create thread and run - run = await client.assistants.create_thread_and_run(body=io.BytesIO(binary_body)) + run = await client.create_thread_and_run(body=io.BytesIO(binary_body)) assert run.id assert run.thread_id assert run.metadata == {"key1": "value1", "key2": "value2"} print("Created run, run ID", run.id) # get thread - thread = await client.assistants.get_thread(run.thread_id) + thread = await client.get_thread(run.thread_id) assert thread.id print("Created thread, thread ID", 
thread.id) @@ -2057,7 +2062,7 @@ async def test_create_thread_and_run_with_iobytes(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # assert run.status in ["queued", "in_progress", "requires_action", "completed"] print("Run status:", run.status) @@ -2065,7 +2070,7 @@ async def test_create_thread_and_run_with_iobytes(self, **kwargs): print("Run completed") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -2081,30 +2086,30 @@ async def test_list_run_step(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, what time is it?" 
) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) - steps = await client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = await client.list_run_steps(thread_id=thread.id, run_id=run.id) # commenting assertion out below, do we know exactly when run starts? # assert steps['data'].__len__() == 0 @@ -2113,7 +2118,7 @@ async def test_list_run_step(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) assert run.status in [ "queued", "in_progress", @@ -2121,7 +2126,7 @@ async def test_list_run_step(self, **kwargs): "completed", ] print("Run status:", run.status) - steps = await client.assistants.list_run_steps( + steps = await client.list_run_steps( thread_id=thread.id, run_id=run.id ) assert steps["data"].__len__() > 0 @@ -2130,7 +2135,7 @@ async def test_list_run_step(self, **kwargs): print("Run completed") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() """ @@ -2144,26 +2149,26 @@ async def test_get_run_step(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created 
thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" ) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -2177,7 +2182,7 @@ async def test_get_run_step(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) if run.status == "failed": assert run.last_error print(run.last_error) @@ -2191,14 +2196,14 @@ async def test_get_run_step(self, **kwargs): print("Run status:", run.status) # list steps, check that get_run_step works with first step_id - steps = await client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = await client.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 step = steps["data"][0] - get_step = await client.assistants.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + get_step = await client.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) assert step == get_step # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -2241,11 +2246,11 @@ async def _do_test_create_vector_store(self, streaming, **kwargs): else: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], 
asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.assistants.create_vector_store_and_poll( + vector_store = await ai_client.create_vector_store_and_poll( file_ids=file_ids, data_sources=ds, name="my_vectorstore" ) assert vector_store.id @@ -2289,12 +2294,12 @@ async def _do_test_create_vector_store_add_file(self, streaming, **kwargs): ds = None else: ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) - vector_store = await ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = await ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file = await ai_client.assistants.create_vector_store_file( + vector_store_file = await ai_client.create_vector_store_file( vector_store_id=vector_store.id, data_source=ds, file_id=file_id ) assert vector_store_file.id @@ -2341,13 +2346,13 @@ async def _do_test_create_vector_store_batch(self, streaming, **kwargs): file_ids = None ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = await ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file_batch = await ai_client.assistants.create_vector_store_file_batch_and_poll( + vector_store_file_batch = await ai_client.create_vector_store_file_batch_and_poll( vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids ) assert vector_store_file_batch.id @@ -2358,7 +2363,7 @@ async def 
_test_file_search( ) -> None: """Test the file search""" file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2366,17 +2371,17 @@ async def _test_file_search( tool_resources=file_search.resources, ) assert assistant.id - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id # create message - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." if streaming: thread_run = None - async with await ai_client.assistants.create_stream( + async with await ai_client.create_stream( thread_id=thread.id, assistant_id=assistant.id ) as stream: async for _, event_data, _ in stream: @@ -2391,17 +2396,17 @@ async def _test_file_search( event_data.delta.step_details.tool_calls[0].file_search, RunStepFileSearchToolCallResults ) assert thread_run is not None - run = await ai_client.assistants.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) + run = await ai_client.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) assert run is not None else: - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) - await ai_client.assistants.delete_vector_store(vector_store.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + await ai_client.delete_vector_store(vector_store.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + messages = await ai_client.list_messages(thread_id=thread.id) assert len(messages) 
await self._remove_file_maybe(file_id, ai_client) # delete assistant and close client - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) print("Deleted assistant") await ai_client.close() @@ -2411,7 +2416,7 @@ async def _test_file_search( async def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_message_attachment(data_sources=[ds], **kwargs) @@ -2431,14 +2436,14 @@ async def _do_test_message_attachment(self, **kwargs): file_id = await self._get_file_id_maybe(ai_client, **kwargs) # Create assistant with file search tool - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", ) assert assistant.id, "Assistant was not created" - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id, "The thread was not created." # Create a message with the file search attachment @@ -2451,7 +2456,7 @@ async def _do_test_message_attachment(self, **kwargs): CodeInterpreterTool().definitions[0], ], ) - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?", @@ -2459,12 +2464,12 @@ async def _do_test_message_attachment(self, **kwargs): ) assert message.id, "The message was not created." 
- run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." await self._remove_file_maybe(file_id, ai_client) - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + messages = await ai_client.list_messages(thread_id=thread.id) assert len(messages), "No messages were created" await ai_client.close() @@ -2479,7 +2484,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2492,7 +2497,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ] ) file_search = FileSearchTool() - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2501,19 +2506,19 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert assistant.id - thread = await ai_client.assistants.create_thread(tool_resources=ToolResources(file_search=fs)) + thread = await ai_client.create_thread(tool_resources=ToolResources(file_search=fs)) assert thread.id # create message - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." 
- run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = await ai_client.assistants.list_messages(thread.id) + messages = await ai_client.list_messages(thread.id) assert len(messages) - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) await ai_client.close() @assistantClientPreparer() @@ -2522,7 +2527,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): async def test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @@ -2543,7 +2548,7 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = await ai_client.assistants.upload_file_and_poll( + file = await ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -2555,7 +2560,7 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2564,20 +2569,20 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): ) assert assistant.id, "Assistant was not created" - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id, "The thread was not created." - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." 
await self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" - await ai_client.assistants.delete_assistant(assistant.id) - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + await ai_client.delete_assistant(assistant.id) + messages = await ai_client.list_messages(thread_id=thread.id) assert len(messages), "No messages were created" await ai_client.close() @@ -2587,7 +2592,7 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): async def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -2608,7 +2613,7 @@ async def _do_test_create_thread_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = await ai_client.assistants.upload_file_and_poll( + file = await ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -2620,7 +2625,7 @@ async def _do_test_create_thread_with_interpreter(self, **kwargs): ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", @@ -2628,20 +2633,20 @@ async def _do_test_create_thread_with_interpreter(self, **kwargs): ) assert assistant.id, "Assistant was not created" - thread = await ai_client.assistants.create_thread(tool_resources=tr) + thread = await ai_client.create_thread(tool_resources=tr) assert thread.id, "The thread was not created." - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." 
await self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" - await ai_client.assistants.delete_assistant(assistant.id) - messages = await ai_client.assistants.list_messages(thread.id) + await ai_client.delete_assistant(assistant.id) + messages = await ai_client.list_messages(thread.id) assert len(messages) await ai_client.close() @@ -2656,7 +2661,7 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2669,7 +2674,7 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ] ) file_search = FileSearchTool() - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2678,19 +2683,19 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ) assert assistant.id - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id # create message - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." 
- run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = await ai_client.assistants.list_messages(thread.id) + messages = await ai_client.list_messages(thread.id) assert len(messages) - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) await ai_client.close() @assistantClientPreparer() @@ -2699,7 +2704,7 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): async def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs) @@ -2719,7 +2724,7 @@ async def _do_test_create_attachment_in_thread_azure(self, **kwargs): file_id = await self._get_file_id_maybe(ai_client, **kwargs) file_search = FileSearchTool() - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2741,14 +2746,14 @@ async def _do_test_create_attachment_in_thread_azure(self, **kwargs): content="What does the attachment say?", attachments=[attachment], ) - thread = await ai_client.assistants.create_thread(messages=[message]) + thread = await ai_client.create_thread(messages=[message]) assert thread.id - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await 
ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = await ai_client.assistants.list_messages(thread.id) + messages = await ai_client.list_messages(thread.id) assert len(messages) - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) await ai_client.close() @assistantClientPreparer() @@ -2758,7 +2763,7 @@ async def test_azure_function_call(self, **kwargs): # Note: This test was recorded in westus region as for now # 2025-02-05 it is not supported in test region (East US 2) # create client - storage_queue = kwargs["azure_ai.assistants_assistants_tests_storage_queue"] + storage_queue = kwargs["azure_ai_assistants_tests_storage_queue"] async with self.create_client(**kwargs) as client: azure_function_tool = AzureFunctionTool( name="foo", @@ -2779,7 +2784,7 @@ async def test_azure_function_call(self, **kwargs): storage_service_endpoint=storage_queue, ), ) - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4", name="azure-function-assistant-foo", instructions=( @@ -2795,29 +2800,29 @@ async def test_azure_function_call(self, **kwargs): assert assistant.id, "The assistant was not created" # Create a thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id, "The thread was not created." # Create a message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="What is the most prevalent element in the universe? What would foo say?", ) assert message.id, "The message was not created." 
- run = await client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state." # Get messages from the thread - messages = await client.assistants.list_messages(thread_id=thread.id) + messages = await client.list_messages(thread_id=thread.id) assert len(messages.text_messages) > 1, "No messages were received from assistant." # Chech that we have function response in at least one message. assert any("bar" in msg.text.value.lower() for msg in messages.text_messages) # Delete the assistant once done - result = await client.assistants.delete_assistant(assistant.id) + result = await client.delete_assistant(assistant.id) assert result.deleted, "The assistant was not deleted." @assistantClientPreparer() @@ -2827,16 +2832,16 @@ async def test_client_with_thread_messages(self, **kwargs): async with self.create_client(**kwargs) as client: # [START create_assistant] - assistant = await client.assistants.create_assistant( - model="gpt-4-1106-preview", + assistant = await client.create_assistant( + model="gpt-4", name="my-assistant", instructions="You are helpful assistant", ) assert assistant.id, "The assistant was not created." - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id, "Thread was not created" - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="What is the equation of light energy?" ) assert message.id, "The message was not created." 
@@ -2845,7 +2850,7 @@ async def test_client_with_thread_messages(self, **kwargs): ThreadMessageOptions(role=MessageRole.ASSISTANT, content="E=mc^2"), ThreadMessageOptions(role=MessageRole.USER, content="What is the impedance formula?"), ] - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, additional_messages=additional_messages ) @@ -2853,14 +2858,14 @@ async def test_client_with_thread_messages(self, **kwargs): while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS]: # wait for a second time.sleep(1) - run = await client.assistants.get_run( + run = await client.get_run( thread_id=thread.id, run_id=run.id, ) - assert run.status in RunStatus.COMPLETED + assert run.status in RunStatus.COMPLETED, run.last_error - assert (await client.assistants.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" - messages = await client.assistants.list_messages(thread_id=thread.id) + assert (await client.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" + messages = await client.list_messages(thread_id=thread.id) assert len(messages.data), "The data from the assistant was not received." 
@assistantClientPreparer() @@ -2884,18 +2889,18 @@ async def _do_test_include_file_search_results(self, use_stream, include_content async with self.create_client(**kwargs) as ai_client: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.assistants.create_vector_store_and_poll( + vector_store = await ai_client.create_vector_store_and_poll( file_ids=[], data_sources=ds, name="my_vectorstore" ) - # vector_store = await ai_client.assistants.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') + # vector_store = await ai_client.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') assert vector_store.id file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2903,10 +2908,10 @@ async def _do_test_include_file_search_results(self, use_stream, include_content tool_resources=file_search.resources, ) assert assistant.id - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id # create message - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", # content="What does the attachment say?" 
@@ -2917,7 +2922,7 @@ async def _do_test_include_file_search_results(self, use_stream, include_content if use_stream: run = None - async with await ai_client.assistants.create_stream( + async with await ai_client.create_stream( thread_id=thread.id, assistant_id=assistant.id, include=include ) as stream: async for event_type, event_data, _ in stream: @@ -2927,26 +2932,26 @@ async def _do_test_include_file_search_results(self, use_stream, include_content print("Stream completed.") break else: - run = await ai_client.assistants.create_and_process_run( + run = await ai_client.create_and_process_run( thread_id=thread.id, assistant_id=assistant.id, include=include ) assert run.status == RunStatus.COMPLETED assert run is not None - steps = await ai_client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) + steps = await ai_client.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) # The 1st (not 0th) step is a tool call. step_id = steps.data[1].id - one_step = await ai_client.assistants.get_run_step( + one_step = await ai_client.get_run_step( thread_id=thread.id, run_id=run.id, step_id=step_id, include=include ) self._assert_file_search_valid(one_step.step_details.tool_calls[0], include_content) self._assert_file_search_valid(steps.data[1].step_details.tool_calls[0], include_content) - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + messages = await ai_client.list_messages(thread_id=thread.id) assert len(messages) - await ai_client.assistants.delete_vector_store(vector_store.id) + await ai_client.delete_vector_store(vector_store.id) # delete assistant and close client - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) print("Deleted assistant") await ai_client.close() @@ -2974,7 +2979,7 @@ def _assert_file_search_valid(self, tool_call: Any, include_content: bool) -> No async def test_assistants_with_json_schema(self, **kwargs): """Test 
structured output from the assistant.""" async with self.create_client(**kwargs) as ai_client: - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( # Note only gpt-4o-mini-2024-07-18 and # gpt-4o-2024-08-06 and later support structured output. model="gpt-4o-mini", @@ -3002,24 +3007,24 @@ async def test_assistants_with_json_schema(self, **kwargs): ) assert assistant.id - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content=("The mass of the Mars is 6.4171E23 kg"), ) assert message.id - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, run.last_error.message - del_assistant = await ai_client.assistants.delete_assistant(assistant.id) + del_assistant = await ai_client.delete_assistant(assistant.id) assert del_assistant.deleted - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + messages = await ai_client.list_messages(thread_id=thread.id) planet_info = [] # The messages are following in the reverse order, @@ -3037,7 +3042,7 @@ async def test_assistants_with_json_schema(self, **kwargs): async def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str: """Return file id if kwargs has file path.""" if "file_path" in kwargs: - file = await ai_client.assistants.upload_file_and_poll( + file = await ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -3047,7 +3052,7 @@ async def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str async def _remove_file_maybe(self, file_id: str, ai_client: AssistantsClient) -> None: """Remove file if we have file ID.""" if file_id: - await ai_client.assistants.delete_file(file_id) + await ai_client.delete_file(file_id) # # ********************************************************************************** # # @@ -3081,7 +3086,7 @@ async def test_negative_create_delete_assistant(self, **kwargs): # attempt to create assistant with bad client exception_caught = False try: - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) # check for error (will not have a status code since it failed on request -- no response was recieved) diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py index 67359b64dae4..5bae3fbe46f1 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py @@ -60,9 +60,6 @@ def get_mock_client(self) -> AssistantsClient: """Return the fake project client""" client = AssistantsClient( endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", - subscription_id="00000000-0000-0000-0000-000000000000", - resource_group_name="non-existing-rg", - project_name="non-existing-project", credential=MagicMock(), ) client.submit_tool_outputs_to_run = MagicMock() diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py index 14455ea69d5d..278b3ad963f1 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py @@ -60,9 +60,6 @@ def get_mock_client(self) -> AssistantsClient: """Return the fake project client""" client = 
AssistantsClient( endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", - subscription_id="00000000-0000-0000-0000-000000000000", - resource_group_name="non-existing-rg", - project_name="non-existing-project", credential=AsyncMock(), ) client.submit_tool_outputs_to_run = AsyncMock() diff --git a/sdk/ai/azure-ai-assistants/tsp-location.yaml b/sdk/ai/azure-ai-assistants/tsp-location.yaml index 386b3a320a97..524b693d4faf 100644 --- a/sdk/ai/azure-ai-assistants/tsp-location.yaml +++ b/sdk/ai/azure-ai-assistants/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Assistants -commit: 877eb552cfa93c270707e2404f63fc05683c6f87 +commit: 4b4d290aa11e0d562c7e53276effe9a60fcec1fa repo: Azure/azure-rest-api-specs additionalDirectories: diff --git a/sdk/ai/ci.yml b/sdk/ai/ci.yml index 117b9c6c785d..633ae26cc11e 100644 --- a/sdk/ai/ci.yml +++ b/sdk/ai/ci.yml @@ -50,6 +50,8 @@ extends: # Selection: sparse # GenerateVMJobs: true Artifacts: + - name: azure-ai-assistants + safeName: azureaiassistants - name: azure-ai-projects safeName: azureaiprojects - name: azure-ai-inference

`YBf zQ!5coGNvpyC#MKaPNW;sS+`qB#F^oy z``D$s(W+P2LCHJO<^cqc2?!;j;X!!ZRMB$Vz~4ZrQAp%izOPyfH`og4mCsw<+w5g1 zhYI}+pUjONfdwZYXwQwg*c{Q)-1CjQ9&z{H5{`#%H5+?W?T%?xX!;^$6e%WZnABVB zT}$2k)9>G}gVMI7yz|3CBspHlm5hsz-&YEj!y?~d^ytz1(7$`cUcxF8@Q%Mt*`5KL zU9A?n>}KOA0@}aqZ&Vb&T;SW)#1tBy#1pS}+DKb{qD|R* zdE%$SSrx{YEcNJMJTYe6xEgdDo!!OzPb!DwD@?p-Mx$AWCHi37-3Hp)+UHP05bukM zD>F_M=|VTd=Xq#>?0_bGQdt@RR`rW%?VbxicA3!2Ee9rRc&GbD~T%?SHI{VJ@h}Te+oJk`pK0!145|*`CS5J64}q zB#hm^|LoOxY4MmTK-K^?2@46W)6mdx+gzq!@UgjzcfQMRl}q7UzkKvAIeq^8#l3sS z-%CGZTo~qEg8Ht?Y_-yG-uS*+G1~nym=$x+`PQq`_ZEv=o?$b zjSQI?H6jKD=&g==#CBS5QhafB`fiU6icv?zFHbMN>#a4~Y||h9BP7s8acsK8g>~M+ z=N;>Ww7t|DtT7Ad+&S%fmrI{82wfLv@@LPY4k`RXDY)YMK9AG0IXMeq@=_j|85zoi zKRG`~eh5VlfG&PllUG841#I?T{rhlH$_5Pwj^MR^-8CLl2q&uTL|}`V-%>VMZ8M(r zXR?NE63-bFkTQDh1I?0{U3=Y@Gs;f-`_E<(0+j8^pr%OpZNX?v0*mR#y=`pC@;L5f zk42$ffy-s>IT=NpYI>-8Ys!wsrE(Ev!t0T&7Io$_d$48Y#moavbntLb6Qs< zmIhoQ7HQPH;0~F#MX0j;b04puRu?8cHQ)r+zO9e^A?{VA} z?^@HJ(=f&&QO*R3Vz%y8;!YRVEp-u?xcFWHTll=w{`*y5#tv6pe$OrS6z^+>`_Nyv zHf}A3;n?t_pKc}Vf{}I|V&rzWT`Mn=K4hd#PEM2$kOSCc356DZyb__DnVnq#SZJS{ z3e>UQ81lZ^O@%E65R0_VSFc`W2T@42PP`gonIhgSe1AmAGaZ3paZlSk7fG`vM?^W5E%iz#Z1va40 zGp3tCfbVOxI~@cDbfNLvtV&ZynJsVwS?z-N!u`RZp8Mok=2W#AG{t%M=hE`JiJ7BEKN_(P$)9QIekIOb2ByuF$4>CfY|Y3(oirO;fczk+_(mF z7lvDwd@#KRDu|LE*g;?kiljr!8)I9>gps*HVk<}~Ar|uMqO5yspqV%mrJka0qa3wQ z@M!_5?R*R8UrFW+|FW=}=&~@iQnOAPJBsXad8+oZAG!+^y(`|W=D%+H41ejTicV~}_0fG@v5&kYI43yh_i)bsH{wn!PoeBWzDKJLLd%&# z*ea=*Lg|tpR>LXGPM#c9wpLLwdV3jcZevHS>kQ1!&GpKwiL`!>VHp)4w72@HSDtXqGd@8R6#v?E$ZA7@gaXu|F` zGqv+2COFXZF)EN&M4pteSB%Nwih5&t;(Hg z_bJPj>mn@u=BSmVXTX~39 z9HnPlq@Ixb8Ij1q7!{3-jMzbozPuPgu7V;U#}#LxfH6a~9X!7$nDk3vT7pWnwU?)7 z?eY1!`G}I+n}E2i$U3NytRBF^Xp1cBdF^Cu!EV3C09N{oq7did&D=Tttemr>XGu1y z*jj9|B}g7C52(c44XuXwy?gtR8VUvkuk^}?gJ$>!z|2QBOq|>>!Z0f z@HCBkT}pc^4U%O&y4H9j67`M>#u%y~s+Q(Q%`~_nBSC90e^F<%hg8k+qema%dPQMQ zix!z0!03?&jYb0l+l9NP2oB?Y!*swRQ8@LE$8N}@|MB5EiKk&lczvtATYH^MC>;S$ zFlsp!9`o{wN;6ZhhzE^r*^O-_pPW&=zr}o7%vK9oTr&ytUd&62`;L$axOj0Y&Ob5; 
zKTW2j)N@&{V>FUJ6!4QlzrE`J0V7i(pRo z+x@AQhL?#$gH%;r9T66`3YI7p^nkXR)6Tp`HzOCN$&cA425?*)AlVyi>&tL);cVE0 zfHIP-z+Y{E_$4(k2qW4S_2^y^K{xTazeXn74U)mFtXZR^qycgkbKq)UU-v{kRtInV zh#Ty@K^UfmSlCK2%QA`d>V7{iID=94$~aza`OKA8B=MCRIVDJUb3Z`x>c4SUdm!mjR(QJww zrJQT=`zBmB?SUS1z_B$$wfn94HTAM0kYHI;uYdOhi_#Xx@E$KCXf);2LVK+t*MJk^ z3N$C4nYp>e*kZ0=b{t$dBp)CwN=d4gi8S+P4K2Mcwb-bbue}$QTogNsLnFF)JIFp7 zFdB2UdLpI!?jPIhiW{3Q>6~~2Xb|e`S_)7{Ph(yh(|V@7v4o_oXxTDr-W^C$>-AEK zt5*4owqg$ZdQIK!2TDIRgq1F9uHN{bRyHK5oc0hI$=f7bRZj}QQ2!~ z&z~1?gun*Gao83pE;Qi)NC5h1h~-10GkPKLyQ@!1!!|z zK*V~yvga^{dsePH@o{X}qX5j^boq6gy75V(#i5tW03W0vlhRPz%U!a1Jf&pQS(chk zZx=wuRZzwJ*>PefkC;#c%r8$_g8HG!quKAtNY)hE=WBDm&;dnUdq;V z-^5^MIQAAw0ygXjJ+&FLl-yK$1Jofi4Rcg*HlbmbsyeM&hzoQ_>I01U#X_OJQ%e(( zgAlPIVkBdtfsT$lx-<@g11v2o^UnwlEEP-^u*@%`cn!Ae-jiWii2=C9_}qV=C&=&) zhJJv_!U|bw^97QLhzC6ga@@y50(k#$h@G}51-9AFc_kFxexkk1oAPJrCS!>ShkxPq z#ye`>$om;(sxbhWI8hOaUfd^x#~qIx0h6~HfQ{-Qxx{v#`}$<73mBBETq)CY^)T|4 zPvVjT4vvAW#thmo8ahmkxu0#ZHiV+5yAxabr(+>)gvod!mf}B_*_F z%AP|R?%5futY`uK^%u70!M{b_8f)&H%ctFm*qxx&+xvMR&)0j=+y= zf?8ID9W}Uj?_O43o+cmy&%9~1)Ei>ViF{yh>P6u1xRNGyz&iVA2O#w_bsCg8iyKrZ zVF_(@3d9){kEwiS5W{U#VVEdO#nJI7?PfDYB4v&V2|-0L>DRUBlxtfkR8j zVykIr{2ig%YYUWD&d|TJvo1W8*O?If!1V)8-bLD(-kieWC z3Ha!_bHDGA@ZhWiuxWJ-x>axwG;=TjOPVI;k+K~Ny3Q{>PL!g(-L!>!z8}}=g18|V z_W+dOR>x7CHMGXzehhpri34O_cOAE6UmDxQsWSacSNR$J9ZBw18%x1vb3V#O#%&P_ zzy73l)s0^JiK~GKelpaJ!-~Rtz9J*Q8+!sZL2Ca}HJFO}w3XlL{}YV`zs%kAGppF3 z8NZ?QNIQ=oKl;asJ?xhA=E}Zod8*L5F#?F24sD~q1G467A^;tIrl(9$JwyD@seq8p8% z0Ak?i*2C~x9G|99^B6Y;zldzSIrK~RkngniO<9B##<=yAY~&s`KF)&Vh3527c5qg9 z_9=_f2>Vildv-XwDZ~N)C-TyzOLMarjLPE^J%hn+b50E4h*?=!%-_XnyH?!f(VfH* z5`E(*9t!5DQ1U+JF5}=a($;8tcl4kT3zc!AMLqhX0+Kt~%Ph;%MgI3rNgKFXwsjN*vuN6%U7DnRq z5(Fnx6d_YcK;TYoua=C#pV7P}9M4-uoG4LBU=3~Ep@oY?`45O!Ny)vSw>U=r1c89B z_CuNt!BYX2N6d`#_7|TwlhE$@?#~t!k|-@*nyIp3&Nn5;Xs3x2%8WQVj=U$FhkxYr z+ib*lF-zW>0AU2%n| z;9BpEGbogfX%Tn{2B@BQo3b5sCB?r>bqFrN2QrNGaAVXvIc0Tq3| 
zUHNVe&D}|}9(D`+eD635cu&NWN-xE|_9y+@N3)OKdubBQi`xfuklc+?6$s^azw;@7%~qK85H;=0`Z-kbZw?9O9m6bTLf#gZYg3=+$f1MD2WwH(g7qPiC>HAxq1hlI;?% zb}i)`Ov6d+M0!mmPVKB3PEDHmkfL`WEu=JvI-p(qsvl%zr%*(NpgTp_@9(&dx4j zMK}yIz)cn4EP0x`D{0=md3)FH*NHPcG7EOCRHWS_( zkOw*4lf-D3&UgM;q=lHnroJupf0y!puG6dkPI@S+zK;U`sW&3N8eq3(`&)po#B`$L z41M`>6XYt-oZjWokUvxBqGyn218xUj{0U%k4DOHyzP=?%hN_Or$zA!VfoZ7?fDt?s zi`}ffyu36VRu@uuF%snD9t*0^tpREkyQr%@?^Yg}h_Efj7VG%izZ?V+EsOnoDtsw) z#V44kluZzgs7}2XFgZtfFi3sO!cZ4KjIGzM_wD(XU4`&yFyU-Gqi1)HaOcLy*13`^ zLw$Fww(26VZ+jWqxfno9t2m~t3AqjsPe(wE&&k&-k*YzIk2peQDmt;^)(5vo z#Ql@*mhHmuhJB@Epv3hti3cVv#2l!~vzS;S zn(Pb-ir|6;iN$31BRxt#Vw4Idrjr>;%DyRpieGKS*@VUC!iUm7sV2uqv+^wISimrg z^Qo5mQrJBjmO7ph0lIc}zH#xDoZ^6p$53V;t8v|h|Ic=isn0;YZqe4T`V0bJ+`S#( z+rCnOr4c)GX*xH?ld>t#G5NWqs;cTFv8@N7$sk)S<~rzl&h^X#*eB|du3F??)P7BN}j3%1}G7k7_!ORrpqcGOI!b5)Iujjp0~uj+sL1?@U+K$jX0 z6APfbiF5H! zf-~JpH2PR(XJ)<_8|OOZTchxk^K@~1;V2Tt?~8q|fa#4Gs2kyie0y$73_8$(DWx@c z5`yMnbnf5QcaOYhBSzRjs{dD6?3cS?DQbK>UxP+|ijgQQtIZE%umvfuZ9DsTvwLf? zRysBGsSy3mz6l-G&4+>*xra>5VQ=AheFk1HW`QLx-K_>pvKx>$8Horm37J~2Qb8Y( zaCw+td`QY}#c0oTULDP)sOc1Gg;}qu>M9u`kW14u%lt>Dt-KWSwD`2Yzd>+k$fb~_Ah~U> zMAhcYEI1%8e$Rijndv99i2T#Vg=1WlEMvo4oHr+zn~4>BE6L1VWN)sjt=eDJMd=IQ zEpOBH1s7bDEC!oNjR8Zx@{TviXmv~^w1^3GP*iNLx$Av^-FWq)8O4PXW)Twi2gzvH&4GHi4*b&aw*~-LQ!rv3q&8;Im$~EEZIbj} z*tJdI-i5A8X`8eQ?~27k)e?%cHrK0MT^to6+3SQk^;KxS{mNt1-Ybrezku7vVk=3! 
z=tnka^vkwxmDKangl=}0fp!w|*$4Q+S}yk#iRa8(VwfFj{KkEm1+W7tt~;z;zH`>k z|BIY;1H>@DlJ#V~D z+5C&M{-u4PP8vvEize^nQKK$^dyhmJN3Lfq(?D3;wWoaYNdfrk#wRQFSP+_%Ac&?4 zpF)3y-F>L#{H~9t*S+S)(FD*ZFNwH`G}}* zOnxzdx;gjUU18YYD5r~N(gn4&iGh??1qy4+Hx8cYvgh$zJy4OL&{wUQi<1ha$I!Q$ zy9Zt(kII1Z20<3QYTNQB3VTS~GxtvUAM_K27yM)ATe;9F?pp}L(051S$lZC{aBDc(p-^2@;>{#a~maiLI0)}7t# z9^UpC0ITMca-^H4#y-f-$%(dhKbi-9AFprx6ydz}1-|Hg!2895XFPa)@JQEJ?;J=7 znKdbf>+GIx0`F&~IoN8PuU!(lKV9bP>-+{FXH@wFt`IQsYfWk!;$kPtz!*g=Y4nHNEetW*)*}y z7?pq&X6a0_aROR_q{#p*0M;Yu59=+2k$+`mRa)6x_|ZEJRQmv*VC=u@+jw`UrDdrG zVCCra=|Q=y5#u*5k=Y|8t4VGn;@Hd z6xtdqRvg5@HYrapuR4h7)S*S9BDEu}tF4O$BRxYn#MfRMRm6}5BPA~c1uY{^3P>P- z_?On5pjA^q#J#%Xy%k8_tp=*UpEo03VvywsJ{g?6A|A0h^eC00kCnpIqB`IC($dn8 zD*&LE$KraGtQT>O=>dnkcPt|&91&(>lUI)D+;b5`2hSR3?z`=f2SKwI|M;Am1ov0cLAbQ=c zmHynW)PmS563mjivi&XOnkt|-n6*G5J{Q^YAr*nfpV9OzmI^{!$v_1cQp4fuNkHc? zu2GbQC+iHTuX8%-7=Nb?IRTqVgP1T_Mclm}a9J*x$%;WD}`1j8BTm4)EKCqX+kx9B^pT$8h(nitA&5HU71 zD4c@#Azj5{3WBOF91>X4f!koBT6SF2fIQ@^xH#jw(_umbxQS-OcqI&P(j@QpqAn*C z0V>#?WTJGqR=um4aaFGm!*gLrpE-Sh)84>J(V*%HvsRDww=HVg&Bgx$Mc2;xRO=eJ!kA-Jk+-k7F-mb* z+i}iH*dL06c~tV@vtJw@V30if@)$%l!@FD2_Ndd~g^TPdir zW=JowL_U*LShR?VM(Jwz?>cFut&{4QKoi91GNP!f!;oRLj_SI)QMhQPm`}iwD~7*( zq1n~g)v<`ALRr^A8F5?sgQ~xvGn~P55}X3EZ&C}>t5&-L$k{q9E%gDujHuykHf@-6b)oX=1!?&j*d6xGcjb zIH*OlB^9+aZOMR%u0Rljlu%$-s~~$;4 zPwRQb&=T)pDyC@s9rlfML~z(tfpw;&5J z3SfTasJzATV_XA^AQff@ozbtW+%ZkwK08Y|ZE=C3SlFl6ed6vrrCu2m)pe|P;oKr; zJjRF~QugbWTqA!LTNw?kgdP~SXNsgCxs6(AFCPG7`Tn|P2*Jj~4wU0!Z^BVNLmQs7 z3li&45D^6m>&xrQ>3l=(32poi`}Z<%&KBdiefedq$`6Me+_i%9o8DH{?=w0M_2g`DuX|{`7v&hG z6KI$m#7Ra0)j&N*nDJ%D8TTIfWAzw+5~{#e;;-iSTsjOZ9mp2!CyrsP`l(##XHQ9X z^+P4AJ^#YkM!ff06Oj4UR3&lf^9nI>{8f6Tk>~&Uw3B_7drEA{?AVev&rd zCNsO|A7F^>56n?Ps#&;bHk*la8h54mnm3Ah;wK%b`YMdGa|jJdMmM` zHrHGa!qUkOLWz~4JBYz^_4x6A95?wCgo}iH^LGF2eMA37OQN{(%WfCHa!oizrlSXx zw61=Qb^yv33V~BK>LZ`g?!-qwF|!hKqVGzFpFI9V4u4%j5g)Evl#+x7J;2Vp)&|f9 zr-g|1`dv3hJk-Clwe-VAXZ^|9aWrt3Br9LUjksJfv>Ot8h`BiA)jw@>ZO2_D|AmPU 
zgpmY|4fMT8?!cWOEeFBMsn%LvG~5a8VmJc-NS}>S^=0F$@N0qjPe$E``S$4j`v0cz z1N~@<_OytjHe}Xe)j&K(pK^H)YkqW}w*p8_9 zTQRw~7CiPacWn@}XlR2GDZ(ODWH<00fI%d+(A0MbrlGo6DhDGBd)+O?RX7%&BjyKO z5-JMWFHLt+D)tT;Giu$t+Vz-0K%|W~EH-qsVV5NPC_I8TVOhQIZ2nz3QNdt(l2Ifk zt01UuJ_>c&rkj0nT+v|%bZJuLvfeMPg_j2N0*^9kKH7!3;N` z`}a87CDtFFXgbi*yyK#!Jyi`BUbqsT;Dq}qwjsL+W@a|FE!02B$j|tq{toqeTbTWS3Ws+C0^Apy04j0>YC8(jQozk71uG7S}#+x|j~rQejAKY8>? zT!>~L;zCZho3~b~HmKqQq;JZVm!CX&0>JHrkv8V+8s8P>3fS)U9|~UR3P7y2N_fOJ zATmwwMk`4y2B?JFh%hKOWMp2%XoP-g9;h@|A*w2{eM;Yo`{Q@=b!ca4X9a`D@ftU2LO4LN8xPH7F+L(LF7EWX za}&PMO7A}@ky>P2%mFzhrv<0mbSc+`tkF<84&zG$FP{T&89840z){t8bcB%_5?d0f zFgsNZ`8u*u?-U-PBlpAMBy}g9!owa2>jhpSy&_{Ay~28}oV{y2;aU%_;vD zF|%Z`hioBjbv!(@fmcAfaXeR8kHL^4u-Fh$Qe+_wGE)Jp9at<$`JmvdVbZoNd?$ga zDMDnx+~=k{mS*V@7m3X_<@Lp5?;mgW2#2C#n71`d`QCFj?uF|h(&;>m`d|*70X==f zfcJ0$uR!zdG@se?d6M4@$m=u{=v8QOBe|(zTLLen{|L_9eTphxMg(=Gz>f=m#)%cN z+p!kiiUlEn5Ycn24Q_QeL*2fwdKV10$Y*+0u^W1f9%)jyc6#mxVh-m^K5k7+Mn;Ou zE4s?C?}y@{RbZourqC>C-t?h*Ae9JUHspnpA-WdAX=F;7D5v&(!>&GPdDQ(0`=Sjh z(fr9p0MWQ~VW6lX>(=oH*&d zQX&-ODil^VNN!XwMfRL?Mw&9W2eE^IyM;$ZtpS>>F6RP`B(eG|6^uLHl6U@DKs)6z zK&iEwP4n~swB??Z%9C!CLCLj*O$mmnO}B@LyYGT`-WHIWg0M?m5grUv3%OkoGPc5V zu$t{I`$3DqoTF}T(F-{FXK*o1a%wH7WGOrZ9hd6bdW2gXvK5_ps7LIFt936Sl@L^* zDWD2$khD;HaFQkWf+uGwM*%{xHSyrKHUz3+!UC){@EoJ0(m)a6%M6EHu6TMZ>}$Ac z>u+^nXHUJqLWso)$=dNMp<;{dT>*YFj)9GruZ6Ah{G6S7E@@JyvIYc`#xLIZdvY z#&P!pY6n{06^z&5xSy-DXyZZyiYt$_f(X?G+U1a*SxJkFCp>RPw>sEztD#}ol`B`C zl8b9#QmTF|j@f$Oc;u(vnz0tt7lhD(&!rfv?i{RwEtrSnPJ!BO#|}uJf7h@OAPX*+ zSQ&^0!jw#W@j#eCDwXx%K_NC9O611wz(Je9WTg&UJ(dib6#&sw>v@T%@1HaRk$b?g z$R+zx-Fn6H;E=JSG0ww9_=aoGl97YmOTT?Xi08c-n^->(~E756ovb7b??-g z4RO$2H$tMKiMEySJfsN7sD;X!_Upq%QYNY`BgAI$MqmtH)^5T*#atlci13Y!aX^Rq zI>UF^cBGd>C>ES+(DVJfj$ZP?kRzvYY0oxz5Bptx@%n8=h&T}2aMM-U@3}gq%25>F z{9K8pGyH7W;0>ZOGFCxMyA-f+0phc1SUpSF<#qw7Ajv{Ss+a{O(2GTW^_w5esvPcD znV;wD9X|!jLZh*3INa>(8{)gY<`K1iQLv07aqXm9=dxJ1Jxw=*f`@gK8UE#2jJUYd zG$s?k^(rzxkHKI9{8>Wc83eHq_-EDHqUEwJ?)m3G<2y3PRxCDLsMA+V 
z)jleGiK>q3|3loHz*D)d@8gR!sWfUpX;2|4MTUf(q=jV6R-{OSp%ltYwVM=b8Ip($ znJOD4WynsYWGhoD63JXqkuiSPy=WiWo%Z>j-}m?b`@QH=-Sga+{d3#N}Sq`PR3@8w2lG zlhA(sY++`h<7l+L@5nXzy`Znov*nDrcK!N!Pfxx835HiRNZXi%s#HXnICiWzVg|!2 z{<>ciCKq~xxdZ25<`HEP3G+NAL6D1NB@g8O+Or87wDVBf}OBL)y?zKP*I({18)2XbN65+IF<92NQJ@UT z`iLi|<{jkP$8~yuAU|Q&-$J|G^@Z{KBajOoZ;Ow;T@$eRu-68f=|v!o_mXWLtv}8+ zT(@SQ)T)}ePdhdLd@+HzaSN?Vz6{B4Y*r%`k9>I{T|I-+uR7LA$7#GSu72Vqqqyu>wR5? zW~RZKHSaU5NQGDYPS1WOd#(iMYm@iN56Ic|Z1PZQM2?*0Fgw*9+b#y86ksJ1&VuIb zGqCQj!zh~GhcNJXfzK%C8=>bqdc`5k{IA;?yl-uG?-7O;IDkJ=Quz^mBfz*IcFJ6k z4GM7UuCR$jqM;Q@V(=`EeJO$GnL-*+M7e;}3Z+I7&txEa(D}7jG?YYcu#L_Uvt~8Q zYO5R7G@#@tf3dO`qCJu}m06W;Ai@>L8!W*+(M0e1C6ZS}mM!m%0My?Bzvh@zlM9&9 zFvIk#%*1dv8bNX7ykT@p2dccbEosOd*3;9NJFn@3PdB2g9gWJslbH>!(eN^k%~H&* zql_u6e3FMgEx0-Q&&st1_3N5{_c{K#UxxDN7q*y|q;{3a;Y3VvP=$#GTJj*ou8>I&-6v*f%fdz(%?3~k!)fbY2nX;UoA%+hI;csvk-AIb zF>VSKyMZ}NG-M4h??LLneLYY5BrUEI=e5ZyAda{tCFmShW?Gja9b-MI(*>o4+T-9n zAv03KiGyiHe`wc`WKIZp5A3`teQm-etVwnQ0MbCkU|(Yr>oQOe)Poz)elOmQ&fjy} z+PbD=zX4#OBP&Gk=#;SUj!%2aw_o&UT$f%litjvzNk80HJ4(n5! 
ze&f+2kte1)lMK1a*Rmy9CAr$YynLJ=JbiO6Ex`R) zU>U6UW@46u$ie`aN>1npK%b^r;5_D_h-OZ^8El5Chrv<}V3y5tsN_(svjR(mL}p(( zRC{^nkNGVPq)b-Fsl8d&kR6x=A={q~-AtSqH<=~_*s{|kZF8K^9Kj_Rse*gv@kK)h zK_>8^<3Vxhagx|qi16yu*nD?KYN`yA#N!374XNXlWB`QgVb)&gQT0cGf{y{9dRdhn zxH4ptn^8p=3mB-ZNEVAVPtFa;6RWrTH<7{Njv*sJ!(u3tsr1Jdo%JO##hRq;(H_H8 zlm;8w!L}v&SH0f4NsJvo1E+BSQ@M=+_q70siV*^ev8+Z2u7tV<+%I%7hmBKWVmz0} zgPPAXU&OEDkZD?HGG=TRk|egU+7A)O7nU8={b$x}I7v&I&m|aR%&+@;4^mVn$bm0H ze#k|wH^K*@(ilZ-Zi+P*@bEF+&^N>m*J zaJP&{O!z{Ojlar2Bm>(Ms(d4fe?bWoj-zX+Z1q6RXTLjLdX7BcL->fNp)(!l;+7|o zQxA&RIP+VK%G4i<*8T|dUw0wuwp~LwX)U9%o`J%PP00dtfTh>#xr#~rm&4Ua{h;U) zS>Fd)^kU>WS^-1Sqp;DVaU(@ZMwtYg30Z)K#>15No7~B;XYvA#r=*T(<}JIR_`Am4 z>ebK_;KouSp>ca~ON23m2uTTqMLN%cn!wapHDrIT2)V|igYaSP$srFUXu3gA)Zv9b zr)hMuw8=hs$Z}?E%POHBoAdyu1AyW`VEf2;|4|=vXa{PcL8ur=yTYMQqJd3`fc6TC zAH_5j@Z=JX@qZe`-xDx|$rECi}F5hn#qxMy`1->ryCqWCsg zP6uMthvY|3f1~CD&Hfr6d^Nu;V0!RK zhWEz*wCj8tphE)cK^BRM%`EuHo1>5EdesQ*c!;;9Qh33G*3*HkSuYt z_hSn@2fqPBU_H?bXow)`3MDcWnJQq#K#X{)S!QJ^Lc0MD2g&R+&8YibOr2kPkKN_1 zLtbIM8WoUzD-=uGUz|NXq!ZwA`_}^}cQ+1A18}BwOXsnYp z{c7Fc2q7wyhggU+q*YSc+U0M5BJljf)yZx)j^huQ#{}jrb8rmDT_a6R$EiS*%NKJo z9J$8+GL>WbkLa*}lX$Mo^^JlZ06?n+;F}DHk)Vm8i<}*t>dLjuAvY)}6+44w7)XnZ z#v-xPH|1a%tp_$)2rVqK7}oM1Qgw!%v~1lGi+qqOoTnHuT*!o~Om2ICxqhb=A}gvw z-wMaNnKao(b{GJTpFa>Ea;KCgPb&!Ybq8`U*ZnS;I5YZ|tT?UGIx1QqFBSzd(eDzE zxjfE@{6tIaM>j+)EoD!X3EA-6t~$mKo5DXTiy|Ph8H;>99-RgNYuY$!N>JaDAThXP zIvY0z(2yaCk)oy_Hw((8%ZYs^hXHH;2))#-rnX$nLRNo#SoissF%MW7WxR}h%i#X` z;EO}Bk684-k!ILI`i~gc*ymKVLKw;JKCeLxz`?dzCLl`^N z?Bi*5}FH8s;!dX&s z*q?6LXwuhpPAC>DmCEy(=Rb#^5+{`dAtpHM2LJ*M{;InFafb+``X48~ZF7R^N6kZG z1;b$m&j%Qr{SW?mEUuQ#eU13MKuBmE3sA+(kRb|EN)9LNobG~HK^-dGWP7^`a88&k zvkmc_4C(DVbBN5!1oZw5`ePf-{D7`e<&dlgm>NCz7!pUWyBe5)7eaNyxYT-o8k2p% zQLXfshSCp0$k-f{?1ShxA~(jFsR1L2v$A8jxnb5onmH>!$fyog+nTUNU~hmN(DM0A z*shdfPVU{qVDc9P;cy94uKM(Hf%B6YOTx^D`pPa<{0yv`MLK0#{$7O(*v z4qXTbllG=C1YofOAQMdDOK29g5>N%yh30022!*0G+X@6E4r#^T-s35VJS;oZn8a5u 
z4ejgMNi#sAd)*+6PTq!)I1J|ViQjdGba|f884mC$Gf{0whB;vb9|-B9d+jK1BWU$BrE>r6hVy0_g)?T-PI=yB9Z(fbs_M^y9mXGt@8}li{GxFOK!HP-t>>u1}nN}QZ3ul)3Yh372?b|nbU7$ zwxDv;qR)dm%&L+jgZs5Q`aM@F1wQZ>6eB618?EF3mHOk)Z&(_Hc zSn9lvgds%Q-=_YlbEE#ptb$nuM)W~esKM8F7l!}&Buv+|bmN3*0QOXhO)t!JqJ}?Q zQK1b1pD;**#!0Gu?R?_?jIwe(l#zNa>r66v=`Pxl#G5G z?v7dFCj?G8dB5d5!b+f^@(a09;b-~~+Rrt))1EbKJXbq4c?l1at0Q$JENdp4vw+Fc zt7=G%zSc_LDL{UU361$on)$&N(b_pwXLIzf0J$}I9WdYOpSxhIVX*ovdsd%H?W<^SM>q0iaSRbI$%;hV1Nt{<5B4Ey(+tny zZUo0PU@Mja#AmU1`|#`^Y)OId8y1r4Qc0MW7=BaLi*NqpB#W6Bz{Ddkn5`nRp)%#k&ZR?sK3gK}2O5M5Cd^`met7{PycaL{4s7KoLcK$%Lq{FQDb zl%F$}bcHhY_Yz!{27?UM`LRdeyZbW zRsX}iC1D%qzJ!a5%nAG`fro`01*6IUk@4QkCA52-DAApx6Crm<@NLr?R;K0F4xz8 zQj8YgQXci_O6=iS#f2n$f8%Py)9we~gaIV4>_(kOecc<*4+f3(ql?0d1g!#Z`xhC( zU30yr=ALCF9^j^VC3fF4y?v)A&lR{Zx zw}wC9PZ>V>wqjjECWU~i>jRXy)*eW{i&PE^HTdaj7GRuOrNA;FrTSR` z-w*RUj5C>s0mAWnnsHqIl0yxhXR;PA_tvN5agW(!kgk}N;yC_l)w;SN(y$tr3?w&n+Y*vbkYa*&7d~(H}CxpGS7sq8&NHV}q3>5wGH8-I%KK5Czl;rhi zK|?F3Z!efOcW!RqWN7e#tyk)t2WsJwGNe6M_G#2SAcgpu&w|M`WdHt82^VeA;w1k| z%O8e4(0&?A6GG8e5Mm;tpDk^=`Z2X;FwvYLT&(OKl0{ku%i)l=Vu87;GfhTSWEUBUngVL;nwK16KO<9ljNktuAaKm{; zL8X0*H}8Px*VyQ6hS3sem(G|mL-RPiw!jSBk4{(}W+h@sA&|`QAo#U^V#_!zc*BUb zUacU{zUc0OaqUy|<~Leh017}e-DFnXegmmMlBKqPDu9(utC{ z6`84Hs1FFj=z zn1${yt7seJ8i|ib3gMG-bb`ld#FdvN3r#}{_9tCBvj2N7^PV>_G$gH_FGUJ#ngBNX z>uv|M8el%L;p9G~(wXpxP)qMcu1SrBO$bS_tRc~=CQ#=~K&^-ay&NT+8XW&B-sgDn zN5j1)WZ8pRtDNd-fsxA$vyjBh&_!QoeA`ZY-MSN0qu_2k-fZsg;vUldkc_QB?0=HU zhvDpQG&gBr7J84D5Lp`mI+S7`ngbvfnLBqi*wia)6oxDfU5Ju-f{3{uP_)SBuSh9q zu6zC#MaLb{b7>=kS$dWHvO?W^Zwc)Nfgc6C<;nLXSaGMPW=W0b=e_RT7r+g@1Y-h9 zli@6M?}|PPOhE>5X}Nh6zUEMuvI&iso3oti=b$qp0!J>OQ{suQ=vGiYQe3-qaDtY# zMpC_#39Xm52mE_?VZEUkF9G$%kHred3XBKo3xAy>n+T*)=bS?W%o|A2fmFG`F{Aqc zkNS;!_FTK0olOlqNH@gqd~%W36HTQ|8oY<}l@STZ1NSThB8q{~d9-(-6T}vN!>b## z_6_OXWJyS>`0eJj+zmia${J=@DiAW~Adi79Ezm}bs^A?cJCmc*S&uAOvkuDa zT@zLn%w~8n4s!388+`j1jFyoY+sg6@5104!{3J7TM5Pmo-&x~tjSBQ|f!e1yPTOE8 
z)RFWezIUYmdzqMxWprpqYl<`$i`;Km$|zL_NUX;FAN}J;K4AbW1DVKQNTpO9?$rI?+ek)FpBXyf-P~Kne8}_b6nkN_M4|pe%vx-@!ODVY?hBxyC(Gi znFvy;TR@+mXUSJ_SXXfofv9o*z>+^`ca04INPa|hi7#(#gC zk`7p>7R`Z~)GSSLOw|2 zVJgR&)gD8`X&%is6(^LkW#_R!-iLe3T!ws3_Qz_8sLGb!)zLDJ)=&k}wYJf=`lqcn z_=WzP)$pB*tIhDAMUF!&ixWZ@Gbs$oOgb2L_mbF)7e{YR=Tj5s)SSm&Lc-(o`4S#r^U4|*_E z55h>ic^Zt+iA$l9C4D#q9Mtm24WU9*nH-blA5{sR7(Aq@Q){heGjJ;={JWK$_kDy7 zWP7^`AH|t*XysVJEgj7l_R8@e-~2oUmoe~FDfXjJ{_Usqx6SZ?s$t)sUB!?!W&c42 zM}Olp@qA(*Bpqz`aJvaMp+Gntt*!3Or>!QPCPRiXkO$-h{ynH|1@Q-;47 z_pos;`|bYSwu%v^BaC*{MO5-R%Ho_0=%%s%;4Jw52C@%YoLwp*a#8F<59yT}bIvr5 z$+Yv?e{j3Mj{^S-%V@$oJIH=47(IHM=5e^-{`zYm;&aCm${OiUBks_586?{~TW(b9 z2C(EIEQ5TJtO4z0V4kh1%IKKI%aipYY{lFO+WZ{B<1BE9*$*7`rZa|)^IpqZ=Hj*# z*^JlBl_{&uC?z5z2%|FS9QX{RO%&WFO$x{O)dnI1hW&D+7#JCMPP%LN0a8;UrzI+w zkd)FR6BSh>X=r4T+EuQcXK+5Ugj_$bA9H*jNBLrr*rn4pU&peI#z*_N_cTvT|L9EF z{cDH_OZrZf#v&C5g))G2OfbZi0nbz#=mwE)@Kr-r~ z20AkOLR9*T8XkIN@yEXlgD-5i~DgS0&j##eVX^3`-U5UOy$bmFeGa;Y8qPjr8 z-IcQQ_8X_fZ?B`y0H7mMSs~pvB(swjY7zk&f^4=gL%T7?yBia&6?IirRg`A0UcH*= z1vn4YS=45L*(*v7XKtQa#s1S`U@Hash)+{&0xJ|0pBKp$Pr$Lx@Y1<-=~9rqV-j$! 
zO4q)QkJapHVRYcsSu`97W@H=?XVhS*b;OqWCGb;h_nn4rb*u^x@odzhdB_ zpxXhhDR6UFCt{L}Miu&tsyld4z(H45n+-{xdXDah6@|3xM!-pOLyveA-KKIyy@*Zwu zVOe_2^8HI2cI7EAZ&X!<(8T$ZO-N;&mIMwcqRzHEf*GQ@vKEr}#XgF?!c1Biy&$am zKd&WrZ;QgIE8y;9c#?bcXxaBNS?}MxnHx;W&52Qz+)yq}f9!vhSNny|RB&MzQD~t` z(|?lzEs7IBpHXOCboeE&VUol)H#fHsZ+?p%$UYap?A!vKf{Gk~&P(hskYCAdS3}L7 zTL5IBdR>fWtINxMgYO%a8lRHsu9N*pY%|%Bkevt#5$q_^!^^irQ`2h-AD`39P{%hc zCu}nk;pJOlRqy&9EjJ9>o@sXk+^?DT1UNw)^v3Atk#~123X1Y5I(72oIU#^5531HN z4xaK^l6QZFH4d)b>|IBv!rxB>2!D)tf$jc?YHZ zo|CS@&&BNDC@e~PjeqNWAqsIKSE_b9WU^4oDb%ch|FLX@>#zlJCW>VZ6BsczhMO7v zCjp%ZKRgXS`69ZIJuQ)?LB*a%?m!i5FF_g25_`HLswK0tmLEGx4P)%yfV@55*MaI& zkyCSCKkoW_&GO?ypafmq-~H)b`&(B(15A2g(#<8;Iuk?DlM;ybud~e;BjGx+iM;kj zPV%!5H~gM25ECO0=QA)so$vp$g?HAh5610MnA9nLQv|maR5wf(jC!;hB@jN=A`ax{ z=YKiS9dtk=`*`2p3&>xEB_!V6NjT@G@wxT}tWRUu$yBFPlJsja`O8Qu09HIQLn_x+ zv6!E?1{K?UACWn8G(e_t(8zFJ2-?A0iL1u^Fb2G9=6UkulD0QVB00q=?|+%8V3gf0 zKfxRLv+bLMTseBrBzUE|r`{JFm1DQe`;l~xUWr??{{7cAXhn(Hm&LhgEO6IUiOp^v z<=ybsOmxz^oGB+leT#DJ-Z+}+cV-ww#`TCVi94mOebGbq^4!aAnmyi?fw^**eV_0T zs|wV7AhAzFzbX(x)eahOle*(_>Gvn+y!HO}T9b38bl}ytFF3ScLxr=!_D!H@U7+WK z4y6BXHkiVt(VaEbzCtQ%*NQG%MoxEyh?P{mGYcU%)&|VC9yPKdqiU=dbBHNa&hxcnNm-@Eu4fxO!9IiPXN=hbo4T5 zid%$9Jbu`_o+|nv4ZH$q%_jwu>$}dWjfJ- zw&}eSMcyKl`g2GuXPP#RE)Ov8=*dPRlr_`y!|BDhAM^U3;y#`<#r}48(y*C_j=!uI zEMD6ez9L+#c&*>a8;PH^r+qzj`-JYL@6yCT!`#^jrDRNwjV;V1K{U9%pOPAbef79I zH9c82B>mON?lX}z!TUT@{!g8SAK2?xOKZZPT7z#!%b1|3u|?<&>nCh62AIZ*M3k9g zi9L2Fv;8Ac%x^rwl}&SYd!wM{>;}%Ospa@{^QELpx>M!LhMfrz+g(Kkdvj18BUqIx zboyib_iFvAn0X)pH^8fCqEiSU5U;c1vSo$c9SA!$LBlODY+I4DZQC}B=f7`DZ0DD< zh;(XeY=FQuTRCyc=9C0U1Zs*#?VSxNBek$a@(>12EHKFu`bqp*d!~pZEE2euqhM(I z_AOAnJbaj2aRnw7&{r(_uoswEMGp7@;`SFNUkGu-isOEt3~5A+=hp1b8GNr8n&F1I zOF`h4+yC}NyQ71HLt?wKvNEAd&7T2}bzbi_y0t&5eIG_Z)L+ptB46!#0J1N0=FEwn zIZO$XZ%2-ee_cr>G4E@_ zfty5+^$5cAaOTVeWH3y$W zVk_!S4hCLQlV47A=?{;MrAv$~make>T(z$J5QWIa&inu>!mSV=!&Y=nyD`!H(u#2T z7%zXH+w~^JD#6@;2QmeIN%nJ0t@lz|b99)^iWsj^Yl_!0b{=_oLP^<}-4M$PIULNc 
zx81&4HHX|~1Y>Q!&*0{WLgZzMQ$OPP1c4GU3q;_I|&Fk7F#AH>tEYEwP#5kv}CYpEu_urT9 z9L9l@U@RN6{lRIlbf}QQ{{TQ|2BqGk>Lx2$nSbV-?eYvtmfy`)xTo%b}py3exAxnmo;1C;yOnW;>Z;gw`tlV6|V5OARpQV zaSI5X9Q0MmEtWl!;U?+QF@gZp6kC}|S+7u^QlA;1JhyG4@yh)ZZ6?v8>pW4kEkL?& zHd1WE3LDX2@K1ngz25qx;5g_lesHh2Ru0I~nJ0=x&$BsRl|7~pXxYwcn=xevcvRo;=qKJidy1BV0sD5ePWx1F zC(cxXHTZiY_ZQrGh*Pk$-b7zQ@R15#R=jBK+@6Ii)0PS;4ynKd7lDyDN`DO}5NG(B-I9ZEM=;+iqr{jB@iJ z1-OCxPVY3R#aMd+lGiTsoug#DjJRBm42i8-Gm?LF1NEl? z;&UhH-#7AmsLC}9m!PVjr@DXQp%~m$1_0!VCt|z~o%&JBs#0|j{lVv74Y5Ier>)+9 zwF4@h5QwGwVe6uWOq`{%)T{j$qCU8%W(zz)MC7CkSMXYE+`I2rDq+w|i#}H8v_b!* z0emQZ7ngJ%=oTSwj770Slp6AT4f|zSboLNRP_EYw5x?Kfa(GCD9C{AoUz~t+KvM-& z_Gr>rCxJl0-jM3G@31MS@zrcipv^dx)+_w(#a-wsRiJTA{`y|~80B))G4gk2+MGXZeF!Z$k zAbX3ePoH2q;a~0KdEG!dweo?i}SYxDlqK^oMG3CzZrR`nb~HcbB- z=L0JeaFyu7g{nGLdD@_QDb{=e@`TI%q@b8sz^(KGnF+Q2so63M&C3B!z~BazKz}=B z@N43QoCV48hnt5-;e8;i3G%ufaAW-^4H*%tTqLOuY8}SzT+6|9-0!u!F!E%(#L(e@ zrLP*I#~)qC?-*ZvBM=E?i+gq=eb&xkF?faXK@6P>w&A zs=fDKQ@~_mY;0^}`K`|)H#?z_rJfEghn38u>o=jYiF)wt@_K$I9vN7ylw^Pj#!rIKkdK z)&N2--MjaDU*?uB2>_&P5xjv@ZkCXU5`-Go)LFVnuhHLlD-jia96}>NW@o+6p6h2<5t<_0`R>+pjR7m3{Vx6k9X9Q* znwkQ-XO-j@_$o#h7UnnD-kt2!m zxIkl6uo)|_H#jb6vhBw;hk9i@y92!iM6PVha2lS^zSy`pt1@G{o7WFme{>o9n^_!p zsILD0zONs?2KP8>T)K2rQCLXG#yd97Ra5_ulkV}i+ic_BnTd9-lv=O=-^TUn>g&C9 zGz)Xb4K8|A=XJJLStx?ku zb8*|Bz4ol*=g3w#dValo=cd}{+6zbfpiA4vZ(|P*ps#o-`?PZ>ES# zdD_h>GYt1%xM3Lu=r>0~;wY}xQ&n zh}pIwPt-J_He!NnHnk)8q2X zXxC7W)T#Z0>srO{GM*0p$1RtB!|+K1j`OF~p(BA3L}fV)S0E9I2S#Q>HXStkgSNQL zOi`_xd(^8%+pyY3$Rq9SUsRwW3GuhJghMkbPCMclz)kRr-`fc*DZy&-z(3!fiEqIM z82nW^{(p||3>5BAz958s4UhsH=4>|Hpv?LkAC+!ROGJNh|E>NbHrC+W{@dE7B4E&0 zKN(LE3-<_mQfzekpYO|{-)HeU3K0L5O z5B{yR`e(l3-#*YkRuKP8Kl?@XxV)PemYJ(1*1?K+ zpYS9GNB|W%0p=7_dDad~HL$X>N~~L0yMTd~T0c+m9RlL>&u9tDO&RTh0OKsUIUouF zS9bci$}K3w1dMcti#|xQM%g}!VS`>s5ER#&KVyJvCH}=8l}_j?i9H4>^*BJNZ#1GA znmsH1YsY384nS(`7eQgf$ePf4jgz4zaD#|D3zDoa+|lP4cIG&{%_I9zCCueloR?#> z1jRjOAaD|j*?WpecLT(`KY*3Y0L{AT+Yb?=IQ27F0Q(NQQGoZ-lcVTb&tOD 
z(tEcgQoX>V!pUHFlw?ycbL)*Anqf1H<~#%(IQT0|@NZn)e|+Yve=p)-DCtk+=WjX} zIvJtyT<9=8bF>GpwuWvq48X3{Yfu~+lZ!FYhI66RUcA#I5GESY{}>p;@TA@2-@6=U zJ`}O>PZ*2a%R#+MKUQ(rD6Zazs!aH0AB^f7%YLzM z^snWT=!X@$k8qFXtkJ#r^4FK8dQo1ojw>H0K~h5OF`j-j0nvX*>1T~4o*b`ML2qIB zUp~BwhuBC18e6PLpg|Zqj>Z3{Bo(@zm*EvY(3D{jhYJ)a z`q%z^iW7u$SNeaZG~Ftkocfsck^ct=vjabPpJ$I9ixQrs1o+bB2+tzZXKN)3Q6vL zZtQ{{gc-0`+MX(p-woCf4eg(__QA)cS0VWXpa-x>7?}ai8I5AjJRvYj9@7Iw#Qf=K z+17^a?1}v6(=_o^`-O%yFft#D86UVtXf$WEUzCGDan6ZDS&){GC8r`x#Oh&`qK&NV zi0)HcjZ=abLywpw-E3%RNFpcH^P(2`p@)aElnIPSz8HuR@SCy`kYJ$sm1J2n2)C2P z7@}qCy~!KuFFJqNC+9MIE9`ny-MhFqz18)8l- zZsXM;!MP(i4bWhLa{4@QOrhq_CU9+vyO&~3eLC>GOnM`U zK!cl5gnu|Dw%H-zEbKOY~A}W_5$(P!GL@Xu4l$KF4E_ z%YVAagbuV?D8h88qQA~in*A$2GMQW8^g&ftA#8v-W2s$Fm5A;GUd?qhok=3lUjV-f zA@-|yBQnDv$)a1C9xU$Y*bte0;mC>?AHzw&ibijk6FEOnyA}2xUNHkr=A18^H3+qSjD#x3<&X`)MhF#4PA{FlRy(Sp5*2Ek%bPu z@m0@H80&01{h%5BrVeg25J-@Md!$686#w2 zACt8ld_xU(y#us(1I>Nh*_M_THO~n)#}Q9Sr2ww!bX6MXR( z53H>|^?>*N$gJ}aNj;A@1Wf?&mewVH5GS+QEfs&OXRzk4ph)B&et(R_uh#Uv2HJgUwE zd6k6KX80oX5^hBZPzGxqJmLqq%`-?{1Wywsdp!Z3PoG1$Q3XWIbLNK7U{RT`GAdOS z*ynuS3JoWWr8)0xRt&j*U6KG`0B!fb46Do=y0Cy8r~8;VR1IH`m;#}QXaQfuJOdL~ zeQ;#O>=haLc@Tb}{m+EJ>6M=(OCudoV)1CQwABq1irW|DQvw}aR)`7)HOuA`L4ve1MM(|NG1u#AwukP!|j z&abN?7@XN8wt%DA@v}CIIAZkcm{Y58(a_XKS5&-wj^IH`x%cQIG~fnLABa{`kYGf{{2>&4m8jbgfL_Sza}8 zI?2;}-yhEvv^TSJEp*q4KwL{t@ZTu7p+}AwXXJWY0?WTU_eOEaVPWxHi{q4BPSw0gv5Bc-*Z`GBb{M?mZ&eU6vuj>+&AuKzQK z21G$B(?)1P(zFOYAu3;SGgZuyR#Dav_1SFmVO8iVHM)%kObmj@=L9{icdo8tEh^DQF#!nL>@Jp z3##P7H4Wn$dmdxLC@74~Co|)X*b0l}-N5x@o5zaC$-O*xiR0RGtSY7sJSNF4Z=*vG z@yJLA2Sp_0Wiolj+jy~04>SbqpsK6q(1Y_AI`m>>*Ac%&d6?1h{$h<0mWugZrfB-C zO?nF#nF3!z><{R#Z{9YwU>tKLExE~r3r!e>K34=}C>x=(BNy?JWIO{U%vzC2chbR& zx-=RvU;hcT7d%0_xo}o+5tvaaPGcF^Hf>lM!37wzyHP{qw9mX@swEqoleCtCROQ(hnlG^zU-2 zlSXVa*JChxkKZO8p~)W)iZU4YQBq)BNvV^~;K>^8sVOA(c)!df2HM8`2{%K_$jvQN zNJJ*kkzh|P49#+QaDe77%8WxG%|6Rmr>^cxX+s&2$R9(JTj=O-zk%|=7zp{wnA#PJ zMM&pS<=l!hiiK39XdvD66GTC~)q|}a4e`__Lh&dLVrChV*i>~DAtOk^PzIBz!UEo0 
zHqrwxn}a7S&csZ`wd6w#@tatx6bn(4VPV+;qbd38Eo^-`m4-v7C@HU9e%2F_*NWQs zM3~W`Di6yKq1qU<{7ml2Hb+HtFZ*>?Am!Vi+rB~1&xZzSC>|RIfG7Sge(50(13T%x zyK(tW?0;~I2$7_5jEqBF{MJ2#ZJgHzZrq`TU?Q5hw)wmDd{o7cksZsyh_7`VE_g|7KzVo>m&KFTQbnwN@Mc6Rm+IBf5D z%E&!AkP{{oP?xV{=z4kQOLq&y007V`5mRhNxP!`0&<=Agg)HAbZ)p_eOLdtPJH+^so2~5KQP>6E$z3+4m>5t~tXf0kQ4`(ky#fxi2 z!Z`8y9Z{sn5Ihv(mP?Q^+t)v>I^Fz|Q67hrfBq+wHmmUJ z=(LKL8lPNPXNuz!Li~2_u{w6C!#WV>}jNZ}(BtY%IE{ixHR>oq z_E4hDVc;Pm5Y^Ucc)W*GI5d1EZxdQp*F+*X0@+10cpzpt_gFN^ACDO#9@Sx$RxZdR z*dk)P^8zLC9X(^bGeK)T6zi8NL1~hlz_3pUzZw>%PF z-IT%M7m$;&e?bMX3B^4dNe^cVYbc_G6^T)#{4|Z5+TX!-A05bj*f5~RPM85=AdPnC zvx`#H^O+MyjNMkYqRz`B`&;BNp;~zGmxtp2KS=c75LPxvNpC(0EBSq`t*zad>vl+K zDps3F_n#25^7{&)@h(Bv~`OWb%1Bs8vXH<9|+;OKTJBv?c@RnZRtqV>{ zKgqQJ@TslbtCls^U+Tg*56^E2s~7=woCHM@kZOsfgFT`N!R^0BG^x%&?+&DrEcf;2 zm))na0$jy!C}Tg1UHGzk@FQi2Px#;H?)-B%;cw6I|HV>bH<{@2lxogHhEOOLxXkU^ zAWKnsH%MrpN9QX<@KAK(z`FLh^KCI4YD9_*A8k_U@k1qu;3_wBlE-AL@>FeT_7tEk zNSYePuyX;&WWSu)?AbjH8{7-e%uyzfla({ME1eKT$jD+Kb|Jlj6jKB#0wV!KR_Z@t z4nCPy3Z_ys(7U{Ut;z@ynAC9flXt9-&K?$*et!AR#+0%>xC-F^nwSKGRvm`O-vBf`6AtNUKz3Fq9$ZG9e~>D`^VT#PkBnG-w-A z#V+!ZMpbH`geKsS)OWg~G@Eb+fN;eLCW>T-0~x-kqB$Jb`r(W@OfoIOA(f_cb2B=~ zD5DcFLf>o&dy6hWrk5aC(lvi$SezNAhg&Y87B|Ev+6b1HP=npq7{DuqAX{N1W(T}d z3}`R}iY`??T8BJtID9lE7{5YxcvZv_Uk<B19T-Sl&SwhN@P*VcYQ1Bta z-^6acNoMK0^>*&0`C+Un$P7lUP{%`EPva+=nwp6(Q^9%za%$Ywey8s$LEg))!qndI zb2D1bQt4U$^pc!gPbw@!kzK-RmPb(G3W!3>atP7D`<4=b1Zb}9!>ckM?O})N;GGa4 z1dMcjsqsPMlkHF*PM>yt@naIVCBq#w185Wss=aisQ&0bfMM-W#;;=(+X)*;t3EhB2 zvA@4C#r6X>ejZA(VN^jOnVtK@L$ngGCF|a#g&yPCq&CREL+982p^vs`2iXBlxn$XF?jdxjJcML}&NwzH3;r zdo_!n|U~7>^m|arKd8@ zSg+Bl8=+4d+zfn!SO*x;E$H5Y;zSDDfmw+9axK>TRE4hkd1M(wW)W~~$&hxhgLV@P za!HJnPkPT4$k*v&i5BAwY_yHZA))>F+!7=?G^I!S^3+;D&!-R( z`38n_NKlZlG>F_b*3}=1I~u=~Q8^56rVYtMd@{o6@KI9A&G@mpfLdumBDxD;&msx6 zteJ#u8E!nEToUZC^MOh4r*o#$s>u=qX%kK3Z9?cZKhDi1kDM4xEgJYC30>e!aBqyq z-g3No5+eIhEV}6Oi8*fiQY9>19n{|;WB6j;pdf$jvKRcoXqUbVz{x+GURXT@m5okT8t`euJjUCO|}i`0N?( 
z32M~BTI)0GDt@xjqQ_%18$w1%QFEm=RFTuy-1qg4X*2bpFsAzyQ4`qQvD`6}Y);bM zAmb{sHgpAVG-&xwZ_I^mMnAvbWzhPi~T>m&KlMe<>QlP~|dIHzWZ;e!^J%lu?p0GmN=}crR}51p-GoAqG`fy*f~4&6d!{3ml&} zk~15Mu=9EI#q@*1U)(uz_^=&pe5N?#L*(m1T?0gtYU4x0lwPpMy*U2>dXIAj#3;|@ zn}(&v#8zWfvw;x^88f4n+&Tl7YoK{>s{-7}B#$@QiXBipJ(8%ENc_ZQi!i_|`ObipwwY!6DOKQUN^Id3VNo^<-FcBdG@}v4$k{KDrf*jZs-|z{mwp8{tD`+C#u#)P#A} z7bSI&5fccU8md>y;MvZv*qpq>3z0t^=u9vrXUJlpS-Tc-g&rYxy+7D5Who>Bjag0- zUM4$HDG2D{y-vKi?4f%C-PwhyC^^y_N$K25qJPfc~O8{bj{T$j?e0ca6sLBg`Mc=fdS(3ICHB)e4 zJ|izd(C%=&9pF(Nr?(16ICRP{BR4R5L&8Ov+FTc4?s)F?O2dNRc%r1{4iok!e%2v? zf~1g#F=jO4Vqzn%m}fKmUJsvU6vpn(JiDsv=H{08A|kV^aS=|~4{GvYvI)haZujRG z_Amv+&&$Ubhl`ogeK)Q?94*DPR%jnK0+}tKCMNc}s7zqdjLPl0mo-G#cT*@9IvKP{ zdyt&>5kQ9JUu(RA&to~@EMWw3bS<$YqgR`uDvtofV~KO93m3gRfHPkQ9eaOB?jg%- zK<07(+I<&vAu4jrjk7<3wcjh9J;fsz)*;r^?Ln$Z>$?q|`YCBM{ zs*f;VVb2hb1;S6_75Erx={Ip&;X*&f?Ye&&=e)kAhZ18OWvGYHszAd6>~aGK_%@O} z2M#W5f0NM#93Kk7C}HY=!C2wcw79v6PA}NLf?~QF0VlEeBj_db1vwNSJ$_t-6ZjH6 zD|{;mL_%a&O(FRii1vIkHeuwv)@-TjEl_}o-bfs}Mb6VCJajE_m*CwLhx4Aj!jLin zYeO+)D>o&p1o--VCQ%xm>~evYhEQzrc^iga9y&pbnP7}RgB%O=grSX2XfieIVeF37 z>^6i1)Mxg=`Y9LqA-cP6p3`k^LNPSs%S%g0*b=OJ1OAr)8!~o)L3YaY33?F`=m;B= z>U+_pVfqgs9}U^csj*my4kk;`9mdTSdX$##ynE8*$@8giwPn64x1MXc=QxJK(h|s%7S=;E8#D(_U1plG#%_theIeikoC^{36mk5WlT+zN z=`ett;F-xySXIdbj%Cm)=M)7DVK?ifs4QQHxB-N7*TXY&2(!vaR$Xk8X+(YyP-vh6 z1ty#5uvh}h@t!$SOCNt{J4ZJ^N*3pA3B`NvM$h48+Q9_U&;jaqzu^qS0N&hwkvix0 zEb$@zfwJ))%co7A908Dx7UHH^+L|0NK@I=6Blibb^~i~Ej1t*NZd7s_s_HB>SAiRfhKQ&+!y3k zH2yT7i2&SU@KDMBKKtZC+3HWsWVCV`>7S5$2u=!ZI^}HhVD;1rdmvJo#u@_KrI@;> zoe~zaZC0TtgB<7L7S}d?)Jl#f#D5@iTchzbq;ZJ2=ky%z79b10bAJq164i@XM6@;! 
zGLMFjc}@8YL`o@n#sL)&e=>qReWBgz)=@PwK!GDyL`LRUB*Et5zl_OJBpMjTXEe}G zFNQvoq+Puhs>;ozD}vf064kF0|KSbM*a7|mizlR#jXaKUbx@z2;0_EQf!6(X6$5;M z=9ym^`4uQ@qlxQ-AQVSi5EXwM38#W{F$?&rsdT{t9dH{N(QuqX}PV~TS2b6`WEk$W}lZZJBF4ba_$wp&a2P> z1U!&RS(Z)?vZ*KC9axZY#nTTdZGE8va}%45NY*J2X|{Vul2rNz#!&#DNPQL+9_=z1 z@Q}g|xks_y8b9tP1{mQy@?5hk-`3SxVmXjx14P2Z@lgD3y+@oUijq2!>#w4QC@|T^ z$os&~ow z3MAUpjaZnTQro92UZSTB3;}t6=GbvI{%B+BgS`S0|6!f3AXvMEg;3a1wb+>egceRzFWkEGFSv;DUaxt<5Ju@dL?3)^k!1HqDskK^9 z&nJxPg_d#y!b{a^el1NZk&)jMc!A~ixWR@|Dwnw8V!5RL#A3zDcO>ykK8J}$>5xPs zF)KWa7<8V0fC3FZ?mZF5#tU9U z810l$1FDvtVlB11a19mD(3ROBo3>e{=T2XHpB;oBDw2s#VJdB=r) zf|GFT=^ZT2N@r^Ad@+#W`#7fZx+82MWuoI-Dc>?j`E zR!+FI5FQY@=Usuf3MC5=!qrT(MrWq=;XI!hq&{vZ#@ZZ*)_e^7j%m@7${qkxqb3M{ z>!N#1iw#{bxtY+zpg|l4W0Wwga>>qvJKAP1)_iOk`IZxCgQ3#)8(?dEtrqOlLh1H( z(jqj8lYpa95{P-5+g*+EZGh8^kq~MgAI|WYc;MahOJom%PB!WHU{p^ZIU}-vEfj($ zL85cps6K&1*cincMR#B&w5-^h51m(p;171w8NE#;MviKQA`&36i1KQ}Q6Q@sgYy>U zpw|4gqqs(}N5VO3u)v>T0pKzHAI{!9EXVeH`@Tv@p+U*4E@dc`IWxJ^U@T=!kuhTt zk|~5HQc8wQDJnx{%v@2K-NKDbB_U%-LU=wa_wRY$_uZbqUVnVIFL%Rr9_MkawXc2O z*E$%=JZ8pG!JBe1O!12suP>edgc;(sTz7rp&#$>(OYY?EnpjOS+-u+9tx5;EbY1RN zthz9pM_GWi>MeM96t52WJ@)W281a1_2UJ&N=pjGGHPrIX5C{*HWB8Y~lq8(X7*o0g zXW%vMJXEdj4}e?e<&}Ow14_G8n@Nkf{2I(HNDwhtnqJLY*&Fw(V))gsQu=>f=JxM+ zBe_V9QisdBP33q9!$JHweD-Xw-j$^nDcDaD>E4HLR8u6nz>t3a_pFk)_TP-|HRW)t zirvW;Kk^c(AlUN_Jv6J;6`$-e!iO{kkvl|_7aCa{st$R?%0c5bnv_=!-g~zA)egf( zJy|rdd__talNoSTx`aj)zSKdH!d3nM;-oS*F zcmKncJ+XCbc_(m8EPD4I&orN*>(tYqf)`C4*p@S>3D@Lwo%!!z{vy=~d&;6kFXP?v z2BIfGMGE6qg_I@ucB{JTPh)ghre+ zTv54j?=3Z7MokeP#Gx$L62oNNOzb|1(-dvcw4g3bfeZvs7>yfQ$dX?$9Af_Yw47&L zz=iBG2}{OIR>S)vEgjO=B$N7lTL-)qnjQ<`ePGZbL=frgm3+&3y!D+Pa3Gc$OBg(a z{XrM;K8FX7&Bu((*I_wW5VE-%MdteB8d2AwW-k~Z?mTK{zmXnqgnHdU*mrc^y68Do z`mM?!;c0YGHJ80kS@u~thX9<=a=W6G={sja?N`E}&S3H}xa3yX~0j^q1q zN{p<4R-HQS?w_YpvgqVuyj%2mu`?|i*jRD1JDwqFpnF$k+hmMkxKdr+NS>{Q5H+A& z(;eSqGTY8pk3EN=T?g!w*;-$(?c>wPxc;a6EELmPa+xoSFj64)acD5< z5oXVi^0wNqRN#HZ*nQ~a(`2rI)PJ>Jc#6Mv{g=PCpP0TqJJd+v$qxvj>02nHMXjc` 
zlQYNcMzMg)nd^cx&+|Uss*wbAF645XHMR6kT3h-93)t@Ep2n&IL@efte zeQ_ROStEi{ecn#zy?Csxu3>t9k3!LWG#t!dvjVsr`(8rd%0ivf_^iI$G$@skW{B{2 zmekA%Mg+sQU_zxie)-4AK&;KCa;t)Kk%sycxe$fHjBFO@o!7Wf<$l_3xl97yOXtCo z7}t&QGY_8{C@Ujm2@2oGh0^ZF$?CkMe3nqAk@151v9bR^2WT4HRMUWLT=x4=4|1yf z^U!q~>0`aR?dWLHp}<4k#T(krM;r2)chiOI>h*X?EieG8b zeXMb+7>c-#Dvw|q4S>XpIWPPP!oq(BZ6Qm-^fhZ&8PgBaWDrT`^?0e?r9Qc+r}kk1 z7Do$&0ZMb}vKa!f8!$RK_6O%?FbD;+T@CS0&GhY+b3_Zus%r(vaZ| zJ<2AwNI4rhuuaCqZcltS5rd2!L_L-PY6q78`oW&v3^ai&fCT&IynoLPCJwXPTJjID zzJ2ZLYHSuxIQgF^!xZMvc&S~cTs2ts`AVz(Zu7dDnyOAd^*=x*)&E278cX9}x7-fG zdcAqv9NgBlbiC$ji#$AMAD+Nn%tu>}s<3$g9f|wTFkgqHrBEN@w8_?K$7F}W8Gq)S zDz483%!8Pvb{jpes?9|@H%LX-nrzy+^RO%@c#PTzx%{x)7~OYZ9hV_hJK`?MfJ5kl z(oz-enRPJ~gd=0)=sFZnt>KbqN80wLPEOeHieO#aI;v2$yj4=LCjnX1ADcw zhk@eHS6&m1iZT%IvX`a*v}x03-i+-obuHLFzVV#&%wbOIE9DAuav@m^GL!%PK|Oq& z?x5y&HPVN9E!b!osHT{6j{&Ei>n|x{@fF9ds4QyE5X5v-uPc#q`}hGyAMb{7d`pfQ zMQV)(6Tqo@@tAUpDJ)dUlU#zs_zwx4)Vhi3b!h>nsk^ZKr>56w;K@po4!6M5xV-yH zDD2I|>(kFeEF7D5qsQW7(SgIKlbt7NsE_B=9tWS!ew=>0WVph9>}lU5xvB#)=wnPK zz)A*vvKjUfBddD2xm{cMUgDsG+zrwNhn5%+*wrzc)+t;TR}IP(h=02x&d2trn)lSv zJ#-WS&%vR^m`sNLgGMhOM(9v82QI7^VhL(y{F`#y`->qm<%Y%Wb538Go2Kr%b;|r} zwFyP=8KdMzR!UqZf$ztYwts-STMCH%+0rlT`Oyim`iXz*_Z3^Oy@iqRBK&a3htqr_ zN9;$p3C0y9K28Kx~IHVqV2_X&Q5^C(%{c+PNd?<-3Z8TKfB zHbT`rEnWl9Uq*MyylG1B`T}PVt;Lrm@5H>l+q`L2|t;(9&9X&T8x{li-xc4oFvJhxW-9>HlVA27M!1c8!EjhU^AnSV9tsPl}w zt+DPsHVsxVs_s2DOPoAg&s(NN$^@Sg7H^V?!a9hf$fF61*@)dWJQj#`TsLu3q34zLB;(d zV|M@tFMZSdMcYVSewn(w%ilXH->j_J>>tV+znp6nQM#?g`zyT=kyY%^$7dcgmiA`% zS54Mo@12AhL2j-w#H=qaWg|E5{-<)~4pZp@plB66R{ZzTMy+)VM2%h7%pV~l3B$Ii zFuc~GJ=@c&PnW1%jW=bC_~IA98viiyq;xJK+uuefH2pQV%^qb!mq8AB)pUAQX-F_x zoQ-H)$Gt9GqIs9DKzWjW$Tg-{h-vV9vf((5GWo~vg6@g72i$&QO+I{mxSFCgl;<*9 z;_hqbZ7ZOoM(vEPB`|_uo4YXiADbKbSV}`PZ1)4eIh6R~@8f!4{F&ke zJPo7GRqi7_f3hJ<*rWpNqIT|V?;v`Tj6#n(9oSN#aJ~IM%^16|)ZTmZ^xPSte+7kK zpDGLD#hpULQX$)LBvm%-TA+n;`=x#G)|i;wMMjE%fU0-ZPspETYHno*j>maueOY5w zumtr+-okN)#DBc946ic}H)>W-#4Rc{2gcxxM4;NCX{6b}_ITQ{mPsM8tD+W6Y|`&L 
z6O7-$&xvq}gqW=rzM`UV9YDG*_UG0nCVTX08k>aER%}H4toMf^&>lzN0un9aztF@a z?0;dLFk4#>R=$7uM1aKD4y9<$xy-_pg@#zhZ%#ZJG@6mv8$vbZOGNhqGy8xQFaOk)8Noz^H6!uDdbhjqz7q?~&7|CrbIs z6xvRWJUX}U-1%Bve&u>{?-)ayn#D{a=+=UW|ym~+J zHsuQ!N>z0l;r))I0DMDYI{3nVWyW#teyf_WNX;qJ)Ct_Kg=v)K>SKE*K$8PhK9iOfTqtQpiciP$C zupjhq-4X$YvmX%*^XRFNe8S-p7`}hjW+8SMcN|UBX8t=}oKXWHq|Sv8o0HM^)48Ml zC-wFA2TpPHkI)=&V@J#x(v?{F z*OZ_o!@uZy0ElmLsaP|Dno+9X?uK1;z3D}hc%D|*kPwNGfJ?=ef?dlGz z5k31+KF4uNU`<89GqhAE!Oee9WV02OKI74kwn6htKa+GDO_|b$Dj8KEkC%-g)|cch z*PXM^=ILzlINFBR_k#404TtpA3?^G} zSO`s0ylA-qq-MCfW^Ias7*lphX;@Axx#d;3kg7$fs_kCBpbRnZ)rTSRkz2BBBnFJ= zG60dQqGqS6clsG1=^&F|<`2A99ddhdq-l6=(H#!Dw+K)?jGIW zWQ4B|q|`$61?MU2G!+3JONrM{dzPcsPoY=uchn;`^H|y2QnAWAmxC-g13de))db@J z;SA)0zY9(lN)`$>nO0gHLWe%CuI6THVv^bOs3+*gyw`codSpB?wxUO8W8?P^kG=|f zTCHH5n53gg4gDo=O}!Z_V*x}f{lP`&l!*nt)B9zZkL_9rrs+-V?1&4^doH|AOjQ&l z@=?sWn*+}B%g|TLPydl61;jX$u_3=o%5c^e)H^ULWwR+4z9dpg7dN{P11-EOYHIVP zr7IWF(&q2(+?E#(PB|~3?Qwg|T+%Qo-J0`ncF&c5wsw$KGSgt*z@ko_I+^`S@zTn8NJCH_ zo;Yc5p26`H5{r8DT9<@lQ^pJ~s#{?XLm=IFQhLjJOD`{IjxMzOXNC}C7hExmNyO(a zE+?6tGvQLdd!43Sjq<#e8`f~d%O%r_F8xwZ+yeK=ts?rJ9CuVypZk|&cK3WI&W_-E zt)w*G)@|-inRNZIP+hGZRu&-#ASnbl3;#!_-ghq%wuT8yLtq28R~d+I*w5US6)2VZ)9HJ1vhW)Ibnp<=mS?l1#qto7!;l!=9D^5dw9EBWL z_5dhg;nZuFN|x9(4g8UNCyqF=`Jd&$wj>isl^6~B6Q0JK!x zOlEJ*etP!Q-7{foE|2BL&sc7_54eOrJd&0ecJ3Tm}-n3u%hT9(<$fH69m+SR4kW|3Cck&>7EnEQ@&fW95jda@ZKeg9qjF&=H6{EWOW{@=Xhofu$%hEz|4 zB3I9-eg!hR0=SCe4##v@<`7UfY@(Io1nIT7i&`yC@(X`{pIL~4BmQ@w(ef}xo3J+iKyBX_m-Tx8D!U{71mecO zvw;ysA*49VYex*4H5`nD|Je%QD*aKQmpU?(-(66;~)?dvlJy$UUa4T0ZeSP3|O_ z_pyj;(`gFsZtEYu0^S&2EE%kHxAM*RM8tn0w^X z!Go7~L~9Qjai?=$lc0d~?C5I4HTqv%Ic^^`OU)O?k1zDC^f_;w7}n`U?ENpFNl49S%hCvu>?dsa%p$ zM~4L2*CO8L1`2N|?)roDT(NT+$}K;B{{GLY?Y$Q) z$fm^BFfuZ_Ts$w^XV}hO{pZc=$Y6*xGJhr7TgbBcuWq`wJ=9NCjDfA8;#N$jt&Ay2 z%GWe0W%rm^SZGr(TmqTg2A?YTRtCSm2XDI$*T0(ISLgAOrW46gsmG38qRs7|VR%rG zeG%QDVZ6lWh{+qTOt$LMrMluQpwQdfyPvObXDahjcJ>5RsgQ;jZ|&$=Q%6U~KJwDT zpkS9vH*ePEBwwThxb*ey7TjZR+@eLbmMvR?c50BylS+bAV`t(RV6aJzIZw|9b7NuI 
z+xt4evTSydVea5qYdt_n`n+PT7xPYedT&_0x|;NOC%sm=*k|_Kn04);zSEV!ON$=_ z1({#JeOr&MGyVSIrdU}2p_dOIWETGEYzsJp8eZ%>iN#DNBBTX~^`A3&HxqAKj z^=t3c)6+|m41D{{qPdzTsdJw6$X0Y!So@c?mK2%pare$0zOc^DKFg9v*BBgWx@F6j z0s>k>v$l3@pN8@=X;!hFKD~87j7~4AZPyQ%0T(e<}czTdOIM4?jU7eEp!0#J`OA6VA^%&O%4P?udG% zsbQN)eVf)N3@eYaDPVWUw6={m4R7AJhJqck|8jY<>z}#v9%b#Vr`^fQsvoG**g>BQ z+4dX>Q|0I6=Gt{wmCB8E)_$eNWXK@7$*n=pg>9+s|4amySbG;J(1Gg|EcS^D4-aoS zsa^@buc;HDM>RDyTaWrlq?W&&bvH}i%?6wV^}!&s*%1xbs%2cc;?QA9l&QnzYu5%i zI%-kC8$M#*ZYu0XeMddLh<^d>H3)XW==ZzfO|p0Hs=quZpQ}Fn7B8Miz1cQOnU@OT z0Pi>f@lquHrSTs6m?vxQaSEUNzpmwwt*u*@3^I3QudS!K~Zl3f)@$) za7cT;?rJwbmZD?ki*N3~EU6=lbg2pEjsvkb{;Sn<&9(B5P>!#?(8|Di4PRA#k=1pPJ zRG_ZZ4+a*Tm(dJP+nIb2@-yV3d&UOSHAM>+Ell=l<`%|Jpk`nmg~-G}z%KB8p|Z%^~68H5Hd9cf7a8 zG_NB9^J-mnPKF&85ckCg_wH>05?)ErG6u*FsYQq|Ux%wRjzznC>z3Wvv5m^g$|g*i z@-?rRogH+{iTKq)bpKrAys_2S3|SL%qV3s~vrU>dE&TAoJz~15$I5JHUhF?)7?|0b zKr@lNI$+&#KKbP2+~yRP)l4)v(+9JnRHr8bLlrx3O+5-u^z3wE>y|AUD1`NH2l+Zu z^hIph62!qXeE5_JOWCi8lAdhP?EIFkTD9oat4YR}I18ouj#b5$9$oj|4aLR~EDwUH zza}ZjevFo-#Sqn-J^T09Y}~jppEvogSp7aU(l+4y$+J?CzX_~-2Vs$4o? 
z(^;jfjs#ve=7ee9ZUAQy2RMECuO4{c4Z6K19Snqv-xw7Y$eG(cY*!z$jq9;%*RKbz zTUV3neYE{xmw|*8U7INXF=IxI7-8E}>Hh^LU#;T8v-Wy=PE&M^jK1aDJd1QUh`IYR zyGUzl(FBtYPT2>?ovhnpU|2ucfX+?V*zY&Ee$ZpfjvYpW zmu?hp5*IQq-CHt1?L~_gy~{tY&3@muK^1?;Z+Q!6ottfKRq>_(flNlnd4GJ`+deXo z`lkpRFqWqlEePvK2-+;R>#;WJ1z!K`R$Kn(*P`_=TIO|y%NsawV2x>4jPoRd?jdyB z4;{KTCB>4xZ1@v7E09lS?*1d^(j_??X%`AAt?BUbrSaqaLpdjyQ{E>0RJLg0aq2=U z(daS3r`52XDq>}zZm(|ouK%D$`2aQ82sP>HYet~Gs8lHpdjO1QhIgO$LX)?nIOSHi zXV0GL>FK*6sZx(0ckMM|$l$>Q4Gr~b)#&=yW^y?IC5~ciMn!#9z9s2y+RfNZtR1CT zJoZotvXyKp4EF5LJ=HDfZub5AMj2neHBrTnwej>)>P6)BT3VsotXVT_pXcN8J3Gk)Akey^cm0um1H(RVG_AFE z?OMr0@7}++h>frxb*|#4fz59Anj|V5%i+nc-Oq2a-+|n^Qhu(D=JnX_^$4=dia%Oe zTIyh(%f>5i;-HTYLbd@DwDbCW`0zoqR;~WUqTCAyl+qhoNx$2i#{s(P)LZ`V zKiO!u#W&E{_^q>|)^{4N7kv|iGU##3 zz(;HI*s^{5Uf``+GoxnBHsBI6k&}A} zVsT5~6@C26-z9B6sETeHnm{VEw;(3hy-@iX=YAGKj};w95Vu;$BkOS1?kfU!wAqnt zS3d7lpSX}VZ=JUi_Gn+uqIN4NDq79HY;(fstZvsXU9x#0q`wW6^fOwUMJ%LMO=r&I z^p&_&3VAV>^MBsz^RxEKl`EO$*hX}YRe>ts-<69sG&E$2jbGX82+t*%i}Nxv6nM2) zZr|RR)@JQx%a#qXHOte2VrbCYzjL52ZB^zBTR-EO>vm>g9bDJ_hYqdl-u7$a*yH9> zL*ZLUvgqiPkS*vznP(2=W$}51L*&&i>K>X-lV5O&{q5~HFJ2r_yyD`uYlDhcthOJ= zR5yKv6`tn7TdewD^a@HeWpw+46SEuRIS(b8wLW3=cm!^^+5qh}2M$=YZP%`g;U@c; zlwY+wZmrER9DUZ$XX~RC*!ShzuHU%f^Yx9Zp{Z${It<}OD6@i(AH!iU3P=SZ%c{S^ z9yLg1XE)Pa88LG7vFUnLHUI)vWdkwSpyx|Lf5bL2gsB7e%|>icgQE!;NvaX`R?<>- zx~QJ3dE3oOrGGV*t!*LaP!Yfz_Ohq*X zj2T)pXU>$D%)zzgw03T~Nl=Zi+rouid0=gld@QB?K4}44I)k3)HwKgvAI)r zShL=Zc!ieZ_Yctm_ zO#^}wJGR-9wvVkm`oi+w($thtb_Pn$Z^@DY!-j=}wH!0XddGDeM82S%{gtQXHpl0$ zUkC9GQYp=_My-go7a8uBadg;=sCo#ZXq?C*_Eu`Y&HTu_)I33-;zgiAQ zuLEn|cKm4ragq7|QMoKfpWC*9i~5MEwGm4i<3|{@U}w)Id)Ht5&Y;A2B9O`|xb0Abx_~pU7mNif4a*uOlqQ&2a6ru$6fo!m%79sG_R> z0_~EKpa0|ddrYNPjM<}&V1T9?U(_YuOsPm;@V36Xy1K5eZUE1w7~Zz}K$T(4wryI9 zL{>WZCplV8;qUM7?c>wtND6oYLB1NxcB|Mc*m32e*(%i(m(mIEAQ>tk=sQfz(qL9X zW7lMt+uj|O%D8UV2&{$0#jcaLQ-Y%nD#nc)_aAzp=4Oj#toZW_7=PDqEmjh%n`6Wqe!bZO*qvGV$7dzBerE1?$-$J|;{QP(!S7;wxj}P+mvmQKX0G7u`nn98&efO}F 
z;?|1oyzWeTirjnrtf3d}F>+*mhV5ytSg}H*X3Z7=oqo5~lPK9PW@lGFeE6{C_>N&g zR{@_Igjq(hWtY=9OKsECMl<{K63dRl6TwnNzckIOA;HkRCa^^)AairAj4zHKXcL%( zNq~5>f9ge2**P^ja>DqunkEw07z0s${HRs_qen!Erd3Y@&XJG7B~Llvt$R`1Si9ql zX7~jDc+Oc>xSuoV8PmzcS%=brFgJ-cO!xZ~N9y{R!h4n^#ZdB4jMYXn|6m9yKuhaC ze7G*=VNHc*?b?&m44m4)lIp^swqgSZL-gB3n!b4XQrWU)4QNu^4ege?A3Jd(Cof@8 z;k$PSKU`LgWeR2URzqX^_3NjA&)c|TN1ffT=Q1$1Hh?XVgv5}89m+v>^_{YrlvVHK z+-Er;`;hlNeVzwC|8gxPVP~(!>II8$8|s~FBx(O@=D+{eUs3s~COp?_g4qDgmALa| zq|}x^%X%~q(mR()AgAi4&fC{uN`KW=qb5yiE3CS9l@B%OoZF$DJDUMS!>GWG(aCS? zP(^m#(tUnQfJ=>n??<#?ItIA8HGyxdWuhT4<@)u(V7&qm*5w;FU_c<~8d!T9dkoZT z<1~770|d|wEWOc^h6h2JQ0y}GciqvW^OFuw0YBcRyC$~RVtsI&`Hi9Rv#GDVpI@pa ze8huO^IH+Z-N#zFA%sBe)SzN%@AKV6X?{lAsC~w>XU(aeZK>e{;;K8tLE2LN6})*9 z!rr(@n}z@u76YwfKjJ_a6>Cy>9IA()pt>C{a`pFpMdRr3p zC#4zaIdw02`*sq1%In9cI>`(K+QUrPJItPdK+_P5#?#b#=hQH5r_FxU6>6-20B5^z zR~JYuqyX5_Zs}*v?Crn0Mx()w@#8-{yP#vD0m7}JtsUC&_;L6>VvG-rIYpI+tXjE&n;2m5#i&Q#X1w%rIMe4Z&Ri1|8@F z!OH9m5}p&oKdr5;cax{vHf!m81e6zTeZWhosh2O;!uNd{Uo&Ex;9O*;8VZ1w8aBN; zFbt`zMYi#F7|*>FEZO-}r%fy3wCLK%&zdoGsLOXDt`k?exw-uV<1v_7LZ1SDB09uyXKiP%&C6lJ1Ogyrs_C?SjH zooWKYHXrp9b+Ppi6QE&@^XJcp+~RGm+p*(`MK}dRF4JDLh#+B&x@h6Ju_V?}Q`c~{ zGs~L+M4umFltp@b{p3tHmZK(V5&k~<%*T*pW^sS+K>2mXCwiv?v`yyXG~Ox|sh~x{ zD|PlO^Ln$v?Fbh>g;~{!Q4=_Pw&mI;R-j$|N6p~)_=uzg%tUp(g3Y@KWF zT`J?&bIh>-TH&>n5mFaRf_#VZHfZkJAS`;$NBOE{WIq9+@?VH8$=y3dlr#OJ zk0R>lH{7lyYb?=u9f^isdEdXhQtP!sP5RrNPcFf@m&RBI_ydaYLVE3zL+s-_nL5~T znsHCmj_tjqK7ny9$8t^R4(V>L`0>LNz8UX~N)_)va^%Pg{1$28D57~0oVVv5gNPcm zK-7YlaXx#db;W*8H%+=~AV{+l7E9~=AVMZuvB4xk{4kQ|Pz|}xdv}qqR ziOp)g;&(6V@0^Dng6{T^EFn#& zd3vf-ae6Ry=xDWOt#m?}@me-kycew*ZtJ#S~NHl{7*KR-lbo}X!svZ+y zQ3R^0$_7r{V{m)B-LPRgERpxZh3)Ts;u~V0L?wp#mA6njuS6q}aZUJ>8&UYwt%8>{ z+O5LuGVyMrQ=_5w@#iWrU+wZ3+1fRd4X9Aa;0N2jWP0(Co)q7qG>8k5dh;BJ$p9B8Jocw zO7kwv(vKNs92auR|EsxTH~A}}eU!NsML{JpYWkv&JL8%w&6`7bC^C>|Qwc;&h^`GM zXvbDJ9OT)kabx*;n9S3?3)R8QIu4N&rcV#TYfwnMOQ-YK;!j=ejlnzz4U6p$dtBJS 
zel&deo1bsdhkUaeqmgWp8*-Be^k8j&cwY}}am*-D_@Mq&a4S0{I9<5b+0cs#_;SgC-e(Y0w^BRTq5iqi2-OpxP9s}?I( zW>pWh{BS_CZbvi!E+(I5Yv?&8{eRTDT`@81u*)f_(!|Z|+AGPe7{y^`=Fbbp{7L^`$5exn+5NUF)+c%8KQy6MeP% z?$@(1?%MUlrDcYnJTItRnoN_8z1Q3|IpfZqdH}a9z~RP-2sMYujf`w5nXVc;i7;ag z15F1H(>UwYthvtlfCfmn*o;Y!$f?hTeHtycNtNe)@CWv_7qNPcSdslMexzH!0#7TdEOhqu}k^P%3|Q1%NOdMnuUr`SIh&j9bGvuFX=m9U8c` z(?eaSlCv}VG`_iDKzy^q9?%3zJ7M0 zZ`gXc*=YOyPttv=HSRh~Xk|gIy?gh@sG#5{iEJWv7)1Bzorrq9diRDLJ$0%(-5xs3 z;3zu&9*b!QhU@XCpXN$p_5^ZVvW{^5ssOQOj%72dYP!{{N~ZVa z`?ZV)sr93L=4YnCsw!glZMGi+8l@L<1M8Iq$YZRJ5Qoz2{O$o9MaB2;6VVz`PA_5R zP6ga-*|u$lad+bS417?cPL+Q7auK*1!BDFQqlQ~gfd-bUQ689~GtT=~FptZBMEdE| z%C>E5+h|te^z)uK&wRlnj<_cu9o$MFp^P3i(V3UWA7{hAT+Y&^y%5W;*A|!h*b6_u zeY7J_Dnw~^d>dnLWa!?4q8@u0xq*cZ>3DJ%wIiprI-eTueh}f|*mdZa(8>#Vt1O?4 z(p&S>OxDMLNAkA1AOuzXtd!*HmR`+g<_5cbwz~Yiz3&j|+zj1+y8-n59cOIh&VkeYk2NP~VxbeMnl9 zvOf;gT*+?G|K@rr^q0Oo^0~Ty+3y1Y*cm<4^$h2M$t!7iL7s27Xrhw;diMY&O~so7 zDZtx8_fnRu~37Swfk_eYXFW~ZPuc5pmI-5hdtqL(n zC@I{22{SALGYBNEu^S;3x{Cn8mQR2}7r^vq7srJjs5(&9zZ7l__l5;0mVv<1=$he) z3(=7wzSilkvP=tdOe#H|sH+a4YWCqkkkL=GxFbF!!3h(L54)cWf#+gxHcObZ)_tCS~RUE3=4lzk8qk+WDl z31+iq&yJ1?vF}+ccvvst-h%9}NbKh|S!lIQc1c=Ash{=qX)|{B0D5VgpOgy0HQQ9F z5XKwZf-v5E4gfsL$JGl|n%p7M01YTUX^Sd)6;*v)#&FG*R5qSJ&Ew*i2PeK08xqsm z>wptW%gYCncsw4pgvt?+2O}vZolIdeX~X&T7lEu#pFW0-T3>wY5*IQeXW1mRdiClF zvCS#Sz&3`Y6Bo5S_*nI-m(3HD2#zMbz-ijq+4205d$$HXwSw7_D8R7?M%LXuePt9{ zI1~dBel50!sHDp%cBIcXePUeNCe&V9%htt59+KHHft2C99iz>q)Fbd<;`exA=FHn? 
ztrWChR{Z=abP}m>)&BjhShl##xorLTUc8`e=p;kiKj5}Cp`qFc_qD*ZVECBRmN$P4 zbLnvP^xU??hxpeF0ruW$I_6@iM7h?$gUmlrtjhDTWg|9jRPcN*{a5bg(*m}kGv}kPBu9}DIH;woKNx*;4Ls6( zcfFFxEK47*9|}j*%^qZGqJii!an`Ib>J->|i<_Hp8zI$j(EQ&&E10*H+_Y7SEK1I` zy6WxQw;z4lw@vLjbpW0At>oIAPu-CSiFQ7{XUv|xp6+R>%1Q5dHEakn0k7`ws!^kc zR68K_wLs${J}}vHYh2%I)v7_1R)=mI^)W{2KR^r#lCVHlX+UW%564(UW}wSs9i*-I zr9Mv8g^j16CNbndsdR3-&VE0AdMyYX!pzPq4uP4?Rl|tGtIKAy496fk8?eKXHTdb0sYH00MymAB{_Fr2UyLv-1rg^Q0lm*Fd==hMtp( zSuIQSM5l;Zvt5yqtMG^Fqq#YS)jx6kcs9DjzMBtJW2Z4k&wAmz1~+&14yDs$ z^tf>|d+0~pqz%m+4qXi{sqEvk<|6Up)7S)J5mt=k=+#3jVkmyiJH8sqAAc}eL_43g z43s`WeSN)>eTaX?JQ;VeS3McNvRVo`5d?=Z`%bX6Ra9TN7&d?g%tI0aULDbSKUNIIz$eaGjDoi*_SvhCTa?T`PJqd?L&p> zVa1w1-KuxQLS35z3==lg zQ9cD3Y9zWMGydIbL%Po2$28?zg|7+lSc9%B2=(ns3%e? zM-c-M&n@z6nNWNuKDcF85oDgjexx$)$A^(toGNPwXIjBJ;n$0~R&4vD&Go!IeFX}~ z(Ruq?;vmsjydh!9Z=|@W5`bf{IM7WC!zkeKf{?dkhOP8aH z_F&H!6M4VF4oJeFy z#}T&l-KY@38{kD5!Z45Gz^RF@A4*kMm*l?i4}| z*q#`5XsjWl461|c>-O$FG~FRyivn2EiLlm+1oMslPE|E3K+Y@Bt$i*I)WTP<)?sxw zYS_-rd&9BR)C>kf6+-euOHDU7r}l5m@CQ9My9!m}eC6jspobB6B7&a!aXo2D@0K96 zr%#{aHG%5uhbvHQaO|+CAB*d1HwZTWKYDxy(s7&iE$6ekV4rvZ@x?U z^U34K?EUWCVAzDh*=eC~oa4GGBOuKD8qA?QHA2%Ko-|=j<9hX0Q%KurR{4Eobc5C= z{Svv&bny1Bb%1sujh8q?mQs9XVk(jzZ*L5>$7pg?>J(j_|JI~R#*2Ic@h?UwP-p}! 
zA~Fjx_`-=Dy6QMNT?W`Fp8%)py?b<0=lAd5)=VIY_%b=Oi`<&V@@S13HA*;ZG!XM0 zxCh$wx#0KPoQm4d-8AnYM(Ntk`!oR(J~%o}N5(*w*sJDt?%DH9$2t=>nFd#T8q`@N z#0qe;;h>xT_+1fvQ)Zt8u?|M-1~KW{Oki3n1kI>-T>0LgpiG^$hT#tr@J%OIHc~}b zMd7Fkgj=jD1F6;Y^!1%zmWwrk`bqRini8sP)pP+`iCzXWs8(^obN#CA%W$^|GRHA= z$3>+)BvTut2QB2!r~@PGNVP*$nCJzW?wTBRT6zTzD3uX6Pr+M}MO_bjJ=Sg(^(C`g z#0*n2Gbt%pB9#7ZAOjV9bz7w483~I0gS4U07Y2SaUFQu0z?>#N@wTYCuki_Qua*jTeRO zar&MYlU1iJU=MNQXF?Xj=x>=lXU+zARH0Zz(nBCaG+a+pNnK>b4D{Ctvu7VFe4)xO zBzUTqR`!3$fss2jr9QScjQBurdJfjxjW|1!$2`@#b;(|pJPIVrjj4u0oS`okaON^H z`T<(ZHw2>%VKG_$qoh^#=M#7kQhKcWR5>udDQ#jKUca7>bTIOc6UR-4o|5~_tHT*c zE!}`&y&s;Q(^hEe=-9y`CglTlno~`gJGZW`V0Vo$7y@P*p0qXC+@i|rs6h_yI&c2` z4%B5*vJsIWWj)zjR-HRvLJJYA@=44!MdG1n2ArWzCQ4oq(%&8uj3R)1Ewuv#afneC zkvA|V#zkJ9hQahq}?u)2C05_+!*(#kutV zP8)6?Pq<*RMHY%|m!|6ggkB`tKuTy(>+ZX=;muYOM1A^MJGJqdTIG!fZm<#&n>LL& zTwXlePL)mbS$#(A_n$MTJ-UssH*|YvT)epV@^o?iNE%_IWkS$?Dk~Xb+N#wg;2e`z z)#cKH1`QfaFWs)|I;ZO7?nX8h^STvf3`J~zO+hsWN2jSPB*tZTSmRL+ZQl$BT`u=w ztS0(S;^u}52xM&QH5b^m%3I9h+u@IpS{bcHY4EuB<|*PlbZRmo4Nrn^90IF@u1UL!UDK2>UZ|aMd8+ zAz=O$Jh}*(MsZ8K654Zovbbc|f_%gcOM8!;0HXU!6a@3V`g%Jpb-41d4TYXKZfRcO zuDcxJIk@noS_S=`7@D}qGXdP7QkmaQ_nCx`)%^D^h{&yjYhZEiv)3fq_}u;GnEz7l zkaiGx7^$Re(`jr8-k+Y^Vk4ptP<;IDsJfte`2CBg>~UF1Wj6`$oY-l^;tlkA zpWmg6sac*oB>{M;xV>phWif}7l8iXoZiLTylzZWkdDq5dL?lp=c3`lxn1m!RF0664 zy2L>!vp`b^Y#$sOalgQ1?k!2=5gN@$2xpX~CA!Tv&qquZ>5RG(WWTI!+qT0A5a7f% zYE{(hDv|N2O)=44M05HC#*R7to~;)V6H^nf39~E2{hkR$#@z#}4jh=dH6KO&B5W-K zP;#V5p>aT-x(+--4wy?1e&)1ll9hly|!(^|AtDi1ktp3-3#l_yhe;O0p& z^QVxGK4CF9VA@3gRRMjYQ`*v+b_VU%J zHzBw1`&qV|ad1>hGerS?1m=$R@#BEdsW7#;Ckt5IkoRk};}IV<$>^k-`WAxA^XgM# zVWt03VHC4>_U*Xp_aeb%+1HnG0~E%J%_+Za<70>)am z_M_fO3zE1uP?6-UK-=Qp_-k6QcaV`nQXn%3z$j2RC^YmUeiDy|`wAvzpnC_^s@F2H zMN7(#u?EYnr=suC06Kvt0g3ty5w3%|%(-`b(V*6GAxmh{EX7L(G@hr62K91Ut8MU5s|!z%XhCf9kw?@xmD924{uX zzi1r`7XcrtW?Rr;my*(2wTZ;-MyDzmYLtoD5ybF4f4dHWZ7I511#{=`MhV~9+h)Lk z(6jx_{kq43L}*coO4d0asC3>ym@XtJP%QXIyeDI<+q$(O?24H2aA0-@;9Tw?DQ(`o 
zj)A(Mp5fDGDG!-C=tCvQ2>0|;rzWDqwN;ts1&~9ta&y;Nt`F|~Zc3-^`VpUfa$3b* z6=fBVE>X(w{nr_pzlGKy5we1pg#DO2IetT;Dtg;SKRbI>yf3%4h<}FuvX$6_dM*3m zl6LFzWld>Yg5ef67@cN@6&f6Wsw?OeYbPDQ315E_;3g3fs7MKb{we5YlKl8DAt{m* zb>x1xWtVbuFVf^dOM~N3yZD|`KT`-{^{3t$&aEN;SuKefU@Xp%@-{l^!>kmSdU50ZhS;=;zn)^kjj;OrKzpG+SK9f zP_yI!#4%|D2r+U%VOS%6^+^(_^-S2WFhbbOC|*j-$}XX~yLbP2-qhj2F|T^kddL=_ zC6X}BrJ*C2Wd@s@w7+Plim%Z&QkmEG-?jCUnrp`fJ#9jjj7zvbKP7F?^0!E1b^5x5jrR97=G!KtQ=oCB>Zoun1dKxKGaX6E4&eiyjzJ@5ltNY&l z^ey-bf*@}xmmR+`Hnuja3}et-N*;jCy)9ApCyCP9tQL{d&e>T%;Q5+AK;65l&dNid zDn3mDQQ$sn(6FIbkGgt>`i&ch0uN07FH!FQ&(5)8D2PNV{E@f&IV#^z zoHAt%`TnN!fU+79|Hs;8UGuzO`|A{slP8+Bt%uvYQmvfs^C#g8FRN&_}rNr<8SxkPW1dju<3xw<&# z(U}rkm`4m18wV{U-k)Dw@w{-wC^Vs2FTVg$)X}}!A#`G#@3B|kD}nwI^?s+ce`@C& zmJRsN{p&2V|DGS!jk|2HdCmWbR$iMZyG$_EWB;`uG-$xz<3|~XY+cj$N$>|KAKb8ON+>%kC_`e=i z$2RTT*R|2Kk92vLvwMPvZPZ;X91&f`%*{0&(h+&CYzs>wz2wri1;=`u`2$^An&!;` zTi8%4h|7~Z)iZA1917Lr>xjyLzAd|u&!^tn-B_d*wqVwc8+9<@6`nJR&^pDA#1@f> z1;7dinlv%V_io;`t2yVDehexSG^{`w!2iIfWy_XAgK-E1Ctl3Wtw~d4Q$$gb%80oW zk&zo!)KKMLgu|iBCiM9qXwZQ;La@E$#QHDv`}H}Mx&w$pdrcT$rbNK%+k&9_wxu^WLLb`w{ z&#{i_)~~--$O9}4_q^vR-;YXgG!*l#_~B4Ar2`>*@LW~AJCy^r42vl9D~y*arKKb0 zfsS7Ecp?NZO;aHiIv&_hltrExXQ8&ASUjTVq%rMriVRo!Rir6Y5kPpBi*!gcZC%e0 z`h^lBvsP1BckiCmwnfB=g6lYW5`00;^WL`r>AFh)Bh<7(r%wNopReyG)gvz%Tx&z3 zltx+cPgj-2S$y4uNt0HSMQ8k&sq5-57<=8!NK+FOez=$l9_;r1Va-7ndzchS0=@6; ztbAXJBU#crVW#GA-+zWsT>pbDS-y(yL#Z5HIDIdRi@)YiqPAHB|26@JT)O@cy8>aH zGPAPc?jAj}*w4?UGas!GoVXZBxmH#da%*)92Yo>WY~|tr5uC>@?hLqLc2IJM?h)mu zQ0pL!h@QH3f!nxrbcb^=WIISN%u9}FBS&VbI`uIF9 zTe>Xtf$7nx@84vE0qk-%`F@Y!)OuOQidrLyd1ZH1 zLPk})80z0?eqRfm#E9k~OIl`9W{-f`79H$AqI=_}O{+O5V&s@%KREszA}5{Q!^P_i zb(Dj8DIGFg&}7>o7fd7dLjfVBKh>n@XOi zcf&Bp&U`T-{_5RwHbE98xA3*N{G}KQChb>sON$)OiVNp1i3JiOq~8sJJ+I#Gef3@IGUd9Tjg304M)$+z zf7?j;c%$ZZR6Zer)>GX2XmqVgKTIB3Jad|IN<(BE2Tv_?Aa%hrBL*~Wx}MXku8=t? 
ztoi}-mJ$D2_^s{4zG4c4Pqt6T8> zqC)B18$5xte*&@9dj8AWBzTz=lig7Z64Y$yx1p+R8Dd_+e$#8xBus?Jse{_4*QfSW z0nwzJm9pMw&YU^2v}Iqvc8Qh#qjTs-8d&Q!s#l+QHL4AMx0)y^-|`pI+9qT-lpSG9 zo1=gE=FPe~&HG$pI>^<>UFlq>@+5ZQgf+LfQ{i}cO=GFtfmqU7HMYvd>bQC1#sE^D z64=k{_51vy7KdAkTtX&fwus@7h~P3HEXK>7w+ophe&cx zp{kz92b8c$(I#Vj2qn?!VZ&$5YRht_Qt3{fI@J?`a5Q%W{TNWsYpc*gSk=W+OFpe> zXP3*M2x=8Nj|{&>f;j=A>3ECb6xOe&c8KfND5ek-btL%FlZzn5@K3#d?eWfQ%LOJi z?BUxQv@CNPkZ~aOH=mwOuND3j>06Qm%EOosc2rq1kag{a3w`Or3ZfNGQE=Qm;s~2L zgVK_AV*zFn%PeoM<+P34Sq=Mf9?jJ*NgG0gf&ySeVs)5cK<9`yJ%&=cLMgJmik-VG z>eQ*jO+iZ(w{GnbCF+Keah@7ur&au3<^qS4icnnor7<~nb74>X0&-UgVZ%{}-3mK3iF2+>`i>IVh0qlS<6cJ(Hg*1CP$g|2fhgU=zTu;HKFP*x z5A8#|gdlY_iVQLksfPMZn)861ijejmaGXjH7`xnJpw_M)3$&oK-h>Q^A3>{$WHHvu?ZWc$Y3E8O ztzy??P!eJ7NpwGVx}~SSms}JfNuR1zdfhj|>nd#BE4567frVQ4WTX~i^?es6B$%~n zcZ zHW{xb{#;zzgrXwJQR2aAwz*-01_C&xPZg|)Uu+d%leIEt2cR>qV~2;;=ElAh=Hz-z zuNbH66+?jdA2oHw@3YSf7Z%!81EH_O!X<+jXnmA^bqB7J4xnZr^snJb4Ihs3?$ZVE zOIGWm7oOXqM43is3S|}s_-E^&3Y|*rgN|#-B?Z#_X%!ezz z$bbOx#38D9A*vhdlW;g@(g|z3+2r|vxp8^j7rZf`pG4b4L)wpcw-?GDFgCC3{UamN zdg~*#w-7DqD&M$m+t|sqtbmBP@SNK{Tg`iULgZV=QWjAkUcP$O{KPY+SwUl5L}gzq zLOzDobG1Q-jwxktZOH3`DBi0nM3{bPOcsIp7*xx_gBf(ZOAH_%95gil6h=} zqQOupiLgkE5Xl%J)1qW3Wge2ELdlQ@C9JY!nG!`&rWGY)A>;mB<$3q>9PfVjKK43} z`##p~M*sis_x)bOd7jsKDGIMunk+gdiZFUj3$Snm*P|Cs4&)0CMep;7n?ZLY1eY*p zG;5r?+Cd%QI^d?=n-QG3bf|;i$?-BZ00W2|2+3_uYf36|O)!5<|0fhMa^$)Zoq+C* zk-IeIa|;xJyV%laFUMefL62wi$Kv@57RZ#S=>0HB8hN>6RDswf0K^L51|89%TQ^7A zT$6P&agp^tCURMId3CPm@_+zkpt(r`6o(LVGCB7F@JEA_I~2JcoMsalY5facEn)W? 
zc?E$`?M971am`LQ5!fmmagiosxJv*L81rn{@O+a&ZA2DRvXR3(m_}>DfgoEc2{3t$)Yst+XBvN!F32(09n|%>f9$3P{K=< z`mg$Nbop)S^bOaoe);WV=HF-0iH?2U>PYG4fLpl0>gqn5A-{%Hy$ ztOUYFN1|WxdL;>x!ULy~-Mb%e8*S+{a~R)8KTj8+RKd&Vd+rnNYrWE%W6m+g-OQHc zbli5uWarim%CC$mq5zP{s91$X7bLR^{gSY<{Iye9YY|*B0JMO4U>Tm>+kaua%lY4^ zlf|v)6O^Nv}L3_g?Go=NPi0}&)?CfGSM>?aS3$L&|UX6IuHkr zMlIlx$7}K!X@!T>A}F%*^I7>3G&wta&1wmfrzj|S*P_hp4yVhv-YmSH%oTsxYlvg= z+f<;B#LBb3=a{IetqE2);2cqy3q;$n19zPsP#^;uRu$3n1_d?3v12RFT~uFU-$sKA zmx}UKm>wvhUf+6cFxf}?MDif+C(8Fk$E4D;pQ2V*GJz(_LJ2yOLebBFK)B6Xr0+vk zZP;Ng*WyLb+OKR!-G6x0K8XeRBJm|E zDMkgyWd`))X1a*80boZ;Kuve3MA}B`svq|kF9T|M|3g$402>vzxz9F4NusAD2JirA z<)UU9SzIn+U-Eof=WOB_WU%A%j=lFLu!gfy<^vGfr9 zL5f?xJG-(R&a`W+nemWTU6gjh5_e{S&GK3(+hil}K_A^On$yCQUK*%zJYMePuQavX zvva3Rek4|6Gx!}pIZRL;XHL?ec$!i!yG&dWkwYZ%>S(~^CukS@{gh`+`HYyEvVYy2;^>OuFjzmciAvx*`F z(zRjHo4|6k4s;%|Gcw$g{}%DOl0`r4-o+-m9-&9XgxrTXy}c2qt5k<~Zh;xwV~PPj zw2C5Lq#~i9HVpnIr87TUyX-72?e@N3T&76fPsm)`TwBk;%G!$it%zj?Lp`3m?(@zs zGNezYV#xQL$zx6L7!I_I6$dZNgdLs=2E%2%Z(7v4ZQDZ-J|}h{VqNx0}C6uG8Z2j&>X!R}S?;efZn2 zBB*mRruWxW9FcqphHkt(+52{VpsFn7(pV72gbdn~FBw*3 zb-dB`f^Ao(!Gm8W3N_c(Rxhn-=-I5<7B{nXlD<35EKaeYNJKc*0KZ3MfKx84R+kY$ zC~7^+fLNpdBsj-3#*6V(!G~0)sI$xR{uyFpo%jJB_$Oo2rp@-a% za)9Mx4gG1Lzy%l#|Fe9YyZaQxZz2pynp{DAC|0b$DwoGtXP6&7frycT}2WifpQ3a~D7mLa@LC%I^r) zZSH%dEodb3{amLcmIKJ_D2e^9bpXwZ-vp-g8BB@q+e|zQX_dkZ1~)vo(-rQq^^Cme zE2dwNU$qWczFhl9nZ}f;V4y8|-Vj2rV)=-$B)s)T+gXPAN6-$z>d}2&0F5-wFX6vs zoF1r5!1I)qKP&-Yjz^!CfFDz5KAOdpf)f5cYUhUh#NVe`>}aEJvX3HOxiNja^WoLs zk;Wg(UlFtbc4NIV@VB$FWiWPpb@h|OEF%zCF-R;;LXbR4(M(|mK#y-i9VNr>eEHyI z=Ts^gMn?66VA=jLt8LR|o1V`LH2b74@l?DLGFs8pmhz5BvdY$No!*38E z7-PB&th$9L|Ady8Ai`%`g6;M1fBZ2veg&p!0_3YL$&Bu*=~|2-g0KM$;WA$wB_f_V zUpSrsEsXQ=4C7_aNOVtH2T&<-VGzZNWt+_pQJmPHmp7ABTca5*StY9yfpJ_^Xm1gG zt1F{gk94u8{NO(Zqv7|3k`KjN~U~k zDFc_%hj;6?m9EFWiWem^f4JhIQ>W_V+DdJ=o+_|qzfoZZ*;#uji-s_b{mZ-{lR-Qm z5<*mQ_W?&`M~i z;2YF@E4&ZBp_*OClXwJ@7_VU$?J0;N%x@{|X0Q+Hj})yc)Kub3`t<43NH{9Qy<4Cn znF9>LA4?2$1ziCP`U`5^0>$ux%KOzZJ~HV*e<=PavA9X_`^G&@+rDAaE1GbQ48hF4 
z@+=@7_>>I`EuCf7&)Kl938x-=%e6p6GV4@18L%4I`;TrA><{Vj7a^g zJ>+*3G}@bnNB@G`_Zl%X?a)Dm?$HWL&!o7FkgKVqgMf_td3-GbvTF#)M6-#6KeUpw z6MO^h`*QCjmkTY<tku@EXk|_X#W3=qYAKtPd_g|nYv8GcL*f$~icqy9&(C59I`;kO>W9Aag*LnM_kp9fHoEKP{qX?HK%`QLy4rLJuS zR2bX6)lRo zB-5MCpNubqY+zdjpn}! zS}vjm+C*2kY5K>tAV5lMTI?izb)EkOdR38O`NVt=vdivT1b}W-t2X#tsMtQ@SN&=h zX>5yHV!2u!e&ux9A}aT`SV&%=+Tb4Z;R2E1XCbc~HA>KA%qu>hBL zZn%g3@d$)F9qQpEkbCDLqmT;5H8EIQ^G~!V*uBHvr1R%+CJ4W)!#hZ;Q`Vf$WvES2 z2z8{(JHBo7AAp){1bz@`SeQ_Jt42eqPG=r1p}l7BK6 z9Do~=s+I36V?DvEHxo4?5u=LEz0I~3-ifv8rPc-G>a_0454WB&u8aYJ$mSU5>!mB0 z-Jv=*aeNG=2D6&2As7-#=>3mCYzs0(9kBU4@U@Tvk>kd{wozZ_tl*5X4khI8Y{g^T z#&Nu0W?{Gs$h6Qmai8^>#xz0F8mJ$G*ADSi{F8=t*n z*RD+jOGi6l|6mcWgyKKkTopLad5S$B?%BKQ>aOp8Fp!e5V6u(YE8ISkIcfNSM0LmE z{4#Ce8VWOy%z)OY=aX*mP>I*#B4#nCxG&kOR<_vD;QAR6I@0|Yp7ZYkofR4u<~(~P zLK?9o6B{_7qEq!-a0m#0Ph>$wXs9OeQp$)%0=q)=hzF7F|AHQ2c~Ns{0PF)9h2HhA zdafzCgeWO(FHQe&N+?A^O2Ak*Qgqr$oBQA#Q)<<~vM_+typEE&Q>H2=-@8DYjJJE~ z{C7JKrf~N$>V?$#Em^!HIFi*BhZ!yR4zI4N>bBzR6860}lfyJ?Y*cP=ZP^tL@_;<< zG0G)O`#vnbpdLb+6~2FUQ+-2!LKG){tq5@c-r4~#`{S5)3N(&YxxhN2km0CXGar`Z z$>6roAHOs8DShyr_`Tt~ZQXIKfAr!726q73Z`d&;%6zsgHWTS;hk7!)`}FBFs0&Z& z#JCCIL~EOiYd%11)QiNy+@_h_WCSxU)pZ`aX&P#`WoLuySiZh^+E`ovWm?iB)3)I% zCR~57n+I_9KsnJ|Xq!^t%GoX{#k*DnLY(HCc)SV@ z{x;JBRTn${B;!ckki{dTg2{Vx_SdEFYg3Dz*in%MjLjyAtX2f)R9|A${Wt}@Nwj`~ zB2fovHM@w1co6X9OcTq{yGBd;Zs(PA;En$HXU4;G{iJ{6nC@8WvaWmk`@CCk_>g+C zT?SMs+BP&C=!Fn;Xvxh*KfW)AkyQr3CZcbfAFh3VF~4M79E2u}FBU~XEWI)F!uwY+?kvR~ zV>Odr3JZ4urWY|F)F1$zbk-Dmbbg0leX<_!&}rKg4qo3CU;FYkwAKY@y)W?w+X}pbZ$|V7Q;}B6FJas<%jaJfP@hcAs7;elI zATn5JmzugSE2s|28X8GZC2mtpz7M1SZ&@nCv!Q|UBwon8sE~ExdCl4f{Gu6}EOtDC zKQ-*&hy%!M9HfA1m;0o1vyHZ&p_^cI+>%!lA^-fP7M@N><8>m3U$CId{G!#1Py>rVWiVSa z>i7@(=T*&BiQeXJ#D}6+CecnQ3?0`;>o3l6Z4HR=()HDi zZ(LpQtsmPewL78qb8g1O7;4@ppwM6`-EOj%+&(C7!|7TZf^hjukORv=!GCk`l6xqY z3-aZ(Mn!jI0bsOj~@pt*z6h(bh{nOp7U849Mkf(eunQB*Q1U4H+VwqJ{Nm~e5P*f(Y9?rfLi7iU6bX?mGAbO4z~J{gDSfWMSY4O8+8ZVy^4%C4uJr%VE91zCh?7`%luMo@*6}3mdI5GpHL+KwUfGQU>)~vuc3vFAnnSa0R4LZ=XiHHLz|^}j 
z_P=36&vFW&mR&FX&~n)9pB9+>vbniAwxP~Vg4nIb&;aza;K*WA4>};O7W<&g%texe z^_tkH$>^!bN1ktL&fyi4zV5q5B#mt7^fKjQj2=?yaVMuWf`mA{J>=(dh4AN`noq#V z^C&N}lM5!EllVi+IRG8N9TsTmFyG#g!6bMnzqmg_O<0g^WoH;ZU4y>wPK-zPg3{^%bwsOgdw59vR@Q^lzC__z#;;i3v(e~WGsSCb|_FV2ddlj>+Q_u*+r|!^cEe6{~Y40$pz|=6MQHUx)4#! z2VF;rfr4V)`t=R)R*TG8YaP+Hmfr6I41*Dz{Oyza?HNfzdrR-`$wH&O@AlbV5ps{o zUwpr3QMY7)tY|sxQb7XCR*^WzeM^u(WNK8OFPURKj%pQz#u|SHN1!f_0UNT+2d-G87GR^(_ zZ!4?|p*QLV6uY2Piv;|ZDSQefTB(CCaJ8zn6?1tmir9@hb>qF+*&0V8IyrCR?|dwt z)(qm_KP_YWQ|8QRTBqvNS+;8rqBcMt!2`t)3F*NJw(aG|yE?2XSsnWhUfOitGP@qB z5yLKMn1V=jVc-rtJ>HQQ|L$bTq3#q=&#UQ%>#<3vZeA&DWLue`R}@5-u^==j5G%{9GGQKFnMLLEsE(LMkV z+?er1H@!qGe0kEs#EC%y-GgijU@ehD+^NUSU+i8z^)O1aN5rwZl9H=A1E)_aZ8hw< zL?mcz+(;xL95=iHSFKVJ=o708CV9lIOtg9XZzylrGA0I7B$$GyZn!U?IPRspYJ-As zjmc#vi;dpy+NYm5Pr>=k`RxjKjnDq)ZC&QcSK{Up2M>Zn@K3boe?b(bDtj2LpgSL1 zzKGv)nJ6wR6%k)efq@q=EFMkUAR>dTzT>_R0qGZ=1r?%fOvKG6d-F>AMzKcXDC^V8 zpnCp9MXPPT_R__R-KcAY2m@r5saXU82MStRQ?F$h%YHFj0??9VcvQ|UP9cg@fG0>6 zUw8HRFB$!$EfGHiam#039-(~{ZX5FXJO2Fq&uW2oEoLhQmiK|7{!8;4L*7H^wtSiR zVGfugGFFxDRRpIVDZVNs<4e4D&gR>^C&+Wfuu{ye>8N--qKA{wAOt&+V4DKg5TwOh zC?w=R()Dm)$38hr2sSwn;2=A(WSAC{2M{a!2fabY3G4xD#hl61?i=X zvaUOGrk9v1)Cpdq)qZ{nL-f2#u(lW1$H;Q}v}rPuV3>TCE)hs+9>eRk6TVE5$dn(W zHk3E-WpO2n0Ga%PZc`(FU6v=rw^4x7F2m5QX6cKHLaS8kI}_hiBrN}*T4UMs{-+Ry zxwxXe$(4tBk-tloJ1KL$SP-@9KCL0gMeNv}&HcTRCxxW&``a-0>3ZKh3EFd?@S~sS+`nRYcF_`(}YF@UeWK&T##$NA%Od) zsb9qSK~$nLmse{)?GZCkUob5YI6}Fs@8XbwME|s4^NH$6<9tpg`ttZ33VI zy_Dg1zWI>1v#SrXpMr+Lh+#^}`>^J$$gjOXKG_Y=oUTXm?% z5E&r+)xo_%iCzHRy(4HEGG6m}R3O)8~RgG0Rm2r&h8^ z-0hcFg-$(jO`;-0LeyUamwot;*xrMGSk^rKW#kGpv$aE};idGQNY&Oq&N+{k0yI`c zs=~lfMK$5j>H@fDqe4YhwGnKK{G_2cGB8oa2p4H^C_j=)>Mdwe6iWj@Cx)^40w=iR z&!?kyuiZd!p>E=@FR^l0k$qV73}~w}Q@?05j$`cKBs|o14`w6A!&+p1>K@P7-wCTV zu~oSP0uM(oY8Nup#gZ17>SxK_v-lKRdr#liG0*E+YG*M!aOKg|E*hZ@)aR{It!4vz zH$+RnBe{`=#w7;ZgbgC02m`c)o=8QJ=>tr5AVdJ?zy?<{FDNmo{Ik9F+%6s7#%X9e z)y}#8KiV3y{#%4~vXBP;MqtS7OvolrzAL%U+{fIm1?5s;WMt&j9-}e+V%f4DYD*Lh 
z2vl<51sooHU^?h8s3dgifDVjGPn`CH>2cmvqXrEc2xM^ON{~rsl%UzuJJfsp__2Ml zo2K~(2kY#0fBuOW*l1QxUwt1mLr09>Y5z~a6i-wx;3h_-9tr-3QW9xKj`PUBw^D73 zu{dv9@HsR#24^-M8!jt-X*`{#e!<|PE$u!^$M|1S>a`oRR7(*ZfE+%t>D|Bop+k{4v5>Jp zu`5x#h_M;>r>%empnQE(M?~rD9keXjPOB-K+cRK+-&`ou=@VX4q8&TD>Q1aY?cEQ>$^Yq3Dn@JyxtavGX53g@|nPUG}A;w1|!VIOn|?Z^?fqZNZJXV8b>J#$EzaT40vd`?w(lZ zT4tgJLLi0{*7dm6+_eRQJTcn`aXc)uGd?_dSzQ3v#OdSel5EGsCrf!s9~11eB{%bLXQ}({ta9X3bX^>eS5qLN|haEmtD@P~2%QP9Vb17GroOpd!kyNc{OILv~2c0^n zJt=C-hDonE$hEffh;MPY;++UN%M>d=-MY%27g3lpucK%fD7LZ}!Qh}E0KC3%@1Fab zMtq5Ia~c*Ucj8T#n+cHzE;jBI5bGR6zsWAl+H;}GQ3c$sFU%2;uPl12Yx;zul%|iP zI~Y_Q(nM5-GK$b?+XdJ=p1}-u>dJU6<+lh{ZYKqqoRLacZa5DV#G;O55u#Kq4QVXf z*xFjF3&cy4gkzQPKR>-#$ZXVEwTnmDGkx1alC*4tpkz=K!JIhdD_BoTn z{bd6t^R&A+*hk+MV@?@9g>pz7l+4w@ioqIlvpZWuI88OO!vFxvs-cQRI%@8S%U}@& zg_+9-1U@cET6eZl2N52LLzT%@q3K3}DrH0F;WZ&7A7h9I^L=y}#zx-)W{iaLcWPdn z56w_AgQdwDA4>Jam(OX;>KutEcIAL&F+3`D7{H|X$59xG{mgu3US!vbjKWjeT9Lqo zIV?|~1< z@z{0A>#r9$)qJIcABd}v@D$cLAOo(tGP3=mal&X7(aUh}cF(r6?+Uh=B_IeP1VFTp^i^AZl0|@nu-K_N9L$fPZ zO=54VT?CZ5v7P8~2_pn~!a#hdflh{(YU8{3d1k%WXaW1fQ^6fdw5`&&p}y1V^C|HO zs8wz(PDU0Mno>IecZuli+~;RwF3gpkjbhP+NW)!d;)wB}3v6#ZYV>D3p;OKxMc+lV zW>^eJWskTDBu!=DZ|(K^`PBn8xb3@C|AE+gg4Ni*9dM_hU^qRQtOgWy6{r>ZTXdB; z#PyZM-zmx3LrO)yLip+Rwd;n{4isauY%(RKFMNl|iuo}Xb!c&(o|FZ=7(yKAD_!XO zJWq`qCNXreTp`b_Koy(K{tOWThKRX1t{~WNg1NIxg3uIq4biZSI4@bx!Cx(AMgLoP z2d#ZfDy8xbcGsa!*i5CR^I|?V?9Lw3o33q6p?Z<3y4Tw~8y*_fMHxI4r)E2<>lk`z z>*yRgHqN@gNb05KloIf)+ir)w7jGvcRsKcLIT1Sj`4DbB4yK7gr@XG*c$o^Lk_0j2-pLZcg8(I{vXLI}yR!Xk;K2K$ z1TMdO{AfZUuh-I2Pq0Y7D3b8p;zRPw2$%Ed5g>GHk;P7!ef-dF|Rv z`ACL(=$z~X#AQy6>EGZB$N0n%6B)S|jXGzyrT_Z3-Pe_@6K7oTOWL~?p`Pr44JKAA zSyX3}`;E~KO(?D1iSFz?Wte+~)73E^HqjNJ-)pfKM`UE0Y!|jxlnT5BC``@&sT3ET zF`ALk$A3|raz{i}2NRguDYG#{O?Ap#ZeMVhEj9hQ%1%>E^ef4)Dy21HrzkqQBsn%- zVyQomHb$Nsm;X}Rp0pJ z>)h_iF8Q5@+>Lg&i?OUeXt~_kBqpPaVYt3YQ0TtK_omKXlN4eU-g-^zfByJT$@a0i z*9)fYX`8kVik=)&DK`F6VX;2-M`|qH z?=k6#>KLV(-R2tVjRpZuP5QDmH~#y#*DH(6`nzwBA&fsqkL1FcPaJ<9Tb<<$%#A-e 
zEhxkyxrTAOIK*c<7kVfjlgatA#HXwKNo(Yx#x5N#7Qa-h*_PhKj+$J%d2`?FB-$~w z8WEIW@#uZDorXkCn*eTNaOLvl;f!+4Sv9l7zfAWVdR%)uyIW-3PycD`u?98$pqi)K z@Ps=xXwju6ePYQs@s0;q+ebV$ouBp4&n@`f$B*Y9rxgFY((WBD)aWb{`esrajL%ql z1|^JtWd{`{<@&%kS!!+i`bGkr91$vc;6M*@xGUzoG=fiGzfPn%h~x03efv=K4D9yh z-&RQ%JO8GfP|J+1zt@wY>HdCCH-uQ2j5!>_mXBCwW}c?YkJ;VbEBFnqYXaB6avMOu})qk7*3u5Wy4YUKYZ4~uw%QWM;tTk;WLu>jxNV_w(K~@sAnpcPu z#iV)qyP{_xys^NtpXs+|=+{rUC39G~TekLeVX$>MI*zm_n+NRG)$sLBHPx8xV*@bVSx~}~m=l3i(?0QMGni41i;@i55i!Xd}%~F}MT~$%C?=r7}lU}`CsprBhhT%)q^_p{5uV7}80UC&5)0Z7{iE|cJZHs$|@UorltOvWa(}ee2saaSmCIKjj5Ydran4}ZW8<8{X86`ixCK~hxNqfL)|-3WIXq{N%M-Smwhfn>0bNzwRoqR;;RjRw8J36c+4<(`IMQs`CWX{SY1(!wlkwP zrD%?kk&!L>pYw-nWS4sP!sk!sx`^Vp^ z@w(44^;VQp6QGGBG-&0dvvu*m&Hp#V;L42~H})q5wkX5Gt9wo?@4u(LicK`f6xmGy zOrQCHPG)AK5DlC>ZW(dl^)$b^*WXrGpBe3_D0^R0F@#0aHRlf5hT>F06ojg@u=OzASe!5V=tT1-yCupv;)Eh(7{ z$VJt2Jf)&hN_~#swgulez9OgJ_qxu1=dw)FTCQnmJ6fZV-2xoFP( zMeX=m31E!(GnSoY#ZYxl+qusfxVXN!lC&8S9&X?ng1YK)%2umJl4?m6CdS5B?l}dv zc!FWXT{d7F*il~Gr5<(74Dc_i;nB>*7s7vRMA~>Y7YfqvU*C`BC$fHdK$iiEViN}~ z7rb%fZDQJ9U3Fjoo(?L3`)q7%ym12oiVbCLmDzynw{MTZ_UG2h@9*2I*Q=*BWk^;p z$z);CRO|5!kuTbHo8Q7DpYynJJAGCt_+W19Xcyq!q%UI|8mDn|Il6h{mMv#>%gU}V z_Y8q|NHA>U7&V9xvKwbPNs&$tPXPdRF)C zM0>quFO!a5Po-`t`b2{?;JY1;_w~{iRmPLPc*d9RIVn4apVCqaZ*jD%VHe9m9Fh$W znRiCC#-_J5EQ>q{TvoAv*(>9y@W5HNBSwT<{uhtnU>vVXBW&4+n65J`o0?U_OQnCv21=8aT zN{S>Tf(=YWY%q+OlK@D}RuF_Ebj|vm30? z<;N6oogzX*53%_?FyEghZCF9I&f zyc$SrmxZs2@9i3`Vl>uo$HD6N?@v{nOMRbYsOIZK!i)w}Iu&E+_*cT+6V0+x_pki! 
z&z{;DB!libMuP@TM3;AJ1>eqL^Z7R=Rb1-o;`ukUU0t%g(7BAncqE@jH|_`NlpKZ2 z@;-^#{xtG;86mCW_Zs)^{UPe(@$Fu5w9dQ9wytGz*?h8Zx1EzRnBch3_76n~9?t`= z!UU$Agm4$?&&1q3F`$>Gno(cRw3 z<#6r#^}G1zMicmjKM9K_oR2pi(T%jh98@y&Rb0lJx?qOOCWQi>IR zBhNmZfhyYVt_xp3oCp?|@pN(AkDj!p$MKTMtLggv`}+ew`u6TgP%!wo{N+int5r}l zxMfcO(8V?)BOfounlE3zUAkzu)B#V@3wURt`U z!D)O~TYKV6*_UUL|8|v%4wSHwXUL`9F(DRQ&eKI)m8xrfO62)EaqlGz&=lOb7pf>J zbH;6tA?wconYw>|<#|Bfqg1Okq;j?>p&3mAjLfG{OTJLAvC+r(Q&(s$I_mB@BD0O2 zilPL5{ap5C+{2@)n=kbapcKRQD%2n~J$)8{e9g@8ut(^vh~WR$Pchd0xy$iNY_90kF%J!FZ zy_!dV-3x~!)oYg~k_#21Sop}X(@23lA zvBZ%ad9Zwb1<%Cu#yG1W*0M3FU@-l%;@c>7q*Kq~@;Asoffeet1ua#!6Vdx%|4cqH z)GQ?yqJ?%_oBB@d`MXk(NYoiHfTGC8%)}(#_WLOIFo%9+++Y52vzEXrOY*9*P%!G> ze>}6=XY`g=-3ln+-A+IR8*VX-XEgtJ`>Zo8F1>J1lRp+X5NK-K^a}Uk-Exfd^)DV= zDP;iz*fRvgItqBE<>0CE zX9F`ZA<1p^B3gSst*F@ZI*sQ*&cImJf7!AD-8$6%$WS>W z1N<3@rabCyS9bo~9MqvwJy8XlBC3ULu^&0o)x{-Tyl>jIJH|!3a z=gJM94oPR6P14D511VM?tyJ@MrEWc@4kn4$T)HQP!<_U+f`nKUXI0YnP8Wf#Xa~md4)s7~Eg)E6^^#WG*+y7;yPq=e(1TO*d%~(fvyXzF>}GzQ!*nDhBqU(s;&X2rWJN^yEm9O;L!ReS z1V&Z)V@AIKnkgV@YwPLlKZ4D{rJ+)9I67_>dXg%ppM`~@tXupjn>^1EHS>Qp{_Ffn z{Fu`5LfKP{(p#t}^va2aa2ZdVBqIn7sOc3eyFS?RWw7)lxdiFUv=AqHuMmsI4I4W0 zFJN7>M>^%S@?W|%p3>xca1*ToQYER0=76A_{B8fvow0R+34Y^7&=o~Fae%_EXlC$_ z;?=!B7N1GUKgZ|DIwt;ZZ71V>{aG0uZcfq~<23rV^cQVcuVH>=VsZ`f$ZarnTc4 z$lpZSx?_Hf;XaMDo;@9G_?B6>%+7tu?B-Wt=3&#$>QSNRfUBH#()z`w)bhTWG>oHw zRJ-RqM-MHT#=NNcOWF&{@0sLh>B>SZK9{a()^pzDzw}2n(Heb>Aje5@^m{$kvqDiOws!7*oXRBAztnPh{w>+8QO{831fHW zZ9bx-@aCJ=>^?>9;PIdOFG~sAF02cpT{=iXfDZpzdT)9=u*a)0YYJKGye=>FjBnX~-PKAeEdmO&i3VRLW;#VV9ycB{by-%CyW^~@)xov!@kSI}aU zMOtNY(KPq%kT@?>wgzsDtgQpYk|HR`i4LQF({|$`@3XnYs6&Ur#Prk4vl{S$6LdIS zVFri+J9kzI-hv5|eHaBv6;E%{!gCfmcY7|3YflxKPa)TCcUz-IP!`kEe_1Jg2+23I z9Q-Su_hJ_YI;(NIYvVwNWeC~4+_wWs#G&{wrSgmKrPJG-aH!$I1i~ zSN?*cw-IeZxZMWEB}Ex(hOKMU8G@{>3%*~o7s-x`Z1paB_>_G^53h5FmB3l=Ij;_3 z6jXcL)-&qVw7umq={s^-p{*L(=fQ$8ndOZ9t+;o9mft>10CeW`FNMBU+2Mw^au7h$ z$u16bdu=SHuHC=dZIFT5b*xmsHOD-D@3T8 
zj~o@ih=N&8Smxv0FF!n-OwKyR6EJPGTPw^-w(L&@%&=f$d8ppi|>7=y0igsO|`}pc!q2}fM90So9nbV3r=$G$>22K@E+2kjLtwOW;%(2~0 zzB8Au{OE)^so7@lRSoa7oqO&eDbSl#6BZ32I)hOigg4G;G@d7k++B}lEnWUmzj(GJ~H4C#}&UFjy5);!xmahj+RGh2*tHb+&^TzAF~wRM4^mP@IUSV!=+S30UeCT6xoS?@ zi%z?;=~W+SiQXS3`k}3`#Lt~xpmL+s7mp2=P2QX z;hb5P_NHVujC0lB9k&!3b}TLTJ6k%7hJlOcbJm~FlZbz$?!YBC=ge0%+=a+VnAZk+ zORP}76;QT?@87@T*0Ql6V=SYX#FaQ=q2}8h21X1PrR&ALH6G20;519A1rTSN2|qp;obQQYDhcXdN4?T_97RJr z8nNVxbA5_^*BD?BeLaEeIFS@Z`t9sBZJNNfsP4wr!L=@_eqI1)es}f%4Wd@YZ10q0&pP(U4Y@)@ktr|N1w_MR8bQzmum-po19lOt4(T=ec6h# zC;$E{o_%qm!riZHG^e(7ZrHHlO+;5`Sg(u{qiB1^KU>I&MS2%`Je2*B)A}v@a141v z93l3e-qmkezH!=pKtZbW9L$YLmhhMP)PNhin3e_$?9DrUc-Lql6I-)T;t6g&dnDq4 zhf6EEvT4C;yO%eb`SROGZfKv?R@+)YmHz(g9QW;vt914ZA;TQenY83l{QOp)PnknT z4sO;|ZJjLbv2$M6SUaJAGOornw%(clc7@K~U!zj!_98Fl4ikK|NcN}c(CFQVlO@i> z^Y=ncSk$jgjG`El4Nc9K$v)|s_fE!SetN6{T|3!7!JkzF&VpX}AFuZ=s5btPpxKXO zj80Sf6&Q=XukWK4k>r`v%B;THB0t^%;nIPF_4h}UHpK5X-KT9}yF?<6NtMN^W2h(n ze4Vov@=-1?5@VALN_My7h^z_-`YzL03EK@PPcFH-zrpXn z@z-(fUHtssxn54j)TiWH>(rtdEf;-R3`(+w11S}ewDJrIcDj1f$n*y9cK`|%wSxay#8%EV`Q6Kqbr*U-d$=b%!=Mqbqun~B9bYjMb@vCND z4r%RKw7d8D#5l!lE)RNlw<)V%o&T|7`fwHdwZ-PK6tJ@j{BP{-*U@;TDaJdFd(U7~ zBU&Bm2Xu)sXcYb=SSsIT^%s-e>wo};9nx}aSR7nmS%e>>TJ`f=L#e~Y&|6W|#4?64 z3;#ogVajcujNaEjhCafZ84&vDtro-Ro)%}Govir!3x-8@pbeRmUfElG*T{Hb3tm)y zG+S_cWS=;}!Z&Ze3tGQX->mfHbv~kJ2;;!!qetIRy@QA*o{UM0d0@8ApDJ1pxm2`1Ar`~PnFUq6bH(vwEzs}|D-MYffJ&~z zhW!ay8iigpmhT1Dwf!e9$pMsn5;nVNjf5cyqdlIenaS`O_{!++q=X5FTvqhLXPC&7 zM`CNI+04j-f@%SIY)ImY&&&e?h(SpcA)fKq?A zIz%Pgznh_BcU{YXaR@1#mn=!~IeKvK-n~cfg5$0H{#D<$q1Id;zYFLz#czV(bDZF% zP-q&1O29ym@+fnUK5c?76zUT;o$x{`1f#in zvI;O(T5PG8PaUZ5*abhhV!?onddt9#FrhiEGT_bC<1^mow3WJt!WNecN#_hx|}+sYIfyX<=2b5YEqvsGe+MSR`^*X zWh_I6z^$hi;4d?X8Rgym<{zd^S$-!CZ008=NeM0{050|5$3z<_g&~FNVby|v=NwQi zE$0ozNO2{3zRT4-@a1@Xlm>k$^i zV!UQCLeY!qE}?JfBUtP1DE_>Rxa*H-%CUC)NP%Y`#uAUl0l#gg zKrdNpg^^wwoqbiI=av_#W&3Y?ZW(kKkH9U%jI&nxxg`dLgmiKFM#E^ZZ?bVyt4CB2 z-h9A#oV0t-E{r4qdlLcOj@<))VD$btX*mu+VmFf~#DI-P7Nz&4??GTPf#Z@}Fm`Ba 
z;pcird!=?GM}$R_-Q``8g0~2}jdIoW?jL+e_B_k4F~WS5-vUISr4`K^)3FK2in+FL zfE&bbSTvt&CZEP_HgN4qOVQ`t!NA?ldFj$LshQVL2Y;}-auSf)&Y9XuBVg!RE;*gD z!PS#mEnEH@xFB%W6fa_YsWUWFyyQ!;LTRswQ12(EV;sg&9+A`P!P1K3O-D3J$s~>0 zhY2iCb{K}QLIKei53gRpea<{eX;8O+P4sKU_DP{>CHlBs--hKGiBU?oZhmSO82K5S_e z#}%%|NH>-%%4p4b13N+@cO`;iQGftGVJ=Wp@&h-a^gcalM(b9Ty!N`!*mGR?D*j~Q zSSNMIB}BI!1_0*IS#s6Yv2wucbBCmB0s}xPvyUY3ERY6JF^WSgIxWPw>SH&k-?4Cb zMA$#drUh0ASgfRkr@u;bK+EJde*A9PvgtO--*ECm`Gyo0KByQTK5)`p)uO}VS=%&d zS)pm3BiYs2bR(nH><}j+4>(0=;`yPcw0sxRA|I;b=HcRD7L-- z+MsDaV#I=JBeD!B+(*&d9Y+in$+F^mA3nuQy869l9EYseyvNqdp*q1>7lnV>H;Vp4 zgXms8AaP^X86w?Ypc2XLFM>MnH*%~bcL)sjU{R3|sPsblzQNmR?G21AEi(m!T<4Rl zYTsh)cGW;tG=Q5zEJRoV8nF1@JPjSQe7Z22S;BN;|JSg76yMy`Dl7ntdUSVu9B$%j zFU+N0z?;DmcPPLmFD+!T%QN?-i#XR(=5Ue-V80pjp28U2kD-w-bO$22u-zc5S{4ri z%!2Syy5Cs5Z_l1RGTtMMl=M-)zTIG&hDc5mj9pYFq%8>hF#zm<1Ng#p;o3ydmx&aH zZy>HJ9H;&RdY6X%pT!B@b!b(n#V~?>7spC2c@@MliajSJU5*SPw+nBf1V)WP9Fl$c zvT2tt$K+|zx}ThElw~8FXj5^%pyBzg1R&|Jb=jY*IPXQ%lYiXjH^+ZE_CwYy$Vqz(EIqL zsZcLjy#%e_?_;i_l_7K_WRb8xy-fdgqgw>b0kj4VZ@06B5zfbjzuKALJ_SkT9C@ER zG?5W)SDVITbb%R zoAVkLQbHGPrV{l@S9vv)YU2>S=wv@VWruJdimyBO@isI+SJEVdERj}`kS=5;C(wav z#D#!gec;E};S#}oeNW(YQT7sTaN)#Mzh~sVCQpW-{y44Mp5f1v)u}~IKt=ogs_W<{ z`!VFtbD6{U?W=s**D9zwORd@81pF`|Dg`_LFn)1AhJQX|ZF(Fe);^3{;W*hZ>yf*P z+hSTbJ8BK9gXIXo4ebOCozMx5#|vJp8?H%)cGTTzm<*nwJuFr!;3)Juxq24K3TD3(K*UW2eZok+qWO) z@375HZ=oHydOi!KM~&J{3)>MUYx-ps`(spQ5ZUuTBZlU33(bq+TrAO+h!lZP{aIz@ zaptO!6c_`S@#|9mgHu=x1O5v*xMzU!^+2()Vf;>pWsah|ifm%JuhD(}KGrjUiZ5XGlh_=K4?e3wZURGOQ;Z zv^*diZEas~Fr@;jM-aO)v7f=H>>i)drkdag9pRVDT4R-^O#I6JnDeuacQ+s+gh2LQ=iGJ z2PuW2S!E0)x|9@JAn337VM!U#x1NXC&&G3fz-?f<7?OSV7J|x$n#Cl|POGu~xtZ*^e59 zxZsr8fZ_O_1ygD6ewA9L$E?ywNdQz~ma>jy!<=Rp;6<;B2p5(hKX(=ENc?>^_adHnvWPtDM1WXl(O2z4N~e% zUhXO)bsFCo##{==RwRPDOPZxliKEcJzFDJ|4}@+z2?myajTLtj(yyt9TMM4U)!KW+ zE?)Es=*{r@nSHG-ne8S~l@=ILQVhIOMpd3eE}%BH275pKM=5X=kY!Jw(YQ6}_&7OB z&4bok)?4y5%;RA&aUT;}G}r|EKr$n$Q?XsigEKsWKjT&86VJcSnh33tkDOE>k-c=s 
zvuMV`q6L#f-c#FPk3S0#CvZ`6iFbJeCFLZwXgr7O*rz6i!jLFgF;x`>H_R4)X(J@w zl=aqDSE_`>+tlabiC*?xx8aGIpW|PpJZXxb$;6(PojVVgx{ZF|sGMK$H>v_q=rwcB zz0J`d)huN{E8#pocyQ(fWoKCf$haMs6 z0>~~9sSYsR^x;r|#xh{fZD5+ji}L0UYNuX!%19x`!OfWQ6^=>tNYW(ZnG8(ANf}Gj z-M3YbGn!rZ1)q8N95q)q_u)5kO0Tp!gnQL<%C69}XFqyfPESpJK`-tC&8)Zdc?bA< zS0aWKZgega7F<}>;utN_%m-;_PI)S3rhWU4tBoH*7)VT7@%8O+fu032C5gHMqXb;b z*3G7Sd;9+iu=vg1hS>q~yS;NS;63l`C1KyX<{|z3SaEtdn`!Zd7I&I|M zo%M}U>0fqDS9iI361?v)kMUW};-$TETJ4SrR274Uh~2wQ?8<3*54KsuyhSy|gX!`Md!RnAcIY!;PpS)S4Xn8=Yh#ro0=F22@N|gqYKfgoklwmw{SDF#o*(9)j zRyBN0q!d1U&2Pl>rnU{k+oisq0^964Yg#?WDiT{|_htv$uKGFwvGsoce_NydWtA&6 ziatb{7W#ynAgCLNeB#u9=z1mF}lTKq5lE3r10$iJJKT3kPk_GApM#FH5p0m zjN$~QY5c-I<8V%^3ENy&EOmZgV%%_F$vc0p@;Xs5C9e1^DkjZo97h@O`y!2_Uu zA_L#u{_1km=F7OJ&3r9^P2}W&&D}u^c?TiO!k2}&?j;3IJqmKl`9nHCL{vl!I?fRl zk!Y$Jm&ipED$-ORDBSCtm2e}0VmPW)dqngFSG%6|xE;XHcjJEvon!*sNXvahv$gM5 z9>MM?MKRk{u{!X*)Z6L|ZoPcjP-!R8K|u9IuZrWq$pI@p*3j3EIsfB(Izz@XKdkEG zLvI?#s|AP}@G9lfD&tjQ2bp}AQ^d8bd})NPco(Pp-sbkpURtF8QzP&Q2`bz`dc)cr z7MWG3*DS?Sz)*E@UqM1u`g@;}fKx+;4rN$Pk5>|WX1(50Tc{9c^wF}FDdSzPX-&d6 z8YJzuZB4lfgQ{*01y>0F52#HZckMKzR1{Y8c33Kr)_;E+;U>I`&;5L}`MvQ}rSZE< zRUyzq`ag}h>0w^TXL)p1wV&u|X!=(ls+eutl62N33)q(oAAQXcE1P_2M50Gl&7eLc8VHMDODwZh8c)ps$V0= za}q6y;Y(k{>=5dzoTz#WW%9y;iHqZS_~>icgS3MpMh+>*OOPwX?R!~JT3zzvyBmKN z-Aje`kEU10s=1ZHCrMdFbr&lyniN`&VnbT(X(96#4UDudLL?|s4e$q{Pf)A2VL8ev zPZhHc{Lj3ah?tkgi!*VeO24iVE~bu>Ls5tO-AN)Wwi&eaG=Y6=y(@&K(%_JY^Z>q@ zRU^p^qASU11;qE~Gv8IS&@k>i(x~G{&&v^Qdc5oL?~t5U)K(rtp|9|Z3pX%E$SQs0 zqD9FDao`Y0&qGbd9*gbnmFuxk&+|Vc+Xe3H0*V2ocN0k4d)Yst5YuYide`)qkYj=Z z(oerY5q?|Bk9-;cP@Or>#6#gBq=TOG9#U!JXPzp=RUFiIA(~QyJ(P}g??*t?o_!39 zSigS#q-B-81b1vYN;DX5f8~R-K8c{#xPlx4K_8lB9V0zlk?ZkaNka^{eEHD_jQsp9>@&9a?V%SyN|o&0HYA_#?%rCR)k0~Xe0Xd z@8tN>HP&%3<1u{Le-{-^0E|J8 zC%Hr9S$6WBA1-;)gn89Cik|BMoHE7ePvH_3vlx($bqUwLIVZrxv>CYMM25bKVT+Q;cDT;*#?E_92Jyy*g zPnt#p?KA7xi5+!f59Tj!$)}blM$%{E;7cB5W`(GvkSv%_ppnp%10v{;m-|x6?*;V6 zd07p)!Kyz2wBkyK#_8R!F*tHi?0jL}7mcbP-etCDBah#}c}j_F212Ef9Jei4(e@vZ 
zV`qV_42TvJliW?1)Oh&tp;S4r;e7jd2l>kVS-l4oS;a6*JoDKF1IxI$+O1n7cLeU7 zxjx+LN^fRG*akoxq;#A`w`ejktx9_q=ln49rsWt5-hl;;K)@z4A~+|rsC-57*fT5j z(SY+vbV&-IN>qY-kQC@YugpCJd=*1uc9ho@iwImW+&LN5V;dY%QzB=ey3Ycyx7|%W z%~luMOOzoqW`*ZAqxlpTNHPEkmJ-`nmwqFkd}3(FoY`HbRF__x&++MZ=h z?ZX)6sBEp_>y9bIQYfNoE-q4_;ZoD1#Bh%NBU^CU8-qUXML587!&uzsM<*4nNgO(Z zIIfl`<79%u%|KDwZEl;ns$xyue2jeG1T%`4u%}O+%pX~eb?l<6^-|J0?_V;6yS2k& z7$XQv=~Sz*u`%h{v*+Y$c?%+==d_se2T}82IEtuZv9Dj`w7-{+>@xclk7mezfBx#g zuTup;M_JiITRWocV_;z5)&1!$b*^hDR`XmB4pA579-S%UKZ{=OG>D_q`4FO>(vv#u z8k$}pm7AnAE}5YJBsddR8Pd(%?0U91iU%o^YNRVLR*8gh3@jDB`h4R-rx<9U-Hc01 zk5_YA*&(||MXW~enVXyI9Oqnfei~|ctDSE|p0*sDLTO1g%+L8| zcb(pY0Ua@n<1Y%6AqM(KZP#fWya{Ah_F~=zYh_RcEXP+{Zb9JGBgE#WKiVGqL(OBN zl~tCP+m^IEB%}J@Tyroe$P^*#-vSK`YggNAK*G5BXJ+A z-;w3e*9eqL{iZ#5d{#9);`85r)NI+mwA}bOwmjIo+y(NL1sUYMq7+V!F^5bBU1UCh zikk_zoS-Wz1q&%TpL6r>viP{GXNYKwWRD-2Pa3%Y4f?`f6RA(g@q2IiGh;#D_kD8# z+6Wffn1Q;R{}*R(0+(anzki>`Ff$D9F^n|^SEQO6OO%qv+~KljsYI4RN?B55BGrsB zO9+=1QY5kzMb>OJV=pb1NjF@`%FzOOW4ugJ!kiwr!F%?ZWr`fL4 zNDvgch`_Dz1cgTmMgViLhz_IDFY5OLFmZb;pB>a#hA|6XC=Ndgz%JG^ta5Mw9_Vl> zC8_hJvn^Vd3;rgfL5MU|VriP++7A6kBYRr>;+o<*8kF@tkVX+N=wBFGLm?ZuTAzADq{2J5G5-h?NJ%3n7T#y$a zbbwXSzZ&>)%Jp`hU?n>2@sxZ!P8PY=RVNN0|-T%HMneRwLMJ zcD4s}tUM)TJEaX}sHoiG3|2mVp2%00);pe|aAq5a%2E>6dcuJQkxNKW%VDjB2BgYg zdZiXfvl663O$?%@??PImR@}{KsVAdlawnPIG@Nql>+oe~M$t!rV^(#|`SjS&NmwxP%r!;WfcFUONFoZ(t(kQH8R zyP^Dqfzk+OMY#`P@)*$!amd`z&|U~%Tt`lOO|)%)te-H;n$DfK+N|ZUjH1wc_wG3Y zqK?RiUztpOaZOYzZCV=r&9Fi&QPR?y!5(hJr?PHwjuF{4<*;Gtyme#-_OQq?pPYI* z^~knHkA4?qhWyX?<94benaR7!p)p)tlL-q}j|JCXakYWr3Me>XucD%&%AYsw&ZxYF%zk4h?@<`BGP7P{-^MpHtmk$qK+c>wqrDWY z+r=1Glcr6tpy4xS^Q+bbcmfhvmY% zzX)ARO>Nw#&-knH)9!)Xh_EBZ#(x~WV6MzUkmTN#531ibrwMZbR5t(I^h%zC!4Ywn z(cjp!5{Z;nUgA_RHL%u=B>ynZ7baq@oDd>o+yKH4i;gCv-bdgtYxRLO2Tey1=&%Nt zRfeL}`^vc)N~3Dp;==Z(qCx@FaAlNp9Pbrv(9CF|RI#8YLeGPpZ6y90YDL_cR##&# zAhXON$(SP*kATF|a1cLRb?i8IbNT5vPki&ov%G^>$&ni5=tXSwqL+P~y&7j5L~hxV 
zBvie4)RSNLkzKzU>q)VCrtNl;lr%`v6SG?v({*QDxNt$1p3K@>+96^h`}&;sKq6$n?qA-`RO9=<&tTHE{E~To;OmMW!q70@d0ZM?5f0~T7Vmi;;Z}it zh^TSMX49-}29;z8fLQb#sm)Gee!(Aef_3`@p<6ILV@%*01cqdJVG(yPh zE9mV`-|q-&dj$w>m2vOj5x5DhlQHC*m$WpX(bxlc;$3#6pl`+7CB29rX)`{ z*)oorR)8dmx)b6IL-gK%vz5A$gAHJ#E75Y~0i%HUPTg%Ky!3(mrW za#T{%hOLGUa>)BMLLJ2|vV+l1=t-px6d^QgqdJ;r6Gd>?MZ?iGr_t`FR5DHkBda3! zk4F1F$nByXht?;i!^cYSV9vqFORImkfUI8Rv_X2uEdf=R0+!dO{7)kcZMR-Z?2S!K z0nj_iw4LaAiO&abm&5=~Q=NDs#ZAD|Oms;^JQ)r_?0xJjYLN8NR9|^V7ffUORV9oV zI^y{Kme<UrkLA|7Skx5tWRr)*bf6{mJuX%GvqoY3#UKHqcGjk4iP6|9i6WrSy}#C=dkqhQG5{o3Nb111zrn5ObBRW9~VVI8l}h<6sHbKYh)P z@?=o+IPxu3XRvEmCG!5)OfmPxmiCkGpD5{ao-(ZU*HDkcU|}UMNR@ix3s;>`itYN$ z_7HU!<(6ol=^Cv2Tb6WP9lq7%HBk(e840xjNvKB4JLF2GM6rl6WYm$zH9UsJR1Jh| znRC}=r?vY~scLB)yz?0u2$ygpCvP&$P8&_WUw&od{{7z;wOMe!twaDir~7W@=;tSJ z(^96Js!`4BMQiPb2&Aj(xGc9^J%<1YB5{cQ4RRByyqZMO>riQ+hQ zX8^(SFtP9fzr+m|fgH=W=|Dl&5wjaHdW+n44#F_Mkxe+3YF_c_DU_=PkdUIvD!P}t z)o@KOurPEW(vl)-h-uy0DE~%DO9q=+w1Tg!?j`;uQju1$i=2p~b7u3*F8u7z$cF1ZS9C9rz zhBJfe&e*|rDrfLt=yo$}!$wMattvf+GP;SAgh;jsMwy%n|Dy2QqhlFO7f5&Qz!w|& z`tv7ObhfKgMZaPqHbVEsw;yjs=LM zixy6wUgB7uHKh?=!_RMi^pWAM?^_JiQ2L=s^P+#w4sFON*})4lp3p*|Y)qitjDz)X z5IY5QZjaY7w-e|4LmwIV`ytYb47{*tApjr-8A{30agd=k9BC$ODlM-ChlyZ*;(WD! 
zpG`E!KnYna4@QQE{%UG2YZdpQRxO;ZAgAzroI}B%u z{MheSbcAX&5Di#%a^uB@JeI=-%l!SPEWUHA7x}+nU2{9$Q)~$4;MKjfY?Hf8dN?Mz z7OI<7&Q(o&aDeUyeJsiW&PU4rY!7W>D3yy=)S1$apSWDe|1-a1WMZTDWTKC} z**};2` z5PZj~gXQR&WpC28f;nE?b<4Vk+1n+4J)Nyvq%WKr@Z27PR!lv_=thhZD)2&+!k4nZ z+X|VRe_)GGKRxy+)eG1CE_933;YITpP&#nIlbqkGf6&|KziIYR7f9}$QU(?=ftZKN za`xuVFcq=jkwS5cpc9>#5uGE>RpWtgANO%*gmT09j|v7!WNz;BvcZez&o5QKTG?hj zRq+2+@vSJ zlJASVSP#)Qc`(mo9$-?m`t>hKs4ANE zxe}ac1qGt|=pE4CwqS`BYG%S6weu|Bb4Nc!vCVH}~ ze*__zdjVL)iJ)Shkag+OfyMsQJU82N_3kU@ou1yIYx-%V2xdjDKgRnul2peefz6Vq z=3FrWZX5ylaaDf`y^={jo8upy3O@)yT5FjdaLPiKr z=}7P#Z$C}!#6sp$1Vtv)KzicGqh;i?7L|sW75w>G1HPU>D(V-6-0eYKOnCe-6frkw>| zkOuy7uRD5I0j1S9>K4n0_$HBQ4N(aaa6Py9Qm9>HsNtWpUygxYuhP|Ud-G_Z%s6s6 z?)Rq0eo-}(a4LpV>7`V$TU)yWa?Bo@Y%j8E-(d}u;X6RfJP>L4Z8o>Hy))mJON*V= z>#O2y;yd+muFO#|u4zAXDAg68Tak3)$vEh_Xai&mtR^|x<|>X1vsSNna}i`7H0*1p zG$*z;p5U-&+^uV$D&C{e0`xv_IcvT&IH`|DCGd;s@sgeRu#1wmP~luB5kS&>!#p5U zYEy)n-oDm$>6PJY*nUd{U*vi~(a|Z?K*m~Iqv)~k%46JCeNEme_-8#%oNiGITpNbN zx>o2T_C%6ppv)41)0$>x9BelBNz8e87j?FX7XQM^#>3YE=-hDY1isg`>!aRn99qf+ z(3l-(I8~-d)UxH`<*kE?{|=goXCfddU)|e4`f!T7L!db#CfC>DO!G%>-dyB%fjOn| zGVH&j-YfLA`Od;DG3ss4JzoLHlW>6*&kM?_s~k zlP=gci2cQAdy!_zjC}Jz=tPZ?0({h096{=N%FPM)S&ng=y7`V>Bs3&72Tap(> zxXI)X5X5!rFbilal*TZ3B6Vs$8f88$hDq11gPC^m^Ap7&PqC%QBER11bJTo&cClOM zFTNoB7u?-SF!w%H2`U3i+!X;Kz|G5$ncV6HMSmso<-qM~4KE1&EtK!-UQ4k*mciq~ zf&GIMg+rF<5w_PU3hkv2w*4j|vwWGVqA<=tSwUW-uDpH!(J=jOF>F~G*wC(lOT^;B zeZzcmrew0(&uM4P+JT1i*0JZUFTmlI&iNEA&;@gAKbC~dWGy=F(lFQ;XPk+MU3ID* zfobC3N`ZK?A>D8S*uaqf6jhA+yGMnkG5G1rx3 zX^RPjPU>sx9UpstBV!lR0~2zw!D{8jialzuZ0S-tG6yB=MyUS9@4M#OkLV=wi@m^6 zv>*59{M!5c#S~F>RUi$kjXINJ>SUpUdCIXN^98hI%e+E(9!ZOg{>03>JH$Pwp&Xua zYu=T(bB^Vs#Q%Wti5oqPf(Kg%yxOn1l(i=LU%_5(E>2svW)2KY?W}%N#nl=OJ^$+9 z!8f~#i}oU{e*}zWR;JCG!u>z2n0LY!4)L(pl01J7$1>95mbWnRyP~*E9rdeZP?L-8;?0YB0RQzOKEQ7K_$aKmidlSs4I_r;x z-MlxyHTHG{?V5DVdz+jNSaf9ClG}?T5ks7t6d1KqpOs2})R&c@iy(|njMWG}1ozy# z_%KiG8Za27;&w_7j@QimN!vi#pG?yz2!tb;jo8&=CG5f2nRunJ;)PN{)2A(W4kw^K zb1c6F<_Ho7=7HDL;5 
z1P22{XX!otC(lplyucP*SP6o0)5Mm?hJn{_1^O2x=oF#|&`7@f&HX=ozopmtwkRy6 z&xhzf^)Q-pZofe>(-3|+pNI7&B(3kY)6OEotuPdVB968@5lkCm{{79j&&eumprp6H zKed(F|0$;r*D$1@`b2o21Sp$>k86slNzNEEW@M9Gt$;w^C72j(A)(7->otTd!oBvZ z$_d)){HE9W-SG9VMeGWWcrZgUguEjEM4vJJ6Dn{$-U3WIz5ei02Is&w#L|#)ycI1! zQc5O@;V7@Zj2tt#XEbo&Z2&NIHFKEv001g;EU!iQ2k&8~6}u|7nSp_Uw?9j(w$o$= z+rf{W$h1VF#nbh>jcC$*=9C{Z`(x-Llek14QG$~C2LDt!l(L7NiM+^}2))Q|Fb^oy z%>_U=yw?!2w^8nkX-KZ5mtJ&w&Y9b{Z;$QOK=J;aajF4t7jS&kRa}nLOH6k>kRmQn z1RPE4A03g$EQX(ku~vc)ww*cQz>IT)^SIy}R&W~jUf_R?JX@K4F12S;cjj>zAk1TM zR)7e>#6MJ88qHa?(kdCQR_R7q$OzuS*(rnbh7KJ%LS{kKF;l)n5AXYfpM=go`;iiB z1Q1mzBbs7;w7XzTX3CHAppor0`oit}$5hn7m5!C7ijsLqmF*YOl=5KlB3C~5*nR}O!=?S*A*C@g z##0^))6VQm9UPF=2I(|jT7M?B<3oa_7GxVEq!rZ}Qrw4X3N-3+D;kS0SGGa3_ogL? zn#B#DFL0W2WAUZuB2LD?DVY>{vwgg{ySC?%Qtn9;Br#3MDZ2eGuM6SH zh%i%rQEOYn?^ye)CsQuPN{m|Z7=Bvx47W--h8L>6Kvb}VKnhtNDpHTB`UFUF0IdDa@_@p;DZ{S9mm?l-N98Y)wC@vbl^bHbCbG$gcm_=1 zCTpfFUG}9mfjlfyL=hn}RS?x@rdU5U!XPWd?=2%AWhczA07w}m6LdSOpU%NKd*(~n zRK+Njd<^C!@@{MECj80pwvyDW86hbE1-JO|*>nn-Il#sPFnZO~%86j4@1=a+evsqJ zv30aTTJ#z5GH3_v!2%Z{7}K>!Y9;9`Yb%z)dtJ7(eT}hzV<2~-pN1cV{Y!FDL+nL! 
zTS!YnqMa zhb2Frtdxz##nTcZ2ElhLkB)kNI|B}1mX=xxhMSrI5|qoLvw)mw{||rYpb`15yxOdj z#y~tJ9^PQQu*@r3Q)KK&slT#?)Z1^*sNNyMKhe?&e}9`MQqXtNr9)X~wDryNSG6T4 zx8GwEr>~SYrnxtw4N)D7lNXP*H6D!!E{Njy+eOlmGUBfy=8JJ=w(Mrc2f5lki7+IU zI~x??Y>IZtVp=s54&|FDFW?mHFI6B#$yq_FH1`iBP2 zcgZ(=`ZLHQ{tM#PB$F)QHLcuOYay=8k_l_Ulx{!)=VV!a`$fqB;X#pT8LJ*5W!cW4 z9_T0e=!p^G$rq9|gA&Kc!)PNd)ua3D6Cp&A7FNIe=?;QE(IKP8Sp*YcVCgOMRExY3 zA%<|8Q>d`p%zk)*e>Iwa_uKQWUWLwx-66I{;(tOPSGbsR<0am7th2(!V&)b$7o6AI zPo8X;gRGdCw=f?nH7$-O`LI$lN{r^n6Iv{CUnTAnR3+WDY()b~3MG;QWB!Znobc?# ziUzGl0!AN(frtTKLxAken!|d4QdQiL*r_nNe#1WF6QldHN>ndJyU%2*=f`eIR0 zvM+QyAO$?>(@^tqB-Pvttj}X*wS2g|(hJr6c5#Xo_k_h$)IPo{5lrqigp~}e4?T+v z@1kfq^eiQgnpX)CqSujmmF&>Q$M(V}iGQZ(G6lT0#gLpn-T0pXA=#MnbX&b1paYP0 zM%sS;rz#~HwOme`4xq&SAfm0jIpis(6YDfFR=(%&Tp^N&;8b|d?=52VIphzRqUL2x zdy2SC2U@e;eT)APKEU|8t9%Sn39lmCY6Q$-KuZAN`VR^|xL=BcKOw~owd`q=3Co|X z+(j}`N6a?K?3jw?et8=*k6Jmbtzg))io^*SG8nSsn9xBYej~-5@_YxB^N;wt!NjxZ z&$k6abKsXIezAi+Y^PoF&x5gSJr5u+ikbf~FK5zhHw#ew6QT`3-Ia44)q)gJu}-sO z$;-icJq!&QeOSKyaRf`lo#B*7jacYh(gy3|>91ciw=GPkF$fkMEuSyS0V4h$zORVb z@$-m&@1sF2AVUlA&*PB@XbPReWDju0^Bxw7W%V5PSNb`y$_m=}@xz8)biB}*)xr#y zB#+PJrf2it>}qO0T;_Y_L36Za7d%qpLeS_0w-kt3AQO!9U+$pn`k*4d;q*O6 z_=$q}@y=^_Hi-QI6!&RwV1!Ag2xvVI4g94f|Q5ZqeOOx3**AvYKHtX_->=(Q25N^>BNzR=J>1t+S$fV1FD#m?jLORVz7zOnUMaA**Gg(0 zku(aeNE9*q`4=$6HBCDNWAH^e`NOh#|FC$tjOv<6b5@UKg!PP%8m^+Z10IX#p=jKQ zzh51z!wU5(0}G2t-5ww<3?cCqogFg@=_12F?`$h0p;Eqz4`xv(+!5a9)Lq9xW-M`H z3wn%+VbV1n1535Za%gk2MxNt$5z1PL{b&*eR((PhdHX5VF z2f2UB0IJMmoU4-pT6r`V1{<#K$SZcHsCPI9L0$zLWZ+OR4iwZHEer7w)?SV9&YOCq zJ6Nd|0CgZ5Aw-xSw9{f44tWOEBxN&~sr}gKIUga2z$p0BaRG zDj6U|NCoa3kk2}CP!qipIGEG_Fm6Zi#zFz~rgaj@z1V%;cjvN#@BS(u9gO5dCMsz0 zuaNu92TbGLL~0Fkh2yZv!dW0a6e3S}RbI!$NR7epA0}}+2eGjG?#$F@ zIwz*rB=23y%GzfX$HPru`K&p#wg`U&)5L8u-DZi^yXqe7Kqx~_ZeP)N*2RBO;agc_ zFzOe6BIIuP>H8`a5;m841sO}8>R;L#EN;YA!?ZD3@7AND>wN0nkHXt%;sBa2q!}ouECq zo)#1&Q-=t^j$D{IbLL?tuw+1yfdk#5y7_x#n2&Hm6=qcR79^>NUZCdLnHG@cqp%FW zf=>PJ2cVaanS16F<;64dXeFaaz&bLbYP}>$4#cE&{$|YfFb!XjzVV5e>B|8$yCE!f 
zVteleB7v8n5`0_64?rA_vQYm4r7?y~5x?{IFS(9o6A=kLeue755%i(jQTlLJ^gp_4 z($_5shfD7G?`q!BGM4eS?=jqnfoQ}E|HFHPO_wj~mrJ%nHD3`idHxk5!laVd^4CRL zin|@BblHBQL73{nfgbqw?VHU&XLxuxvkn^Z_@VJ}#&j@gz*@b^*Td<H?yl~g;7X){$9FD8 z*$q`iuXMBL7rguMCTame-t@$Sg?Vvc*PxB51+q)_4aw6QtqP@~7xK>r2a3-(w+?4dZwyTPcHz~x8f7bm$mDza1@qqGkqPwbXtDnkV>fe=)m~)>dvF4Z`f}(x7b$>{ENeRr z6mu=2y#O-orfZ-$SZF6v?TNqJdz~@5V5-_BZnI}6AOXAyrK!5za$$0y5x_od8WTb? zLz2?WhR-njBV1%w6g%Qo_JCX-Cd7m2lOGp3)8B@XbLP3Cn3l|Vjb~iphJ{nxMR7kzCt+5 zJF#TJC$u?eE4t4*8_67r8VjIagb88MDXX^?T z-+Z)+ycl@`7wR;WXRg<5@CIlzu;&mWMYnNd8=@HroH zT-|i!=*n7pGf`I2ds0mbe2T(i9MVAZ)OUAQimNWzlB15f-6xRg^8Q9MWKtMS7Mo41 zT|UaILr45xPe#Zx8-TC%XWIIsf63SJ?n1ocb>lXXP@|Qmow+wN|6NbR#ezn!Y7PBe zKqtjJMbr#B^uLliJSpyQ)I>@lF`(!v<5m}aZ8@?-4j$??1gh{wQ<^0uQk?O4oyeTx zC^tHRC-_sPiDQCBD>%TZ?P^#}Y6v{dp?(RZ01~das#c0;oJ3o*k$QSSF? z?cW%&YUP+4G0GwE)@+w%`e9oOxh>4!^*zM^GoPOn`C1TCKLWP%Xigk^$Ec!->LH0w$fv>^F52 zbfm*DozCadaY2r;S8~R)$9%v`>IZ96bFR*{A)tbT!kDaB0&+9Sc2un^O+vR)ZlNgu zS__BScIKP(1uhY*dx^r3NTZ^zMEB-FB-2(a>8J$zKy|b-p;%m}#Fm61Jh^?xvY{9z zOG=Y*MLF1qUg^~Rh(|JWLWe}0lk>I)bm=MIM$Y(fP)Hjo1})n6y?Dg)d_1D%O5!ey z>1u7+ZgQJcb#i2z6d#D)hU*AsBFfu0IHv%%hJd-ZYzF5zlq}SYKR}|1YnQ3|FFwy7WJVb?=i3PDVc|L>X8l)Z3G9LF zQ2FmRi)#%*liSqt$PBfjQz7&vS^81YA2~1VZUsel)-@!aJlSjTCUeu0f!eeGppOeB zF~|(6ZikGmV<~aWGYgK<0+k>Ec`{hkyqf~t`z)HkC#f%op9QpS^e=UWmn9K!ZXs$5 zu)O{6aQRz1jWbUhG!$PXtrW$}e})R2CE)@!>JBpJr4&=)QtGjTmZLS~h96ldc$@fg z%0W}eeh$wYTlwEMZ0|oec-4#!wu(_OX-a#w8J^MS7SfilV?)RRD7P10Fo-m$WYC-o zmWtfptW!hkQ0Y(@Tiqjm%}D%3oFj2ZQ{=g-O}|!{3~N!sU!m!`PHhs`LZ+0|m#Z9( z=%4@&(Z~5MG=yqezo>4oH?uvZ<>gDxJ%^dp?I6Dub}^)}TMLHwP)b;Urk%tgn{wKm z#(P}lj*CGcS*5kxNO|Yr4dNUpc0J(Fq#`*kNV3S`?6$sF%17}xRDv-0qi48mqtK@W zOSUvS>M~r1sAo~0|3DiXjaEdTUtv2|$#Ly9Se^z5Y%QiXAmeg67O{a9DlP~|KUDak)WWwqzxKb?SB!5sZ zAq^JK3M4YAM1A)T2kG+#B$G%;H?oh&!Xgt#Nd}0o5K|80rryI=lAc<9*)>3^Q0Qj^ z;kYUri(SC%n}NwV2E#KfIcLQjDfA5ko!~q3j_yU)b`3$UxEhLn#GwXD-$PmIjr1NU 
z^)+Azh3EJ&1LFmrp~i@0bZXv68f=dJNEX`{+J@fIxHD)G>`)S|qp_Yg2BoRxI4bHz&D07@zplikM8oA}$+@M_cxmOy>G8b^4^VR^R12yIdY&!a#E~Ptk${|yBx#CiZG>BC?Ik!O8Keba zK+RDZ;!EuL-_!}nE=x}zDeKoo1r}UKsI%v6dF%<7EIDehD?WZoNvqst2{5Ij7Iy5hOI=%*gAw;jQzo+4wa!Sn{oYd3 zJmj>Ole6AY#Tshafz2}oy(>5y?vGpu`jDf<9S+x@^kS6YF4*&{lyX|6fOFMt7)Za$ zIu|_iftuokFnST9HWbG~Z!v_x1juO*Hxc+0hC;Y*CmU79-6W7m9?*$i1VEpQ&s5za zzLo#PR=zje^P27A^i!ua3@I}naK{{|*^&*xxrmPgWH6B0Vib2k z4$h+yGv;O{g#G$?yEx%X36E%TV&|v5^TT#Dy=ziKAVkpcRK9eLxbol) zk`gmw#E5HRTn%;R>SH*w;%%0dXdj6k3}^9it{=0xcs+vL{_i7*)blji3XA1vq2XHX zdP+q!o>kFkEpkngPsTAsH1&|@0sdHq$z`7aDcHAb$2+N*Api6$pyWr|KRsPR+Lckb zP(DUEhwu0<5!Q*;6{H9TRV>2TUFDBF__@(e=}`XT<P>lNH`TbO#jsbW-! zmw;+|2Vx|=kK9@sw+xvt;MJVP?`GZn(@%}Omt$A~@!n*L2oQ9u#1xXQVG4gL;wRMu zxds|ziq9=j$s`b;*KrrnU<8iZ`0aN(P8_F%gnU%^eg}KO%G!m>2XEp511zUHE~Rp&b&Eh#J}WWuKC}=T_wb zyg?4`bg1DE^<*@U;)!T7wVc!D3`%D{U@Od^+L}@NGT!`ai_S?5BVTMQV`y?JAjFY< zl&X$$Smfgzp%aHlIp&g*E3dZ|-l7 zHMf4We`L&n!y%UtC*&E8XwWw$#dLgxTUpN5tz`CclWDcX17956Dy~|aY?m)>4w!R` z0gSxR&y;3zgpgK;c*ykmBeXy{2(C700O}WSC}Ss=TzkCtV0K1wU`=Ai!pA}&)48Ob zL~iKFlT1$-?9wFmi`zb#-=vmBdHGz`_Hbj#jCP4|*+2cez&Az3WgUuN4QXz5f1M3= ze9z3@_G4!;KAIQ$xzcRLie~+WxkUJ53v^;=gZhrGci(j`B*{q-n>>6D0V9xB0lo?w|rwx_)CsnobQrK(d3I% z*)4J35l+SO|9fSd-V|mUEi}IGxz^72i52^NW+)zQxt8U+S*Q+D89X(4InBQ`T=JIo zj``qusihGkVwg0@*<}>6myoiiGnpcIOw7I`H;w5lmi5D*N%OSxwZKOQE|euCCMEa~ zk!>r(;+3de=+8(fotG69PNh@FwyM1}CQt|6nam@K{Qs@~t-am64CFooFd@kdz-Rn7YhROK1wi z>(tymdf^a8V;o8sDN2A-Pu9RTpzpK`5%^vG4x%#H740)TBW0vc zo(=BF6UD7uQ$cYX_-sXIjpUvr;n6PRwGM%f#j-5$$r9-#U)7TDpLBn;h^{J+qr- zajVQc!!9ch-|ULzI2fe88?m<+`}XgxA+@t=E#mH=In%1my|n_)Q`Q&vDr7iKp4%qwJu%cg6JsL9Si4`J*<^ zpogqti5O}L{5r(rT{h1@K8K0((pyMK_w3u3g}g5+XZaJ~HLsd0v2)%PrM;UR3UhE0 zZ{NxEqwcL5?FZ!|LQpYF-yQuK6GrWLXYihBCKCvF6~ac6p7q*r)lQg{eL!21>}n` z8g8hits??W1x_6@>_Y>rLFH@W2FtD0#TnW9+~S1wjZB%jb5OTdXTH2*g#7kZ5VrWx zr=mN&j~sta`XT){P|`m+yw{JFrpt4h{0*atZ!Q|d%HTyu|NHC1?_Oh>M#wXra(t0X z8OpaZjW&99lk|j#cG7 zU70rR_}*#b8Y#i?W98;`n0IKb^f&;ArRbKI`Q7;8x1~8uV{G|yv}G3*8zXg;FFiOR 
zeCsu8qqVLt0U*Zc)|yUeknVnk$i6VJTG5)R^wt{z=bEKd|a_IE>*tcZQ94XpbSdpC^gHz$PkGinCf zb*r+CGgG|({>v?5n{9x(f10J+QG1?r+229+FN3OCe{tj1Rn>g0C~HQ!G?{MLaq;!B z66Jd{4|@6 zJ$l3}8dtjccJb@{qrIo62fMP;*>t0H$fSGIXJ-ILR-x3~dE`jH1z4*|qQ{2mTN=SB z7R6Fku6YzRM@*`zUg?Uiz2u2ES^SM0NUW48g=3E75Wd|PUy_4dskUM@5w5be$Ival} zpX+0Xx*=_`;v^GLu#M=Jwue0BLW0lt<|xs(ciW^5s|wUUl#2qq-wMC&cwTM>c4)2) z!PL6K>eoJGqBq6#AkRr201eWtQVsWF;#fbHr*>LhRb(Hy5Z8d`=Olz<8?{AK0ooR7 zdH45@pez^(_%ydH36`srFXEf$=v<1K`{g#LpREjY+r+w`#l)!mHQmdg4r1<5!Gf|V zZm;y+h&kZ5T4}?gF;X+>yNiLd)CX~^#!;-Ky)8AjqUiPoEO*~`IN$T~ZobuoKan!Q$^ePuHI zw?Pd9x`MKf;*~#f=9;B10of`7Dkm2wJW6=}LuZ?m1zVTDxWry6{P!OXVxP|=)A%z) zbMQ=aYq^H-H-pHtKA;Rc+1fxVd+LXPn3`}Xqgz>a%x1ie`k>4689xL@A@;M5KbHLD z&q*a$p*U=~GvUcC2EAH*kH#Q!z)R9Z)|kMm4b)#<$)oQ5isl{!`(;5z&wQoWZbM9H zIyP*$4i({j0%ayJUFzj-Z#+Fcs}|J1Sde-*#3ahnLg|}C!tHA~_<|jE+-4=X@louM zNG*nbzuu&#FfO37dy0d{#$MeR&7Yv#!E(`8hzTyfSr!{K1mwL-(nRrIiT4R7QsDzt zqsg?rOB9W;7lE^fH&?0Eo#fofnkO6{+Mk4>b%~Jl&umuZK=aJr@|!IB2z?;kDY84e zFJH4=H>Q|Yvoi8d6ytfBQG-Sz<$7xU+wwuxYlGf)vmZMlbRkVk>v#|-rSCDamlcox z0sV7=Pa5pD)x+J}bs8SmRGbvn&0IUMUz;h5#@1C2s_#Qp;dN@kubs(dqonZ*-gzLa zUy;VYK5{Nq{It}xOWik5@G}YIK)R~LY440#ObIre!5VNHhdp=ddaVwR%>#2*G*q5# za%POu2^3Y??@5#T171KIP|B48r04jT$2j{l(|59?cX|VCF-C^OO?=2Rn0J zqvU|q;O@e)sik{JoSK=`V!&PMZ#0u8)A}S)nO6WQ%C@5Q>9Tn7Vv2itu;`Fet_as(<6CsEs+)#mt7m}Rp@U)OfU45@}y(OBAtl!c!@`KSf&(BJj*|XN>)waix)SVQx;*>c-z(&veV@)KvPPD@xSLC{JR^`4z>4-F(&`D8RXKpnF&YV|ktd!=3>{SWd~MH=xN)c+ z<@IWZe5>FgFC{mpQofCEb*@Qxyl#hS=8(~>HoKW8*lHc#&D{H-U=wp2G=b{xL_dlv9M`xcqZpIOjrgg=SPR?pHKChCcVR_>N_iz&T6O-rPo&-<=ADMmC3(aP9gP^;Wgd`m9Ivy2JUC zOx#?EIIBAi96&Cgf8?fTVn-}dQ&GlNiFE|wRLS~1CSGZuPIaZw4W8LvSmd zGq!4XZXx(|fzUk&uD%N_xsXSYXwZ((7_ELdn5|qG@zJcNmbWk|K>R)9l)JQ%`oIoT zQTSsQ_2Lp@ckb+d;_;EmX1LTyva2mM586b3XR8Gwm!d(kvDJgv6)9A9_BBq@$MBj#CQfRpM<77e2aY1@DKCXa$Ui*X2jp zOkGLn$D~A;@)Mm-9l`PO4cf)?(j*7V-z7*tUw(t=KVDE6xzUjWf|#Pv-YIrdFD5os z{?PY|eVbBR#0(aFeHl^MQ~3B4(>{dLTm0OLqh+c|hK|<8RP_rb7)*D3w$ePH3*H8P zTYkLzhrm7n)cb5su$qhuNUFETm8VeVZkpVYXgAG!Yd7EI8rN@ 
zd)siiSwm%cqq=f~>dS-$+R)VO*w;kmBoPHWb)mg#HXRG*Y_!QyXot!$hPh0{kJR`> zxB2pMQY=e(F!#W@apg#)25?s?BRu}PuH<^dcpScUZ&sVmldXUF>zAFslR(nluhYiH zu$q%4K}wYxXVftbVf_lp|BAp13j(V*Gxk3y=+&9dKJ+8;AXoyqq)^-O7`ug9##McP zw}l96z}-1uXFZv1Z_b|%ij*?S8LOL~Q1jE{bf@rCGnh$vJb((h* zC7lkFWhgCpG*w6Km9>F1I?{`bjOx3mY9&dyzE=98DGzTUI@srPiqqQrX{Rb4Mv<;Q z+QZUmJ!|*keSg7*9tw`T>Hf4vd(dgQk&{Q$y9;KqW9QDDQDYk@6YX2YPiy#z$G0Gv znH=@;|JDPi~ zPK4e#sf}v=1y`fQUSi;+8-tEso?c$nDAb&unAi2nV?1S}pk?@Gi~wv-Z2cws#7;^d zUco~aA`00>i$xx(k7S{lUrK8tHhCEhKIyoQ7DPa^gxMoWZeIZDH2&7-{zc8Da@&`w z(%zd438%w;=;rL;=ll@6k? zK?4}H$A_%Jo%r=2fSj{TnO-NyJ?r_5SlnHpp>b-1r6|p~O-F8Wt7AI}xYd`Q&CPZ* zl*drp`dE4F6sxJcM>TLF8*A>7!r9HZ+4|S3b#Zg|ejnGsX)M|#5R;lG-_#}kbLOS< z1eb`OAzdS<1^ReF1m#m>mrC1g4Ot1=2qOFe@;9L_1SggK=bv*M)?$=*+&RQZsB}p(@ z9d3}HP4DTMAVF%|vL;IXRi5EqR+CkEw6ASGe{ayB%6&(tN&(eaMeTVy?xBA-^iWg` zVlSD|ar8$@Vsrah>gU96Qv>R$7dK6`DF~O`ff(jMpoD*^ZUp{L=d!l8tSHvDEZoiW zj%?x*`v!9>HGc#-tA8uFv6^i-%y81vG&a4I+lKx@1w+)22>vxrf&ew-7R{ysi#@|P z5TT0DjQRkk-)9P8_}ErbmLBe=|H#C35qZs({l>2lhHODHGeGizKqUN6iif_NB)o3Q zVgE=Mm6SUG9#`;E=}T#j$**kmeiPUYynN?-u0wmYZJO`9F?s;(;FS>^`wz~`${$*R{V62^2=*PimV+{b?jtZ$ZjUngF8 z^+1&|FsU|_(s`kccq`M)r9lykgZzV}sgx`ty{ia)jHvf`^)<4V1cq&EcWoy+eSrW- znPVmSKVlC`PL|^C(7YnQmH9UV-#qTn$C$R{voE_}Uj`8^NEX4EEniA~Jo!m``>{J9 zK&9rmMrb=9qK_DTef5bf7usj(h0nv?HbE4ZbL`u@;roDCk!2L*K4-siX!OiIFEdj{ zUI;aatkuf|V)w|BSQkIVYS5I1hK>zJJ|HivCoXkVONgzVt z8-J|MQeZa>&kK@3=Xr_ZDW+iyn{Ocib`Kt>c-Y92TXMQO=5n-EA8iUSO*sP|SsA2JAeOF8Fg3Sd-Mrg^nAm8srhu8|3pwa4?2A(S{^o5 zZI|g5A_aJr=1FQ|W0@o5<>Y7~TP&fiP%`j5h9Tob4}~#D3Lm)(k8wE^@>F?_P4&p; zoN;LwIQ?^Fs~d@Dfo=yqac#wzpyPvm$uBvj0=h`$EO(;#&7GIM!DI#SkXPS6Qr+AK z0Kbf|N_R5o@*TO8+@jKzZKhu2j;^C1>Vq8JPK1P?cj#w&p`-6esn94B*ssylUygyF zQ0E9}%)$q{*?|_r#3q^ASjt~n^%N65ksHi88>W7$ov+&dh_B4*_7=wM5Pm}tEME$o zo#5%=_g;KO2PsuPxa2xeNvFVI8)&~<)LnT@Jv5sS=B!OQ+v}i(Mg8lRvp~N`#1x_W znTt_7J+o9I+ubO&ctv%VH2Lgk+Pi%|h+XRkNS)RstB8$U&3_)Ry4m8F1`{1YhriZJ z%dzw&Vzr5>fs}j^RzaX)BXlR36(QBSy)p*$FU4}`rsc)SWe;R)(h7Mh%vsIK_S2f6!CQP(f 
z0YTPU1Rc!kH=-)J^XCoI-ET_~#@otqmv`QwusG)Pu4F|Ev&Byk!Bnw~G<(v%R0Y+9 zb_w*s87X^_GQXPeswy%qTH}xTK;`XX=tPr7y9HtN$I>=(BZJQGiUsMT>AhgqRaNOI zrfX!cJpif#eg;_`l>hczbv{kkud5p=jt8%ewwAncw#8Rstq>^%k(M+8Xtx!4-2Tq>3#=?i& z;;!d8N4(YQB$^8sIb@N%yE47CQ`oAY_6^c|q`)<~k#+^9nS3-`_6`R#3nAb#0|0ww zpC6xupa4aTT;}hkBMc&|LAYv3wUVUe)wdggNwM25cB#IX3?uK;D zbLqiT3D27c0CDl!-^9yBXf;P+{Xwjv5yTlWYMhCK-9f3L*wmQ}G(@jxVQZ=2#4eQ0g*4Z)63Qh(2r>W*&Zw>aVzYjc%JNr$fkEhqF0-enq#ph8 zx7FvUvZAC7pZ4ydbK)0YC<#y4zy5)Q9_Oh~X;9R66?`ARz7e-wjJ4=gw$RUYwI6#Q z_BLbv&`yP6`Zg!naiU7JZ8zS0inGo`C*@F@VxTTUYlot^ZQ}1XgS9TpgG$eRAI!!__Aen>t5*B8Jjeacm(>rHr;l`(|6!<{v#BE|ueXQ`ckPN+Qgr ziILV9$-TPi*-;aE$){n6fS?#ASPC(kCV7m)1xjGtrXM;`vNrNqoc{V`?R9Hug(%MR zO(0I06qkQ51EQP!kb_3Z+pI7WAmEr{=ovWJ#EO+UPp&g&E zpSc$HM>ZY1UFwzbXB!5OS%bu;yOt&X?2ode;K6_P)^9qX!o9yx@z96~cb{qS=Ff#} z-+2hSp6$8HFKI53S*s6}p@FE>>c~6&CW3*Eu5rQ(CVi66~_N5qYS*VB5_Mla!Z;c*%dHPTP0XB{Q0zP{(@1%5K+3T+C)xu z>-Aw9tk(aQ3L#JoaBt+f*}i$SVbI&ZThLEDY&o+W`cEESac%Yzk?T16%@Vlm8i3}` zP<>k6`&Y=-LI=fUuUhMO<=*&8VN)8K2Etbb4h4zh>T}7^=IEc z(CypD3xX0mj@qn|7>r#r1vCuHVG5puccSmVi`nD!Pw{B=8dW@df6K2Tp9~y2+=B^hK<$VA8tj7+f;2hQQZ6ezz{DR33geepsf@9wg@Yz~OLrODTBkiSDha`Xhf3^xj->71!Sx_F*eah>Z zxf+326rs8Sgd~BEe{7`wL}{)6iL@3QH0+%$WEWFQ`|5xAR7LXJ--6rQ&~2?vZ!lp% zlx#w^)8k?KztR(3c%lIxGqFahEzb*Qx63!>*#6NXsi~5vLp|+I`DD`f2)RmX`ffe+ zw?ArgoM~pt%`S>I=HL~0_FitPh-zTTzu9jYQ#tk3?uEy|eem#1HzatobuCCEhR zG62_swKoe+=R5S$M+cUl(*%I8$kb)mhYoBQ?07qw(Xosg==;n=b8D|mo88L(z~J~j z#qXXhd4C`5R*WOFd-A4>xp z(ZX@U)mE~5@;1GOSILT&hyUz=7$SvNg7MB@SuTC?rW}=~0}vtT5YC2`{o{iP7 z_g9a^P-BAz1^#6|@YdH}PxhZ_UIAovsz!fR?SIz4HE#0V)ykV<^YnN5rXgK7&rQ+l zOW+Q(Myby+7ziE8=wn$tEIm;)H9P)fK;_;IDr&NV=P9qg3-q0RKc0-VTtYbiZAIm` z;OxJZ-2Hkr*wy$5#PoL*j-R4Ems zpgFW{8mqs5`BECo#+mx}Av6HFqqWmq`ngflgy@fzs5|n2M)fO{Jx;qQ++7SrlNIA( zleX1Lmp_`b4g#ps`Kj-?R`$|qiV$T@v>p@km>=TvJb7NjX9r>yo^O{5ePi-dybVyPy`lt;eBg#X(NB%D@UpHE^XF=a4NS(Aa%0 z=v>mUu?eP`L=>d!{eRSNT~4!B3+BX-wGX#EjMuMZ#jZw$34pAe1YlQR`#S=9zW?|C z!JPl!CZzxTL2e43k52Z2@yO(@P_z`PSqySR+#S8;&yu8586~D6>PwHOi21jOj;VE= 
zjnTo5tP1?%;hC3m1KeElJAC*`eNAQ4^bUCStfTMcUg#$N?w#q-`E=yTY0>FrSUo@cW%a?W?@UFq0gHn1Oq|LKGafhJ;*O|l}_}&8ZGlQJ{*RblpD=DDV6SxPGLRJyNIE) zfYjRed0*5keZ3?Qm57cJr6;lP8s9oT)FpopkYmLdsf7i<#|^^s^2YZ!CU>v;z}4H4 zLZ47@qL{KYqXyh3V!%-bT_>i0`fG*zang$|}8GRI( zq=y#F^M>W0@84fqjbHaecxbd->XfH}_`Dbw;2n9jcX^tqi=;m3A7J_ZuYdgqZ@l7$ z_nBtnBT8FZe~)Tmc6cSw#*#h0?$oW|UdCE!5dP2a@&Gd&-F*JnI4G4tll zqx^Mw>hBU^1s>A}7Kvxfedy;w}Kzj@c6*ymXnMU zZ8+>;s;K+)Xq4&VK$i%SGMaH;RN0dHK3So1mxZ+B9NZ0JOToZ?%@(YB2a3w14;F<1 zuaa-ex)69(M9@*~cWwW$m1hdmI~XU#Y`&Y0`Tjl-{w^^6g0aQ30Y5-Ml=L?GGgIYb zV_m%O{1<|sYx+Tu!cVf)Qbzr1O0MZ4Rn!|Y!u-@Bt!*|RepA!=gQn(xl$Pz^-IEIW z%AgOGZQr3HgZ#|on)x>&ce`CMf9yX#>^&H-#HOlMoy<->*@)KZK5H+NJjOB) zFNp%{XNJzpX5l^H&km^q7uTpfFD&{1khMuO(>@f)9{Iwh+0}trmBA!u3!@;p5XNk@ z-|fW(ZlOVV4Eb=FTdC|G>>Yji)6%BXUJd?|W8TzPaG6SZT!!zLJSR|>)CB?!;tf|+ zn!R~YP+%z9T${Jg&eUFwJRODty&Kq%*UxA5@1W$qy*@!*Yak~AnF?_I7W`}KwzvsZ zbzcNTPVsXQqB?Tw(w?$Q@9+5Vivj8`C0$u)Q&3Irpm#H|Sb}^r1D!kSHdzbj^@a=l ziYPI@JINQLFru1yav>@$dqCxc{Pz({X|K{98-Ui$KA?DvY_#YnH|WPV9rkr=ndBBiE@4|>eTxt#i@~u z8widnjS2;07Q&(RGSflfJBHDk-qfihqb+-3gENcT=(Id2#3TqaVe3Uo5MKOYIqW?y zS9nqQM^g8F5HDhOc+2Z40nd_)nbaC2)gKJRl~(8p)K{#F6eH8g2DPwY>&fTeED^SL zbDwO>ZlW~)`9nO(Jt>b0!_sQRx{}tU@T{U-ob`d20=#4Ox=Z>O@cBA+k=A-tkk>NK`|n?zFx#0iBcm3z;+xK0QQG$W(wX{LQ>f0g~iJp~QR zx+3~+GMfy?Y+}wpbZ7AWr60cS{)cj1UFq>I!qGYA+#BvHV?z!6ZA!4YNcz2AUc{-$mWoTDP;#Em=R!GjhnEk>%` z9@tBMd6UE*r746yW#mht&8=`M5!FIq@5JT~eQT+-VtyIyd%WBx=EFy5xe=G!h;wm; zD~T=h%)%fKS?}wYAiQ7}VrulzU_H+CE;<|Ik^H&AM1i^!Y;X8=d{~vTvQ7qbj!c!V zAg3gp(zxNZbHGoaKPP$a`snC|3|Cn@N&?S&D&j)*KCz~mpN~9wFXr%Y_%2XE=*UQk z!}|t%3ScdY^&O(skp{VEbTq(gN113L`-*Fp*$e*$7~>L7l#_@_m@Xen4SD ze*ip=-eKGfyA~8IHLoH22(%&I=m9gQ>4hCMo}Y~n0uF>F1h9lZc-usGX^CCz1%y^G z%eS$=b*o}LPjf35ot>Mh>y?kbqsu$k3UB-F>IC!PYv*hlPvm) zWu6?1#_RVobvsYW#ShS97oj zpxci8N1tMISabb(p282G$=+o@injfFYF?^deG( zU%t+Ar>*d*t@@+3j3M>&`Ib1PAU#E~$%6IzBEs7AR?r7ati^mv@oxIW!9{f~@Du8w z%Y1!dNvx_qHi z;PY%#darB)p&?y2X&2JKmgh zQiN3MQaEc)iZHxp4R>5zFCp1ODgxlSm2YctE$OWd-;PI416o6gKv^5ZrR0}MmeOrJ 
z`|PeIbR3+h_=T|%YylD(`E>@^$PzI0#fS!+C<(-I$bVGgkvmKHh8EuIX`}P5+`6@*D4v4e>5~}pede`%@<+#V)T)HV z*Ws^xSvodqt@0~NnopSMtyo~gvCmBRFnt=)l8=u8@)go-c}5&P=v0O!YQ>n_6_}Wj z73S>%Etn{J3G3x~;;{m&j`sFkn(B^vz-jRC?VIGR52WrMExaw}cq{b6OP*kri%Vmj z`0V7sE2ipzmxYR4g~#@6yvrtySceG`2C*3bqDC{+7d;ew_@0;Nsco3Ah+Zy{X2dga z>%0q;R|Hl?GUyx#`$(}AGIanLi>(mLX|ZH93`@0&tqbZb96N%NM|KURH@v_I^T=YX zAa9wYv~yNFEy;k>r{Q~{?8)Cj!x)`Q&|j>Yk6D~9tOVl2Gf5MUgLkhJNgpP9z|1f( zuZ4)2eBzCd2HXjV9jDa4EJlmKC0r;_eWfhI(1yW;us3NG(knqPG4_rQ^Fu2 z8rR{D236-l$@Wmp2|;HfRB7@8zff^<9wF3AkdZIyk_0M7u? zUBUER@0XAYQAjQX(*d}@C&4CZ5Wn>_W_D6Jj2GR;~um8rh_PFkuAMEVy5ii(Hf&W z>Lgfp1d|?Y8!{0Flx$kcvlCA`?g?c$cJ&&2(kpd}g3p zDo*e7=J(~~|9rF)DuKg5L!6?~QVX+CEKswM zkJD-W7x&&dp|3uvm16uP0ws7}GF;sLPzOvHSCv7;ogv16)@AU^Kex*(e{?@zw3LY52 zLSMX}G46tjX>p*o)>(xs=BgpESf~eHz+?aus(0%b4uYf&}1xBa%trc^jF51*BlAT zRnC%kAmK0_f@ZyaTqA-Z%(Kptprsm$-*VCyAf@tmQ+9myU^J;^$_Fh2J%q7-af+-g zHQNVVgd)&c4c2}BuF-~PO)B&*K;F&sK9GpxU0rV$v<=qcFP31qLLF%Qad=5<|m zuxUs!r4nf*s-R-69oO$oJpv3IPkyZjKcjxkx6gl;u9d0cR;#h;{Io&k1fbe!ANLT; zG$G9Qp%2X9nVyRBaff10JaN#*msOa4OXuIm6+iEe6&o4#7e!PfzK3*6T??#6D*nbL z9WZi)9KIEVgI7b+v`ySIyjA#srOujKa$1DkY~WXUjp$8PA;4m;fsSYtjO)}UuVt6V|CJs}PX)k% zdfHHF)y>?{J-x{jBE5~KUj3$L0pyRVk@e*v zs(5&rK?6?P;n>TjVp_zYA9#Iv3`b%Rt8XW&18v;ahJ$07vg0AZ#fBWby`%(o#rJoH zEl<8M$Hllx73Vz;Ovc79cdPe%lOGWH)~+^d*w?isLwzM-XlN}E!6uQy-dR)Sws_`_Sw^XD7&xp zVD+fj^+uXQ%{CFsn$qt8w7CX8urF4??$Sn5M5812Fv-dP6XQUbHYCR#%x=h8V^Wy7 zVasDg8Aw-&l-zIVGEKs&4w5(j;VN)e#A65V%#~a`O;-^7=26&@>T}MS4Kf=jqv)f1 zq@y(907Ssk-+l6?yaTQ&%N23Ao|SG&vQdp{96Hii$3z>PDzhaEe+*0kc``JkuD7Wz z!z0dGTdva499Xmc+DhJbBf-xBOyrMTOO|UB-FQ=cAA;R*%^3osxm<81W z4DUj1Dp=CYIE_j-N<2?u9;R(k>A>0*J;=kRQ*iLHVHI7ZmyzX?$Y`hnl&hfs#~J}0 ziHvvyrY42|GM=Kvs^L?lJ)1g}ypCX!j!(H4gi$M9Xlid$oHM&z>ZvErit+n6Jq5!- zw(>2TR1S@-TIoUsT;3Q~j1BzWey{c2ir^vBF1He(%zhN3!FME6TxjmWK0GT-jH#K2 z80b}Xvk$=_i3}G;e3~pAhmS{B+!d*4aCGF54i@4GNn$LbP6U`H+eGZsG?4S*SQ3eh zH(fl`El#j1(Zd@?$Yjj*lyg(f4Ng(Qwaozo7%+3GY7*cK+)U}(a43eWVbpCu&cL`P 
z9ItX?9Nb*oK>iwki}AA?~^}0~c-fy-&EDS+Io7Dfy5pe~?s`d}@OFh_rX;7vmGSE?7-Cw%_U3M2# zMVd)y9%f`w7uV|0FfVZc_M4B1JE)t-H{4e7oQe8HO3WRgL0E)7Z-B)-Rt#KL;tlZ@-obp~=(j_0cl@G|r2ts^JA zK42l>&INqBS_V^Vv=SWI6|9mp(65YOGQ3u2vR0Tk!u*C^bjg@U^nwz$V1AH4vQhNn zqT8NYk zBShd|!LpvK*6_3XlJE%c;RGS$RNhk;!7>8|F$So59`m_$Q_(O)5FumhN26715l!)= zQlE1(P1>}p^mR^-;ew+h4vL~j`?zTGtHg{9NQ~+E)X*K(TU#a`oX@5XZ??Y8$tw=W zw!+TDCa1qFy1&f4j-4H`E;(&S8K6dbL;R=8zVqj?WiA>uZEtmY+T1)~$pCCYv@@P- z8JMP``P?|2G=?yP2Y&{dz>yi9 zXt*i7_s4G^#41~bL>T`y6XV@Bv%7?b;0vn&dHrs=B4Vcf)ZNRFMQK>pmHUVd99HLk zDaJ}L%QhL=Y|1TQ4L@K{K}KH_6(wu27x%8?ECg82q6JZx);nBhsO%wv`yY@_jSScQ z-*yGwWSojp-%jkSqOOE=j<3*=7rha%38o~RMtYPKkkgv$VVwO@+;y#`XZlyQi)bx@-TsPi% z6+d*@SPv);tdXo6<; literal 0 HcmV?d00001 diff --git a/sdk/ai/azure-ai-assistants/samples/assistant-WhEPqpcbmXadRJmCzMUeTi_image_file.png b/sdk/ai/azure-ai-assistants/samples/assistant-WhEPqpcbmXadRJmCzMUeTi_image_file.png new file mode 100644 index 0000000000000000000000000000000000000000..02f9bcd170a6abbd4ac83d0be8852797baa3899e GIT binary patch literal 181757 zcmeFZ2UnHX7B)&WcF|Z7P>4!X5m7*-Yp?;*dlwau4$^xx3c&)1NRcK=kuJT1f{K9B z`=*F=HcepD!TT(8#`oPZ?q9fP3{Mh8c=vkOn)7LM`6$R;q2J21m5z>%Uh3*aB|5q- zGIVqsxi@db?+6@`%f&y0>@I28DO(xYIU3wCq?0wUvo^P~GdH>Mr-R`gTN5iwKAy8Y zeB6KDwzIRg6+U&!;{SXBkJTOHQ?;}b8otRE>#Lf!baX6b{rXuOB>aDDEAyDI)%uW*VoqJ=w``*wZ9rcFtIJ=khm_wsI4;4$UfjP{ zKD&>rr-tKK@@R8XlHEu{q@`&|gs8L972ku3c6M0-0RgwtHB>TgNj-S*AVDoftDx75 zS+wh-&Jg$hN4_j#iG3A*)UozVX=&+zw6rwoAnt_Ys)=vpB80bY+csmzCrh0jX(TK4 zU>k$o;y!T<6Yd7*+?V{~I3vE=h zwt4FjXX9`0?h3oizKI^Izrx!5M)CEuYMRA8J@SylDY@Nu@=J1tUZpc`BSGB_*2^lsUUxCyNu6u$S2!oB zr6l5~40pRus$vOevP#BEB*|}HdbGc?r-!EPkfNR2(wwB8()fU;@a0EeMfUCbBPn{t zS~fN|`8E29D#rxO+Y|(Q+%mBUv6r7P4dkU^7wqK^h^3?ZTYYIDSbr8j@i+&^r-a4E z#>Q;hA-VC+eC5V?`BuJK(btw;1^cW|C?4?mx$1c7@@1`f5xe1+OEjwM+e_AJ($Wu= zl$7k&R-DPrCNegtZ3JVb?Nq0wc?fO>I@C;4F!>(KVMy4Tb^tg z!S_hQF4<5ZCx0S&{w!yOA8RZ5+jt-4u>;uMv6H1g^?E#i7Mr%FrqsW_lH|TTx1mXz z&bq6}d4_Cs>g@WRq6)YCnm=0)RPEEvzjJA*CZh5EKbx{|Rql3Q==HXgP^Xl7mg>F= 
zWJ}RV|Lf@G$6WqlKd^cY{)Z)1lGIM-wnjwUtneMdK5FPLcGcCIXi88PwjT}jSR7!G zYVea-8_tOg+4|jWWo2>Z#W|Z-YH51ovqGBY=E?iHHD2PtCCUWzxU4M>*p8o$ESK^_+aQFqUd4%CkG+KEJ$elZ+`2JE==u4ipQP+lsy92X%Y?bPuk{myZ%p zY0oq^o9N6hnD2HKaGAZ0;!_wa7B3&w^8MqZ=2B13+$ME@tTw+z2fzFBcy0l9dSkV2 ziJK-R$D%REg2Io|M1M$F<@FUmo_9enb`JeTM;Ee3@F%6>0@^12~bCdsTdHBs@kS9ELP%ap2cpRDgIh>Z^Jfzo9`bEy82B0_@Rg`5*Qhoib8a*$j7Pw^DeRZPTSFr z?4(amnAQKZ*g&W5KHuf&rKd^`nB^ZX(O+z{Wy8*zG{i_bPi){yN*Bon-YZo78~-Yjn^t|QloX^h+cParaB*EVF)@*rl1g_yDJG`7je(~r zPBv^76)Zuo*u~PMHCW#xp>VQ9`?ueIvrI_)lZ~Byq>?jA<>JMQIWwfz^71P9`1nZ6 z$W%L&+*{*0bF}6G9UU*juS45|N%@5;$aEv_0qK2)xFCV?)Yv-~^%9U{K=ch+i4Ehb1H=q!@h;lDe_Bx}0rX%R=_OFiuiQhr)rOzS5s-g`a#6nFZ@8 z=9p`u?8{=!)y6xrQ+zgT*|J60VJxI2S@X4OlG+F=M2jJhUeTNQNEhk8PfyT2BBnDc z1J1a(+ca%c$5yO~5Q!Iaox9yr;_lVTqhFHDpyzZU)9C9Nw)MVD0#zdhk@FK>NmyRy zDjlC?G~&Sp7l$ogN}oI;{Qdnc{G<%40&NmbRtKLFob_@jrxzT)yNSix#revzt_Qcq z+w-WGCA!WU8X*Oald1S&R4-;rg~|YScEd55pDQzdq^PYbqhy-}V~vx1_Hge1Jhdaw!@Q@Yt5KFK1LcRD5s?b#9IKwx zR(;Pd-Yg@OUXE)aFUc3%?b-HqqN|X;(S2#wW@%#|gT9+0-O1y^(=*+JttjNfOliz#_vmj0c;rXTT9+Wwh|+oMYQ_&5tr?&q{=z>SD=R80 zEIqIhf&Pwhb6>*-Ysst?Pa^AE|LT%fv5S3VheF*@->zM|TyToS%)<|G{>ZeGn;MRu5?m6r)iJ8 zX!Mpn9Jg^UTUyg}@?RN8I$zE-u2p-&Brx6eDXnx(3*G(X+eq`7zT6%UzJu~?2{=?P z=yUsa@BVHb3N-$}+xvv5sLoD_mA5rf5~R+UzQ4EOLER1Dm~fjbJF)j3kRFP&=as>1 znRfx--ywu+;3^8^K|MN3k#nX|ZRB`P&N^#dKouGaVxms|9k1yp%z~re-v7g5`h5A2 zghwauSO2FIeF^gpt4p*c92a^+tndp_=L}S%=N7L5k7k=TH!S?d6|>C`|+T-LW#kOmUO>LoBMv8uh{vp$ok{ITfu~klUzXmPtKo zsEJ*ZZHliikCZG=*oJUDj|j6YkV-3_dl7U>KNX!fzc4gl+C$u9q=9{PNaANYvzHU< zvdb#1^$>@2`_>&hT27=Dy&8&itx<>!3X1!=G9=-3U)a$Ml%-_h`?f-sH#j?%t<*rq zHjV>9Ji1!zH|^m6a+6v8(tA=A2g9tGTnuP4GmRpXMX&KvJ{i0UOsT6=Af41f>?S)K z+i+~KUX=~k_}IeYBBRUTBR{`Z;4hujcwRTW9j}oBd+KPPpG3`B#j2;%1J!0z-`;B# zIi+vhe&E~SelU$ZuUp6H=oI?V6Quc!zVHT;2B92xP4V{aH!}8yJjSk1-LP(6yfiuC`7Bm{$0Bi`%|ro0O}khHA`k2WC*&c zw*Q55>R)?DkDITU$HfY6JYY;l;OL27q^mN>B2mz|Vr>m)zXlBbXxh(9ym8 z^C|ZRI=Y8NBaQJV*x5fS%;6~p7t5~UDQ5AnOjY2nRez}ey~U2&@^xaPf+EwA(?UY+ 
zyyMb->pq8ac2lfMSJg;W3#4a0dkw$7ICkL>69_n?i!(IQ6f46}m{BxWl6m9f)^dMW z!A|RcNNtG}b7dN|b^Km|2c{p|Qs_9@GWa#DOU+XaR8ioXph?5UwY9Yk89a_Q18$M0j1Ocf4t6Q(zpk>-oG`jjV$=_ ziJ5=~NoU1TT~pu}g2JN5TC}ZHOFdSnb%q|DD!JYIs3Tm+7Cf3gn99U^{cnSXNz|`K znx4m4+i8&BuL}EFSSF%dAR^>>=X+|RHT9F1NZOyI=FH&NW&i-UW(u~#>O!AiN4{-* z_82-=J?Ku~9V=DM*qE5xAY!Ds6CMxFr}{?W0!m!9da*Bl^yQ~dpFVu_sPW??234?j zvY%&rboB|NVC}PEu8YFE++#T$rU2 zs!`|LeGh8!gYY)_KN#8B*+q^*EFg@={`-e5*ws=cB_+r8ON!7qCUk;}oqoKU1JY<~ z)A(3Xk>vE_`*H1@_}^Ef3}{`Be71wdsGPhNvA z4C8U{j4n^0JqV-ypopJhWo3PSN?#94cfx-%V`0IGn$CUfm=th2v<>A)`%iW@XnS+| zIX|!&t4#Xh&IeS>z@y{;@rGi1hM{V#^gaQLj;52#gQS+#O8guKE|tD^Ef9b1vzvYI z?%l7j;?z&fR{Yza+ZN~9jRZJMcBee!ls_&cq>19J_VMAq=2w><9ch;|zz&%kYk$pW zTr-T~Kw4{}O2Vr!tCIEn0&S+~|8e#9iF4;k{5yGk<>kiBFF&0WR+8>#z;eNGI5INFe+BBB?E+FZ`;yznc_VI z07y2&54CsKZ`wZNd*JjD`JrbbgpqByC={30{$LZkH4UeWSJ-z zUd&kmgsdam3(Ox|h>GXhx^=4(peMMx#X~A5$WI5DH>97}d3onz#nz$Z@G@{@A&^pSr!gZhpyXrPid+4=tOH;RPQ=#7ncIHRGrYi2mh&id{J-7rZxH! z5)Bz;7%$bK8hmaP?W+ZfP%;DG%#p)~FW_u9CaNacegEKNK9r=DowzhNp0qqsC=YTc z*IaBjqEx&%7}hcZ3O4dYu-l*Q&^hb#=t*@@iwx$SRZ?}@4KkW~%e`sTw9-+$lP1Uy z7P&fPn>1SFcE#ZUXq?o_`kb9&RrJHF>+ADtRwavZ1|sH$U8%_#%jTK3V5?O8rEIxV zni&^?73)EFvY^O(4duIHToZ1SXO8bBU^lEl8>a}$3+~>%XODpG;I)&w1u78eb_|(7 zrot(V15=W!8je0mwGJxxqHn}Y*VS`0czcMWvHq+j@bT0K7o3jd;_`@dHh<~6{dmxl zs+*Lgp4M?PtnpH*fLY7!?jq-oT&t+AXSt!|{LPJ1%-Y@>5KQh%31}=ievA@7k!dL( z2R*cGDgx(-zi_qXtZybzx2CTJPV=3+;&&|v=g*!xYuR~Qu;?;q%S`y#bA3W9v7u8? 
zYL5vBH1~=U)eabopVasS<%*F4+oiPy;7am6$GZw!doxh_8_^<1-g52+JBadJ(*!oF z$*J%M`5b=0pjQVmtzSjkcFFSGo&JC0u7$n=$rlpk*syV9BJ>%T=}J!31SR(Pmf(ax z{hT%T&81fys_?}+>&^_-s)IE9E*k;b(mw2w9|q~MPFN%52`iulbxkfxJfg2iTwI(8 z$6C3T5XW5heO3rgEio2~@I*5*0k6UDN0hsx_{pPgwGP+6-o~xi?GRNx3a(OV(@w}o zEVIS({A6pENn;iiE$=OF@g|jXExY*d^j{_TZp6i6->mouz4^0PP$Zqyn}p}-1-rSq zZ5NZKjkP!Wa}P{>@Wb{pL%nEeBQ=l+kKPqYD{o~>NF~*#p5m$r_lu8>HNleohhlTP z9JEVXJpnJ@phI|d$DpY`pS+md`TK?qjBzs=Rt#(u+n!{}$}am>U7|WQG&HaovgH9H z>RT>~iV|Y#P;AoWLQNrMsf0Swk`gAq*uSUTx*Ot^y^yAKz|!oL;e z1~$mC?jYSkI4Qx@I|&I19a$#NBAvc(DF!NWDCf~T^}Vl8;Kql| zvLU>ODPhe@pDtB`(+s1c<}wR&3zE(8?(TX49CPzMu`!Q-&oz#6R|6ZdU9#odMp>{P z*z%KXeUykOyu*R1d-@JHZrtEUF?e!FM9p<>tnzH=m8z@0@GuhaobjQ2#=^xrtW;Hc zv?+bhKq!fhHavb6wU-b?yi60E#rE!n;4=ZzWG9gd{({%w9?eLL$XM=>l9|L@>~egR zls?fvO}|+&3UZijHjnL1gJRTwE{k9U+VE>XQ*>Zw%cQ`kD*R4U*HiRM#VL~?AZMoN zz z7>)k7j~KFv6f76D=Qsvp2k0?uQO1!1o}>8RIx-M>?%P%hGYue3?c&Azlu^eIhn5{K z*aTh=v1i?}Ws4#fl)k!u_WFBTS^awL3F+^fH*c2Qv7GpTMcf)HbJ0XqB9PLgogt5VT+GoqY-&x zrt|g9xFS8l-{{_%|MGG!scC7w=%Q}}951vOe)0F;bcK460C?1fuYF|Y(f=LFPoP`~ z9V{rnk$xK_gGs`psBw$({zsR<#Ol%QEL$J>1ls&l0)K5sE5n{WuZc~D#jw5e=rJLZ zsK;OZJHPey5t(+m@S zPSXf@T~1+}I&j#*sPhs4&MfY;_2zrTGad~!HL2i{WTC;d*yon+4|=uIJ<*X}&FkLx zL@?33J>x-#7vQbf+Cra$k9H^?bYv>TdAvj5q)r~Mz7$`S`&=m8nVVeOHg6_IS^44+ zyc$-Ig$qiTl^Sq9#(iZkT)42qN;)n+p88x}uO71KbW;mz<1n7;EZgNT9JLqd0Ac*91{5{v>B*D~*EsIv!k z*o{wITu)w%7TC4uD7W=y)a-uLoOIXoADSbjK>thyn_OD-P=P-9wWMe>S9L}#{6J?s zP$P*_IymcCnrZIJC55AvlveVhFTH$wxDacu*xXe{v9-KMuM5m;OFNH&TJqd2eCyg( z=`+6OIrCMJl3jb&7vIjfpgUb>;I@RVv6W_S=bcB&R6D#+G2*6B?MzJ*O#(8R`EOr$ z+;RHmr|sk2#f9%$Bd|!|-g00CHcrJUI2r9MGmemgv8KXB8v`QpZHG*uYP9cQebKlS zQ^wdWcJicbTG8~wlBLlT-aa}YTD5aJ9u>ZaT%a@Cqg{)ph@|~$7e{?{K#D&TgRpB) z51Nu4Rha3l`R!rZQPeD9kCnW}@;$`Y03#2GdHhIT(rddSz)CWM0O@I6+fD09&blZ`hj;BZjQtiD5 z!7@I}ec53Z=YT+i>=gn)iBAAoq0^uk50}yHq=E9;`$-+ls~N(rIYmXGyy-EsV2#Vs zQfw?HQKz?-h2v+bPadGPn?P+Nd+)~0n<>?#lB2;nrV8!{I+>07xl%E4!x5l~G*Wnm ze_@e>!JD8+!Q@H-8sGv)0?)`QRJ^GMJnv8(Z8&cDrwBM5sU9(pnX4dXH=%2$0L8u0 
zNWTGt@1d`64)r7l76lJq)b+6y`<@!@&7y=QCk{8bE$PINPm@a4mdwiQ_+)~(6+vNV z(S$m(Pf$yaO9936&2FDZ%|CarJ0AXfzGdc7*%$ov@M_o%**u|~cTT++rw>fdC_E6R zerde;>nMaGhvK%p<%Pg7&?eGOy?3&n4Qx%;REEc>TIevYf`cX1fSs1*Fs}9M-S*%6 z8YW16?I+#<3GI7B!Ow-hov6tAN}uX1u}kU?Id&QxDj*sZF@E@+r*DA^y)Wp*K};p2 z4m~)N$FosZaHQ9pwFUIRNHza8DCo5 z6_^bI=#>Ii-8v9Vq~+yXVOi49H%9QCb83Q7?Q^?bWMDSfFNME+>FX?XZ1|?UUO4nK zn8{g(gVwKKZ%=pzF@0WNev&NZMu;7XIKNf5X!(;$Si=dhZH0Ceui(j;S`>?%Y-rS0 zdGruf?4s@Ee{jNXf{a5b+F7ga@FBM8GV6sY17Z%~CBg!1`un%_c4YGcD^JsC&p+O% zCl85rVDc)JM(N12>Fex1(L>1BEBGW1hN(OFN??Lht+h*ZNUB{vOD?Ddmjr*C@crl~E~1CJ0X+p8xVArBRD z27BD+6YxOLdNZ_J26$>B{U{u#mzS3d`M+3MqLv(mV^gJ$v?9<*J$xVfo@yvm z4ckr?y#%BE6uW8tv`g3Tbaa`&Je|FozL5cm+PSIPt-WtbU~CDwKNXxAtSO%FP72m@ zshoJ_LXL3TO+T{GOlVrs+vaM;m#FF^a1pRa4<6UN*{*PuXqN~u2AZ0 zBpM)`y_kT&4&*6AK4QsPlx~9LcUXC4{bxP|2L52YB^zYgo)Jqa7;yXuagI&uf24X` z?-Ya$m-&elie;fJ(OVDSy#-exzx(YPC=Gq}!otGsv9@%FEslghYG%fH?PN_wz~T0X ze|J6f@kyUZGRie=KF-e0&RBIuf*O8{BNaev1XU$N`x9sq5L^p*#`XaKu`K46Y1G07 ztbk#j!Wgg=Sp+0k$a@2~Gt{sI5i{*7$VR;Bdh1-W1u<>6S7%%GbSI44so&111Pha9=fLpggvy(j=sXES zI3Y*Kf#Ev3bcOoJ5oZRH#_f4A-T%3KsEDmc{X}mR=fiP)iPT7=%d^3!S6J$az%7_1 z4~2)IH!S>!=g;M+Y&X1GM2}Qpl|)#Yp?AbQ4*QMR7hMWI9v&VABXMDNsE}xFaxT3O z7?K5z5XhlHwlPHw5b8d6eE6oG+}5SGzC-{zVsM;hi*15Cw}<1J0(8&u!JUwmkf~~l zmfYkHiQ3CH63$;%#fiKiYR9j+k8dwSz*uek6=EKs;Np0(+U6!zK+L zZj&}TqKofHgS*hd?!L6J@CJa~WhlZ?1qPd>-#(&e1CJ1UFgH6MtJd-pg0w~6g*KuV z&ldzZ?`VZGEgODL5e}_j(ZtYDJj6lGXB@H($Oy!u(rM{yBM1e-Apf8%?F6Z1;plZe zZZl7!lYy%pBs~W>s6$+aoga8KGh({j@E%T0eekPbalM8vm_C=~v@~l%Bx$B|)-Sb3 z(-flbEzM7+0XWwvctB~BjTF^{cBzKFj^@9o3oA=fO`|O-)TvxO1DiRw&bfIP{ApK5 znEsFNA5}ArWF>Fp@kyo*e#GOAc{rl#QRwRjkNNK2UT8ORLr_2c#z#A>foX5qgO&_@ zjb3Y|QchHUVQoGdb|RGTa+@P*1RDA*0$XK88sC~zIA>6H-z`_56t&n$Fxh}eb*N6r zRFQBVaUz)nEt9dG697mS<*tSC8TxZh8+(@mV+QUov^3Y&` zj_&-^=h>U?Uz=e0{<;1^6nwjUl`Hu0v*?9G3$r%RVTd?TZcTJw2ZpZ}gjIWO)12?| zEG4g6=TxNk{rkg^9?pu!7D1jUWVw2$51WHj+drl(8D$`z}eyzfxA ziNdby3?v+p+6P){BYa^H<=bJ>4ABAF7rxs5kORA5fE`ngJK%DIU<-A9_W?VRM+L9e 
zx`l=1Zq0pHIe_IG_j(6&`9Zf+-;;w@O2ii=Kcs8JYSHXPb5RxW&1&sq@HmIjU75g2y}ZFzaBRAg|jK=K^*Z1vKcagRaL07r;29-3?flP zh;~s?u+1!Lb|Vog!I#IqHn57j<#jz72yf#Mk@j`{{PfIj3B;#NxiV$5s?q@PsDST$ zfdmygv&6MLMS|ma1lrzGFv@lvz5xr(Qx9+PeILjtjI#_Q^yHNl-7bBU@oh3e~NO9^0?F$L3YCvGut9pY5+z)eB|o_fDP z@Zmhkzy3PNA#FH-Eeo-%exh&zEkVJ}t*F|kC3*T) zqF&RCDj=0&(Up7!UHQ&m8^K2j*3O9xMIrwi&WOU5qUiV11X{mUMTHG~8F;Gyi zj3|$#%8>!~1pQJE9@@93`ozpPZ~+T>kTbDDjIcl5N_xgK7xAyv_Tu{b z`p`}(3PxUG>8UQG3GsIhmT@S<()#6JYs!h;-!~}kT&$gcC)Q)3cT-_0K-*;!|AEUE zgFV%L=n?jcWw7gYBDeL5C4-&$7I(ID6zsqFy#kMCz7T#B-4lLQVagiPBBXB0&3m9& zsgm#~(HqE4A$~`gpp}ERYewJbV&3?of1ziRNMhsjo>8zQ7vH&YLfv7vxb#20LX<-Y z79ZYrqBNu;TQgL+I`Iwhk@(-IP~9lBwXq4d^0FIYTB_8BsD{z@;&bB_^A& z1%iY?+HTDwJS3X~f)Vb)`uf$YU!g;I1PY|WA+2*R;@T2E!GRldg#Q2=gjW`;Qe zQFx62$}_pd-MI_|J&R}3u(~?dnco;YFj+jW7d%*-#%xtoU$F$w|DG ziH0YCi%jH6V#A-_PG9IeV-)W?cns!mWBHtFidHl4{eVE5yWQ=aIaUQD9OZU{f25}l zBlDx|b1M{jVX^hW7rS{e4K(sZMw-I`Ao5ccSWv;8C2XK=NAkJy%sB zr!^suf79_<_uR%Fn3U6PrZY%C*^f9Gg>^cvEZEGSee?J8W6Ga;6_nadT9QLcR_C;- zc5)Q$kmY_F;b%NwSPicaYH>C~*t#AR) zBf*voET~ysvN*_$!dYwK!w3tIq-L7&CV`NuExD4z55oN;r+OlZSnp<~Y(B{bxJJ0kt;@hg%KoAlsW9P_u>? zM3N4^=|-1*UCtc;*Vd9E%;$Zp#t-L!ikMxl+)w;J3CnfaQ1+ntBmy+55^Na6t@#Qv z%em}M#EEA~91X<<3X5B6=^^tpllHmqLyr;8X|(QaLs9{m;ah?|$lPo%_66*!ANjqx zEDP^ZQlvWYSf7f#cIa^ce=Npk=W?&a`t-Yha;LewW#$&KnHoU8^Ggx#ls|{V! 
zVayow6`8O$aR+=OTEce0i#TRfUd^hr=?PFEcv57l@}AnD!f9CXunOI|sGh20lPKs9 z)i+OcMNbZDBNqn_V#{ikh zrouPXss?$u`Qko{{131B99~~}q4e<$Rn7CQzyZicoMBTeXvkE7FTP#!Mn7QV7k}>O zG$~i(ps&XZC#%BqT$16S76Xu__{W%<+^yh8~;)S^XT%B);xeG&K6!Dy| z_94pZgu#F^HUS=CVnGIVf6(m0v**yE6v%JYz4n{r#K`No6z&;5Po>vGdjzU24I{f{h5PVBa zZ$zV&^HOe_0XSf0#WZ5ajvWprpZl-nR8Yn|GTT^Y@c5=L?pGw6Q+ncLnSuDD{C3fjzmxc=8)_lV>mZoJ#)%)r29DKrLT4t>4eNl8m* zFmJsMDGhZmt(scx^4l_A15!D*rp#SWx$Kd_1Q@wSghuicg1t|sYj7jA@VW78NZFvd zk~{~c_FerS>ya{V2EBuJ+mDNui^eO8@k^M<34%(r51j| zLKapA6VJMD;gymQE<&^4rvJ#wk-RHl+H@6iO_hBP1BfK#4 zcKcv%RgIy}oQmC!G(9{jmqmNV$E7YZmSWLWpbDd{PPD8!(xu_s|?bCg1GKy$275$KQNAK7ZZ8CpJf{Z-D)D5^r0i za$cXK4yIY;!p}ux9}N{GC;hS)L`dNN{L_6*Os4AlSy?Ar>~D>a98rF-bzit%cQdn= zZ;(xs7ZNVz57?!a07P~4f{#MS%A#tPJw=V|Hxom($P`~5z5UpEBBm-^@XAog=0(Fc(vp z0stJJQ>lj86na5UTgp#S`!6TnKv@3p@grj#W~J06twf!l>zWgMLBjJi07|9~Ue7f* z5<>zm#z2%W84m*;9}d#Ctenr6HL#}feYJY*q~$T+8=jND?=oX6&VqM# zZX$m5RyNh!)_{&q$6)QpRg}o-&-EXlplsyZAMzc|EWR7Q zhZ0YTFmSbLsK4~&UeT7mv8m0A&Zdr_ev} zI2B41WtEt;ND#Gvzo$X@Q-+0KJ0rVAea~m}o7`!WetbX1EQ)$TnB4Bf8po}?E#CEw zN)3yjG^5N5iixcjP@jHce31UrdHTOT%Jt^{r?DDsgvJDbk<7iLugS}+L&_$DQzVLz zS~$@F3&l+E+N1W65x(N?eRJZPO}``YgEmhTQKOo0A|WyA^<*0R$?UXU!DNQl%@E8H z0R9ZlPUjSl+~V-aQ^x9$9Ghf&*@JBuN)*aXtzZP4X@ZN3EXG0a;)5iz6}yBnuFg5P zVI;R z?1$?vV5_UZ0VH!e&yTB0BWgQ6FBsVX#h}Ye??{m@U?zQ|bC+Por(J)q90O)2c}L`8 zG^$C*+?lgEvh4Ka&%I6gGIB#FQ8gNg6bAsT?Yc<_%Tn&#*wBFd%w>ztJVj(o>)|H5 z=+aVuYjwU6#a#ufUNJ(AZtgJUqTP z_>d6Bf^-kICaKq+v^OT=;>S;)P(-C#GT^O9l2Qz*e|(N6RDBj>Tf-K@5@K>N0Gh^6 zE7zf07(0C)N2p8{7>44qcE@hVWXZ;rXh@MnTEQ^^b`;Wh`P zdDz)6VUwqe?AY5ca1!aIYiu%vP$q$Qn|9`<8+`T#XC!&;kzSTN82G*MMmqpf-VMj2 ze4IpAtmMv5cVE|0Z!O|mb=IOI=H0vV!7T$@05b)y;p51&y3amsl-zGLeuH&R{^DTv z5QC$HvuAPUlhcmHX|3h{Pd9wk?xqkP?aysLJFJ-7omc5Q?pOx2Y=_y%;3(-FchA+8 z7156Mx)+WOuln8W{+WKLxV?EAVyM~)n^o9>r_R@yag&kzXkLMdkOTw9EvD4_D> zHbvRLDZ7RmU@g-XO0tz0EYW1WP9g)yn2spP8yJ1VBck7qz&H{Jj1qNj!05|d=OXgT z4d^o4Vsw4If-+_K`5zHEi41y~IIgN0IEm5yom=3cREvc9^h-byuMx~JGp=^+iN}Aww(0mTKfY-?E?CiSy1jA~@GirLi 
zmvVZJLLNIFQ0|G@z5WD-1IE712WK9e&woX#+Ar%UG*$eqW(4TpkO=@N+{Ep>KA#V* zLK%RGc9mK2+?Ndb{p?`MNR4GhbI-(i%8@Y?IO@6Npq6)$^=wYnr3go0)&%sRY@BLy z3AAynN_2I33bsy;4BMTde18jFXlJnOLMfxQRhTx#t1OD<=HZcpw|~N)HW2r6_jYz& z$BGibz7E>E{?t`&oDIu2<>aJ8oUoBu4TZCpNR*W-#E%|PGvrIrNy*Z`M5xba~%>-wo+gZ5R(Y<_Imq|3s1~Lf;)p;10t|bHUp7$r^ zck!Sb@Qy?7gpHS{QUPE4kX~MmMQAdN6f;4za6=UkQLLgy^^v1TN$xbbMYYZMpwNwl zWSrUW4SU)!0I=cC(E%4CPm@U_Oft4)U}q-6(35L@`r?J@fdV>4hW9ve$;)B?ndi+J zhBCpULckmb1vqRFt!Py^v%oz`@J+n7{Un3vkd4>YjVEyYmzD;A$__$C`Q5;Lg~R7v zS1Mk;`V*cll9UP{AK`k)SyWnb30^5rxBkMH2=YSw-U#2vk7uu&t;vH^|MI>aC%Yj{ z+@G85v2wRS9dGTjzGxk(385N}Vh$Q>|Q-L{{* z4iZL6Ben?f9j8^)9f*~I%qxliwPfHhwbMUP4>?HV9UIZ}CRQA&_3=?289|t)(jg)| zrea^D72t{n%lWrgAXjb8;!J_=MFzS%-WvRqZH}aF0fuVFcc0$+!s4C*CWBZ!SAK{# z+fRMFa8j?RO}H4dC{S;|7cj2l!A?d#B8klOoLolCn9MkIS(noj>vR1Yt?tUfoK|8j{b3Z@7|IihQC&B6Vo|#0X zn`gujVRF(sBG=6K9K^(%;-AYm&>Xk#{HTKw9LwS6vOwBlrel4gAC$|>4eg0=kz!`* zsPsaG891JdEK|f5Lyy>&teYdrnF>$;yvsz@WWN8nRVn}!$+X7`BTjD?Vu{a~Q(1-E z672u$=T?YzILP~hB`V@^oD(O}9Z2LlIvJrhm+wBDI;m#h+dTG@#UMT99|M8)jYR_g zwg_kgizeVSn4%G4F2PBO=eI?6@OCRDm=EDZlMo(hNhU)SpfCd$)JQpryCH}HrIjot z{`tbbEA!BxEsOy|1skg|$y-}+XW*fI2OJnU@prVBr>mc(D>n5|t`UJ0muFaUAr5mn zGV+g{eay^Bj@`}`y=D-AG35|V!W+8tIDF$?y_qSQT$&iwlM$35E<*V%>lskF)&2 znJ>vO*9h6NU&2R-qg;Un~cF#FdOE86pz)V2i-fa;7u5|ip-$2U`ilMv{eXc zL1G>#v;eg=Lqe;y7bcFMDSBwMIkLkKmfVNnB}j;w!ESA%&8i-dyi%x)#U2kZAW;oB z_jpgtEDC$B6-ApPSh7KQb{{*1cuxwDm7o2aZK`%|9I%Aonc{nK+pn zW0LG6Z_EPzbK9-Yl`E0kps9SStAQF+~hh@RoX{K_nDsDr||M~5_Szi zxRRPWI^iU`alq>{BPNsTP$I{89*RK=a2_oqN<498lo{MG^2P5sp(CjY?wfH)JQ^F# zD?U^icPdn!NXuJk(p1}EZ*W|>g;^(y#rJb%cNDFuDY;B@ zwHSXV!zDgCu zCLwKXH$=~=$_Xxi_3k-KDIAqJ;0eYm1l5PE^~jd_($mvp`EMfX#31!RV3)Iup6k}P zt1MY!-sfvjQqWUo6lB7cLzZK5ZJG+3paqRtF2vzt3Jg~Wkn$;@PfbPU;f0`;CD(~J zAN)X=bPru}ozipmJqh0co7m2d?$3=illdg=(u|P_ z$#(eN7!I6lnkE(UaJ$5xfBte?o5 zNCDN21*Gew8&>bfSeJOkHXZc!dXhI9zEOOuGQgp$l0q-!~|Q-teAEFFVnqf>fWyb zR#N&hihF|*^ufl0@o1Y%17fRi5ResAwesHQ_V=hPvRQOs2HJlamsFt}8r)$UH 
zwV4fDa3JOi2zzI7r4QV%88aJkWZI{fim1)6;cB21Azfo|LTKVQxXd6{%8XAt_0X+7vcA=ydh!zqlMhtgttMY!PBk$%%b?ibxju z5IoTolzzC2k$?U5S7&?wL6`+?mIrD{JV3!q;MBjrOCIZc{|_L7c06Q?ahLehh0yIr z@$&=%W07aazmW8BI8zM|s2*_qD`L7yI^*gkavx2hp5(SNq&9Cm4IdU4FHs1H#Yj;W zrsqfofegMuW*t#lHwDPV4>!AT-Wm0_oky9p*d8g247I?#)nVE%2v$t)1j4rpxv5nD z@2;cUTgAx08*(s(n+|6@3lAp;n!+A$G~jC+jhhjg>LkD8cX0j9@Ils?tLW3@rkNg( z<*y3j*Wdm9*Q&?w7vV9~f2C6~k^hf}hM8r`IQgm%Gr=dld66CO( zjy5!MrHqD#hA%A_d1;a)5?`5;G;0UmB?d9M9RdQq3NfP@DA4=}FeUdZbih%=t#?-TkvEIW zRp96|v)8k+8$pcDlUD_X0m^txim@jbZ>{lHW4JYjU~qy&bl_eq5I1H2DLr$ipR1U_#iyopt$bZ_F<_nfk_ts)37I?^V);}YDctAM z@ATs%1DS*+^KH--6yT;LR~*Jap4?-BDG_1k=@=sYfStGaUnBhVxAhzMPKhXGWg-8`n>C3=BDry5dmvSM zw)0<%ebsNh*_Ir>m((AyGBU|En%%0G`HOj#u1r6&-)ur*YsTDIzG2ib0&zs6F}ruc z==+rmY<~cGsX(=`U-~U#>;oVJG9aYVB0)^JDtSPRS}4MENa@j5Pt$8g<;D*`OJ@CA zZeCU+11P%kxFQW-T#Oby_KMy~QcoMU?QdiE$kV>SeqbYR-$TbGSIA|X<0;~#3v4e#i6cYD zl787_utv-^ha6}S`8NQzVGD`b=+wm_9HHb z7)ILuWFV~__bWAG^aerUGl)N^B6OyVE!l)n!2>TZrrn>&{4&bF37Rt@s$G+T(5nUu zc9Lthh^GZvA;l1rBMnfb?NE@R)v6I59$i6$*E0+S8dre%6NL;lM_9n_4t|1w!~YfpBEaGl`@BA#H?R~)*k(KJ z;^oVUu;rM9?cR{qQ>^9$p$}%+CDfQ4=8ZW(=MLo`*U^2m`0v8M*XaKwg-vdKDUAQ~ zCoUo*BccIkcskK^PLQjyx(^9EP9&q)@lgz>!hhh}1(HrKz~nXNI^RIFm#)e?;EcsE z_tqw{5TsKln}osF#6cy^kT+`?$EA(+I3*;WDWwF3nOu;7aI_HZqTvNHyhD5ta#3DO z2GAop-k2_c#1b1A9&X-R1F#O`#3nzQ|{_-C(D z-}moX*1dmWoUBgR3Y+se=o(&mzf*XI`yHVZxVnLS5M0I9^7QDH03YN)_<_mOmSXGsCv7Y!9;sz#fzy(=Tv0h1aJ%EpxNzAXtHM5zb_EKjd^@v+t#fC zh|#0_X1aHHBE0U^>-)5jj9=p|H$#R?HY7nQ1m}mW*9iXUt5j0zoqX2aPg*JFc8S1h z)=tKe5Jpi(H}$%;r&3IcRq3+)<(w_O8$uUOLlZ*ZYCpOB5wi34QeAdUKL(7^A{A0) zBZkZOK07#R$Wt+TaPOagXrMg@T%HwJ_ueB09@X)Z(QX;J%AF{OB>H6szly>9b!L5$ zvn{zp2v@b`2K~DA4j^ZH`NRA$i+jCbK_S%qe=~^>E}4P5)9z!yDgpGMFm|o%41hOm z)Z0M#UNcFZ!7B3q(8uW9;|!D$*+yHIJ67;ovMan z-c<<|Ng#J*rt>T9y(Hov1c_nC3g}ckf)Td*cKc+@@98LXob!bZYHI5Wl+vm0m*G|cSW(cMJ% zjds9`+-dY2_nh7)5meAW6?i$`GZ0v0;c!>YZM1_@h72z~nMl^hl<(!VKgf_XJU99Q z(6${`Ys(7@!@#1gWW-6SR-DW=0r{z9o2q)J>4O2{(g7j8&D@ZtkEmS-#k_72@! 
z)nhgr8UG*l-UO`1we25X88fr7O_gY&$y_wcY)MITX)qMcqCusUd8}9z8Whblr9mWf zlLnel6sc6wKoY9&cP^oxZFrvdd*1Imj{ouR*!!s6rnS27>%Ok@{7vV=DL1~fC_p`Z z_eK4ad)DB=s9Yp}g2P+Cm;f@J!Q{94Mp}JKgkV~ddl@bsp+B&W*PGWsuYsLsll!35 zh0knGftT~Hw_1w`2^2aSiLy(v%B5sxk=^E|D;FKmvWeTqgkYO^jRSDLjL^2rS`-m; zXyGctw}{Wg)0358iB2_UAk@eC5K|YSd^!}n^?W4y3`3tW6*V&-nxMECcA4Z-gZqm?}1<(8=r8Oig3BjT^gq&vCT)Y z3jPor`DSpV@ImBk_*ox8Vf0`96K`OPe>!b^%4X;%K;0%uH?D_3wzO{c8vKrrAd!fK zxJRdDMVF0&ha9wFgHs!d&p{p)kmp?vvDcC>bFJW>^ILl8GO9$Es-ox zH~ZZP899DDwQ}{S=){L^^ixNaVq%TuIdrbIYCG~_*KUPN+h|wL98_^AMjzNsvfkxU zwtdzsFe&I!)}np0eSD1KW1#4!x|Xb*_%&x5SO7l}4A@G|tS9RH3%F&*P|xXH-gE#v z^A4w}4^SDPOjC!ev@mex2&DcdN6&;|ppb}ZG%HI8ibe(WrW!gzbv(CJR+7kLZT;1T z#8Rr_0gJ5$enUn54(FFK*vToc(FOu;l2x~~&Gs!w$J(a2O>ar-5R`tt4fmQL*x*Fd z!Mx)g`-&I+1vygIp<^WA6Bl|daDEh$$~$)KI9bPg6YmpZ7!3g0QO{oZ!=49PhmJGs zVI0#7dOcGJ&03d~L)ka9)v&>lZ^Af_G+YSUXRRB8;wlBHDg<6oy+=Y8iE`|i(I{q(DIgH4 zMUW@#`ZjIcW?Q&T>TV63{NO|NgdI4(C2>C#i6g-6Ahe%q141{`cP-3OTnznE6w;+= zog&49Aut}BRqou{Amp89O^Ok$U2H`G``b=VzEsGk4>>4G60+P?(20xU9 zLqupoj517iwoxd+T1ZYbapMZXRMteeM(0mR<;U6|nm*#W(SlA=3<=urs=^&~iT>Jn-% z&;Xt=(^Zer)dp8iYf9$4@uSeSlgUQh^i2B%#V?_@s|Dm&{4NwJPj^n#NYKAh&nrJg$=6uk^iWaW(=|H_VryQg z+E}Ggx|-D#L=U$Ugfwuyp(XXifFvl5m}0Haw|$GX!Wn+Kyn5fl8%cOEiVnRP7-$RS zeK2xT5$J^HRRxMJ()A@xo1>2g6$XaxU@QLa5H9@M8d%K3Qbootr`%ZdzEK5OD*5nC zY&<&yeth~nu!h52Lt&*91^o0C9xD>C@-l5nx;1N#qwt$%G_;M_%=*MW?c1sGMff#^F5{&_JJf*2*a{EEft9!#$@=!k7zd6L*vh!60A^C# zL#E|0Io$T|-_KJ66LJ(GbExj0D?bO$frci9-)GRbzpA|9i|jMbe;n8f^RZ(?LWx?G z)?C_Ku_>!!2c(V&33GyA!vVF>5pWvP9<2pHQK%nwa(1Yu_s{vz_2>NVjO)rsR>0{< z)Ii}ExFk5XI9%Jm&F_5svy8zbJ@)s-f9YR@&zQV43fU-+BCwZz0y95`k56BICjEcT zhak^;$`NGPAHjBSinm7FG`fdi!Ylw9uLJJ^!p8>dgv3Mik|?*s2t}eKShOG2wISbs zTJiy!hG3sEs{5yZ{iK}FsbGJ6&sCG3B4U4h{CB?5)MnGS!jGta)I||c2@=8(>ht~G z(d*5Yj~gTN5_BCL`X6lOyDmXU#}pR!=yQkG_px8NY1Et%FGtN8h|l!zx39ZDTa7sQ zqSsHW82k0UeE&@<#?r4kcpWm;LVXzWcc*Tcepg__aD1kJ`MSUE{#?N)_xG`TFKbua z{JbBTrczQms0Rro{MYw1(#vhgIBrPW(%2U7T2xEe=->V z#y2V&YX>k~7ovu4wi(ShjJLJ_H*fJy`licTGS9f}{hOMs0ECLb=ZUJP$p+IsetcB) 
z|LIy9OnDUmNl}sCtRHpE)nTNEtK`UIoPWQ4{q6Iv6Ik&F1hG});5u(PprZBg@|!Y; zSKj2oXZrWs*WW(N{^KV955B?K4w|OpA!_;GzZj5LLb4+J?_7+JyHdCq6^b~nWMTB5 z`Zw47DSTYk{C{))eaHXx>u;aiIQ9CEyTQ=k!`UEzeCOZz#;x*#VGPDL70BJ!F645> z1z+=ri|@Jw5soiwr!z)g<~}>9nfq*Ce5QW~c7NUdIe8%Yw|aau4aV*U6>nu_?cer? zUfdC2vI;^MzOz!ez7Nmc-#4SCPMx>uviAH?cnOZ9Y4J_{~Pme_I4jPO>*Fev7>7smw-5e+G=^Whezmo zM)Ks95H}9^6uZobxB++xrKhDzuBpCC49u zw}^J#Cb-D?(-kV~0BVo}nXqTHNezh>y&l!Ue`Tsj3SvMG#4nL3D}+&y14O3eCdz6> zIf-HkkdqvZ)saI^nr{6NRG_G>to>qvohl_X0wm4{yt~OS2ABrTcmZTU0lsLCPcx|r zB4Gv)AOV*y-$gBCpMn)n6PanuaBAI36QiM-i46-Aq&^Yg53GenAlHZ$Kp8Jtr2l>0 z;ES85R(Zue>~40!o)n=hdTn){cwKyzk6Sd2MNJo3{LocZY3(E~7cT9jE7kz?4=xJ_ zAY6w`Z9>OE9ss4lLZIdz&UJlClYl7R$HEU)HNnE->nQAkXc>}*`cX|uh(BFdS~L_0 z1VMd;mqo9QYA}22efRm7$t6L73Jcb)W|J)tw?-Fgo6BA~e$q*Eytj(){$gL>1>IsX zwA-euPP2n>H(!GkNTT6Lig*i8iY1Y4k5M%1*r=x!oxvC!TqeY#T#xtobJS$9gTN)4 zVNJF_=j(Z)0s>BodaeX=P7VlLM-bldcvDsbC>%C*8x#leB-cm08QjBSx@33SL7Ebf zQ~#cVXn=c4_$I*tKq1ezL`B+dAHry~eX%59mM-^us%FLkrMfAPg01iGcJYHRvJat^ zU*ENM%^J}wtuZ;eTXGOuZEZhnX;hWF^H*Pn5Qkl{^rDZq^c&2aRiu zFBr-rk?*Zb(Llsrf`+hhSkKrNzhSX69b`eDT8}KhDf+v!?6tP=WC%VCJJPT%97udl zNk5-lz%1|ZO9R_b+y!l93`FnO91zE3-mwTV_rPByi=P1gHp^3mmxhzG*wFL(GGvaN z4{i}e^+>rlN^O7$N{oo8$J<=KY#H?z_!)q4ebFa`5CZ8PaJm+G2*q7K2$87&0^lUE z(;vJASc(Wpt=c2vaXV?)6*eCuJv1gCuFe{0f32y>nDxk=cO#$Wpo(V~1923I9{6ArWA(1IvHDXZ)8fYKIaUx|P%6~2!r?AY->C*-NZlB9# z&-eS&Nb)hzFhHU{l``maA3={EQ&Caj6fJ@SfI=`x@eJYJYHV9}5**~N0I0K45wcR2 zo5iUiv(UX2-rrlbQu8Gv3MPQUDnL-@4&P9a3Jwu8y6pQ$#!5(3XaJA&^fkDUl&Xa{1PKj{raCWvhStf=c^~=w6TXDO2 z{iT`Kntr(w=tM2AR(bK%XahRc19pTQXr>8Rb4l~{cpRr0y?^a_ZljMn_8%H`!a92N z=#H~ZgaDXnSfWy-7?5f>Yz{(2!ejxy7@l+ieK1lY)ZjTs9m$(pS!*#-?n_s7duz!= zDMztaU1y}9hx8v(SC8TZ#Y5DdzP#HC-q%hjMf~6O6uX4-kRjvxBy3oGf5Cx6E7$MR z%YsRCWVH6F%Zhp_%fjs}K3hD!k;+a84cXIIKs_<4qsGw7!nE-9^;vqtmVUDamom?_ zS-+ItYmsH?GjH_a`K1P95;OlUy}LkfcB$bckL4SEj=rq9yI^dGMV8v>#BjmgO6IB$ z%R?SiZbIhy$tNLeU_9RBbA7Xqd7kRA+Y6p-@_8ImztIQS_30BQHfWZ#URPY+{Pj=F z8yjpPeAIt*soMJ_A+v!(%Z+^St-K=^!e=eAbno-7^2%BaTu96?VSUfmUNj8DK 
zQW)z5dZ9<)53qlJG~czCbdn^Au`*7D`!Wpc036hr>Ri7_P^MX%E)#}A=pwg+}cZpNt1MZ4+E>L2hIDK+Y5?Dt9ybW3* zHKX%_&BWLZ6CB5OOJ9NoT@QrfiW02acf-#IfcOsv&&py$)7E7|tY1leVR&Fr5R*mk zzX_i2VaQr$jP0z+_N*|%}k;A z8i6sQ?p7H}TSBf$&(G(2HSBlKoUB6CViai8m3+hCpMb|-yI4Pj{Carf7?MHra-e+z zsvd`t4YbSRh|ZhiVKG7-8=ulN1*3-c%yqLhA?__U6xzD$NLIAcRv;Vfq5We)IUc5l z`@Ur$m;K+rdlQ)|?M-mq5rL9|34c)HO-6#X2ChzywKIM+Vb}@K&9NSlrDx$<;Elry zfLkd!PXVY{{3$K49a<*J%;%&C`kHRDwC!^#X^SGKnK9%&HNmw8h2t-q&yS8NTqW^1 z)57pr7uN#$thttprJ~*CaqL+9E?KDB0kT|H0*@eEA_dtN$sFo!utmM84%fSu*xH3{ zUb?6lOC|!@ML#~~jAjS72${Wn1d^deuN?SYRM#2It9c=#J^B=78Jqc`ph@U#1muX* zr|McRYAT9sc+tX$0~UEY#pXy0=P*SX8z%Ut!<1eOv*JFbNguqJ?HvZoT(#ACs7Kd# zyP)A0Kmuu!G~QjK-&u$d(v) zSb~{C{QUC3?B_nP227^A%Q%Y=W*TT$OtViSKt>F&0q=bky46`M(w2k2j*H%j&73So zOd*U->^oqnl>~)ty+^90R_G#9;pDALBvl3Onoj7Up%d5*pL*5B@%-At$0(3i*71El zNCDv0RpvF)09yGRUr*s^aNk9Z3jLat(VYim-Z!x*Xjt9+*OJN#G%~7OW>JHparSkkkO; z`115U06TTa4wk6&u)7A24){>b6O`Ox1H0|+1-y_F7*J`yqj_6lHl)pjA0e>uW^b%{#HB?ev>jk@u1P6@@dU^nmVyy{YhOG%OF?;)rX zLAi#uhsH6!sWyXUkkroP*nT>g|IUX7PZ@wTY+-faH1XZo3}H3j@R@I)+nO#L+KukV zdW1TOteKD-DZrHR2*YAXMZ}trxeh=S5E!>YTurb_s@VCS6hi?_6TKi_G$!>QBulsj zHZN}XhjK@|9&(g(6r_gYo$CErN$t=WK$!8a`_WqgEFIQSw$2sf3y`dZp>CX2;(%v_ zVl4=YC9N_zTOn+5q0cm z%V&}$YD*E%i zd~%SXKTB9x;BVx|$&`*{06F~YS9Hfu(YGGg;wv2Y`)_pP)tt_MG-K7^q0B#Sg&2Xb zc443AGtf=Ltp#wc0Dc&mXFRJALu@mI(N@Vl@W*RraKa#e{5J&V$o>cziADxf=bmyN z9S%N*UjI*zj_Z=bIHTh_iN`sV3#I9w9slRLs>f@Syq%IqdVKsl-*BPvK7n&dY@DNPN)SA_`|Cn z`cL8bp?7*bc_<*i!tur*o4AKf=6k_`;~|?FH3}QbfImB&Mt)hj!s=jI7zre#5aLf zG~}pi!JqMa-K{VOQrrn-L9YRYk)jaNOpLN~ChQ(qf(BXw-I#H`eupvaU~?3OE7CeX z_}nQ0pf?8Z0KGWD!;}a{KpE_Am?d7LltSGM61Z6q-OoLZRCghO6TpR&NQnt#i!&sm zpn;}~h;M(n3;_!hvpjgEL!dILrIb&aLxSPW&~Zqs*O0S={tjn2AUR)13yX4@B(4;l zOMqBPbkFzdIs*{zJ}?|)9j7Sa9dI5I;o18FoIJoM7I2TihAc(ge z79u@=p$e03X8vp@hS!XNSpdrDILkH?Q+CEDlwir-2A)cBx8r-a_8>($UDiy;E)T07FVi_l^d0?rl# zaUct0(C~U5a1^#@Z-BbLHsy0%-IMrGur@PUbINl~l4p<82P5EXE_0Dl zE2U(PP9%jK#9lN5(e53&dC=-!8s4!G48XJP>AX)8i$<}79~?3i8nc||MUUI0-A-7F 
z2;|tD6ya2OBM!YICBC;HiHa~&N}^QCz`0eu{nHjFCeVkB#9R;$>qiHZM+U-k!}T>w zN5~pBO!+zUP%#I535D^Zo6#bouim_5#LY1D3TBNe44D}ir}^o9e@5Qa-w@{L-Y1h2 zyJOMZxes5+&UYS?iT!oXQma|RHo2PQ}%o*x^A6Br4l&kf0as&ixzCLH6k1m!N1oX12Z|@@gN!E( zdjwoPMI^$H4`Rf!0tuz`mwP7l4F2tF4iJ1^1crhUb}zz#b6+hkMPPY-7fqqQ<3KqB zR-JDQ^|rxe3{=&-QHo+C;5(Jj?L{HY5kMFpG9ZJXCN{xxPpOZ&hTU)`i)5=D6KJ~! zVmUe%p|eD4$<-Kv`V)>2caRDEMJu4XdMgKZC112TIdJC~1_a&>iL%Hs(Mo&sbaii8 z%Y$5;v~%J?wS^;BRWcV7!)K6N7qn*Lt_IR%b}e1H)K6zCbb@c{GQ3}(PEgXUiw5v3 z-V8ukE=qU<1cN)brTD5mZBaT^Jb&?J+OO@aDRwMZ0}ai5KqeKcN&*akSUWh9nFW`6twmXYcXitfFO>>9K6!w)9@#&@22M#`XQO#vPAiY~~1g@Z_sd|Fz?KADyPM+;2bL^ekP1){NIr;fAI730N z>p%q%bbRP_poKKV?_fI}2?%yr$+p34CU}yKt`_^@K>_fl6$)8(I8=Q)p-6s-26(L# zil^kI+FE6D2cQOGvbsK(caaz4PCYsN5J%250bB_q+U6Kr*m16T&oes83XMq>7zrm@3+3P8Np79N3RCx&W+bb}zJ;y%o4g_A!M1CFH(AU%)n zk}iG6ZgA4BKS85)ueJNJys~s+EqKovoy$~lFo_J5@_SKVuP|@S_0?EwY(rkt;jUW^ z?Phu>d^TpB|0Y=bnRx95&>-DBa#GQ2vuPr%F(9ymB|z@YIu5$$9?sO{iax%2%UP4l*&it0WrXsNE=k zjfx*7G~)cbQNISRMoKlB1^dvCkF%pdfs7eFdUHp)onQ(A#^ArXEtLv3LbtvYNN8&p z1p5-cp+L|bV&d^_gdkj^Ljb~>3n+G~wQ7I>l2TY`vvmB2OZ|WRXwX&k!;x(h3K~-< zl(#j8g39Z=z*%KnzClhjFag8QM?AqLzdnZt^pw**%>|g9l)6Gq|#}C_( zKWgzrp4%_yjkX$R+pquf31@C?KR01{$5r?9>)fUHiqyU48hg+68voHrbGS_>W-v@| zGPwHi@(1lXae&eKS>V+9{ZSi&>!)>e=#DE1uiIT8T_bL-I;Q4=u(ULsHmw@b)}4)A zow10D=V!GzrlR`U=~hX$8<6O$+4yz%PEaLwAu1_19?7`-+hHj|BEuT1 zE=(5SmXoYg2K|Jm#(Qkp5K6vn^vRQq(cX5=QQ;xX~$`IHvAddU& z92qA%JPPi2#8J4Nhk&K zYoe}KDI}}p*$f+ z#q&C}BaVoBnMTJOg(;(a22n91J5EhmIe@qgI6%X8lwGYk9|?v2I{Yc|-1Qn7AtEu! 
zOQ5+KzT58hpc$`&PARFFn1qu9v8t-7e_CX0c=&CaA`0V1zcyJu4<%3uWX=aOL6ejB zmHXK}HF3m@LDT5Vw;?GCgm6X&gFnwODQS=_{4o5$>Jb3_d&EnZd3+u#G4<|bdS!T0 z@zRH)Mtt7E`D3;p=piOnTM@4|4Npi3XoZ4`W_W%c(WGHiI!f?LM_RwZL&q`A(A%Eu>1L$vizhi=-^U@H)P^oNnr@QhY(K>x(rp4G5=-vVFjs z;TJ^B9ZGEDeBVPCwXckKpwi#6D#RItyVnqm@s%g#)>3RS};5gCSaz>Y`w$Un&JXkRXOV*{guM9p+O&@ zLmUwBYqYQ05>Y;MY@vBI1_w+XSW8RG7_g4y+Cz&1>aN$ ze|it%WQ0tUYoK)^=%$6w?E@)|b7f7hEwSvc7k0fEJ;unvc;TMACijqkeB-1g(0~_A zY3W>qhpnPK>udcOU5kHtHx3$=CN|iO;$Ea5tGPSP0P9W8Kd)g2e`Z`+48saB%RgRE z-sn0RN`HLFYXv}#zeI56%#|i4l9p$<_NV&#@>IRTeed@Qnkwy{_x;;4*qRR&^_0TY zUGmSa4dm=ed*tQ!$lq+3$2ct0Cxri4&&{ZNZ>kvT-5bo@-~T`V#(`ByLl~}B?2{)e z>;^HI)0lXhV}AWMwa~d+j@#2z+3Hx;_PMIr{?)tovRMP!qS+0D88y8h!TV;I1JvV# z^Y-}nuW-wH6t+&Ab77*QM2(D2{`SHwOVML6_MiIr@vkHWkUK&9MR7U4?#;AS?*plg zt0Vb!!Ab0!@yE6BkHztiZ+u-XhMixS^pZkCM}GT~cj7Ec?XGd}pB!5vl2$(3BVE8h zuf|UDgrT;jCPTtQqzBF$Fi*cXM8_}6x$k1TokH$=3zPfblU%Tx))gY12}=6pe2HGH<%x7gcxrj*&WnbKZNmi>aTAzeqrtl z>jLA%Ox3(fjTjqei-;&_YhzpG`ns+D>{b48pZ^=*@IrH2x}5)u{f{G7E-8BtGkeFi zJ~th7djc$;&HtkA=iE2$3$I?evQPEpJP-2x0rLD6cF=0@ajE{|pa?4M&ybgxut0Pr z8k%#=_n|a$ z(6a7xV&1&J%4T8l?(g^SEZ#Mj;ZlEwk;9nzaRD4T(Av*BrG)RDi zg@q$*A0QrwV(}<6XPEbV0fD82F!_m-WET~)D%{Bv#cD3}coxb><#27~-Li<+O08mW zkFG52_zVSgwx6F~^g3bWr~zP5K~NeA_m3YRN^*Mk6~oNG^z2_8aD>vnXhacU~ba9qDc|K4vB>G6HIbqBg*X|O`&Qm2%w}NsvV&pVFy92 z51dyE?Pf8s(klpxpyVz|NlA*gqm(0PwIUa{_EN=LzRXndqzF>7$l6k!jG@yhuh!>TA6h;922s!zj@*@ zD^SlmNBk2|*J+*$Pm#9%=y93rH>A&_PdG0+b|MCav)GRcRCQyRGFAtcTExX=#h+y_ z`I{^0B&QVTRA}8NQ};pTI|@bT^jgeFM6?SnB-=ts~Hjkc1%>*6*)f+*1s)5cxeA>ye+%aifb6M7 z`>2M^gUOjOjR=LlB7JTzb#gj_Kn?GL_DO32~&g3|Ok~EjDiVtX3m)JcgBL(uGPOEiJybh9}P;xh-6mK