From 00372703fe1348658a88a8499b8600f5f1079d45 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Thu, 10 Apr 2025 17:52:00 -0700 Subject: [PATCH 01/11] Use 1DP endpoint --- pylintrc | 1 + .../azure/ai/assistants/_client.py | 44 +-- .../azure/ai/assistants/_configuration.py | 34 +- .../ai/assistants/_operations/_operations.py | 360 ------------------ .../azure/ai/assistants/_patch.py | 133 ++----- .../azure/ai/assistants/aio/_client.py | 42 +- .../azure/ai/assistants/aio/_configuration.py | 32 +- .../assistants/aio/_operations/_operations.py | 360 ------------------ .../ai/assistants/aio/_operations/_patch.py | 24 +- .../azure/ai/assistants/aio/_patch.py | 174 +++------ .../azure/ai/assistants/models/_patch.py | 109 +----- .../azure/ai/assistants/telemetry/__init__.py | 6 +- .../telemetry/_ai_assistants_instrumentor.py | 64 ++-- .../azure/ai/assistants/telemetry/_utils.py | 3 +- .../azure-ai-assistants/dev_requirements.txt | 7 +- ...tant-5szzLs73bsbQ2k75xUGKv8_image_file.png | Bin 162061 -> 0 bytes ...tant-6Q824dJfHkRzsy46hPatQA_image_file.png | Bin 181757 -> 0 bytes ...tant-WhEPqpcbmXadRJmCzMUeTi_image_file.png | Bin 181757 -> 0 bytes ...sample_assistants_azure_functions_async.py | 5 +- .../sample_assistants_basics_async.py | 5 +- ...basics_async_with_azure_monitor_tracing.py | 7 +- ...tants_basics_async_with_console_tracing.py | 4 +- ...ample_assistants_code_interpreter_async.py | 15 +- ...tants_code_interpreter_attachment_async.py | 9 +- ...eter_attachment_enterprise_search_async.py | 6 +- .../sample_assistants_functions_async.py | 4 +- .../sample_assistants_json_schema_async.py | 4 +- ...ample_assistants_run_with_toolset_async.py | 4 +- ...le_assistants_stream_eventhandler_async.py | 5 +- ...tream_eventhandler_with_functions_async.py | 9 +- ..._stream_eventhandler_with_toolset_async.py | 5 +- ...ample_assistants_stream_iteration_async.py | 5 +- ...m_with_base_override_eventhandler_async.py | 6 +- 
...tore_batch_enterprise_file_search_async.py | 11 +- ...ts_vector_store_batch_file_search_async.py | 9 +- ...ctor_store_enterprise_file_search_async.py | 7 +- ...sistants_vector_store_file_search_async.py | 5 +- ...tants_with_file_search_attachment_async.py | 5 +- .../samples/multiassistant/assistant_team.py | 9 +- .../assistant_trace_configurator.py | 4 +- .../sample_assistants_assistant_team.py | 4 +- ...tants_assistant_team_custom_team_leader.py | 4 +- .../sample_assistants_multi_assistant_team.py | 4 +- .../sample_assistants_azure_ai_search.py | 10 +- .../sample_assistants_azure_functions.py | 6 +- .../samples/sample_assistants_basics.py | 9 +- ...tants_basics_with_azure_monitor_tracing.py | 6 +- ..._assistants_basics_with_console_tracing.py | 8 +- ..._with_console_tracing_custom_attributes.py | 8 +- .../sample_assistants_bing_grounding.py | 4 +- .../sample_assistants_code_interpreter.py | 5 +- ...nterpreter_attachment_enterprise_search.py | 7 +- ...ample_assistants_enterprise_file_search.py | 7 +- .../samples/sample_assistants_fabric.py | 6 +- .../samples/sample_assistants_file_search.py | 5 +- .../samples/sample_assistants_functions.py | 5 +- ...ts_functions_with_azure_monitor_tracing.py | 7 +- ...sistants_functions_with_console_tracing.py | 5 +- .../samples/sample_assistants_json_schema.py | 4 +- .../samples/sample_assistants_logic_apps.py | 9 +- .../samples/sample_assistants_openapi.py | 4 +- ...mple_assistants_openapi_connection_auth.py | 18 +- .../sample_assistants_run_with_toolset.py | 4 +- .../samples/sample_assistants_sharepoint.py | 4 +- .../sample_assistants_stream_eventhandler.py | 4 +- ...eventhandler_with_azure_monitor_tracing.py | 10 +- ...stream_eventhandler_with_bing_grounding.py | 6 +- ...tream_eventhandler_with_console_tracing.py | 8 +- ...ants_stream_eventhandler_with_functions.py | 5 +- ...stants_stream_eventhandler_with_toolset.py | 5 +- .../sample_assistants_stream_iteration.py | 4 +- ...ts_stream_iteration_with_bing_grounding.py | 8 +- 
...tants_stream_iteration_with_file_search.py | 5 +- ...ssistants_stream_iteration_with_toolset.py | 5 +- ..._stream_with_base_override_eventhandler.py | 5 +- ...ctor_store_batch_enterprise_file_search.py | 7 +- ...sistants_vector_store_batch_file_search.py | 5 +- ...ple_assistants_vector_store_file_search.py | 5 +- ...s_with_code_interpreter_file_attachment.py | 5 +- ...tants_with_enterprise_search_attachment.py | 5 +- ..._assistants_with_file_search_attachment.py | 5 +- ...ple_assistants_with_resources_in_thread.py | 5 +- .../tests/overload_assert_utils.py | 4 +- .../tests/test_assistant_mock_overloads.py | 4 +- .../tests/test_assistant_models.py | 1 + .../tests/test_assistant_models_async.py | 1 + .../tests/test_assistants_client.py | 203 ++++++---- .../tests/test_assistants_client_async.py | 41 +- .../tests/test_assistants_mock.py | 3 - .../tests/test_assistants_mock_async.py | 7 +- .../tests/test_overload_assert.py | 5 +- sdk/ai/azure-ai-assistants/tsp-location.yaml | 2 +- 92 files changed, 542 insertions(+), 1520 deletions(-) delete mode 100644 sdk/ai/azure-ai-assistants/samples/assistant-5szzLs73bsbQ2k75xUGKv8_image_file.png delete mode 100644 sdk/ai/azure-ai-assistants/samples/assistant-6Q824dJfHkRzsy46hPatQA_image_file.png delete mode 100644 sdk/ai/azure-ai-assistants/samples/assistant-WhEPqpcbmXadRJmCzMUeTi_image_file.png diff --git a/pylintrc b/pylintrc index e58b01fd5c1b..2621c1bd1041 100644 --- a/pylintrc +++ b/pylintrc @@ -8,6 +8,7 @@ ignore-paths= azure\\mixedreality\\remoterendering\\_api_version.py, azure/mixedreality/remoterendering/_api_version.py, (?:.*[/\\]|^)projects/(models/_models.py|_model_base.py|operations/_operations.py|aio/operations/_operations.py)$, + (?:.*[/\\]|^)assistants/(_models.py|_model_base.py|_operations/_operations.py|aio/_operations/_operations.py)$, # Exclude any path that contains the following directory names (?:.*[/\\]|^)(?:_vendor|_generated|_restclient|samples|examples|test|tests|doc|\.tox)(?:[/\\]|$) diff --git 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py index 433f72ca45b5..402e3499a801 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py @@ -1,4 +1,3 @@ -# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -27,18 +26,9 @@ class AssistantsClient(AssistantsClientOperationsMixin): """AssistantsClient. - :param endpoint: The Azure AI Foundry project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``, where is the - Azure region where the project is deployed (e.g. westus) and is the GUID of - the Enterprise private link. Required. + :param endpoint: Project endpoint in the form of: + https://.services.ai.azure.com/api/projects/. Required. :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Foundry project name. Required. - :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -48,24 +38,9 @@ class AssistantsClient(AssistantsClientOperationsMixin): :paramtype api_version: str """ - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: Union[AzureKeyCredential, "TokenCredential"], - **kwargs: Any - ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" - self._config = AssistantsClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - **kwargs - ) + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = AssistantsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -110,15 +85,6 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: request_copy = deepcopy(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py index 
2b2e6944f1dd..167da10d4f73 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py @@ -23,18 +23,9 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: The Azure AI Foundry project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``, where is the - Azure region where the project is deployed (e.g. westus) and is the GUID of - the Enterprise private link. Required. + :param endpoint: Project endpoint in the form of: + https://.services.ai.azure.com/api/projects/. Required. :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Foundry project name. Required. - :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -44,35 +35,18 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib :paramtype api_version: str """ - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: Union[AzureKeyCredential, "TokenCredential"], - **kwargs: Any, - ) -> None: + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: api_version: str = kwargs.pop("api_version", "latest") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - if resource_group_name is None: - raise ValueError("Parameter 'resource_group_name' must not be None.") - if project_name is None: - raise ValueError("Parameter 'project_name' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint - self.subscription_id = subscription_id - self.resource_group_name = resource_group_name - self.project_name = project_name self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py index bf3858e5c3c9..6032feb2a16c 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py @@ -1400,15 +1400,6 @@ def 
create_assistant( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1495,15 +1486,6 @@ def list_assistants( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1564,15 +1546,6 @@ def get_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assistant: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, 
**path_format_arguments) @@ -1821,15 +1794,6 @@ def update_assistant( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1890,15 +1854,6 @@ def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assistan ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2059,15 +2014,6 @@ def create_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -2128,15 +2074,6 @@ def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AssistantThread: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2300,15 +2237,6 @@ def update_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2369,15 +2297,6 @@ def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletion ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - 
"self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2567,15 +2486,6 @@ def create_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2670,15 +2580,6 @@ def list_messages( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2742,15 +2643,6 @@ def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - 
"projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2911,15 +2803,6 @@ def update_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3289,15 +3172,6 @@ def create_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3388,15 +3262,6 @@ def list_runs( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - 
"self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3460,15 +3325,6 @@ def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadR ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3629,15 +3485,6 @@ def update_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3810,15 +3657,6 @@ def submit_tool_outputs_to_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), 
- "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3882,15 +3720,6 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4214,15 +4043,6 @@ def create_thread_and_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4303,15 +4123,6 @@ def get_run_step( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", 
self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4413,15 +4224,6 @@ def list_run_steps( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4486,15 +4288,6 @@ def list_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4583,15 +4376,6 @@ def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwargs: An ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - 
"self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4652,15 +4436,6 @@ def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4721,15 +4496,6 @@ def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4790,15 +4556,6 @@ def _get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": 
self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4882,15 +4639,6 @@ def list_vector_stores( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5072,15 +4820,6 @@ def create_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5141,15 +4880,6 @@ def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Vecto ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 
"str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5311,15 +5041,6 @@ def modify_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5381,15 +5102,6 @@ def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Ve ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5485,15 +5197,6 @@ def list_vector_store_files( ) path_format_arguments = { "endpoint": 
self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5651,15 +5354,6 @@ def create_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5723,15 +5417,6 @@ def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: An ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5800,15 +5485,6 @@ def 
delete_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5966,15 +5642,6 @@ def create_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6040,15 +5707,6 @@ def get_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6115,15 
+5773,6 @@ def cancel_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6223,15 +5872,6 @@ def list_vector_store_file_batch_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py index de107d2f836f..b5f3733040e0 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -11,8 +12,6 @@ import os import sys import time -import uuid -from os import PathLike from pathlib import Path from typing import ( IO, @@ -22,20 +21,18 @@ Iterator, List, Optional, - Self, - Tuple, Union, cast, overload, ) +from azure.core.credentials import TokenCredential, AzureKeyCredential from azure.core.tracing.decorator import distributed_trace from . import models as _models from ._vendor import FileType from .models._enums import FilePurpose, RunStatus from ._client import AssistantsClient as AssistantsClientGenerated -from azure.core.credentials import TokenCredential if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -44,8 +41,6 @@ if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports - from openai import AzureOpenAI - from . import _types JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object @@ -54,10 +49,10 @@ logger = logging.getLogger(__name__) -class AssistantsClient(AssistantsClientGenerated): +class AssistantsClient(AssistantsClientGenerated): # pylint: disable=client-accepts-api-version-keyword - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any) -> None: + super().__init__(endpoint, credential, **kwargs) self._toolset: Dict[str, _models.ToolSet] = {} # pylint: disable=arguments-differ @@ -184,7 +179,9 @@ def create_assistant( # pylint: disable=arguments-differ """ @overload - def create_assistant(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Assistant: + def create_assistant( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: """Creates a new assistant. :param body: Required. 
@@ -198,7 +195,9 @@ def create_assistant(self, body: JSON, *, content_type: str = "application/json" """ @overload - def create_assistant(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Assistant: + def create_assistant( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: """Creates a new assistant. :param body: Required. @@ -1492,7 +1491,11 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] if not event_handler: event_handler = cast(_models.BaseAssistantEventHandlerT, _models.AssistantEventHandler()) - return _models.AssistantRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + return _models.AssistantRunStream( + response_iterator=response_iterator, + submit_tool_outputs=self._handle_submit_tool_outputs, + event_handler=event_handler, + ) # pylint: disable=arguments-differ @overload @@ -1728,7 +1731,9 @@ def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload event_handler.initialize(response_iterator, self._handle_submit_tool_outputs) - def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: _models.BaseAssistantEventHandler) -> None: + def _handle_submit_tool_outputs( + self, run: _models.ThreadRun, event_handler: _models.BaseAssistantEventHandler + ) -> None: if isinstance(run.required_action, _models.SubmitToolOutputsAction): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: @@ -1770,7 +1775,7 @@ def upload_file( # pylint: disable=arguments-differ :rtype: ~azure.ai.projects.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ - + # pylint: disable=arguments-differ @overload def upload_file( # pylint: disable=arguments-differ @@ -1804,7 +1809,7 @@ def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: @distributed_trace def upload_file( self, - body: Optional[Union[_models.UploadFileRequest, JSON]] = None, 
+ body: Union[_models.UploadFileRequest, JSON] = _Unset, *, file: Optional[FileType] = None, file_path: Optional[str] = None, @@ -1832,18 +1837,14 @@ def upload_file( :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. """ - if body is not None: + if body is not _Unset: return super().upload_file(body=body, **kwargs) if isinstance(purpose, FilePurpose): purpose = purpose.value if file is not None and purpose is not None: - file_body = _models.UploadFileRequest( - file=file, - purpose=purpose, - filename=filename - ) + file_body = _models.UploadFileRequest(file=file, purpose=purpose, filename=filename) return super().upload_file(body=file_body, **kwargs) if file_path is not None and purpose is not None: @@ -1857,11 +1858,7 @@ def upload_file( # Determine filename and create correct FileType base_filename = filename or os.path.basename(file_path) file_content: FileType = (base_filename, content) - file_body = _models.UploadFileRequest( - file=file_content, - purpose=purpose, - filename=filename - ) + file_body = _models.UploadFileRequest(file=file_content, purpose=purpose, filename=filename) return super().upload_file(body=file_body, **kwargs) except IOError as e: @@ -2315,7 +2312,8 @@ def get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: return cast(Iterator[bytes], response) @distributed_trace - def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: + def save_file( # pylint: disable=client-method-missing-kwargs + self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: """ Synchronously saves file content retrieved using a file identifier to the specified local directory. 
@@ -2521,74 +2519,9 @@ def create_vector_store_file_and_poll( return vector_store_file - @classmethod - def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> Self: - """ - Create an asynchronous AIProjectClient from a connection string. - - :param str conn_str: The connection string, copied from your AI Foundry project. - :param TokenCredential credential: Credential used to authenticate requests to the service. - :return: An AssistantsClient instance. - :rtype: AssistantsClient - """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls( - endpoint, - subscription_id, - resource_group_name, - project_name, - credential, - **kwargs, - ) - - def upload_file_to_azure_blob(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: - """Upload a file to the Azure AI Foundry project. - This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The tuple, containing asset id and asset URI of uploaded file. - :rtype: Tuple[str, str] - """ - try: - from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.constants import AssetTypes # type: ignore - from azure.ai.ml.entities import Data # type: ignore - except ImportError as e: - raise ImportError( - "azure-ai-ml must be installed to use this function. 
Please install it using `pip install azure-ai-ml`" - ) from e - - data = Data( - path=str(file_path), - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - # We have to wrap async method get_token of - - ml_client = MLClient( - self._config.credential, - self._config.subscription_id, - self._config.resource_group_name, - self._config.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id, data_asset.path - @distributed_trace - def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: + def delete_assistant( # pylint: disable=delete-operation-wrong-return-type + self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: """Deletes an assistant. :param assistant_id: Identifier of the assistant. Required. @@ -2601,16 +2534,8 @@ def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assistan del self._toolset[assistant_id] return super().delete_assistant(assistant_id, **kwargs) - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config.subscription_id, - "resource_group_name": self._config.resource_group_name, - "project_name": self._config.project_name, - } - -__all__: List[str] = ['AssistantsClient'] # Add all objects you want publicly available to users at this package level +__all__: List[str] = ["AssistantsClient"] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py index 5efb9a86adcc..67dfd5d3edcc 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py @@ -1,4 +1,3 @@ -# pylint: disable=line-too-long,useless-suppression # coding=utf-8 # 
-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -27,18 +26,9 @@ class AssistantsClient(AssistantsClientOperationsMixin): """AssistantsClient. - :param endpoint: The Azure AI Foundry project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``, where is the - Azure region where the project is deployed (e.g. westus) and is the GUID of - the Enterprise private link. Required. + :param endpoint: Project endpoint in the form of: + https://.services.ai.azure.com/api/projects/. Required. :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Foundry project name. Required. - :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -49,23 +39,10 @@ class AssistantsClient(AssistantsClientOperationsMixin): """ def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: Union[AzureKeyCredential, "AsyncTokenCredential"], - **kwargs: Any + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" - self._config = AssistantsClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - **kwargs - ) + _endpoint = "{endpoint}" + self._config = AssistantsClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -112,15 +89,6 @@ def send_request( request_copy = deepcopy(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py index b5a7bf17f277..2fd4bfd489bd 100644 --- 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py @@ -23,18 +23,9 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: The Azure AI Foundry project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``, where is the - Azure region where the project is deployed (e.g. westus) and is the GUID of - the Enterprise private link. Required. + :param endpoint: Project endpoint in the form of: + https://.services.ai.azure.com/api/projects/. Required. :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Foundry project name. Required. - :type project_name: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential or @@ -45,34 +36,19 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib """ def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: Union[AzureKeyCredential, "AsyncTokenCredential"], - **kwargs: Any, + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: api_version: str = kwargs.pop("api_version", "latest") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - if resource_group_name is None: - raise ValueError("Parameter 'resource_group_name' must not be None.") - if project_name is None: - raise ValueError("Parameter 'project_name' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint - self.subscription_id = subscription_id - self.resource_group_name = resource_group_name - self.project_name = project_name self.credential = credential self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "ai-assistants/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py index 5db7010cfdf3..573a8fed7a5e 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py @@ -296,15 +296,6 @@ async def create_assistant( ) 
path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -391,15 +382,6 @@ async def list_assistants( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -460,15 +442,6 @@ async def get_assistant(self, assistant_id: str, **kwargs: Any) -> _models.Assis ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ 
-717,15 +690,6 @@ async def update_assistant( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -786,15 +750,6 @@ async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.As ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -955,15 +910,6 @@ async def create_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -1024,15 +970,6 @@ async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AssistantTh ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1196,15 +1133,6 @@ async def update_thread( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1265,15 +1193,6 @@ async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDe ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - 
"self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1463,15 +1382,6 @@ async def create_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1566,15 +1476,6 @@ async def list_messages( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1638,15 +1539,6 @@ async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _ ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - 
), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1807,15 +1699,6 @@ async def update_message( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2185,15 +2068,6 @@ async def create_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2284,15 +2158,6 @@ async def list_runs( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": 
self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2356,15 +2221,6 @@ async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.T ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2525,15 +2381,6 @@ async def update_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2706,15 +2553,6 @@ async def submit_tool_outputs_to_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", 
self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2778,15 +2616,6 @@ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3110,15 +2939,6 @@ async def create_thread_and_run( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3199,15 +3019,6 @@ async def get_run_step( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": 
self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3309,15 +3120,6 @@ async def list_run_steps( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3382,15 +3184,6 @@ async def list_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3479,15 +3272,6 @@ async def upload_file(self, body: Union[_models.UploadFileRequest, JSON], **kwar ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", 
skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3548,15 +3332,6 @@ async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletion ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3617,15 +3392,6 @@ async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3686,15 +3452,6 @@ async def _get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[ ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", 
self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3778,15 +3535,6 @@ async def list_vector_stores( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3968,15 +3716,6 @@ async def create_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4037,15 +3776,6 @@ async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models ) path_format_arguments = { 
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4207,15 +3937,6 @@ async def modify_vector_store( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4277,15 +3998,6 @@ async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _mod ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4381,15 +4093,6 @@ 
async def list_vector_store_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4547,15 +4250,6 @@ async def create_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4619,15 +4313,6 @@ async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwar ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -4696,15 +4381,6 @@ async def delete_vector_store_file( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4862,15 +4538,6 @@ async def create_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4936,15 +4603,6 @@ async def get_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", 
skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5011,15 +4669,6 @@ async def cancel_vector_store_file_batch( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5119,15 +4768,6 @@ async def list_vector_store_file_batch_files( ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str", skip_quote=True - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str", skip_quote=True - ), - "projectName": self._serialize.url( - "self._config.project_name", self._config.project_name, "str", skip_quote=True - ), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py index 292578c140f1..8e56156b502f 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_patch.py @@ -36,16 +36,20 @@ def get_token( enable_cae: bool = False, **kwargs: Any, ) -> "AccessToken": - return concurrent.futures.ThreadPoolExecutor().submit( - asyncio.run, - 
self._async_credential.get_token( - *scopes, - claims=claims, - tenant_id=tenant_id, - enable_cae=enable_cae, - **kwargs, - ), - ).result() + return ( + concurrent.futures.ThreadPoolExecutor() + .submit( + asyncio.run, + self._async_credential.get_token( + *scopes, + claims=claims, + tenant_id=tenant_id, + enable_cae=enable_cae, + **kwargs, + ), + ) + .result() + ) __all__: List[str] = [] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py index fa880f254f35..d87a3f0629fd 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py @@ -1,8 +1,9 @@ +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ -from openai.types import file_purpose + """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize @@ -11,12 +12,10 @@ import io import logging import os -import uuid import time -from os import PathLike from pathlib import Path -from .. import models as _models + from typing import ( IO, @@ -27,24 +26,21 @@ List, MutableMapping, Optional, - Self, - Tuple, Union, cast, overload, ) from azure.core.tracing.decorator_async import distributed_trace_async + +from .. import models as _models from .._vendor import FileType from ..models._enums import FilePurpose, RunStatus - from ._client import AssistantsClient as AssistantsClientGenerated -from ._operations._patch import _SyncCredentialWrapper if TYPE_CHECKING: + from .. 
import _types # pylint: disable=unused-import,ungrouped-imports - from openai import AsyncAzureOpenAI - - from azure.core.credentials import AccessToken + from azure.core.credentials import AccessToken, AzureKeyCredential from azure.core.credentials_async import AsyncTokenCredential logger = logging.getLogger(__name__) @@ -53,13 +49,12 @@ _Unset: Any = object() -class AssistantsClient(AssistantsClientGenerated): - - - - - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) +class AssistantsClient(AssistantsClientGenerated): # pylint: disable=client-accepts-api-version-keyword + + def __init__( + self, endpoint: str, credential: Union["AzureKeyCredential", "AsyncTokenCredential"], **kwargs: Any + ) -> None: + super().__init__(endpoint, credential, **kwargs) self._toolset: Dict[str, _models.AsyncToolSet] = {} # pylint: disable=arguments-differ @@ -139,7 +134,7 @@ async def create_assistant( # pylint: disable=arguments-differ toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> _models.Assistant: @@ -186,7 +181,9 @@ async def create_assistant( # pylint: disable=arguments-differ """ @overload - async def create_assistant(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Assistant: + async def create_assistant( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Assistant: """Creates a new assistant. :param body: Required. 
@@ -229,7 +226,7 @@ async def create_assistant( toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, content_type: str = "application/json", **kwargs: Any, @@ -259,7 +256,7 @@ async def create_assistant( :keyword top_p: Nucleus sampling parameter. :paramtype top_p: Optional[float] :keyword response_format: Response format for tool calls. - :paramtype response_format: Optional["_types.assistantsApiResponseFormatOption"] + :paramtype response_format: Optional["_types.AssistantsApiResponseFormatOption"] :keyword metadata: Key/value pairs for storing additional information. :paramtype metadata: Optional[Dict[str, str]] :keyword content_type: Content type of the body. @@ -310,7 +307,7 @@ async def update_assistant( # pylint: disable=arguments-differ tool_resources: Optional[_models.ToolResources] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> _models.Assistant: @@ -378,7 +375,7 @@ async def update_assistant( # pylint: disable=arguments-differ toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> _models.Assistant: @@ -478,7 +475,7 @@ async def update_assistant( toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - 
response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, content_type: str = "application/json", metadata: Optional[Dict[str, str]] = None, **kwargs: Any, @@ -604,8 +601,8 @@ async def create_run( # pylint: disable=arguments-differ max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, @@ -768,8 +765,8 @@ async def create_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any, @@ -909,8 +906,8 @@ async def create_and_process_run( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, 
parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, sleep_interval: int = 1, @@ -1073,8 +1070,8 @@ async def create_stream( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, event_handler: None = None, @@ -1182,8 +1179,8 @@ async def create_stream( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = None, event_handler: _models.BaseAsyncAssistantEventHandlerT, @@ -1360,8 +1357,8 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.assistantsApiToolChoiceOption"] = None, - response_format: Optional["_types.assistantsApiResponseFormatOption"] = None, + tool_choice: Optional["_types.AssistantsApiToolChoiceOption"] = None, + response_format: Optional["_types.AssistantsApiResponseFormatOption"] = None, parallel_tool_calls: Optional[bool] = None, metadata: Optional[Dict[str, str]] = 
None, event_handler: Optional[_models.BaseAsyncAssistantEventHandlerT] = None, @@ -1491,9 +1488,13 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) if not event_handler: - event_handler = cast(_models.BaseAssistantEventHandlerT, _models.AsyncAssistantEventHandler()) + event_handler = cast(_models.BaseAsyncAssistantEventHandlerT, _models.AsyncAssistantEventHandler()) - return _models.AsyncAssistantRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + return _models.AsyncAssistantRunStream( + response_iterator=response_iterator, + submit_tool_outputs=self._handle_submit_tool_outputs, + event_handler=event_handler, + ) # pylint: disable=arguments-differ @overload @@ -1796,7 +1797,7 @@ async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: @distributed_trace_async async def upload_file( self, - body: Optional[Union[_models.UploadFileRequest, JSON]] = None, + body: Union[_models.UploadFileRequest, JSON] = _Unset, *, file: Optional[FileType] = None, file_path: Optional[str] = None, @@ -1824,18 +1825,14 @@ async def upload_file( :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. 
""" - if body is not None: + if body is not _Unset: return await super().upload_file(body=body, **kwargs) if isinstance(purpose, FilePurpose): purpose = purpose.value if file is not None and purpose is not None: - file_body = _models.UploadFileRequest( - file=file, - purpose=purpose, - filename=filename - ) + file_body = _models.UploadFileRequest(file=file, purpose=purpose, filename=filename) return await super().upload_file(body=file_body, **kwargs) if file_path is not None and purpose is not None: @@ -1849,11 +1846,7 @@ async def upload_file( # Determine filename and create correct FileType base_filename = filename or os.path.basename(file_path) file_content: FileType = (base_filename, content) - file_body = _models.UploadFileRequest( - file=file_content, - purpose=purpose, - filename=filename - ) + file_body = _models.UploadFileRequest(file=file_content, purpose=purpose, filename=filename) return await super().upload_file(body=file_body, **kwargs) except IOError as e: @@ -2465,7 +2458,8 @@ async def get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[b return cast(AsyncIterator[bytes], response) @distributed_trace_async - async def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: + async def save_file( # pylint: disable=client-method-missing-kwargs + self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: """ Asynchronously saves file content retrieved using a file identifier to the specified local directory. @@ -2522,72 +2516,6 @@ def write_file(collected_chunks: list): except (ValueError, RuntimeError, TypeError, IOError) as e: logger.error("An error occurred in save_file: %s", e) raise - - @classmethod - def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> Self: - """ - Create an asynchronous AIProjectClient from a connection string. 
- - :param str conn_str: The connection string, copied from your AI Foundry project. - :param AsyncTokenCredential credential: Credential used to authenticate requests to the service. - :return: An AssistantsClient instance. - :rtype: AssistantsClient - """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls( - endpoint, - subscription_id, - resource_group_name, - project_name, - credential, - **kwargs, - ) - - def upload_file_to_azure_blob(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: - """Upload a file to the Azure AI Foundry project. - This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The tuple, containing asset id and asset URI of uploaded file. - :rtype: Tuple[str, str] - """ - try: - from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.constants import AssetTypes # type: ignore - from azure.ai.ml.entities import Data # type: ignore - except ImportError as e: - raise ImportError( - "azure-ai-ml must be installed to use this function. 
Please install it using `pip install azure-ai-ml`" - ) from e - - data = Data( - path=str(file_path), - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - # We have to wrap async method get_token of - - ml_client = MLClient( - _SyncCredentialWrapper(self._config.credential), - self._config.subscription_id, - self._config.resource_group_name, - self._config.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id, data_asset.path @distributed_trace_async async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: @@ -2603,16 +2531,8 @@ async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.As del self._toolset[assistant_id] return await super().delete_assistant(assistant_id, **kwargs) - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config.subscription_id, - "resource_group_name": self._config.resource_group_name, - "project_name": self._config.project_name, - } - -__all__: List[str] = ['AssistantsClient'] # Add all objects you want publicly available to users at this package level +__all__: List[str] = ["AssistantsClient"] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py index ba79c2794d4e..e7e0ed0fd702 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -7,13 +8,10 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ import asyncio -import base64 -import datetime import inspect import itertools import json import logging -import math import re from abc import ABC, abstractmethod from typing import ( @@ -38,8 +36,6 @@ overload, ) -from azure.core.credentials import AccessToken, TokenCredential - from ._enums import AssistantStreamEvent, MessageRole, AzureAISearchQueryType from ._models import ( AISearchIndexResource, @@ -76,6 +72,7 @@ ToolDefinition, ToolResources, MessageDeltaTextContent, + VectorStoreDataSource, ) from ._models import MessageDeltaChunk as MessageDeltaChunkGenerated @@ -198,87 +195,6 @@ def _parse_event(event_data_str: str) -> Tuple[str, StreamEventData]: return event_type, event_obj -# TODO: Look into adding an async version of this class -class SASTokenCredential(TokenCredential): - def __init__( - self, - *, - sas_token: str, - credential: TokenCredential, - subscription_id: str, - resource_group_name: str, - project_name: str, - connection_name: str, - ): - self._sas_token = sas_token - self._credential = credential - self._subscription_id = subscription_id - self._resource_group_name = resource_group_name - self._project_name = project_name - self._connection_name = connection_name - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential.__init__] Exit. 
Given token expires on %s.", self._expires_on) - - @classmethod - def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime: - payload = jwt_token.split(".")[1] - padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary - decoded_bytes = base64.urlsafe_b64decode(padded_payload) - decoded_str = decoded_bytes.decode("utf-8") - decoded_payload = json.loads(decoded_str) - expiration_date = decoded_payload.get("exp") - return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) - - def _refresh_token(self) -> None: - logger.debug("[SASTokenCredential._refresh_token] Enter") - from azure.ai.assistants import AssistantsClient - - project_client = AssistantsClient( - credential=self._credential, - # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. - # http://management.azure.com is hard coded in the SDK. - endpoint="not-needed", - subscription_id=self._subscription_id, - resource_group_name=self._resource_group_name, - project_name=self._project_name, - ) - - connection = project_client.connections.get(connection_name=self._connection_name, include_credentials=True) - - self._sas_token = "" - if connection is not None and connection.token_credential is not None: - sas_credential = cast(SASTokenCredential, connection.token_credential) - self._sas_token = sas_credential._sas_token # pylint: disable=protected-access - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on) - - def get_token( - self, - *scopes: str, - claims: Optional[str] = None, - tenant_id: Optional[str] = None, - enable_cae: bool = False, - **kwargs: Any, - ) -> AccessToken: - """Request an access token for `scopes`. - - :param str scopes: The type of access needed. 
- - :keyword str claims: Additional claims required in the token, such as those returned in a resource - provider's claims challenge following an authorization failure. - :keyword str tenant_id: Optional tenant to include in the token request. - :keyword bool enable_cae: Indicates whether to enable Continuous Access Evaluation (CAE) for the requested - token. Defaults to False. - - :rtype: AccessToken - :return: An AccessToken instance containing the token string and its expiration time in Unix time. - """ - logger.debug("SASTokenCredential.get_token] Enter") - if self._expires_on < datetime.datetime.now(datetime.timezone.utc): - self._refresh_token() - return AccessToken(self._sas_token, math.floor(self._expires_on.timestamp())) - - # Define type_map to translate Python type annotations to JSON Schema types type_map = { "str": "string", @@ -675,7 +591,7 @@ def __init__( :type index_connection_id: str :param index_name: Name of Index in search resource to be used by tool. :type index_name: str - :param query_type: Type of query in an AIIndexResource attached to this assistant. + :param query_type: Type of query in an AIIndexResource attached to this assistant. Default value is AzureAISearchQueryType.SIMPLE. :type query_type: AzureAISearchQueryType :param filter: Odata filter string for search resource. 
@@ -1271,9 +1187,9 @@ class BaseAsyncAssistantEventHandler(AsyncIterator[T]): def __init__(self) -> None: self.response_iterator: Optional[AsyncIterator[bytes]] = None - self.submit_tool_outputs: Optional[Callable[[ThreadRun, "BaseAsyncAssistantEventHandler[T]"], Awaitable[None]]] = ( - None - ) + self.submit_tool_outputs: Optional[ + Callable[[ThreadRun, "BaseAsyncAssistantEventHandler[T]"], Awaitable[None]] + ] = None self.buffer: Optional[bytes] = None def initialize( @@ -1394,7 +1310,9 @@ def until_done(self) -> None: pass -class AsyncAssistantEventHandler(BaseAsyncAssistantEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]]): +class AsyncAssistantEventHandler( + BaseAsyncAssistantEventHandler[Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]] +): async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData, Optional[EventFunctionReturnT]]: event_type, event_data_obj = _parse_event(event_data_str) @@ -1403,9 +1321,9 @@ async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventDat and event_data_obj.status == "requires_action" and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) ): - await cast(Callable[[ThreadRun, "BaseAsyncAssistantEventHandler"], Awaitable[None]], self.submit_tool_outputs)( - event_data_obj, self - ) + await cast( + Callable[[ThreadRun, "BaseAsyncAssistantEventHandler"], Awaitable[None]], self.submit_tool_outputs + )(event_data_obj, self) func_rt: Optional[EventFunctionReturnT] = None try: @@ -1753,7 +1671,6 @@ def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTe "SharepointTool", "FabricTool", "AzureAISearchTool", - "SASTokenCredential", "Tool", "ToolSet", "BaseAsyncAssistantEventHandlerT", @@ -1762,7 +1679,7 @@ def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTe "MessageTextFileCitationAnnotation", "MessageDeltaChunk", "MessageAttachment", -] +] def patch_sdk(): diff --git 
a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py index cc45b34ae7d5..dae476622707 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/__init__.py @@ -10,8 +10,4 @@ from ._utils import enable_telemetry from ._trace_function import trace_function -__all__ = [ - "AIAssistantsInstrumentor", - "enable_telemetry" - "trace_function" -] +__all__ = ["AIAssistantsInstrumentor", "enable_telemetry", "trace_function"] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py index 8729f04d1f6e..9b69a7a6b9d0 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_ai_assistants_instrumentor.py @@ -80,7 +80,7 @@ class TraceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): # pylint: disable=C4747 """An enumeration class to represent different types of traces.""" - AssistantS = "Assistants" + ASSISTANTS = "Assistants" class AIAssistantsInstrumentor: @@ -532,7 +532,9 @@ def start_submit_tool_outputs_span( ) -> "Optional[AbstractSpan]": run_span = event_handler.span if isinstance(event_handler, _AssistantEventHandlerTraceWrapper) else None if run_span is None: - run_span = event_handler.span if isinstance(event_handler, _AsyncAssistantEventHandlerTraceWrapper) else None + run_span = ( + event_handler.span if isinstance(event_handler, _AsyncAssistantEventHandlerTraceWrapper) else None + ) if run_span: recorded = self._add_tool_message_events(run_span, tool_outputs) @@ -1347,7 +1349,7 @@ def _trace_sync_function( function: Callable, *, _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AssistantS, + _trace_type=TraceType.ASSISTANTS, _name: 
Optional[str] = None, ) -> Callable: """ @@ -1358,7 +1360,7 @@ def _trace_sync_function( :param args_to_ignore: A list of argument names to be ignored in the trace. Defaults to None. :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AssistantS. + :param trace_type: The type of the trace. Defaults to TraceType.ASSISTANTS. :type trace_type: TraceType, optional :param name: The name of the trace, will set to func name if not provided. :type name: str, optional @@ -1415,7 +1417,7 @@ def _trace_async_function( function: Callable, *, _args_to_ignore: Optional[List[str]] = None, - _trace_type=TraceType.AssistantS, + _trace_type=TraceType.ASSISTANTS, _name: Optional[str] = None, ) -> Callable: """ @@ -1426,7 +1428,7 @@ def _trace_async_function( :param args_to_ignore: A list of argument names to be ignored in the trace. Defaults to None. :type: args_to_ignore: [List[str]], optional - :param trace_type: The type of the trace. Defaults to TraceType.AssistantS. + :param trace_type: The type of the trace. Defaults to TraceType.ASSISTANTS. :type trace_type: TraceType, optional :param name: The name of the trace, will set to func name if not provided. 
:type name: str, optional @@ -1490,102 +1492,108 @@ def _inject_sync(self, f, _trace_type, _name): def _assistants_apis(self): sync_apis = ( - ("azure.ai.assistants", "AssistantsClient", "create_assistant", TraceType.AssistantS, "assistant_create"), - ("azure.ai.assistants", "AssistantsClient", "create_thread", TraceType.AssistantS, "thread_create"), - ("azure.ai.assistants", "AssistantsClient", "create_message", TraceType.AssistantS, "message_create"), - ("azure.ai.assistants", "AssistantsClient", "create_run", TraceType.AssistantS, "create_run"), + ("azure.ai.assistants", "AssistantsClient", "create_assistant", TraceType.ASSISTANTS, "assistant_create"), + ("azure.ai.assistants", "AssistantsClient", "create_thread", TraceType.ASSISTANTS, "thread_create"), + ("azure.ai.assistants", "AssistantsClient", "create_message", TraceType.ASSISTANTS, "message_create"), + ("azure.ai.assistants", "AssistantsClient", "create_run", TraceType.ASSISTANTS, "create_run"), ( "azure.ai.assistants", "AssistantsClient", "create_and_process_run", - TraceType.AssistantS, + TraceType.ASSISTANTS, "create_and_process_run", ), ( "azure.ai.assistants", "AssistantsClient", "submit_tool_outputs_to_run", - TraceType.AssistantS, + TraceType.ASSISTANTS, "submit_tool_outputs_to_run", ), ( "azure.ai.assistants", "AssistantsClient", "submit_tool_outputs_to_stream", - TraceType.AssistantS, + TraceType.ASSISTANTS, "submit_tool_outputs_to_stream", ), ( "azure.ai.assistants", "AssistantsClient", "_handle_submit_tool_outputs", - TraceType.AssistantS, + TraceType.ASSISTANTS, "_handle_submit_tool_outputs", ), - ("azure.ai.assistants", "AssistantsClient", "create_stream", TraceType.AssistantS, "create_stream"), - ("azure.ai.assistants", "AssistantsClient", "list_messages", TraceType.AssistantS, "list_messages"), - ("azure.ai.assistants.models", "AssistantRunStream", "__exit__", TraceType.AssistantS, "__exit__"), + ("azure.ai.assistants", "AssistantsClient", "create_stream", TraceType.ASSISTANTS, 
"create_stream"), + ("azure.ai.assistants", "AssistantsClient", "list_messages", TraceType.ASSISTANTS, "list_messages"), + ("azure.ai.assistants.models", "AssistantRunStream", "__exit__", TraceType.ASSISTANTS, "__exit__"), ) async_apis = ( - ("azure.ai.assistants.aio", "AssistantsClient", "create_assistant", TraceType.AssistantS, "assistant_create"), + ( + "azure.ai.assistants.aio", + "AssistantsClient", + "create_assistant", + TraceType.ASSISTANTS, + "assistant_create", + ), ( "azure.ai.assistants.aio", "AssistantsClient", "create_thread", - TraceType.AssistantS, + TraceType.ASSISTANTS, "assistants_thread_create", ), ( "azure.ai.assistants.aio", "AssistantsClient", "create_message", - TraceType.AssistantS, + TraceType.ASSISTANTS, "assistants_thread_message", ), - ("azure.ai.assistants.aio", "AssistantsClient", "create_run", TraceType.AssistantS, "create_run"), + ("azure.ai.assistants.aio", "AssistantsClient", "create_run", TraceType.ASSISTANTS, "create_run"), ( "azure.ai.assistants.aio", "AssistantsClient", "create_and_process_run", - TraceType.AssistantS, + TraceType.ASSISTANTS, "create_and_process_run", ), ( "azure.ai.assistants.aio", "AssistantsClient", "submit_tool_outputs_to_run", - TraceType.AssistantS, + TraceType.ASSISTANTS, "submit_tool_outputs_to_run", ), ( "azure.ai.assistants.aio", "AssistantsClient", "submit_tool_outputs_to_stream", - TraceType.AssistantS, + TraceType.ASSISTANTS, "submit_tool_outputs_to_stream", ), ( "azure.ai.assistants.aio", "AssistantsClient", "_handle_submit_tool_outputs", - TraceType.AssistantS, + TraceType.ASSISTANTS, "_handle_submit_tool_outputs", ), ( "azure.ai.assistants.aio", "AssistantsClient", "create_stream", - TraceType.AssistantS, + TraceType.ASSISTANTS, "create_stream", ), ( "azure.ai.assistants.aio", "AssistantsClient", "list_messages", - TraceType.AssistantS, + TraceType.ASSISTANTS, "list_messages", ), - ("azure.ai.assistants.models", "AsyncAssistantRunStream", "__aexit__", TraceType.AssistantS, "__aexit__"), + 
("azure.ai.assistants.models", "AsyncAssistantRunStream", "__aexit__", TraceType.ASSISTANTS, "__aexit__"), ) return sync_apis, async_apis diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py index 3d897fd1c874..424771f27914 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_utils.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -281,7 +282,7 @@ def enable_telemetry(destination: Union[TextIO, str, None] = None, **kwargs) -> log_exporter = _get_log_exporter(destination) _configure_logging(log_exporter) - + try: from azure.ai.assistants.telemetry import AIAssistantsInstrumentor diff --git a/sdk/ai/azure-ai-assistants/dev_requirements.txt b/sdk/ai/azure-ai-assistants/dev_requirements.txt index 105486471444..b5272c25b382 100644 --- a/sdk/ai/azure-ai-assistants/dev_requirements.txt +++ b/sdk/ai/azure-ai-assistants/dev_requirements.txt @@ -1,3 +1,8 @@ -e ../../../tools/azure-sdk-tools ../../core/azure-core -aiohttp \ No newline at end of file +../../identity/azure-identity +../../core/azure-core-tracing-opentelemetry +aiohttp +opentelemetry-sdk +opentelemetry-exporter-otlp-proto-grpc +azure-ai-ml diff --git a/sdk/ai/azure-ai-assistants/samples/assistant-5szzLs73bsbQ2k75xUGKv8_image_file.png b/sdk/ai/azure-ai-assistants/samples/assistant-5szzLs73bsbQ2k75xUGKv8_image_file.png deleted file mode 100644 index 928fec58517aee4c8bc063b37949f8400224d95d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 162061 zcmd43hdY<;{|Bt1L5r-E5>jOEQA*jXY?;}c>`f_38JSU}viB%tN5hQFgh+{u5X#o` zy6)d|Jje0<51x+WKJM;reLmN9Ug!CKzt;N-y`n0&kDP&=goI?Dg1odk3CXT15)#s+ z-8=CoYbw)j_?MWQjINu8lck%diHik^iiz8GdnY%08&f6^3l~=#Cr1HpVQv9VCTlmh 
z>#pKFJP!Zg4{$rVSn+iHsr`cw*>hd~sw)Y}u?FIwZH^z^B}ulCkSIt?X?i8ieDT!P zm{>nQ@7eErdN(--2l?zaN9W5!zU2j#r4jobe#zOD+D_;BYUgN8=V;xpn&sjk=Wsko zO?|aQCm?X1#aMMG>jTn@!-dmRTCSoKVwa;On#DRc_eSY*(%!oJKfmPf&{KPo{m&oW zy{q}oT!iv}eoY=_s|x*}pOT;8{j4JWKflE9G&?f?&#x{qWsgJu^FdMVCMA#m=Y#IL z?T`8LKc6F==9)0W_CFuQoc!khf0tyI_Ulk>;J;(bb0c!suk$v%dGmW~(>Ez| zRd%1-Oh2Xn`rqKp%*+s5`Tt#=N@-Tw`|lo|7Cm!@qA`N;O_{^sL3;Yrp9`iYrJ19~ zM*d9ozM(VryZPjtZ^OrrR~+0=GIQ)xS64qsMHM9CG|a@!z2C;hhF3@^I6t3@LpyKx z<;#~79zUk@6v`%D{QH;oY_02rDqrvIC7FvCw{NXZ93QInKhlwMUJn1MuAy-dZU!4W7?hF-vXZ`5l;Na1wD3(`&|0MDF9z1w(BP?uh^N8=#&zocv zwA99A?iXxrIi5)R-@AKv_uIE`nS_LB-oJleR9t*BJbd5KmoI9?rZV;CNYxbP|Nhcn zn(9?r&|O;(H~%fvdffOp^E3Rd+qZ8s2@BJvNcvy&@DRp2n^;%`FZ^gty%+PJp5 z-I&F1ZE4Sm6DM{bI6z!-S9iCGrR7dvUti4}%{|AC9pmNWW1qE+j<_Eh8fD;mij9q# zg=NS1`1tkf*Dw5Ni5s1r-Os|3f0;h_5Y?eWzgK>nRr@aQC%#Wnk-V*~?V6+GemOZg z&8#ciXwGQg&d#=R)``2DrKz@F7_H0s{@JBFMYg>hw}XQf%6^o*e0e)F^Hh#b;eIPC zt5|l8o%o)E_ut8@sJv^8JWhG|aHv(slM}40qzVcO(Pqcbdfl+;&KP}Y98jNk^~J}! zz`*1>c|5**SjHq#=j~ftTdzFlj+K;@P|w;~Ti;##GyUe%C%W3&+Ujj=T}&2-iZU`X zib_g?YHH4%J$Fv6+|khO=f`ax9v+YKukE@1cjoHGQupN`yP7Tz35g>|L>zCYq#Q4K z@#21!xcEeO=4H`<4bk>wi92@=93NlT-$l~b*GKH#N{?UngdGN6x3wuN37uqScF-@n zwk_iL`GPpt=IJXRUszqn5*55Q3l9&+(;&06v(wDiqj~b=iFJ3zKHPl4U!U25QVP4i zJW6|450Ceqsd6UQu3h=*k}jb;hMQ?1{=m#E)g^IreOb=hI&19r#_D1xJ~urwaC1#) zKco0Gk)<$i()koUa66?TIgpQ1LZ_KKRTrKP2{?tD(>^JglU zmP?O%%J0UvxsTyTM4mf;f6wu7d9maCcl>;Gc3J3~H zm)iDT$y8+&bQ(U2zZDV}7x#WzL0a=sw$NwP35SjOHwSj_-@o%g%ouH`^!&oY#d60l zA4@Ehda^a-CP_BzdnunOyL6B; zyA>s<*zyB|w2TaUX`r???d)*P{VLBn)5Y-*D+{eWouzg<`#wd#9Bl}u*u7^@)LF;D zGOn{{&)zGRP0M@vQZOw&eH6>=wY4FJ4H6M|MyCNA6kF>~X{o!jjv9;O(5ssjt`j+D zzena=ym;~YXaiZZ*?|KGUOEhFr%uFL5#O`9JW?89WccvWBl7d-&u^?wq{yqPYOlSz zoFX}jqyEZom7kM~i_gBFH;kTddwhI6Lmn4p)WYmg#jP7RNSe)fczDX}VqFd&K72Pk zT*ls>v!S7Z_#*p(mwQU>KE0p${6ekJ=v-;C((vwoO-=GBa8_&HjICG%;&RYt(?d&!{y%6(sI$y@9ekl-zkqB zi#VfGa1FPRrC-inyRp#f7gKl7+{)_v*ZN!i)!qX5p*V3iF0Oq#g+}jsb9L0R)ld2@ zeh{(l%S&vT(AFNFV5FqHY4@qXWc}~2EcJA9e1_6X+g?Sp32i#;iHcBBMQo>O6;mHej 
zQIhgXN^hRa++V0UD9p=y1Lq4B^~dl=;L7|qEYh2FrHAu%|2892huwyPDdM_Q!mAu-vH_Ev&vwF^=Pz!(iEIor0umYv)BoMDqLcS$(lJYd8Pu*IV;=6Xo32 z_Fh^VtGT6R%f>l&_C5OLj_>Mk?JBnW#El!b%W098R;1Xy`@!MfrkzHc)JKno;l!Fu z^<=Niyy)mrA8vif^J7aTCbX(IN5R;PN;v?cKExD>LtUdPx!y>yW_y4#h;X0rSKovFtT zq1pP<)U+EXO8)ZYj&hcplI>gX>`pMNi7)1`L1wBclHs_ZujL~F-+hLq8G34bJ$v%?bu4`W7oIPI8=03=$hT-| zXo3$5SYDfGPfq8MqQf5p(a0Zr$fZw6=t_VSdiJg~g+u6~I{`4>>E=dr zF)@({ShdF<{@`%!+KqTF{pS{~@z%IJ)LYi%(zm93#1~Oh-^QjJt^2oiNU-VM`}fR} zlC_%t@ptc@&ScR~7I%*w5RsHThR#WCe*}+(Ln|k(=XZ-+gI#Y<+)z|r1xr=RS??h1 zKo%C3OAF;QGxi&Q2c3$_%XyZT-c&3+cGSmK>FLq5AE{*=@mmZZXpI;4oHa<4E-x(= z!jj!0`s|l4hVw@c9Xdn_#P;>;HG>KlGGI&|b<=G1bVcljgtotVd7N+2V}5^ceC59` z+||`Z8i<|CXz$?AS?)Bly@T?|k^6fNu{F6F@bdCr$kopOSmA2rZ9dW9mo7|GVjxJakw>9;Q{>|uBmGKi?08+vGF^0)hqAC4CRYB z@cuw-^(()>pB(h_^V6K7FLs+$uQ!uf)((y`-W7%0OFHLE>%X?7=Cy&Y)`_o9Z>OQ9 z4LRrgcXye}Bf&joWo3$tg|qAFMMb>I3A|F4(j}pxd(gS>s3wa)Gpg~yirqn-(bHoX zuJylvpN{9-+t5Qn+3hb%N{W2`*b>)?hpDNwDeO43`}pw`s;b+Qi2_z*Y?WAsa9sN{ z{1fCG3E%`MHj3@Pd-v|w@9xw6Mjdz1P6S}nuoL!ajy_4O=X+mrhQo(V=D*g{^IjwQ zJNqTK)0L7kj@q=C-gTlqPQgt=Ow6P`No@Z2?40KFA*)9v)VVB=ueYo3C6_iBsg&~@E zL@AD9k$dj9y1>AGoPK2;@Uz=LQ6My~w{Z2d*t#O-{ES!s*9qTCl{V#0}ZLLl7)c<9a z@OTZpQBY7oazn4wHe2vVsauJDqVE|C1xtW3#0$ok69KZryCy1KCnB&g*f3dF zo&|MTWGNXK9L-Qo=7{-Opi_7(U}bcdX12P_IK76327W`Pm7`g2mEs>#r&* z^BHoqazjASm<0q5eJV6=G!EPhMiIA59gvnzf28+OO-_(V=l&UwJC?;oFC z0$dHQ%vF?@*0kQgf4^>}8@sss6l?zzkUG?Rn#v)ihunLj9}5c$(+gTZxOUuo+M7;h zxU&MNi(xKd1?xh73^0cgJ4G|$XMCpp=Ms88Q__f|VnH`|?8>PK5j%SHXb}#A+phtw zqH|naDwf_yY|d!sy-t;jBpsaU%fFv2;mJ``R#jNY<1y2JLcnJYmnDi@QMaHDLAk8m zTC+=JUlkA#Ai8yUa(U61`@zE^j!9pguU&h3O*^OSTj0O-<0v3!`uoZp^l=3;j?eWU z-MqOi3jNU2+h-9snIPol17|ou6{7lkdc7(ePAXW$yF{t zT6F6IYRsD-8sHIJxOg#0(IVS+W92tLqnFdrt7o0Av>|T0C_(M~{(iQ~GOQ}QcHZ5S zirtamRlwAwgA78pw@)g?XH~P>;L?6i7uJy%ydcUoHqynD*>Yj@CsvI1Qc$#ddWU>- znd~}Q{~b)|X~5=MF!$vc0n>w&lN5x=Jp{xMfz#BaOqbvO*6_}L#;=o;?A}RnalzOLx%nH_ z=tC!8^p$a>p1ywzjH}J3T}@4mT;gb+aosV~V%-H@d_KN4c$RZ=b~d8etdYD7kf9~) 
zfsP0^P1}4%-i|+_!!O==3sO32?@nreLOTWQYSxM{g5>=F%hLJ=Gx=zZ1%0? zx_>m4-{oIeelXAQ0qb>t7Tp6m;?lpZO|ro<$1nGmmXE` z_YaWV)a$@Y6SGSh1g&qN?a^cp6&O~1L+1?=dJWDL&QrIc{-g7ocRGsg=$kuUf2L@t zsHm>l*zAFj(Nkz#C;y{m5rPY$tzZZJ{{6eCsOUl>zr__uQLid0vi}%VNcykc-gV#@ z)7i5}0Zw`O`7Z*PcLMnPMG}QBSEukG1H-*nuSCH^-V6;LMTfuc>?{R(4uqW(B~C|I ztQ+>=!NF!cs9cR}nQdGdI5tGEOe} zI4UYCnB&aJlVkuY(z?3z#MJ}DfedhzzBdH>Iwt9N7i$1WRR8uZ>EFM9vviA@ZTs@B z{!F)95dNw*TJ5(=#bZ=`0b~$>7-GW?;t~oB_03pfp+L+NG7o+PkE*GqB?!WpxZ6(> zU*C1+hjkz}^=o%Y=6{usC6#*ko{ac9vIAxS5>HIP>clK`0X`m8PPj zqk~?*zHmDvgc(QH($X?ZGn}2Z;I@)FWU&=5yy5QIyfrw&QD0ce5)zb}Pf4!L+L?+Y03YXCZK-?8K5 zryr;sfN65KcJAw0^L-Ycz?qM(#`d!(AwE6?s!N>Q@3r-HiU7GYS~;wTE&74!>tFVx zP$h^uKOYeP3LL1wOVJ8k$p;=(%JYSpM=gg!V7w<+=d$HX4|8)?T*m2sT2LRY-^rOc|Z<;th&2w-*L(6rn*TG{Wm~Z4-yhie=dV2=l4lM z#~hWF=wlBc$S`HzYJ#-K3ptIsS1nmGVB@rx!O)0IXJ;pL7g>Azys@d};D?)4RaLTe`mr>i`KK;FA(YHLd-g!FyNJ{Fb#}I( z*iI7X1W?cCy%`4t**xjPX9-@{or|rU{sw-AUdB3fJf`{`943%>)T%FrzQ3W50a4EyHy$n)e+?!2t-IKv||m(L8wK&>q1 zxAaqu5L|H%MBRQKgrt7*#eUra+@{IW&rV!SVT(rkHZi+(s8RcHJ(sZNL;i#e!c)6) zu_R-YXeq7R{C>ZqOW-vLYMJQUIcc$T>|7&(%E2t68^aldqTAXJf}?Qhmwg5Oiy5Sl ztTSK6Qz!;6A|)eZKU#my&hCC+zW(?7Jhi9iGq~z>$(X$Md;jp4tau-JoM)vSdSHz5 zkYZrOqBxZCT6cEiTE8ec357uJG=)zhI#Rz05ozOh<8e{vZ_>5N$8)sv@1d?<`}+DO z$H{v%{#)RDzt`vLz5|g3*&K7NsHkA~Q*CK!Ym2>lz67^p4IL(`g)0xGjv$a=!5X65 z677c|rq>O{AhB z!SOG$>1L~_(;&HKWp#%THgGA5r)p^YJ2Epe2rUfkBcGgM^7<>igQzQz9Q9?fMy#m~{z-wk}`<(ZsI zMMJQw*^fmL^_piQG*<{E977!tTh93`y+-*mN=x&D_=`gm{8+?keEq7hkdO=n>yMCf zo9*{h2>NryK6-SI(68hokI6uOZ`g#;_jP*uKBS_iO-60$&Kfl^aq!~%J9h17;^0sm z7j<=Ar23~Xqsd_HaO@rWI!MT2=yBSwW{tq22x5%u{D#FRG(aG&d0feJJa&GK+dz7^ zAe&Jf)4zK48TN-QdlghVe!4210t2;*Brr#_k58rg%bik&q(vTdkFrC#0pEH9hC?A# zxrjX|GdEmQ4Ee_I(3cCUJncZ`&jSB#d2Or|w@kFyfymN>L310|COI{T7XqD{Ha$4e zU+XUs6cj{$mK$~tzKPH;im%mguL#|5c#w|n4l1Q7us1nAvi-wXKnq^qzxIhXPbjF+ zfY72%P|b+{sk$o|jlvYlfnuH0$>61lOL0&7|7|QDKs^jXjm#1JqOYk*EgN|(92zWy z-(9P0V(943GyQx^b0f!bbsB3&HAT^6WZ){zV_(pTw*14>)~#?kxwbrK{4@KCD;XIX zaTWZ4vt}K9^>aKPp9x?p7RigFX69V#}D{r4p85Jhdbw^<Z6I`#MS98| 
zW>nI*34CVP0bGMWaexGhdd$#6dS;{N%4P@9)qP>PZ|7e68z8pS^p7Ktoohfhn14rS zJhm>8r~{dgAfs(h&K-fku5Me|x3c18@ap=u^BeQG8gAPv97Mx}1VQlb)mje|6O)y# z^|@l}PUhq)$EIm?T0)-27LS`-UjUX^m4{ka7bi-^Z+*rt{RqiD&!B>TgxK}Z zgv(!CFp*paYD*AvWs?nO2*xv@SrH@76R0a+;FjOY>T0%MZ3Fr4-50hg(m=9x$o;g2Jd&K ze2_um5HPv^R+iQru&}iJhAkE30N8b9(Crh1Z2r`|MUM>;Q&0$-Sdt%Hte37 z&mhjjS0a?abf=y87-9SV8@RVt2VD_C!P{`{h8AUHWK2LQU%5}mM|a3ux5jf3^yg{R zMRc(D@7YG8oL7ZD>g(#h?(5SYy*mz=U;Ue2uxv4ZXL6Ha<>iVOU|vdIdx8B<^KZf|OO4P01see`1TeI3JMrxD4vWC@*7&lG^=p`G1msHQ*(!H(B_ zU>@N6g1|#|mpy*`IJf4Rx=)MhQ?I_S1~ooH_+IYv9TA`_4N(U>o}7CFU`l!Ps6p1^ z&Rx6AAnyh(_XAsj8@U8p!5F+*ShuCOkT_W5E25pJdqTb)=Y~Bj91q$A96C714s_+e zMZRHfV)AT$U<`!HSpMa#rI_hj~VS!^t=sfpt)k`G1f#Go-dYXL);zuy7{o{nKNgCQ#uLQ1lXdO za!H>ro9D^-KyEL4Sh@>M5|eM;;fq+vKClsY{*bVQ-k*FFs74`x;a_i7?YgM}rj8YoO^%;TsGV~etUP|>Nwx0^Pyfy$v z^Necr&Mdzr=mUg?A6xMZ^z={{WoMlNKq!Ht$zZspcl??8ETC{l&)a40Dz%}Rb?%PW zs1Ljj1A@W7q(QS>teEbFhrc7dm@fA#Msp)|jS#9&uI3rwuyBOapJM*=aX@f<+qP{a zE)4qc4LBE<6B6i;p7lBi9T)N*;h_))38D=fbAkUZSzDhDx^>GLnBHx6=&F7%8#9MC z0vEA1M+;zR>va&`w-i4sNFI252X{gojcs8dbU02K6B0~x&$Q=u4zGoxQ(s`hMoA(oBx4jI4 zy8-8eU?`vrUA%P3d8&sUcSVt%oh^F(>y5EL9FzP40v47mS1b)bWM!S^gkF20UBwDV zfyf-dc%p+!3y1t}SlC-QUZjobX=&F$f%rv2aKZK1=ABq%U0vM^oU|+uc&yr!>LH4g zrM?4!A_v5SYA>5%lb&+_wqu9dH{?MuUU?vfUxr_1l4Zr0tp3xJXIC6U##i8XFr!nvi0pIsst-_3u4I0Mg_vmm`WmfoH7g zx#R%|$J>+dtKaiX<#Xcnrm$MUuH3)|k#q`co=YL25 z5MDZj`kWrZ!TkuI4_X@)qCHu9r8(CWM{qGGL`CTdX%gG+Wb-7{2*Q{FEYkMQe~nWp zZ+VHZun2Ri*59|;HwB^}D#0}%tDuohU=LIW23Q+0iW3?k=xoa)fswe_J!K-cJ-fl9 z?>v0?a7-W}IXN7wxU#-Z2I>pe&O@8BSuo=F3?P%x+qFf97e+DZZ7ERC8+ zmiiC`{2-D7Kvkpf!;if3TP=}|*_08&gKM7Ck6}~cHE%iy7e%3VE;Tik*t?+8TG#zY zCnip<&g6nwM2?kKyZpHKb*%~>3O|d}qWZW9A087-1R=C`(kS`cIAp{l35qB|;EYK1 zY^{7hAk|<-_&mU|HI=JYc0xkaU=ac+4n zDnm4u=L}`oR35ReaPNVIhHZ3DP%FTQ2(%1WS1?URCna%S4R{JyFw3~EcHp7rX%6pX zh4jK->4m{0ABL7EROX=)(h`n#<`JHpq3OtK@Ls){X@-gDS*EV1QafI~dbPU@osR&S z{dIwL=7W$@d)5*=g}qsB^d7cCp~SNdil8P77jN$X<|MKP&F-?ZC8`=3%Iq$N_j7Or z_(y7>37sM|9^$w`SzriVMyRf3$XHxl+!R>Zx;KXg8MBikHvwT@18@^dS`pCE($avC 
z0@RlXXxL-E1*k5-k6Pp6g{JTB5xA1@S#YYGoxFiVK$;0Vn)u{oXN5zbE+QJ&c;Ng7 zQ}<{cwg|!x$AKAlfeo5lSP;=Js8#iVuB?oQ@Z^iko2hWH6yo`>^mUy7$63*yrXPIc=Ok;YH7XarRK|!RGFwkujFcTRnR>Fse^aEQ<|CbeFWou7<4Cjqz zt}k%?mk7=bpyn`&v$^b3$US>eW@99}dU|_lh@uOKPB5V6C2xpBD<~;idJ?n9tdO{H z=U@w+S$u?Z-|*psisL2g8lOKO@sPJ4@q}+TIyH3<$~h5X35t?NKBWPVY)trqm#?o< zU;18haw@^+`29o`+l!1b;XZaf-+q{SFx6yn0el`ax>7Fggy_vym7W<)6)K$|U=^W! zXHS$oDH0qoi?<=}1__ZF{`I}H=`YF%0~=|JAcvN`&?Mg-b~ZMl2<9~%Wx?Xr`gw;j z2r2$&aPn*LMjjQT!j~K}co8biOq3WxJ?hIpLSRc#N4SqfMvH(XQ%}mdvE;Nz(NLzD z3s;kLhC9;GKbSrnR=8NeK)v8F5uFL96x7%iPTBJ0A6!V?qH8yqIvw!!uvJtnWsxFe zX`UX+P&v|;a^5(4LrFoQzS?{7>%>H6*WI?E_aBKM1AHI;K!8#iEO#t7&-t!8cSlFZ z)pqHpjzDx^gz4AxD_njQnY`JVIOmB1uW+f2Xf@Nu0fbdg$MYT#GH0p6^-u%Cdb*{y zamqXHqf-!80NRQPx>iv3F3-8)a6}QXAqg0;0wvpA{CGlQ;xqU}M8Hq---c_mr#0eU zL=qL6*BL{dum1uT{r&ui4V$Re7ZDLb&u_k)NCLt<-6K9rG*n14P33C60|N|jEiM)s z)hsypm6w&t;=exTT@~1SRP-P$TWNsU8%h1=+^1q&>TXrkr6SAL0Nv6&BU2Df3`G&l zMUT_q)s-TTB|;4bLhTufvRpd+pu0I6&m|alPr3@hUWuBGDs!wVT!U2_9VZ`)ykzszdWTo;YDC%C666q%ZG8`4z<0G zii;yaDf-QuzCKz!UKz;$tSMqz;?J`6f##pGFq4Rii9JKv3UXMOU8hi#&Q@HjJzvg} zd*{v_y^bAY)pK1+v{?*;kDlA_qGZbKb{sbMgdYJDi)C?ZlH8$`=ZA_%Bhnt&&%uft zj}j9Jfi>vr0Fsh-pnta*_FZ^9Q0|1l-hspj4nl?sBBQ`+5+Py&I00YEet0BAc1w_) zOz=vlRNfKd;@wO-hHyi~9 zB%~hr7FyykVZ%OCOO;*ucAw|^L_1@m#r|FhR*8wBbG7TL4}J>s^EaT7hT$_P{t5{S z#wOJvRs_nNVJ=&UluX-WkpsK-AH8Bj`(ns<9JgQ!;KeV-gNPZle5v#aY3bn1(%A?g z_gxRVvv@8KBZjfFlt|XTd9wp9-{{gKc=Bq)orM)WUmkGGjT`$63 z^tfyC3=u^|MULxV(xGbZW)KM3&v*|yBTavnEu>Wl2t;}!+5t*Klti*#0MZr3$g#x9 z9R>`(lpeykW5*6co(_7yIus&j$bbFog^JMG>ZIgkKG$*O`Z8d)i7HiZNNYqQyS@y8 z7CKRZ3-I864?5Mw^~-wMps76w>uQ5fs|qP-3R~d>)m!doo3dMb`-fR_f$GAz1TVV?Wlb~Typaje#q}4(FT3_ zlt^dt!&|p@5LlvLyWd^)%a<>NToBwHxJ zlBHTM&wNh3&aql8&`N*!aP~{Tgs3RWO1H^qr?b@M(1V*)+$OYYeB2X1ySbTgUGpA? 
zmV;~6L8s^}G(KNn7L+m3YpERX0kdZXX)ZDa6DBJ?1MkqOjj>-|J4hg2YKdxuyItfF zan4i)f}`5dhW08yO0_3g4eh(_k&x2p$ahG9KU!!AbsUFF4lYG!U0@)mQ8hgsx4{*g z3U2q69}kVqEz@lW%ktX-7A%{tct#sBE^m^p!l^@E?f=qK4|$A_p@3;mSCA`J-nxsP z)X-rFm-R}itrp_P9ZB3#j>av9IWoyTj8=r>^Kuds39)y5RRIejnX=obv?8&Jc?IG~ z9}igB26N~FsvXhnX9mkJJL(%tA@$6eAeD!mv@)5QLY|Zin7tRxhPI9MFLW2bhgFr= zM}yj^C~>n5h_%Pg#=-&(hwOBw)X60Ie4vr#o>bJ=Cf3SyA;%$lM9Qe_?}vqP_Do1D z`*Yd6xOZ@pCEuUiRqphU1t!hU3p{qTFxC?xZ)O6ryvvSB#c2ej}2?v>5@UJ<3X;;WHmMlS(j{ZYir#pJ#v?e zV}W4jpdsa?d-j5=zJAx|fdNHJOLlpH67y&#^bCzx|9~|~_dQ2SgI>ZTPkn;GrlB?l z&6h7+AbB5ln5Db_$M^3n0(yFsNSSRv@@f&r@PnrUp{1pzU8()V`Gl8KUq(U4&BV{I zKDij<2a}4wxsr&Wp;Ia{o=1!+G2t`RY(;k>2Uzs|UZs48`O^>V+#p|MssC;!u?Ei8 z$Vm|(M1U(frx1lbzY_xV_T@7eXc)S^0^$?*NVOIqH)y{VjxHhtlP8 zi1i+k4(IKbp4E(B7;{+aFnnC~^17?5hR@P{|s%;dH$ubqhHf=f!cA`mzm6Ls01m zWjStGD_CTX#Z4l5Q(9mqfd%5cS|UBU{te?Q8wj$kOyw4S%-0to0y`g@RSv;zy$dm- z#mxZL#XA@Y+%E!$Nwv8hhL)9#Z~ZtWD5&XZKQ~x@K)=kMh{ckDy(Ze}A|=d;1$^sv zFZz=dYuzWk%1e=Tb#>Z(7Ja0X9>-KRhUrTE6&^uhRknJG#f`%u0n7*JI5To`Y(Y_8 zo+vFuNDj$9MzE_=bK&|Rgq&4*S#GAMp9KBQbqnf!`&K$5e;fdUe0ik8HS?=83~^q> z!_(W-RbXd5{OXKK7Wbq$jW9Vr@#PFR5-MNPzRk3CU%R!jD(pk1GGx=l4af=HMkhAy}0!R85TU~}uC zk^U=y8AYL@iC!>TBG4cGu~i~HoacANQ+aoOcJlR`)=zIQ{vLEoYk zbg2#4)EliF=&}p-z-&XhF5#!P`BV?Mq1{#_i(}^J_vo8lq@<*~W&3_`=$CQUu8to` z@6hW{;e({(x;x`Y{SGt@4)v{wdlHZ3 zM}%8(Sy{aDj^l?8-D+xT@_U%jc@m^bL)_aMhAAAVNabCar6n9P$e|KSdnGg?q z#hD!%ZtoF=NN-O42UYhvG=Ri(qkGQ*Y^=M|_7GeiBQ2T>jM@+S`*m?j$Ow+vm9C^^ z#KPD#X?~qJnZF>;-GvQFoK!^1D?V-EAV*hu*g1b$Rw{IUoPr^roU?-&7=@|*x3x|s z@T`wr%X~ zFU+6uLik{g%iT|)3*-)o5F{4v%$HR@pbS_egcr(d*+w6B{JiAIbR4SBp4vG$MtKWP zA^855c&;;(O`b4SF)PvP_fJk%RE@%fxziW@1$mfyNIYZ0+YL|-7v`X@w4ReJr%T9D*w4&t$mTR<;qJK> zrSpVCL$chaTQ+?Wuxvlr2o(dcAxg6>BQ!(CIbt@xP1oI1DWb}_Z|vU zFfMER-X-hY>a-*BP$M#3Zpom%Mrfe;^9HdTBXIf znK$KBi!2)S4sPATC0IWkfpWx9$Fpb8PgMklqlx4*}(FY@|rnmCblU$aISKT643 z)w^12dy8Y_6!aZLN-&R-Z9WLnJ!a%-Edk#S4g^roP9C1Zi;OAd-$@>H+Y~?b4MAhw zBdce}>^uSBjiU3P^yUC(TQ*ZQS##|P*IlHyh&cy^7`Fcewt!7>Vu*=gT%rqq0f`9l 
zfN+Qw{AF}?6WSIIlko`(zJ-}V&0U}Nv(`8{wd%*0<;MvLS;zna`4qt=j?4V<=_QU5 z#cUnp(WB&Ho5-Hj4-V?{v)DL_RVK{DM+m6wJ$=S@J^Cym>hqdxuU(&&Xf6Z6CgRYS z5L39PStgE&8YC6S3@hF0f#{uof0Kc(xeNLx=XFhA&~GB=2eNKbA4EpJjIoo0XSFT0 zPkUp)ij~pzJR*1q3%4rw^7wP>>knQmeP=TOZ;x!`540hU4=%gL+V)$3ZNTwqE<*?{ zJ?6_mwH4d*ARDgFza}R`iF6n*??LSK8;};qoV+t*wO-cRNK2EF6qWC#oTTxtF{KdT zvI-cB%T#6-eOS_cM(KEr$Y(C6wJGfKMEA?E19DxQTrTvRox=bjh_K>0Id|4d0^4q^ zIO8Gj#C%fxY!@GNVo7kRz)y^DLq2xlP)pzd56`!oq zLelLXo~a1{1V_yrD6&qr)C~R=N<2J1Ig--gw=+jGdpn|-QqfzmA{9?oadB~J zJt{Nw>8Z*Ysp|SfbFbEuhP}tg|Ry zr*sQA5VyqH?rqAuibqTcn1u1gG$FYF3D9d{Y!9)sF>OK-Z`Ty}RjUWhzv^y+r_XCKDg=c8&xxpJWECpvgbu+pUQhNpBjHsxMY zRt|frwvPnn>$8Q5vCzm!ot(^!oP8l(1j{BScU)3ZIHF@>+?hY;Pl;D`v0SEhCw<_U zR8mxQTJw#vLQ+UiPY*Q~r0B(qTbT4EQtL#TNifcfTkE^lntN$+F}voia5v^>S-(|F z3ZLaPKQchE3oE?1k4AF^Fo+0M0kQWbuDfWBe0mOuACuETb7f0Ud5|x=wRYtj z1u>CR#D&%k%Z-GF%>l z!$oNAK^C{moJI;3)TM(>933Mdr=;R!5Mnw%KmYZu4Q~S0v315`ngBB!yJf>@utgj)p4*Va+mW#TeE;UPf3Q5sXjL(yGG%Xf+BS)unJSdm zri-;LNlc^U7s6YFgC43PN;ts~ARxs41Q#G-f{2gfa;L3}=|cNn{LIpW$)gXFlexP0 zzk=XU+M_nS`E02omhnfP!_=$D7|$e*Pi+5R;r#-ZPq;~hHH=*O57lzMfDNyTMrdw_ zV2T_>k%MoNfk_KZ)26B{kgRMyC)!0-!vv)`n=@Jt*1OD)EdMaPmKz;QyvxwzO>CLFLaxmZ+L5<7NKzmh3-sEful+Gz_tr(0^0qb;`C!NDUK46DatpG@9OPz%U7*Dz70D3pXE{2hM|A)oEn zjietU0jodg!cWcOOb0`$(&zUPgkJ?M@AX|umL8ucLG(wC5ao*~wM4iB1M(N=mw~G2 za5dU%RWMd$`%5hD>rJ1YtNjOIIrx!`3Z=I|F133FIpj8kah}4b$ik=(!KlDa5pUgl zq1}lXK8J^GG&Aj?h~(?S7tY0eb3X|D#X8_*yb^BmmOFWh1;!Sqx7sk5@qa!U-xGGK)(bPr92WNK{@7 zrjXLT?$RKqiF5OYZS7+9`J=G+v~$k=3`c60FL*lxTc?AmX%Mzd1J)h}&Yjd$mc0CY z^j(gakCWi+08yEp;!{+UrDNnrj(DG!lh$&uk5kbwzjt* zE5a#EgW5gx%4;fMT?H$px)~-uq%xfFVg~wC$`uLp`E6R`u#t*11qEba&!_BAFKJ=7LG{_m|Iarv2T&J?FtE zskAJGVIV2^HW~dfD>)D7tS&z}tEOL~59gniHSM)4?K@<02o0$DJMM_(^y%G*2&hea z3+6*TBakLUu~Yp4pizk(HEe@?f?tGXrPEaOpzGfN^&#{U`{C-tgvAc*dj;3BH1qio zh;)pvva<4C*yW^5R;=6bij=g=Vd3FwcnQEzU7&HvudU5>M8Py#Pd~!gz0_wCqO`}! 
zcAq?XvYC~Ig89p&!tl$h3uW-DWGrjCqOB3k2x`Q@#cRx@#Xi~6>dn?sl9+t*HP}#J z`SCQ#^!CY)gi0eF!vvSQG!30LrQ+{aLF}|LC&&80 z1`%k@{)&WDF>+HF$5zN*gwmL0P#&$gCX{1XCA_h*@zTW+BSSjEo$Id(L@Xd=UUV zw?^`j;t*2%M~P^%n>mrbK{kPil$1V^BTy1?MB2pbKuOvA%sqq2X}*q)-3ISzF00n~ zK-UY}Yo0Oh*kAN)prs=hsf+|+hey}4Z-F^}gC6#?EB(G~`_+TqjM6=$AhQW)y-NmK zdt)*D_8Lmh$EaV(lG{FZK)^tETu<$b^EiZ0NZPw4k3&j$XmhKR47Wvxz4KNCm&g|=n^ zzeUSe>|_iOG@=H-{X}>=_)KPNjrb)& zM1+vAcY4)ptEW9x-X^;3x&@&69rtc~J~HDf;)gJ`i&TdHvhq%GS?nq&!AS#D@pD{E^xn(y)G2m?PZ zvN;!-9sc$Hfq|AK#{+xz+>D7qwxp{JtaSHqE==YY!y`&j#C()nNbH-gE-EN?7ZJra zAH;fS>6sz4d}(%VX)O=$G(hwx_U&U`PVOE-4|ru%XSF?kNvJEY3B^9~P(KVz5&ehC zwRydOYY&PsOrueU$R}h?p%zF4gg8OFetP4%-mcg z!q*%}!_;+&ql43Ykdbj8(*(o2|Mz7a)84Tsoid7|d3|+}(XyEBK<3@}tNgzppqFpx zA(Zwnii@S7n%~!S41gm*U^>6eKcAukzwBv-lI@?=_4ND3EKN)I}Rd|x}J|m&UEga zVS3`Jcc#T{o5nUZkt|)i^o|p+)5>vnak)q+HG_}@?|_6-XAj|{z7f)>1DvoR@OH%? zV>R_x-eU_}Fr?9Zd95@eIIbcetN9-T$YGm6S}<=x3_OA?Qpv2uz=!K2Xj8V^1~fSG zb2M%?7LJbGF>cYvu3m+DMNKHRMft4xgE_e=;eag^$DqEouNYvWMh4iW45c(jaCm!x$yBi+&*j5zSgxsxo|9dvf`y zC^0}bE`nfp41a4xHrRwAKZ^jCt1#Z8bD)g;}Y9V*Cw zko~Z;?Rk|!AiZ&McWgG7%nt2$5CSYL_i4T~&lXq4(Ar172|^k(&WtDR8j%CpkE=>? 
z8%$seyj27HB@$wYkW1}zBZ^+U!Q$W!i2WSphk>zlECp+pYD6g~BTV73HMiI|1e^c9!ZDk7o@VT z=>tIs4QE}YCQ1_!JJ}#ozQhzYL;!>vAQ=-4i$4&UffDcKIvI)KUqXpaZ&k&l{9&&2 zW*e#dg8)gC2M<17+Cn<-;KqI=G?2Zbby?&4IrMyUX9N@{lXu{+cU@lK@I>2e=v!I z?)u;?zWW^*xu8oyP}8)*%;>1vUZE%p{Ay-@RXDg<7ZfSH?% zlg?r8=~tnHT48WrUY_{?Rr=gbiV|ee>%XH~m{3d* zaXLmoW=syKTaI8goHYkU6gSQs(C>pLn#WcnK4{`wAy&|fmfs;?hLP;QM7<6tO>QZvSoF!qiB#(Qs5sEkJ!HO2nV0`InQ#;vrwGnE zyY})P;QDa@Spa2%KNW6bZ(5Kx1KjZ$)@<%HQ^U3Af z!Nx|p*)4cjmvAD;1_5Msen#+K*m5vKr>*$<8xNy*SW1EM)5WgkSDv#&^|;U6o?_$@ z;uAj?yc}tyjPuaD>`|y+zMn`Q;#5E?*;lN~xqA5+CYobmxnS6v2phL_lT%T~(Ly4R zJIr?&rVesE29w!7#4Ex&B-fS4Lkgj1^8Y`aeFt36>-+vEgp+kpA`&UmB%y6ZKFMl` zG$^61ElrWgNFNQQK~gFj(q2d^qqO(lLsO-a{@0UL=leb9{J#HQ$C1Woyx-4rKlgoI z*L~gDrixI}vZ8nFL)=S(E+AoNf#VHF1>7nae9CpOv>le;B|T4w`P?^k5U|i1(<>j zKxiDVb;1`4+HeKI&?F8Q9X@_&;9&aVd7=VH=bTO^XcVo81=#7=&5+#-DHiQCvan#b zwnMEr6L(qQ4a@onP_hm)DF`$;qFB(~J-pHaB)Zr9KwzBt*Ye%~IV9dDtGuQ)-YCZz z7h36B=8O({O&nUSHEDJi3%XG1sC+!@g#ZJDVu`SC19AZ^(Q`3|HEKbJlUr#y{UZ&iG0uT*oPkWZOtzV2ogQR9n$30P?yc3G_Sb~$))iMR;8N_`wYjrEs z_}WAyjFO9gYshCbN6dvBln5+BDiO{Y-fr5uRr*tTjJu>-4b`PG5#66MHAWdb(COiV zNUzm(4FJ9)%H-?@)S_Mk=fup~6&g+AbW09IriQFtYyoJHG)6ZXLBdKr&?F_%>`wZ0 zaScfi9x;)8)IClV*kpB%9;PTSYkK00lW_?u)<5;EdTQwi421p3t@s8JY$M1-ulZfX zA`?BYxbd&7v+9ct*?IwAUx^B_6NC>064!k9# z2K66GB$?aW1E$8>|`H^x-p^zz5K4S?ft5%oRBTY5P^(-CG^H)!NZE)`u zl?C}=0tK4LKW@z^vu2aGZ(D9fp?1}b~;i3HgwnBzuAr*#Rc0B$S;;5P&(t1?v0g3-xW3q zF}s57$pHdNEX4^gE+~P1dvRqyx%Wq-${~T{v9giHH@V?Fb_s7`g{bLl{5a{TRDOPT zb;4|yt5=Dv9&OBaS(OyD7Cn7owtqR?KS*07`Uz?JI5)_h%>xflXi5Mg3qK#o&C1%2 zo-t(iSMW9oLu*`60M+kKiGy0mkse@)mPVg(IaPst{ZF`<;X*$|0YM9t!7C)rg#-)` zbdE&D4jTixwzW@Dm_~w_=U9o1iyRKH5L^PxR#09fDIo#mT+-u%iXC{(7XgIVgEsKc zBi?VM9S9lX0^}=F==XF5gNxWz50;eXx}I$Dj}I=L@D5Ebuy2bdHyvtQI3?cEL*M3d zA}%LJND%hX?%u?8zz;;iYR>EMHqukwcK&eXkA{RD4pz_^qJ7~a5G|l>^fflf5ui0b zZls_{%xg%yCODjZEXIo$Eh5}F0ZDMyZijs<%rWheYbZxbS zxH$2@B8c?{hc-lfVh%?-l}Sa0Lzj5nacn^&AAniXhL5f^CekK;;gok+7!U5eBck}R zXP-dN6Fo1^VG}vSt 
zp2IC6a2^h&EK|9aTl9{mcDxxvXE{n2+G{I(7cj;}^xjW{L>OG-8hfA1xi~nC&^<=2 zmj+RTduJRxR-^^5A5tXG=y=73b%tFj4Iqz@-T)wHi|P_nLvVq_t1w(W?G| zEP4_(c#p@P(zBk!+JjAKB^p=#o7p8hTp;#qJeQ3xU&h&@)3K z34ueas#tMP9J=*!ETN(cx~ns4N7^FJ0yHG&(~05$>~EPpxx_i=^bGaG(vQKMeVAa! zI|gwvTJb_$-+7$I?SlCC1ggZ{-0MhWb79LwV>gtc)7aS90E)9=3tYT(>C%Z~HwXs{ zkPuw;D;&S4XbrgE1z9G145(%Z;k0y{ZF23%SG;Kgo4{Uel5rbN_a$&Fa!cVI!T6lH z5w#5~@iNqxWTJ=|B@zcxlQQ% zSz5aLh|`ckl_+6*&V3*SJOOJ_TeKF2EC!J^2mN*6s{?_`)T^MBraScpmu{K9)kS}Xn{e}a196SLmb@1h9=MkghVhg0Xg@j=j2O!Mj$)TR$VQ5 zCM9}6cS25qf6m^X!~BKDxsP}uP(%t08a+FSEh^rjD>zx8_8U>#Uw8*!N1VW!#4Dqq z;T!NKV%=x&b7clPD8Xtqiv`5v>VH9CWje4X{01^7(w{&yJlG)|H<^(vz?M--xJVnz z+U=Vp8#XW)lqmEDWcO3i7EHQKba#2(z54=Bg_ej6^M`kIQ6iviF8PF1IfCsuSkx^A zy9LQUC^5l-=g*&$TxcV-pYAzlPM^MlC7_A@MAIkTcEJ29$6((4;7+yy1#KAf!Gw~A zr0iJma3rk-4&!Nc1JUeAxXSv1FyU^lB{ru)_o4sd1db1fw!8i2dw5&>E5nooyh3th z2uIw2jFQ%U3-OIm%w4<-aL)xcW*e+JQ+yM!y(T2hw3#)IjGzB^$mq8~g@$E}k zbC1X|khJXNJABv&V#i20A`lkc;YuD}5-`+b8?^32m=EY2q8*>^Ma&R(3d+B3E&?ow z)cTJ-I%u5L08~WmP5_UcKws^-wi#IJkP3>|n7GY^E8{%U*hajjotzqQ%FVdlKZ@M= z(K*vIsDd_H_BC3+8|>+M0XzaN_;lH^C};57EAT#?93AN)?QS|yA`C#9!ey3i0NE|> za1g=2HRb^2#b-v0 z0fHfapuaTFc^Ay1-fwn8osDF_F?Os`3vv-Guf}`UIYoeSh~Et0i|D}jSTQdXZsLxj zX8=}N>}LEX=<5Kox&ig!4zV4`JxBpax&7P_K3E|_@AmPVsmxJ?LmgyPugAX)ZKGN_ z9_95gb$+YQrug|DX%PWo7l25{UE+ULX3104JmkQnixfvCY4+jBdpZ|43?~)(KutdE zXV15SsTy(017XFrtcimUI3o5DTq6K-(H4sDrkYL{S>sv)MksoV0pu~2ukY^Vc!l1< zUKG_~Liy*Q96Oq1&IkLA#%~h1N)Sp7S4#ob=TYHN@P*GJIsDxO)YX~|fp#D?c!mT+ zQePaQuHL#QjC8a=={?S}jDP2Ze^17%vw^jOQ8Z zc9zTqZAh3Z0vNbwUD>&+CFl!aO@TfORikByF&@0f*e&!kaDh2EII{FBpTJrQ?VXpQ zl-7Ret)m3#Jc8>iz(enbqW#b~jQs%5JapcpjgAR9JO4R}VYKShhDD^(mh&K- zU~!0j)?he;&8JryNry9fiQYk^cBLEO#3bF4_hM7Ou0Y*KgOvdJfz>-xng|_lV;|>7`W-xnl z=g#7G6n3?n&)`_W2jkjApsX9w9svd~L8@i}?vp020%_^i)6;A0x>FOgih|R2A@D>(!%N-~blH zBM8^ob94yN79OrG_gZ7o#a|lhp@gi1JU(=CZ^x_*Mm+*LUOS{l6q)fAB42>;|47%E zn!)Jf9k;Royd$Qp zsC485-z>p{l9K^d^tJWaVBtP-}{=nb)Pds;(Kq2uO z84a5WHP6`^2ea#;m?YMk$Le-D$)^$WBV)Gu^~kkf{@#*`x`(KEGTKx8S`M*EXN3WG}@Chkg%87IZ}rX9lF?Y 
zz#FR>u=rBpbE~|CwB=e`q+XEH0ZbI>)dc$U!-4R&x3_ZroweT1RGZWK`Zv*-M^IRr zKEMxBXKU5(@Li)}QnD&Xj6Dw|Fs@8tR`+q7!Fixo9HRx-%VAsuS>yoR;*Faah!_s% z>`UGa!$u}1yHNM!VHfzu+tM3%Spo68f_$* z1ZNQJuwASQ)`#eS+XY17dWy+YkPB!8L6M&b{R)$(u*|s)$7-RFW6Mr*=^i>d~Og6 zJ17)!N+B_I0!;yhG->|@HXHgVF6{H?5&(n?#Sb7i1qa5-{1|!`)Q(aB2AOV22|R%f z0G!oUL)L>HV<<1KAaw)wU5mmAy}n)xTQXJ;ZMKBe8qpSpk1%AkQoy!HhaK<2d4Gg+JR`ON5yb-sBtM6zBewuO>9xwrs(6M!VWx$N3%EV2zuRNNRz?b-QC)NM zObB0Ne0Y%cKn#0+sEZYF=4~2n+MGFa$mA3tJ1|aGzSe3=W+b4*UIPjPg^&X}aftse z0G#v>AD+gCNlUTZk2z(5*LNGrcvzU7fLE78+WI@4elSvkW{`qM6bBWvToFTtL>Oeu z7rRTL-)WI-Kc3?jj<*db_er_AIM9BLCCb>Z+yAL4eADBz-boIyjigZUj1OZ)@t$36 zZ~NuHuc)|pZPM%`*5H^^all)M@?ZS?{l2K|>}dgcD>l0XK$tC>Qw@ zq)2<{B0SiS52nRWIg`Kv(o<&7o0oz8rzmQUWC>=YbQH83KTa{gYY1FUKrvLVE08nn zm6d&OQG;@fSdqU$MMqzO^f3>;4EvAV`8{sw`HQ{B*RjCjoS3MfU?K-3N%c+|8miYC z0S0>mN{6oTNrcFW7nJ=NYjgFgOe7X7ai+P1P>FoQ5grNm;oU(TVxi|DLK9RTI9vnWsTe1R`_779ow0Oi4WIh?9;RbpZdfX`Qr%QicuZ0V>9U+#Z^b{Q2MjRIq zbauc;2Z_LRsE?K|8&ls*EOx++tr8Vwp{!bRosI&^pUl{KeoPg2gqu(<(~+|!y znVXo&(QI%9_in?HxqtvZh`UB8z%e#yDe#5efXPsP5q1Qx63UPOaB$?5M>;WGNvWhh zoUE@OA0b|Qd3}!+)3Bj6Z~($JL=eTe);9x`L$Fakf!B{ABF8os-u`{J$!9X58vD#! 
zFr(xZ_A!|^glyv_&;b-Az&Exa;c$-D`FWeZ|E;hS?E)z{F?<#~OQETr1G2LiBjul0 z;QQa)j_?sPH#2kmJ`4n8O2ke1NlO?n`|x_L`%f1`PeY-g0(ovG^Or|>gh$|43SVLG zqy2vBY_Y4(JAti0jIfJ$Ndxega#7Bo*Wvr$?s8a9SnX^SqVHRq{0D5bFX5;|wRfaa zarKlZVhuzvz#%~<*Ff;D30&gp_3H)2At5tBuS=lE2~)d9>%$q^E+Th;Fl~v&=Bq-^ zMh8ahC;f4Vn)2_&_j~WH!wzL4z5*Dl1i=8AM<4}O?WC@5*aw|IUHw9^ON^8*?rm7w z61ZT}AAzMEQ~v!<3SH(;ZZ4h&y9BZ}jnH<7&4iorUV8Gv7z=E$yCxzp zk=AT!X}KEVA2b1yOs=kW*yH{FaJ`jRp4A1YX&-By8Wl-AKtvCIQ(7#oo*7*v68}drcGWM4uuBIU`2YZ>^5bRNbN({3PJvO`|IJr`MX$-|H7F^rX4Fp`w*s*XjNSPXrgbPs0Q$iv9m`Katr&Var65vLN2cXxV}o zibs4cQZzEL2Z#-M7MS7#7bOCVk4Ivr?%l^T^5J)e@VFbN1LZ!BHW+VOrCu|joHI6l z1^#0rjur|zIskwM_a-iCTjY9moauATo0}rzOcoNRP1`1KUsW9CL%4ht3Nob$7aCD> zoF2#;W>5%zSfw~AIFnyweTZK`l!SH8M;w3DdgWvIP0~4o!x%;cLJGSkKR!-lCd3k0 zUQDN~U}c?SW{t2-M*(W16E<~n;lj4HYxI4yV7$S$&Gt3Y90oIML~si6jPosGXP1by zG#RT8pXU0eC?Z!lINxQ`=K0T!vnB~P`?Em|107zyem(V(CIo>-pUOAmZUZqmylXSZ z)R(FH0AUQ(FJ?qisdu;8rOIUT1N%arN3lmW5SpYTrSMkqDT`C6VuN!MixdkJJWb)O zR3ARmbnDCpaB~9O%Qq6D9sS*N zD8%^sLo@*$uMu;?z3DCJ~CV1h_p@r@n_B((O;jFuL(YV zTJ{OedWhr=KtXV1yn=|HbhSP=tXTF(g?hh zvtgLUH2GuVpqfWPKEPR>X14%VcZ9Bh$31FqGA5#O3om2_**`DxifKhxh;GxgQ|D%%h;3#-82{IQV)PgH+%;4b2bENeogf zXHLKQw-v;Zle8grCK@c4Q%-1VUP4$xy>oPV@S{a<{}TD6gN^E^%WeJim3#6j!F02@ zxbqvFB~V%&N;21nH|V5ydG+e!z2ta49{Z$rTuQ3EzKQ7%frzY$#k70R9(F;R(Sb+H z8+<;~{rE$#>HF(*e9hDg`55|~DE2^A^nGbpXH#lFvCG4>oeNg%W_ju=N2W^!66LS@rq z{_H>dNaG8kn=BfdrS^9cEO2E?E-P)sD1iu-;L1A~67-J8`=+h1c1< z`0?lE(chorYpQD(;bHWBvWkin`I@l4p)T3Or**iu7c>4~k?-T{3pw4P%A!1N%aPlE zhB%trGEKgZ&UOpaia3M~oAUAB2FJ1-kG2BL^t0x9fnM7tL1HEeL~S~S zjQ@j1C#KkS@|(SS)%4CDS`{&4MotVLJOYTx`UpoIYQm_8FvV8I&wyF z>XP3(OPixC1+CZ=dsG1Mucg4`dZ)W4ib5e2hCxhPz$09v4B|T>j38u>;uY!VO{D!1QxRy7Euk_TevY%_2mijCJYK*%Si5z^)BH z2@s~PwRJXy6sF2=IoQWGo;@^s<~#-rNBvV<0T*iL&ZR91iT`wVgxIk3SFV4L;b)JE zjd7CQMDa{SpgMe>S7o+YjTTOvhmV2;b3#6A`rMH`@%>Ngi6_54Ctu^2AI<6lX+XJs zmu%b0m~}}QX-A=unMizF^&L4T-Ufq9a)Q=n%$k2x2%-Pv@8o7sZts!+JG*1s+awz5xfmKh#`y10nkw?2=9 zQh_%-1wQrkJwklAwD%$6Lww;Z_Y42Bwve)|1SAE+QW$>EhWUbOU>~#ymvD%FKOBGn 
zK7I%ntH~dM>4kosv6#JzLYqPXnFQ`l@Ed5To$h8B4}mOT8NP`13PFN6Fu6h$%K6h5 zubrXs>hg<;AP(C#_6@iHEr^$wbk4LVN)X~Lk3|IP)EeD*Lr072`PSE+)qaugs zQYbJ<*#Tz^VxO{fS<|?OI_6D)uGO@h~YdfaQBN#JTY0nY@lbp?6)+OYMc zA|ZoI$x{KJZ0zcXu@0fI@zUrh#1tAQz)pbhja_KV(V+%ZfDl4!yASv^4qy?_7465s2XdQWZqcZ#D;;W{a^j%01;X}o)A~`0+2dcm&nJ+j4~owBg!f$ zG+raq-UdpI7@PnScnvg`xXwVjuqzC;AEB3_H4E90H;)31-BFCHglKJjSuCiFgwZ|H z(wf-+w9Gk6c>m-`q(h_K3{;!D0LoGtImSGBkI@VzP>ymB;}xcCG^`f)Dhb7*N6Mha zUhsu~E?)dF@;wxNtDwuI)S^_!`OdJYib675DE8%_sSRPp!*3(Q`mcA19K%7eozo6k>INY&)B^Xny09YgHgyfhtX> zU|l*A{Y59Su_1m@6c85nSV&wx&8oah(law>;q6SLzzdWOP@HF(0MLJ|dryue=mDG{ zQRKGnf)aZ(_Uv>@Yh}VF+wn0A$I-V9O-+tK%u$A~S+#2WBTeew9m0oK4i1J;>`RYE zYlL4j;7V1e=XNeE^by{=NfYTp#y_N!LA|b3!%>(J9vvNhU~MAZeUM5tyLD(9t!hEp zM6+AKBDD7;COjN#5$`_R3(Af0XpoD`!PE|~>r(8*n5iquQWd7gv{oD*psXTZ310MR zp>+Ur27!}fVHZ%(xLo&*)K>Qk=r~MX`bh4d3gIV5?^Gprp)jP`B_$?e0xt%_;~u+< z=NQiGQsGm6e*3GKg|DGD7(pIWFM{hTD|^3}w9IX4YND5yFR*^62htlFWV%N2TojIs zM;8RtYZ&@-FRR+Gp!|rSEhiRDL0kR*BTE{ z`JaNTquFh<8kFslgwd~OQX0Eu?mcuc0W4pVAgigV2`lus5`K>ZmkDvrxHQv|k)Vg% z0HIf}zG1>$H3??=4~Xe*=Mi}QFc5(n%0z-JA;!<3C_s{ejxG1HafExCotUsN*JgUD z>h;+$870)#%NjI+aTeU1-Y4z056Wz2*#v%pjlOZ=!~3s?KMt2QpMqPsZdt};K`-+R zeTokNIuCah0FHTbJnleOVgq=t?~iYG01`+q!M?Sj{T?AQ38<9raOB|W`kscFvi9+8 z@=O+VcLni;OrtsJAX;h}6t_%C&)n5;8EI(9wuK`qjJ%H5NrLjA+3{=&Jzi~V`Ho_b zFmFAi6r|E77#J54CaXoX76@cH7Hl{oz4WmYX5GGXYQemD?#H(xYp00F=Rmfc-Khyb z8>HORDR7%=THERC<5Sz&Id|0=mIao&Xp>c0hT--Udmy(7xi4YZi8Tl6PEnX??@9lH z%YTf=u*SM#^xX7oP z?u(^IHZ(K7YWzxq4mOhT`DdJN(;%i2;wL%sUhAo8623<@5aH0e2x4*;n-YE8+Q{ePkL& zU$-bcZ*dC~k_gWKBH1g#G$z8iZrvJA&fU${iC=&JfLQ#Vk%YFsU%!4QS-51NMS-Pe zon&x;#5zl(@Qz&SO37f_fKQ=gc9%6=U{9jPppe!BL^U$n4(x{%QUkaQ-ayu)2|YRJ zH1GGlmNinu1U|>B!k#ArbIx413TVCikmMimra=Y5Af5G-_#+fsd*umSty1a!aWj$< zA~Odjy(^ zOgrDZGsLwN%4b4zQz*m_77`z>FUZxzK#x!;5P9r@#~cH%3BoseZKqT26O~DSKTpqH z6`)7hG*Ylg?t#L)IY}URW3K;Rh04ub!^YAQEw@eteeG4+v9)Qi^WCRM(h4m9RLvNj zvRI8UOH9(CD}RF=k@I14uCLfEoNL*(psul=#rlq2NK4kD*h541{@YuQq<#+`)jeIY 
zm8UFDwsS|wYC%G>!D~NDCdbyc>1tPLKT96+_kWZr(|Ucr_h5dL=pYt;Z!63#rB_kmNXlIunrwX zme!yUUe@%7H;@M@yxN9bE;|ilZw|%k9eY{Bx?%+Y6CTSID_5$W@PZtM)Ju&<)!O+@ zMVk#--JXUe3tp3P29O9-QI&~G1Qi*iR7#aatMa-22^CnIbe+qvUIZC(PaHDFmoHYe zaDI2=;g3uPW-9Yd6>71B(Z>C}v*_tV6oQaC;6PPwMC!Mxw=sKhLgr$3AsnUfy8Ad$ zWjJDxqLACaR2j?eRPAPDV&WW*`>arn5P$)hu8qBjDRX#<3mnZZ4zoDBBypZ_nR-=M zS8u=^&a!pg3Dc%cyY1)aXXr(QtWSFNkqIuXJ1{B71I?<9$hHGc3=o&_Sn?X0n&2$r z90%ZNm)Zn5Wi5h}v5TTYGphSuEE(%}AFQe|0XVBF7^xYC4A4GB5_{-H!!sKG(Q zG$w7E5~&=UpYP$iujFt)_b7~MJ)3WSHI z5QCjgBUB>-Hu0@MMH30x2%2|f4ZNU=+yujU3I$t)4A!U|Bc9+eVX6h)g&~)(AXtus z@pi%}-q~_UBW6&L{E(qj#zUL7YYV;0ThTXd~Zh35h867t&Vs6fwISV$vU5Qzw6zCa)e zNPxL4OIBc(+;wz`tmicW3r%$1gscYvg#`SMw6VeE1RsV*0!@eVk%L3s%g00;?`gOg!zbTXALA^XuIL75q$T3?r<$Ab!n_!EW69F zqIiK^6GdGH7(l4f=4D&8bap;DCna*2x@fsB_)%b1n>PI$Ji%^sGCGHw0hR!eQE?WS zBK!N<$dHnfaze^@N8dhPW=DU2Hez)5XebU)0{C>|!?Bcwr8&tEq&V>jB^tjvNun@? zDl+Jxe|X_Ax>G<^UqV6-UE{7gTil*EXw751rQm6f?l>|84CDjD5cv1`C_M!%e2uj* z5MvVIsj$p#SNhnty$iHklu)St`gnX6p!GMn$3~)U7GNo)x#S}D^z?ju6QbA$)d3Nz zK^>KjErzvw@$%&v6p?Iw6pMwBK*Pd^G|mECROB84MO25cixd2%1Nn zy;0tTskC5(2XR}AH*H%)rqrVtCGP8JS|Z9f^n5)v_99#nmZc*S=~ZwS0IK{v@crOMJ=n?m9K#)ie9{}Pg7v^(6Bx@eF;7%2N zf1mRV%8VJpAZtyhBavP&D7Y@i*LPWq!i9f2=a!xAkisBiU^{2ao^dXQvy5VO_2Hhu zTig@KABNWj1pFKF2i8dcL13ydatr z04oo6Iq7tKmI};NOOE8};{H<`@c4JA{h#viAN+e&ywP+@56&QBu1551(A$!>T8P=m z`HYtEkZ1TjC_9w4Wa@J4YHtWlm3-!)nqXd)8kod6efkp=o zpd1?e*V_>O3a?;>i>itWbk_Of1Q#ohx_fo?l40U8<|vSK8Phjknr>vAN- zvHJgU2Zd}Vgl53G&*rvvYuNKS{s*L6_t_{Sz2nI8DYt&1EBF8Rl`c#qSDI?tEZZ~z zw=0If|5x%5Iaxb5?{@`BJNJS02nV3&^V>y2S^GR~X*e$5AiapfOdoXng~jNxbNjB9ojrr1SFMsZ!AM=a2u3PR^4}P#m|LAIH=Uk~ z_8ZdE34kyFtPP38kX||>pfDy@;{PX*zL77YvyL=HaBy_=r#YiRxZ@6}AhDnQdz0V~7FV&nb#G()=}!uADITL zV2tR#il9ikWT3p+jQL+7Z8}p~+Prbi?$q=!u7YtFyS29uP#<5Mv1Wgy+%%b(Y1{!v z_fjqv?Ov#0x|{cCNq0b4z$$0<#3c&ebgBy5XYj7#bfYZ%lRbrF#k9+m)sC0nce{Kw z-m==!P2M;sS-r&loyZwK%jT82AMRvVp7HBlb>-Lw>G*p-qGmEsSkeLk+@X^s)3@%$ zZnVRmiY{XW1q+_Z!l@x|ZhF;11-l0ij?n@aubWo6;@X*W-G=lE=&`f)n>FKhOOINk 
z$P;^UC#KsikqpMHN4EQ=Gh~?@rL&xWu=%rCl0YB;KgM`EVW!;%N9ooDL{95l@$+Zs z6Xx@*$~{T{{Q=hX8(B~&wR(7}!K5TQLv)g9jn7~{Bg0kpi$B%)`?o;3)HQF$IrhJO&kZ>Szg`Q)KGUeox9Xtea3Dtr(z3*k@FebJ z!0~nwWEj(xlV{;8aBKd8-@j11L*+>4t(!O9PcCJcKR;X{N^JG&m({Z}COu~4mY|KZ ze*eb#6`oaPs)|9{GPJqr=S(;#;vcRprt6wdcP`PM(zl}8<@;hG3t>n2&&7h$c=5r<_{@-4cd~qYBJe}^gw#kMpA}_#TlmydRnYw@ICmFNaC3#b6kc}j zw`-}+E(=UdIPCjN^@c51F4^OkZspqY^Hy+YnUSUaW7&#($vameW998FD-Y7Rj+}wk>qIOIVr=QsDE6VCB6}7zjQhIgI-$L5{t%mHBHzuObVp1S<{j1xct)(4$9ztCtN3OO^T9y5 z-rO?~Ni;1G=WmjZ=kM=P1e%^{3_)0-5);OaX%zQ~9WWp~{hCx&pqQd2%{PE$JVIsl z80Q38T7`Hy3_}>Ld)fvzW6IH|Z(?p1FI6W$@ERs$g`y-6g?^T)YZH`RP#xrqXt#R@A#PfrV-{19HyV`m%3T#%U1#*1A^HP#sv>xt)@CuY2t#3 zpgb#hYUd|eIK_+2c+mzrtCDqEt?7>tbpz^M`f5ZrvQ^Z!ypP59Q~HL?c3qTu0gONRu9gDxhn@zWmAQrTS7+lujjlG10Dcl zui|fH_)2?qN)p=Rp91~VuTCyY?aJRLP=wix7B7>c;`A$mlDZQ2VQ!}@$1sfgAXg;! z4gxfEgK#%BH_On-@8Y-)12VvTW5Aj*iPh=4_Bo-6_@}V4rRAJZPA&tHieVG5Y!qxyd4+Jex{{mx1PS|6ur84 z;z!R2%0UE)ysc-EJM$#8-djip^ zXK4#|89Z|&sl&Nv2v-~xl6w!u9NJP_-z+y&0IdY#jm#`M3j+|&!9Nsc@f{DM)@!gN zAIJAN4(zR~`f*^_eqC|o<=vmLk{sOY`V-FGao)JU;G2ALV(3@leb{HDElzGum1EU~ zj;HOg6d4;JCIcEtm~6`>B65tT02@Jx-q(vC4T_+VdSXxlMC1?TMgbu`hI;9g-Po%+ z0*pMS%<;W%mKR|zEj1rV@#i8Xb@#+^2r6EsiW@g($@%x9Zq=0d{BhS{2|x{jn1jCq z+qs!e)n%&1pX+n{E>yCDoeV@6puY95PU67Yx??kprYUfQZB^UB!HP>?u^F&2Q`M^ zB+g%Y2AA($(V2^oNsxF5vRNQMs3(;KM0A}9j_P5nB4CCi9W9Rwo|O5#P+TE-Gr=8k zC_S|jnh%YRgU~|3*SAz%-6(^JxwHQ*PsDS93cW497}9t29@dRVX{r!Hl@54?YZXed zx&0OHh3kI5Y~ky>x;sHwd$^5@cR&sx_N+r^-TL(*SydnDb2Iq$0442K!)!hE$eR8N zG8hf23xSv#q}hF!!!V-+EBE>9cn&fx+r2&d1;Ed@G4pEYFW(|$ZZX@wV6%a&U;5~e zWBA3e$PSx%W?hTs_sd3&A-qo*T`+rgBy-Z7q19C6>%WdCZU@W5(ZpN^FkW3l4-Go5ZrfO`>)IOUT)up{)PGZ|-T1dfa|Tg+#V+WB zpkjCG*+XRL7BWRzAbw<#3z_=LFnji)BS$|+k*NFN@d3VgxB7>rdfaY$6uirWYPfzgo>4=Xlkj0T(f7uS6)Y5ElRMDkHU)DnZ^9=SA}}uQL6)fD-8Wys0kqeo zrGcR1pn9rZ2>CWzGaia{#~C|b=fozYQ^-XfmbZm|DnVrw?7eTw*t-UgQl|xyzInURiW>gX7M!XN z(BGy;Bbmb3*Zy4Q%AoDv3l-!gfoK!(DD%*M|L*kCu3PsF~Nvm&_lA7fs$hU(KdpeM#6d}h*ZhkNb znNA*Tem^;OAqV;3BNAP>GmZm(wiTW{#w{jwv5up+QFPvyt2ly4QWGZY-;$}QhO?Y8 
z`gc91aCg`BDw5RU@z+u>paX9Jk(c?Hp?460$DxH#y1;hjWGk+);vdnkpqJYWOB=+M zgL2<|`jJg0oLIe0NT@Isw34=#mX>OdWhwUcV2jzljNluy=gxhen<7Ak8jP=GBcp** zz7V6MCSQD5a;VL;UvbSo`itx2mJ&HaWew~F_?B#vJ!TlDS{Ef!S{q9-9txl}dI=4>iYMPa?W z>svLN8tCJo$GUDK!pr;QwRS#E2z~WxfFbmw&&%|7cfI0Iw$U0D-&Txu|NEKG7_|yD z(@&(=NRD<_!^h?J$)$;yaJ3nN;WM4Z4?v2U!Y@r1!~m@?c)>Ufi_D4?l@uTQV_&qT zjgLieXhWWcOIp&cT@?A|fmsHHe=ECly1$N&t=p0TNS+ zkHEykved6gV$fk5T^Nh|tPJCku>rogN;!=r@CR9$>&72ez^n?kdhc$m-m)e3^?3$e z+!0r?Vh}gSPy0KTs9$J%H23o3VuZLnKZ(^{^QWH+h10a-<;(G;O#l0|#^hL(7_56S zh#2LTSrI=^6OQc*pYx;#kf-jY5wHh_25sfK5lJ;NDgbtO z5K&QI_vLOZr)!^Z%O4*%Oey*{5>ukaGjOaKV(t>4W}H;m{Cxz>CQ~(zyLLw14}I{i zA-QufGdT|jxlHAxx>x0;Rz?>JFm8VS1Ek;&Ge34RNLvS|zHWPG6{7D8jzjGHUGY6%E}fr5v2PQ!ZmhZI`guk2F@JgTm<7ns0^SqI&0 zAd*xQV(mV~l*m}y!BkoqSrcK_Ac1Pb#L!~0T0oM>&>&HJjIy(=VgY1Ym{qFPMS|-E zgwaoqpCJ+5LyU5;e}bSmh6Bg9d?@ESKMACKmLpXLcl1FJGd5p)Gf9POiUV99BU*_2 zE2vTeoB_6l&|48M7_J;7UmYLLwNr>*tNnZPy@;t)djv1XIE=9h1=;y1I5-T8npCsl z@%0QbdS&^KqE^7*EUN>#n+`ITrs~w^8gU#cV}li;m=1{m$X_6YG(DA_-hc`K1J7Bq zPas7hd~DuI@uNZ7?f&CmgK=AcD+NJ~$D~{GP$yJad9W6lxu)t7+ zfRK4l>v5HdZ(uwUG^Dgh%PFak=jcOpoC~DECq2E zhBhlvGd6he9fF<7it;n(&&T^+Xb)k}of#2|w(csB@+2~{7EexjodKYbaW_}yE(-u+ zF=~YaOeZhEk#Jw2zO_!DCRX?5@M`Sq>*Ju}k@Il}!k~6&LpExvmG%3@+UsO(5HPih zxzyO)+!yL7D@(mM(E*0h2kb#pa3X<+_g~n^o7q} zc7O;w?tKV?=|M(jW|G(nmm?RKwe#@^3GJhiMD5<1-nrq!PbtzEYn$6Yfy7<^4n?{2 z_s?ngnyewbZAj%t+fS$q#p`)NHl3DRoNL!|Z*&!RvRLy2ef=-#J3=WiEh>#ap^+w|uO6woW83fBN9iy=7FAJFqC|58 zj~em`Rm}0gNkyH#S;CXjs>qYOyn;fIc`NFSI*<_GniVioZKkx(7U;T1}_(d3>T6W4ZnunmJ8);XkZJ0dr=EC6W>?y7{Tk z%Jf8$)FUu6GBO6D-I#}$_r2^N^@~>@gsHO$Vhn!+CC3gPQrcN z?!9xa^0)50HaGnc(wFOuiIX1VF&DP&V-y**Km^G6ze2cTNid-eM6uh{*hs#)iYjWv z{-SC%4-ILI{ds;m*Y7DgZ~zpWW$+)q`FE@2!}nG4dh#lH7qrpk z+BH{>PcS2g_Qs;fVA6|MBijH;9m%e-7F{F;??Nrs5~?~qG9J}rK!6^}%ee1-SbOj0 zzin9q>cm+D^sfzFx$OnPE^4sw#@V(XC}a5C>Gm0>?WR&d=#9vFL3S!7eGBo!;P2t$ zBGaN+JAgJ$=1p+Sf6%O)ywSXm+&`Z^D_+#6&0T_^GT1| zV~j^d2T8n+66MhAt}3LgAX%^`NlMN7_iOS>_Sd>|m!Q@Gyo!~zUQ)8Us_wkdO3ek* 
z4RNS`{;~;aVo3x^#g2yQj4Cq9V$?c;NLUgyN+!KTfyU<=76u04lEaOQ$6B_HAC{4k zi5eg$IsnSgFQzZZDk&*Bc9LR$VW3=qsvOIk|G|}#6K!W_>nFX3_M1rjFDU)6@WTSw zMi9f<3&`(G;bVqS>QI(H$HQfXNI;32)qXPT9p(9^S7`i}d*`98QzkgE_1%#HA=f1Q z;6I!gAU%-u^^qRX`wgo`?WeWKQV`V~XcPbIvEnsx7PX{SlYK+n$k{%;FvkS&gNX4sEEpCn5Qx?XS_aye5dsC= zVlh!h%54t)(vPJiB9z2${Fl1ydKDa7XdIgXVW@n9pFlVWp;2>6EpUvJ6U#Aj!z9ix z2PE!aQVgeqn&>n;MwKHdSotVWv9d`;jQt#=^HIwwp_no&mGPP*krZNptlaPJ^-E9s z2SBh#`#s(L#6RYJFedn+mX>QvkL}-YYojw$m|x*fIbBH8v{6+v>v?*3oL28DkLjSV zSbpUJQZXe?q2E{3-pNEcN(|cqvyPi|beCS?AfZMpcGLcX>LtH_?1U;-*u9{(8A7$j*P{I&r&6G;l)92|N2o%tr3^` zr1dI(>!f#3Ti2=+5|Gj0Ca1BoYagV z_ds0Vo{rA0sZj@ylLtI97}OD|Et$Fi-ujY^ip*0+vBrb0dcyoUJLg08R*1uZVaUtd zI~0)s;_JPnr1sYKYEj*jDXwsaO(uTkANP-%;8m7XSiP(#DkFHRTG z$xoX4zu@D0qDI9bdM*a*3v_R$tiVNW3m*)uWhVhEaY5+;G2P1JWdmIg{SUEGsu z5L1+Zt=>xcQ~d<0P?Cy*6b>d_I_l&gWR!~Gk_cR(NS8=Pw!516l4ATd8A)l=m%WaZ zS%s+Gj58U2FN<3{@p6EEn4W(iwr-vD0`VSu941>z#R!L_dWjsV&QO4K8%v za>L{Y&{W@CwyD`+EOnsxxHsHy$IQTMnxWEshpP?HEy`~38BC}U1+GtQFp8cW8>+=K$`!^FH)Vh)5Tg%gDDc%uu^aHqxg?e?I%1ECbJDDjuDI_@1vxv1X4Mgf>1 z5CA5_qr3$z7ixhYNlxG4b;i#X5cXIKNK;*4G2j{Uf$U(?j+@~1R&d%EzSKDZIE56d z6?m2cxk#pvTipsL6%Iseae$9qlt$% zF54am0v_UAIv(;3_(u?YwM)N^pvqn^*XqH6Mp{B;CpO#?0ZHI3@=*zCkIxP4)CfuS97gv zQ-B>y>Rm$rI}LX1#g2*nu4CtQPnC`WO6W>ekUG|j#H~BE7%FW&B}r`=pnBPN>^W=^rs2&!l&|o zKD-i4iAdzrHqI1_Z@G@;D8QxsBe*G zp*>{a{ifWr*jM4AKE`E0#|UiFRC7`_U$wj?sqZb%r+4UF3?)_?1!lKKE zu+b<)p3B&bK%jsK0uUCfk|XuE zHKEtLu1*~IVfh6G1IX{%0a39?TE`=qx=#Wkc-=IBrVp`0A0v(?cBhVh&JpWAOTZ@3 z^q7M96x(MPz-qFoK6ggvw&Pa_Jjao~;gomK#2^CnQ7Age^P^`gR$%MdkK5_!=s?o+ zxHY0_1tsEn*8#{DqYrN7rveON?%cFt!y&Zk#HI18l=81HeipP_fREt$q1UTZhQc5U zn*ghEPUw!?B7Hz*MD%#m{!6+AZ5Db1pV)nEU=xKIIhlR~U=-I12BU2V3WrJlXw@wt z0m=cb$6k;|uv34?C4?{z3{jKpC6ECYgAd%u5R%S!^H_wqO17Y_n73HEZl_tDBu~B} zc5tU-zk^|`+mepqjc%Z+X_LuQ;NkA=zf%SKAs{GZ5(t5|2m=!AcOi6V1xkrK-XbPJ zgxmqg!d|g$r1jxQD(j*81n?L#ks8{C{aOPsv;oXC@RDDLwSA#$IUK$Weh(6_>?BBchDljh2COh4b zKnEMFRi-6N)>6sI54~g57Bun=hDg8&NJjh$evu1(#@HeFHgD8^Ef?*^tnrw#6fvoH 
z9!+fW5o)7F&$N0q%Of8|)i?CJtY5qK@Ph{#F4|x`M%^6NNPGM(g?gmkcRA2At}R=} z@1ovCRq>^z@v3HG=d%V;)5jiTFG@g9R(}D9t0)#k>#Hnq$zHx~9R>zwD#-a+jcjzktQo47 z*~rCz?~OIOC4ok(CYl~b-N*f0AA;W^*coHdK+nN=ObLCDkUt%USZ@9LY}eIF$Lbd3 z<>p4HR4}T%;0sx>ew^L#B-q+>N%{OMnR^BX%=Y^K4n4KJglOe4H$~%52gFY-7+{O3 za);_Z`CB$G!&B`;*1$KY*F~}s0HYo_XAvG69^bx>zWM+h)Ew)_hZ~TyCDm>r6fG!J zMXDTt+2!;A^UpO@ieEr9mHfr2;u&49 z#{6oeUs};;biIA8Ea%@;*U+$Qm>^rGEI=`80(v0PKVqmSK9#Y(A&*% zHlWLG5&{SDQ-e!$T^>BMnO3y>W%LNe@JNp=(~EomP~SZ}W3FydWd_RZy>0!qbu!7j zk~k1?FW~deTsQvK;7A-V?>5ZctXKNch)!%fY!g1cLrCQ*VJ&d<7hAb#Q2=rKJ63}f z8}lbizKx)4L!8A)2?P^yR($^jYLv%NS&QRel`cTuFnh`$4)PpS!pjk~6YdTk2_KN~ z6e4FLy3&b~e6%YmV(&dzpr6GtPYdLUNY3xQsI&vGrK#c0cv`8@2aQ}7fou@mo$sK- z=a1T7_o@$h8c*RFY@b5JEAAn$d-uXHKFBRz;!u^;?lMLutqE`w$W56ZM6t{N9_7J) zOeVY*6cV59#)s^%F=3!qKqwd}%}BmQuxMysuzJnV8A#-RlKW$|y=j3s5n_)XHbXof zY{EvWCPx)cC;(OuaSE=Y5}X8(!Q0BU{QNnoV6ui5RhV`Z?*~FWD2XgvwM!DC%{c7r z#x?W-e1_xvQ>Q^79f5?TJk9424SInJ|6W!EV9*@y%cj@jFIHEMj7+Blkwq^oZ>`AA z?VkV+zEc$c{Ul>b;?#b9bzvX$fH>|?!&ed&gL0y&c2q1qXtBQj{sNO+iC{PzGwPPW zuAhkOXr{oAMA!GdO=|Ku-btXBQo%U|unOGWWuAsF$$T;a!9oi}VMH{oI@s(XW9Z5& z_e7@CjCYBm4?~Lb&~s-BUBMV41tGu_Mvx=~V`#UG{+!sB-H>p9{*FQzMl;4nxV zGavO$57J&T>e_d`60~{&`a1XVn(oWjkE;qo4oG16)B8P(yWVk%ik4&GF_IuaONu%b zcH^Tpu;%sQSkKKJTBu!yCW#1OMEht!N`r}NpiuJ6`@0XY)Ic{w%GBE0x~KvWP&=3cC|`$*(=Td||e{e2d;kR2`;Y zzZj8o0T%wC`U^m#hT~3=6pD4Vs4Qj|K~J|p8#x;-5b72xGTMA>hm(ddX?z%0ePJJ( zOXJcY;>dL`>y9^S!1RJu)O;dTMKEq{3-d%vA zE5bE3gSRLVw-JJK7Mt~U#fHC{E?Gdf#s#D8D#?v~=?4=}VQ?4Cg)b!(OB z9kFz-(~ztIY}j2*MswSUK%|Z3${$-rB)#?hbN$ZO|M65kO$`LB9OO_dZNHo(Jn7Fm z6x*cS-)R-~pu~V1fM4a)H*k#$w|(nfkb>P!3tS-iWjiL}+zd}0(Sf2%7kj%rO4I58 zk@hFxRK8vNIKGi4B^7C)B4kQP8H+@wWGu=Q8c;HZ219cx$&OScAyd&HQ^rV&N~R`5 z8e}L#k~06!r9N*x&+~qtIeUJW8TzqIvK>rdR`{wZts0*Il{OI5^S_68^%zQcZ_H4X2JLvq|LDi8bFL z1{pvL>#4je=gyhf?I#)m#I=g)HCxbKz@(uD`|J(EeDOA1WGkPDjTtCc~CuJk7pG5!m^15ai;qXebwT} zyl$Y6##o0n@%r;H4A}-cogE=!)hJ}d|58_bs-C4jmcqJj3#|F*9>7Ae;F)OQTtiDp zX7sBL1fbMUh?l^%2L~^H7NjoE2&eovXAL7V)y@9DP&`%w 
z+uHus|M9;NzN=E-7$L0_COH@MH-BGI8LkNMOu3wX3Z}InT~B?pgdvJ#90c%K(FEbU zun6df|HAsj#h#Y?T-}z%`0>)?& z#JJx36BanYQI0CG>t26#+jI*qw@QzNep#2nJBi-`#zjn_A38ZG};~& ztTZb0+5brx{A(lDzX8rpN_Wy=Bqw+?2cnYpdpOaFY$!hL+3zs#535-_Fstk9fe zS7E>vnw2X4tc&;0YqqQ;MIO`fQrKp?=HGwR_A1_eMGEnCD>)0T?Ck5t+DqQt6{vVO zj7wxcjbi)Ze~Nykvm55_hX2~X>M}kBB(O0w5sF-=J@inR`$4g3{%Q0{Jp4_f2j>7Y z-vNZ$7z-??m=P(Cq;%6JO7=dW)$S|b#zLR}=wmVZGQR1%o!tzcnde%^G*qZr?F50G zKlI_0z@wm>`R_6bPT%d`n}AzO=zy(Chi=v8T?Y(6VvufXGXaS4&jBI-&!58LEbIbE z@NwU8f)p5+bOOQr92>j}MD?_PX$MmXj(AuvrwyOO`?!6N(*gDWx?+#*f6~RJa`TeB+t^dJ8_qIjZ z(p_dBhdE@XtU)tZ;_j?>$C8h*7arR8qj9MY(2X%YLFLGB|K4Yl#B%5Szn{3`wO0+8 zjA_?}>;a8m5C&=^t1j3AO)LQuxEQCpj1=@IY)p5kESmG~uTY_K8hvYsmlA!};QoY? zpd*_Lwb?SSzjD`G6Bn3u-ne6O;V%Nm@IrOeYu|i`YO=vN|M|1D zY4QEL;%Caa?2>om|5p;I$r&V8)`Slbm3-y<=ULJxg{E$4H;2QM~4Y`Ol- z#6Q$C?Op%lO*e&AF)KW zqPPe$r=|bpi9tn%8sL(&?V-HFr;_bx!8A!&#Drd@%j=`IUzCUXWS9T4#iW*a`2)`< z()!Cy3^s*`3aBFfMw24J(qM|;oNB@e4QP}O+--?}zEObFlhO^!1o^{y6ng(`SU#;2$?4N!g#y!Ks&;H#rr`jp)pbF2HGj8 zH}Kh%nE||S0G^GdnIL)5y2m|Z#(RoocK^?_;10A+m17hH!A(8+0DOEEmn}2FAt{xS z0y#5eJ>QWU!`6Pov#HzxFE>GT&>#>UsM~*rJ8)rzM3GzrKa2QbELE;KI?%qz{AZMu z%1IlOb_F)nT4iPVg622QoW{v{UI-=lVvWDfh;AXgGzVHDCA3D8yX?ndiefz|?>!$2 zhlgej<30%MAO(5GF(bgNy;j11sEKoi&$8cX;^dLecNX8fx1Vl_3ceU#cHDBQ>UQfM z%gCI<|7obcqnrbE_mbbz&n{}#>1#$94$taX;@^G6 z3(<}afO$+oL&sG%r$kdd`|o~iA6YCALfFRuPcN^G;bBtvkh!4ar2%x5^+JTz4o;@; zJMojmC7wICNC(7H#C7%G+N3S3kKH%e>$hr1dbG5`&8G;djVmYK_GV6}l1>%ImeOD~ z!OuXQoozs-Ws;yg38e^WbKoIz6%`#?3Za)q(Viu~_0K|5Ti#5N&)()R;^C+P5$8K~ z!u9Qe2M$~nOfY^Tww_z~&)1ajaJ)UJ4z4R1Vrg;ozoC;N1VS*BP!IVZNMV-P4K&SyWY#Ggy|Wt3 zD>nqYNSQz5iRwB7sFV=}k=RL;uc~PA*2mKbh$-Uf`cT{kifY$X`h3SuA() z=hRUTY|5RO-N}r@!lnMld(a7Y5R&&D)a6d}O)}%4E=sY^+l=%+<+I%e^7qrk1> zXn>9w@|k@h=o>+#F~BhHY(nOBuzE=vTK*TOxWySND40>2Q?DLI7>c2^q<50`9q=4< z_w>9gg%-~lbyNXC!G5P83gbQzCol8QH+1|<1^TDSo3`6O-UL`Ey6c20-Zjide>#{z zsSBU(!LJJCi8zg-#f-VI;XRn5d$9wufa}l-L)7kFT2i98d{wkK^+wIT)cmiF{eK%J zK+75$2I)Z#;?6lF=KMrAK-TD4D`mt22*wb*Fm6aW6L62s_CY8qA+vjJ!z&f~C$0yi 
z;rD2WB`|j2dD%|ki2QBP_096KcMnd^>}!Uy z|NPDfJkHnQ)m2sE>2H5`%k}@cU`9($w`DD-oYxm~bOe14l5c!Y5(vy4Zg4B*zXzUg zLxG5t10t55Cj;|XChV}N;uzuu1Cjgm%qsb7v85`^d9px)^ee1E18 zB+%$&)H`6Z-~BS;rXX%*oQ{2odb##)%@) z#se45NctiZ#MfW20*4W-pN8z)Yj+kvu;#Z84T&2M92k*-6iHm}?oXK|R&$e9?pGi2 z9~EAJ2sH1q!t`jUSV|s*9$2@)H`IR}JRXv~yWW?Q0^80WG>~{1nkfSP*Pl^{SK9Lo zy%|qN-po6rCZ;=&AAMM3oh0F7wTc(=u?OT0=eRgkwN(+2}E{qJ( zqoce7K8*C6)BGJTCosqatbH)8umVD1W{K>zftn%Fg=` z?elCPa>k70m*!I0oo~-2ET~2VKh;q^#sDQ$ozdwzEu-$uXo9j{1g)1nWXV(m5_S?Jt$t?OnF z3|77K5PvJv&*Recwbax-w{gXJJ>%+l$eWBE645c~9ud5%Y}Dn{EaR@ml9yK$1WF!$ zTB@a~1Iu!kwEdZPcWoMRziAC6V@Vwl_ma{UC=EWAdgfo#yS8$FPdLZh*+%BOZY(>K zSvhior8 zbEwW66KUh{GTd{`GIfIZ)8(}y4AHV{)faLSQ_tvPwqwT9@E=kgMIDn}Y^IO3_r3FE zuinqSu9n4T(eE?i7m#>;;Y%p^vv+)O_wE06U_?=>`K!`MJn~%qTM!>wge~h}kNYZ| ze`Ud{KWAoWNofO1^gf48tt@=a!*=SJfb5$M;)64I(w`UU0w-X`Az`iy_x|z6!|s4kkI)kW_&g8WLjbziN#m^bP44_Br?&IhOMBzdl*?$+ikWoRFQu z*5U?OSOnhnAV1x_wa;_&@!i1dKSK`#UgP0I0Fucyebh3J0zkM~^=3rH)Y=WW)`7kb z37yRaetGcsX#aGAQTo51)xYkOLCbkNZ+u?ytel7d>*;Fu`xv5C&n@%kxm$3zt}bL~~&}3Ioee zUU>RpblSIv0yBRXXt?Av%DPPd`B=E77-%WCrI4`_O=|;asZ9l<{$^wjjqb+xGlBl8 zVGc%M{PTwc>H=j1_i3s_x9~K#U5W?1LFw`^8uIT!QdVA%X8in~uaBpFM<(U1nRUJv z4rzF7?GQSIhx)e+)BS)`{E|&~G;s5nB;F^n+y?Y|G|Z9u=YtrGQ|_Q;sUH;9^5{n& z>B}r=qb9x|86Z$hP;{rmd6c?PP8z8*kv?2w=a|)scBg7cOA>H80q9sq1QSWD(JAGG z$4YG)IcxqL-CK+(;jy}B%ETzsB`cv@=4l*Z>Pb~%y>(Os9`oq$?-x^2+6!vH?;zTl4W%Iuk3_GH6r$)e0h0!+S0dPbI+UHYqGn!Mf73Z33-Vfi-aQUZW@E0rkZ+=n#G}xeO2+sh{1<80aa=GS4FARa# z)z^pitR1qSnXyN%{~R17i6%f5L*zIAVkg8)m$+sC*%OHYE+LtOdWq~x;589rS%<1H z4_Py_=SrO~G;>Nax>;qY0sv$GStHXv-8uPVKgt~`_}|Lu4`Yq(gYyWf>OQSNM@21) zt<=y^ZWMD6ALxf$x}Q8H_}9%hH6tO!b+7X%2hbeThEj}4LOxU-YhWJy0@<=yv9ugK zOW>m<1gNQk4%7D<&HD-+cVa4;;qK&@f?s(gRiTT%5c)d!vy<$piO$Pf_5}#7) zEp@L|F#N$^XiH+FrS+Hdad2os&ZTd2;OkLT9vhuLb`E#!VoXS%E4$%zpVubrYKW7*k z{hgsQ<|o7}d&8?Z6`w$jnSc<@5HN`pMWCY2J5wx7{AcqsuD{-<_AD+AHa*XwU4R;d zb=!^|;cuSd6d;-w{`t?(AC<5iv!fh^<$2$p zDQj8>&sp|RA3YN@`T3n~R!ms~!{gnj$Ge_EXrHy$N}-Fz1z`J_Hx&`u@xyo zfshVF03-^N%KvF+c4SInIgFhjNlzL4jIDG}W^SS4 
zss;dBw9f&ub%p~q;*gB~*< ztbAFz6;>Jf-5^8~tw=AhMwiv%wUY~Ve`-#3G3!1mQ&@*!3`mLmb=y2mrbn>BX9V?f7nPsml+C(6$!_%AQmL zdEG@Ex&{VIJtDBCTS*Q@$RVN-aKP6iUv~fXX?w=H5MLRA3l?(35lCn%E|E^J@0fuA zAQ&oH80?S|h5Ww|=AEDTgJ% zj1IB0KK1&#HnMt4RQOm|UHibyP%OA?JA15&65A|=Hh`te!P;8S=4ZM}isA+-4VvG5 z7`5h_r3VkGCYKGaGUPWEl3yBHE}Y_BqUqSAKKt-5HVI^DYP5DK7Pr-2kK8SmEA-Q9 zoXe`myDlf-PLWdrrUMaeQyHck_r~*9JbhYQbd-bexN14H30#Ldk$bNOrNY!KbNTX; zV(E`9EeY7|$q<E1=3rO9i^!qIH z%g8b&Fm@@Ykkns;f?y*q!3>c`*SP>js&rBhK>ol3tN?+|7|EBX?E|%v%ZeUr?H`^7 z#Bd4{@kD~DkW2}G8h|2aIoi-kb^(~CCMB~m8St|Nq;^w7*qTd!MPD8|GV>>v)oHel8@K>dN} zL*N*VE|MH+yJ&Y{P#p-|E?RJq)B;i)iK*f+sZp}>dcEz^%KaGt<-PBB{B}YiyE34) zY1Ehk)q2)KyXg0zsfb5LjzU@qJb1B}HBk`#-!7eBJ=FiLHcn?ICAiSzNu8LT7>QiK z0WHC3%6<;4t6cIl_bYUOkX!?nQw;~cJtZlwC6}0?BJrOn=wzTWfhxj?TmT5&g}TGa zHG-ZFMbc+5(p!AxfHCf@+v+&AX7Yi88wM@D@9^-#FajgwVqjaSYqJ?E0@f2cpeEey z;?!z?A6hOgA3CAn5dTzhU~Guv^g6s3YZ78(idb4>A{ele@7Cfq-Y%7L#?XR zasv&MOYYaT*;i~GXCH$_;#4f3^6hDkh>`2ha3|){r%`bqp`ARL5JSud9xG73AcMWn9|+v>pA5{yEv#uL8GEnv6@^L9}nH2ZPxvSqIW#s?)_ z!=y@tY$SK5RDd1?O!OV`pGxi@Pbe3$G~GuYiTqRCvQ^6PRs+yF z{yDRn43T}Djnh@N$Ol8hj6=epBq>r@IWqLk01+4Q%G7BPe^WY)h0A$niYwo*I3j^5 zhxuh30NRey4bSUC(<4Ty&ieJ|$yCUUPD2#nUv6Ds09iiHdd><^fcI?opK&6)`b?%qpGrg2gS z{5&?tEr?bUe^ibB2- z&mC(+&m}Z$Wb9YqH;kup9VZOF>0of2;7t(9(J&d=j6~A}2O2dyf8k>BxdoLD?mpJ| z#-Gy?0i%M`(_sRwjO5~PnvEcM9!EXUXT-G)SDnef4Uh-%z`!|SaInD)zIph4yl^aA z0Bh0#yMg1xr4hgaj9jM*IXeos`(EtBj9MccgeYXNanjNJKiN44crnEL3&aS@Q}TMcR(H1vcR4 zVM@7Ytxv!jiUOz<@1s!`jviD68^1HGyf#sJP)R zK%G?pA40>!&FGoZeV}#Hbl2_&qI@rOoV*wJNg_r-RUA%3714?}jLc(mw)-_QxFCB3 z4BQ~k7xeB6k?9I-F)PtxluFxB!jv&zQbgoo{t$>$CkP_ZbhgAJf?_%3gw^O_GqO`v z+w(26U|1h<;j9ojnXEZBr4l=~tbRMb{ai@=$l7$Q@t;7Hgwn~TFRZ*9w%PcRgco-> zbZ5@!$d3X-M^56Ajb$SE9`*1?W%jt#%!cP^cEa99#At-YZT`N9OiIG53a1~NwjdRi zneLe5AZ|<>qEHpYUV6K(piqLfVcUzX zK-G1GFfhGfQ}xh2T#L;2%h4})wr<_(fax97@`jdp#KIw%3E{@H1Cwy`%wnk2qlbu` zN>ID&7=t$REamqWg+PxClK>%X-yY9oGVcOnBam1Z+uHFGZZFAN3Fneuz;2i!|Mq|h z&a2xk*+Nzz4beCWFjc=}Zxl*kta0+*ts32KIA`TP&ybm>(f4HZO^Ds5X=P%gdcjad 
zzC_52#cts#|Ax|#z%ybBeN|kjuMfwD;OP2*_G7;i{W_BQ(dQ$f!1qC0KlQJ-!j3`g z>%Ce;Gz4lD0=dw=hm_l?KI=BM2F8!mOz)#Fjz#wZMcJ2@dc+=6XhKg}xll*b@F%uv zxec*vr_wpqUYB_@BA8W_pAyQ?OK!0yaOV&l_sfWxtdm|uDtd5Z_|Q8)6vuJM0(JsM zLvD#C12%RsMhV?`B=MVbyi(v0GCD&_@BZ_bI7Xggn&AEW_fseGBr@?Wtk8Rv-r}-p zuXWZ_v@G=Etj!Wy8!dij{>tRmRtIGJ*VX;i5G=q{J0Y{iu@y3N(KtS$#gXOKQ5ZmY zPkS3@boK*8aN_Qj01}J)lx;cOS7OId;{!~AIx>D9?4ct8);VIxq@Ije>)7vmr}u=E z!!VIaS>-!0_PM{4;!F=8GLv4N+29oc8w3Fg?gI*SO25|Tr(jQi%QAF| zl37;*D)wxlwT08EXhLqFjS^f}M@KB(3aUP`>Is@HqmJ+EVT&{VpOocOx7`o={>T&| zj3k)^6wwA@gOQM8W8a`-g2K;d$f3Q|;uyJ=*qLL70pvr1v@r~Q!6>m{3}8-ok`$a| zz?PcRX}3?n=PtgNBQ_)fvlzg+yW$%mlR%Wr5L58)65hurnZChz9yz%F5_9W6(w~4# zY^O~N3W9Q}oOJ+$s^Qy*b^{LiwUbSMM-}<<$B&J`sZ;iZtY{7LhLSfwfBtMuzMeYf zV+3^hQAI_?)DYG)w>?_c18Yft<>Lp*a3T((b#5HNIR38VyvPkHKGgtOZUXhs0n8PJ zf4G46I+*Py;?k+c1B)!@KQfd&fqI04-JtOqEV+wUW+{VU2(Uv(S1d%|X|KXl$(MKj zWp>8=BY03nD2oqYI@9?aE!WahKu;*{a@vA(4gX&oR(&lnFaD~;JBgYm%nK_P<)G zyPiO=u}*)>S=2XfM>ADnw*~ovunzGATO9(5e;sLzZcf~duu%V$xGlSGo`46J1M(b1 zK`a6cOWoDsSt{~{M&75`x;d#-feCmLHDemrVfcX!{R@gD ze!e+P3Rt2f`iJY8c~S>hQOZAXxJ}R+ukZ}aD>bH)NZh#2ADv2MrL}I|>BEcI_>TYD z43jbG0Z7L(s$9a}_~8tMKI0-Gf@AN1sI&*FD|o!c7}q6SyVi#O5)#1t^9}$n%nxD( zK;3i}LnMS1@4h`Wf)2l~=YA||%yreC-V+%C;?I=(?Wxhg3Ak>071pU*8{tm_B z$PO3lkOLeU5PgIO`#H*95wu)k^Lig=QyAO4SkmJ-uV{$?S)>dD>+UMfZ6`odmt?mE z!vE>h44yzkauGxlGkT! 
z(YWM~kcEluX~6d>v$Qc^9fgr-|6-%B7RFuQY{W!jjd$62wgvs#;MXHShDa3*xoaJr zWIBmXJe3LJpCBgMoa=L&Cb*&qAV(QxAvGpeq*ePpJRc*F6{2RU(A$%75{v`k%>wSC z^WJAZ9uY%l>8RLyv@6P_FL#^v!yb)FQoOQnW`xD=Y|UvadkwQkwpeBEzPxJWr#&Te zeH{^Ujxn$l`_?hNum}q~aAnTT z96BK+FjHI`o&0N&-G~O^Rnka4_&B{#lo1$`>W5_k96uRqmchN{97}VfL6|Bkp!Vil z8O6J+F3Zh+iPgQ(&)JEyelM5}ibKzKi}@=Z#t^%91obNPiXi8hwbc3sF7!GuKeENf zQi_FM;kT0(R4~+Kf_fR0!#q2^NnCA z^rNHB46Z;a=LE-#pZOEV_+EsQm zrhx0vGIoP<1(+TtJenG9898*Bzpw+o6oNX$JfT^46Cm>%41LU!lQ_K?i8=IeuVW^3Kr4UC@$KOK) z6$8$$Lq}0W?5_YyI67;BfjuXqmj7-4LDa$BUYNiDIvH;Vrqq@0E|5!?9~2y6Lm#KW zdUMAmk(QQjC%Q?gnT0cd{x5zy|ATb@|Kp9r`8UZGP*vnL-^80H={9oU;K$JR`p5v@f_^yxGI>xB9+&Y9BA)z+{wKfX%9==-E-vBtB>@de+ z>lQsT;&&!(B$_Sxkjim+iJxpIe}fFbuy>&?`=gNL!Tnd8I(|RkFzQv1iTqm1>;L?! z(xhqATE@*d4^^PcKk#X!ePNyA=QC!}_XxP6DqWKRPvAfvWaflEo)_dz)pBD;E^F7g1*Z(i$f&Q&4 zQPTxW_#j*h=};kyCUf+>o1u>og{)2$*h#**zpNAj7R#=yhNO8MLt*p(rb-FWLs7E; zEwV{e`$5vtiloR8d}vNFtCRzJY3IsrJ&{^4vd{ZCtAhR*HS&*(zM0P;y!6NVJ#1&w zu9_)04wFhQ{9@+v9=*LvE;EEQpwG$s_=M0l2svMZFG>OOa8H>#`i%m9$ycSIHRe=3 zKc}G-SX-YUw1AB1^%0=LE?h`F^zP}HKrC9j4)}s24n1ZZx=@`816ZVjc|~u6`)UW% z#1taL8=$u7$qsA3{%yV^{VgSp1!%}z?pP&+?&jhoH`WbChT~`g0={J&7zWI8sc*1l z;lxtf2oNX%QXR5qrPB@r6tgpf&YbbG+X|+R4=#P=r6!bQcGH?j*i-3`%gT(A6cb3f z%L>&zRsYK!o)@;=W(@=u*?=*~l!1hWp7%()BVk4?h(NSh7IT})y8doafqD*Ud9(EySU6AU$Q%^7WG71FCET^rb@o5 zYTJfTqblM3@R1PT;3M7MYY-oE%$7TcGYx3rO7iAF*=;pl%5Y3<4Mt(P|HuJY!#csZ zk17ik$@Tnmo45!uM9GN&G=J~MkQt{Mkmx~*dYJ4}x2n8N-x)rpn^7I@4~FqVN7=1| zliwu^FoJP+mB^5jY!4iNIEAw@5Y%)uR+|!D1eoFlwPk;N6k?n?4`>?QZa^Q7@YMk` z63!7jzRU&Th#7~K(~1;bDr42sr4@c5PcSwKLI=thNLY@pAYdUx2hmXujJ3Jy)xjQe z79a;=lrTDFOWVn96-c=uI9EEe;ktZ*@7OienYzpiGq`ITaeCZ<5tKQ!LcBvW#0{;0 zAKv>|uF0KaWaa$o)@tLCy;>;HPo=10Y7+S+je5Jo5;++_9CdtvoTDrf(q~d#U-NUE z=bHIjW0hWjnaTykUtsiz1u4ko;CtTyj`zT8?)e}+{L+?&ugGbG>Nf`NX8Ti*A2}il zLJ#+B1KN)<2X0P^Ww!AcJ~Ss$8r^g%n}0VxL-e3mcBS1F=^a5@WH<=cJ+H=@w9Pb7 z08|m=Q3+rrcH*CylP?MG2jqJOWezqo5mCPJhj0TdVVTW@i4Wa2fNvgBLHwd* z?4nE9A!c>`qo<;$DReD9o(i)Ur3?ITpu4L9V9Pzw(PBUE8zZIuV&ez5I1i)=ZAfwC 
z+&^kVH}LxHP9RlMzyvT2N!Db7imqn<4Wp{dDZHBvzK*MX4QLfB48VxR`*l8$5o`d^ zh@=?(w##jc#tq(sIch6%xG*7t9r0kCebEp-=5dB0LNK{`?Q-cch*gYhybIT3ptjn8 z?Q5`1ef(rps?ZQD+9ShcSiQ)}+n#06^=6c@Si|UTIIaBmWtW!^^0TxApmbBU@HZdFMBB zgH{5>z9NDop~3z!!YR)R$ErAv3>_@lP%)D)HqU7Y2F??-H~sd2LLYZsh0VWl$jQ{f zz=WV*&$7V1S$DAKEpTnYHRn-MMA@r1zLxLThLV3Tucw1k&D`s=1#ekgxfHl;Fod(m9IyT4@>Hh zQ7f^kw4bn`aYwFVL7zegTW|&dNF8IOw!Qc$oOo2cpdb1Lss%oRQeg6kQ3zb(I`=~@ zsK=-;XfFg0DGVom|uNqW4 zwG@xRfag4TJ&oZ#d)fANC%wNQ_FzBF%2ufcF z;qpmkLftdCdJCL;FK{Zie|mKA4nNxSnYzB5zW1lt;Z5#Aw{+^pP8`Dk`R<}2eelUS zwLKqyakvl)3P89DrvTITivsK=#W;tQ6l4qC$oMNvCCCo@6=xs88=%s0tjC&196I;< zkf1=5BTZR>otgvKeK=GxQ?ozK)#)D<0fU8#$q<6PbX7nd95iQB_35Cx>D7+ zw95dVebzAOfynZ4kQRGr%9gd45GjmXkG<5@)2oJZ4L=bhejF8wXxj57&2JTd!uX;c zspVZ4|Mw3j@oG*nwY>lKJo_?UZZ8;Qz@Or!-^zF)7<7e)j_z_PtfrHj>|V+3kr*2~ z`{1iJoU4KcEK&!Me;AZ|#65phN=`Xc>OU|z7@p+^m>n?$OEQ}p>~{Iy-zCSc=@G5; z9y1*0r!>7++jhljlWXY)&8U*`S(~v?$w&=e{C1x)0Q@Fg68GPp*S#H(IJGk&WuYKK zvYu919y7yv4%Gli@u9{|IH@F21>fnf@X!3 za0b{e0&Rw6Bm?AW0tXT)WB@=|QYJfFkTF(Qqm|xyNfMoidd0q>o@^Lp#L#tN{cDay z4S`nrQcicQ`$^X!W!~C%n4@tX} zySit9y(ffvOiE40TqH-pD-bbBw(Q}3PYO_-oNMWx48V>oHK~maJBu3Kkki~T5H>?& z4{%^@Ov(uv&ZBV>5YSX1+IM(|GdgF?pP!0RU^vTBE)+T<_Mp)bXI59IAtON%u(?nY zOdUbBlYqeVJ%5_o9yg@tdQ9l&dSM+1^D+}=FC^kS$;N)l8bm3H?|VzoFbQGrJ@{B! 
zamzHRVBqJNDJh70h;w$1k;cU%-aEjhg}lT;Iz|%$5^iX?%0?bwa3CzXeD>@Xe^tyv z!Cegz8ni#5dj}$JD5AT@Crj*hVQH!)`xjh`%^LtAoS49IwQUBklx1Q~O^vMoMYh`e z_>SLEc}d_sC>{{eReRIMh8P~ee#lb5@J7u@IIavF3keSXfzldzjcKM8U zmKyffg+*3~^peKP3_t)JBK@skNG#NYF6S zHuxo3WEI@T=x%kw4XM^VK7XIK@yPcw@QpcN9T;sWiO6dQ&lri-m?8~d;W-j{Q0Q-d z&NNUldY?gg&hRFx8eKO$Qaeh<*}*R`TZRO@Zfysts*@R2%6wY{%l+hx$B> z$rQw4U+0|JMzR3g8nqGtog2|dT1&ht(EGtb1g08DIQ545uA$=?-%hCcN|rSQ8B(Ws z6K~+9jtE~AI(?HL9O7`I{DSfx>;!m^CRmmffnjdNl%-l2>RuGbP_MM@`bZSd54@UG zI1l6#slRK;PMD;I0A4rTw9cc(L-JL@I=$^bsZAZ3>t((rhKdlX00~PYSvRrAs3Vtn zTL;gRUk?+ezMRB>_Aj_yFQ{pQ26qA2Gy&iODOxgCM%&57QKCKDs#)o^j&8$4LqK|u zfJ7n(V9H=|SiATaG`UogULLA~N+k1eFp7{Jq_I|%y5U%GLO3$(`j=fGpGhX zADG*Psi36NCgA&I9m>i#;bb4CZ}e~5q?`X zHg&o(5-|Kx)VNpTLVHFG)C;P-a}sycJS6IP0NKop!`adfA-&nf>irljW&(VpN7YjFILEtV+E*^Mg;G$DR-nmR{T zBNK81oa5p(KWH~%v-fz5CI=l#>c`jKS&;hbrcHryg$;=)F`N&xqsS8SiE)5a5DJ8w zx4?80#g7S$%sAPli9A#GWBy_#?rrKRse6xvWmp-yW`=xX2Y7$rG;k?c^t?2o$KY*0 z#vvqO+l8K*;+liyZU*S!IXQ$Og~}d-s)Jivc=IF<$;_*5pz;d*FZbhb*HiE5LX-;= zx+GRMHuB~A_;D*8$QXRc9Qom5i+}_%P^5+3gt8C4ZPk6vfSYn;z+OHZRW>ED*cdj! 
zuXIKw zgO&|9V#v!Ap-Bv>A`DZ{T@N4wtrFxY2q|JKzB=iMutkVn*wb16T3~UC+8*-jq6QdT z`j$_uKBut@KBL~RMQk#^mXmd`T@pHe2+K*(gVR`*L^++DlNg2+$q4yF^5K6!1E5Y7 zEfu6P_ZG@VJE3f)Z$;}A_r`-b9ZUEUAmbj}#=n?YMxqe;6q$Pb1_FB$ffDA;h|YR7(6O!MhXSD?TL(l z$vDP&N_P2#fk`t*az=Pa&Kj1;cpSmZI52|{#|9V4=h6^1x!v^&I3pKF`NXK#p_9W5 z1tZL`C`}_zSmnjq(SZcD8$Jbce>M21U5&nL-H~Q%rg*Oy#(a zfKF-wI?PGNs#Ar9^~jv_CYc2d_8{exxZ44=h^+T?s-R&KLslOHUPoF$PnCBjEJ#psD zS|=xQ6ik8jsfZ9x>oiT3*gFI_)L*`4z4XG&FkVC34!K168F%HZXEVVY=cz=S66^&j_ z>$OZ}q59-fk47|QpFeH#WaIp(70z=35lmx9FI!d&mrYw2muWgoEWWg3Er{p#V4RI4Vq^tOk z@Ii+<_G%38J2)px zoxbA&)OW19ymCKcL2NAB;>8IEO0frn0Fk`M>MF4o3{SjamA8)v4{O7$HGvkbB2`|J zUe*3O#>O+S>kmRIz$Q9$Y5sx*!BCUiqxf|7^n3tt4A0K!ywSP^jK7FXvEu%(2LlGxY8z80Rt6Y-|b&KkzQK zKElO{&Aznz-L0@N9x2PLXvs@69b>`io}RyQE>uls`3cvrpF(C+j5U1kUaqsSp!iZ$ zOVa37Qo+p&8uak>^_@LujsTAQy}_{xi$z5z;#;m!Q)6&6#?|ui^TSO2AnJ=n^76c4 z;v1?wg#^PqyuB-a+|XbOp|TK!(dpZEyDq^aTW&|(eF4Pj78r*IrKR@RE|AxT#xI(l z6q%pD3cF(AxyQOS)|KnEw6wx*pD+zHn0hkNGRO5DE3&ll0^@{q4uynF2hPK0VPP># zE3rsgR`$?35tB0y4%DJd#@?GEaKyd8=jEJv^Vk?4-Lp4)%m{l3Z0IC5#hQsMGiS~` z0RCaNjNOr*AK@oXlm!zbk!>l+=xwj%0Ia+gmt^bcn542)MrJxTPa)QUYj3L&qY&VV zH0Dz6LigjM*Czw*ptUI59}Yap9zepSGa7NT*!4x5+VxCBko+z{1ze0o7n!MQ;q&KQ zNQ-MdB?ZHhH>S+pDmw>okV#BmpjUnV(Ytf5t7({~?%ls%!p6I;iXFg3*$+lgq9AV5 z`1vY60jRX|+^}5i%U&Mk$>G^=fb=(M@xqYF<0edC^HKcv0o{0?@>4W4nyHj@Lu2nm z0W6{bBoNVPq+pM-NZGu+x9jZ!Y)UOdLoW2|9RlUNZ2?y6(ht2FnkG=rN?K%0frPjS z%XZbZ_R*pZ^oP`zuP3_ox($(20kk)_UgV=JU0hraAnw4nWZUgN*-rQo zU8jVOyo04E{YwdO3k>MXU+RSm7k&cy< zuFckq67nlng&I?6qQG-N6|&lTrm~V=GmWy(U$`()U>wl&C+hMW%(31c;xuMq0E{h0 z8;R}i-JA;+EU3@%o~Aw#fYG(W<|7b>yS6;z#~Wd_$gW>+>WMqWFuXDSp+@tjb?c_& z=H}x3>3-oX()<0p3^w$-h!sI;nu(T%Q`W+scU5F$WMHiqBy8q)wobz2g}5)rwdbLj z847x@ZE49z;>H_(=6nKzRh)r#QSaY8YaU1vbaQv7b;5;!2e8Q=hMTxcN3yf`UUiOA z^*@7}@dGXw2M2mhXRkWoGbdg|eEzH)?Biw2muuSD2}(*z%JCo7c1FyZpscKHdCwDP z-Xsw2Yx6&-PsAmr6lS0%I$Pe1Hvx{Z?der??5`tl1W z5{J_6+$odyIhiO}_3~vYh*cR-Ka4_Tw3R>vrtm9qLr%IH1^1R+yQW-RcMQfiD1XLd zRO&&*oIV-!6$vhFGl-is_W1gc1q;Z?giI?=EP^6!5y|5$5#!$uDk&+A9a=6fHd}H~ 
zj*Hr0^4o@#IVW{vK`LZ&QqWeIJ_XdG+fM9~Kv%)uRjA3%kIDUDC zk>aP0j;UC?*bV}igZ*eQW_p+w)|i6($BCFo9Snd0DCz`Q1&e#`?ZKh4?vmUj{9IZ_ z=3r)~_v`T@;>YmRiYqIpyJzzulr^tPF%>|V+j-8!H=$REx*<%Cg&S)Oe(Zrr%>lX2;U zcDZGa*HlDowOzX;k?4m+L~s}w80eP@3dRU3?dug)QBgsH^Zl7eN=nLYu+a(%3ZbiI zW2$WF2u(*soHARQf&8Hql$$ibF?}uk<_rw{Sw==X=9@Pcy?n{LV#Nvy^~Qm#_o+|3 zyX*A~pe0C#_V37nu|QUK_B&uCT?f=_{eO~A9C$^cn51697al_|7gY&JGF#Vo`Z6^(Fwu$8X+TiZbN;GcRaz zUH9xM!2u+2q_FCmo{am)N3z=M@I}5s%5uN7bV6aoU=^?Fm+=9pY6TZAWMIcA_>ZVi zF(k6-C28TFA=7en^+RYi|!k8NktF2YbVGvh^N^FqTi zM$pKzf{)`Z;}Z=wO_BXGKUcu<0Vs=*KHlkTry42&FF_k-&Xh>$8_J3X6-`8XFsrHtxsvjR9fu=+UD>qzJd&o6GK;F)-bQMqkQz zal*oDjM-(ry8JuM134=+2`F+A;#j#XJ-B1VaVHb&B@!nVxv7{ML6bHMC*;Ow2UsUx zVHq6i_wa0e&Voz>F<>e{OA|`3_1!u?Esqozhl=)5v-_j1cV?uU-KEJs+mLUirKJ@k zbL>XeLX{FzNMP~e3ErLc_rZttTpny+UH}}*Jl%0WSVR`3Fnfgx%uqfZ}U?ghH50=-v47aWtQ6L-FM9% zc?l4dA4z{P^n<96P6MUl#=L`VJjgk%i|yp6hwZdndtyWaHNOnXZ+gyZm4OFm&6OILBau} z#6{4c1%fjz#;(GtdbF_si7MLy-`?q9=NXtpBfDPLREaWM)JLvJaSpHX58c@PdWUZ$f@_}4YIt4Vqls`!O8jAPh*qy9Cdn=r`Zy%3?Tg7i^HYUxB-fch- za6{6BT>m}R{%ko1!%dnZf-x`0;?AY;@Ng>sPJ+?=fFCm$s3%U(mSM%=dLJ1MqT^Nd zIz?q=dxSqm0Kk{i3sm#h^~{)F8G&TzG=!-OSFM_l`Bp`Q8Gp<`q?C_R*~^Mt30+8Z z_wBcDKZb_8Es$zwVGIESwVC@RiDbI=hXo#&O62Xk_ikua*R7Cq zs+q`WR8+o&=q-(G2v7iy(F(jeWIXObab4X!hBS0=w8Aim__1;wI)Hd^sU{;|-oKAY zJ)z-L4YB*I^d$9*7|uL0GU(D=0WNc`uI^-LQ3Q?z%2y^p7HC zPm~d^%n{2_kldA1QFLbd25zNGoN5|~+%51!$Q2mIoeVwN8H47PIYK%`h#NK2gHN0Y zeDv%YClc6=oi;eJIg^UPU&LQLoAO#nSiA{nKumo#R8vR{QE-mK(8-pDNN!zWo zwCY|h0l@)-96U=Pdklebl1r9+MV&LYy5Kb46N&vxSwa?WFt+J2bP%g+X-zobU=7P7q#nq4Fb4lW$egd zZuGH`i8#7Dxm6)8;skOE2vm&8K zV$cMnQ7ocRL)Z{hF6v^5u1$f9=nkb?9z^wiX}O87f*?<}|7gDZa?X`2^XxL^mn~ys z#Kp%aE6p{!4T(W`>ue*<`xgn}XejVs0I9;OyN)wq@O2PzN3_}tb4%d`!1PMtWROBE{mt$!jQhv_}e`>e{(&yU5oxcl3;!#IB8RavK3`dd@6 z7L9O;b^uZL9E5`pP{kb<()o#OG8D;XEcP-AQU{v(Q{t2Z<_+K(?!S2`A))wu}h5yHUtgDA`^fAQoB)d-A zR2D&>p_~8m;yMvF!=AhsC(1UjT{|8f@h3qcy+a}4Gcu$IxHa(Zm-AeQ^d5!0SSBa; z0JZ%g(0!C(psV;kqGAZf$f+dYP;zL@RfJ;y5V%>Ai~ws3y@Dp@{3|vMZ+pJmUo(pI 
zU3Rj}*wa()2$#!~R=@OD_|+hc(<`X-LgGoK@N^!Yqe$kX@hscV=$Q(prly+TwqyV3 zu1E=^+|Cy?t-*H(CHz}(;sYdn@?;9oqNRuLY6zQL)8zqhW!YaJm&Gm7Dt3N&F%mK0 zBPYa07v^cOh^(ZH&_M=uvhwWOgF};k>)y0I=z5 znYo73ZQ#|kn}GQ;vTky3*o7}$3UD?|H6boeP2}ADSFdIRQk%POZLb$#zW%^{H)mb5 zQQbI~&@kirgxpm#yK9AAx5>@{RIz!>AU;-5R+eaYLyNUBRcMmpL~?kfxjcN4fB*}` z8V_&DZM$YVUPnQI{S|{Om|aS1x06$upnTZ8 zz@09uk2-I#4A15hbzFu6Aub_7qQF8t_nx~sqQupb-6f~HARkZPCS-gR2T~KzO$FB` z2G;&T%#GLra-n~`7nX+PTfdUP0+vNx($`i9N4KA0>%Q95oFgP)AjlD zDHK?!?lJ6bdAyJ~|Ni~^CkjZz<5kl97N)BJA4#>meORI`B*Sj3ia}Pk?xL(_cKz9; zBr$SCg zJ$;J20xYMYy>3hulqN+$vli;i5I0Lb4R)`nwwA}*+Pb^1?-&;=4p8Jws7)Ut+qt#p z^F}CL*^up+=lZxEOj~zCt^@nVyufcIu$!YemzxJs*seibkcPI7b8U?K4x}N%N^4Dx z&=EOv+JaRY00Q%W%#(A7KH%Hm)2x0<1do@|-7_c|vSOxs6S(su2?>igrdpnaHj5Ep zDP_i~Ykb2q+Azsj_y7o=q5W#jMbKb%Lvg~w0OZ83s#+lEEZ~hnc0*8VFc>(#(Uh0C zd~ry;8_9sqjB=}9AKXU*ySFgytb*m7OP8N5Qknx#DOz1;IK&sn@!ydD_TF>C==pKtF9K`Pur zwh5n?dC1M4WkxE7WB>;q`HU5aAI#O?g3h_|4uNwg`*AJ3e@V*}_-8Y*mvJ{eG1%~o_5 zf-Rew9HbA0-S_^Bt@!jtI6{tvglY+QdaeS}dw@(R@z%$rj(IDFUw{Mv4SO&`WHs&2M zTx`y4VhYg}Y~Q~9g34a^O(ly2V~nYs#t``c$WYaP1w}^p$Y59Tix;y{a;`yQzx`R{ zNiZK`X8g)1M3EhZ4bJgs78WVm^|$)xMr0d==H@pSBR`_VR(5M+a_mcXBlX@^)(lZ&kWoV zd~>($>Nw{k557-0k;s>pnW+Ul19H8?5WR8dkS&@k>Nk{Y?|@f1X=}$RK|XaK0++X< zm$)-_#J~6T_V%VCeA`TB;t@Ek;D|6GegTG-!9Bjfa++m7VG)bFmQTvIJOhp1LSbQc z+{XLZ;AOH(mGhtw07*PEJmTm#O?uo;*2=x`TsPstMUX3M8ET&w-jj z@kDs}yIu{AUMZplQ1zh7$NdjGoXFCIfI>(}PopKm_WASYA3b^U|50|{aW(h<|377v ztV-FdlyS0UujHWY>LeqgWL}}H5Jg307Req7m65E3h7qn@M%hJWM3ew-q(_L${o zwoo$FJ4*le`t@t~u+Zoz9{aE}(-V#yv4DUdjVOWV8e?K&N5$7mVPH42&$6#I6cFvr zwnQgI?v89`>d_6CMB9nyjkxYMnYD2VouIqlXH?PN?b{pB5ni+I9$uYzbA{qQxjwmN z=g(Ip=w}>h@y>-w7;8AT2Zot7MyTk~AUGtQ$h)3Dk|2uZET&Gd+{ee2F$iAZghHXZd3d$msnomxxGh>7?A-JCTF|2 zWXBCM2h^imATR@jxd);JeLg-p3O+YW5@z13$`iY{KpkmKUzU*i&D?fXLPGb8Nl9y; z9d<1X$F{V!bCb{?zbOcQYv|Ukt3Q4EB)9=Xvd$~r3}V9L<2y-Q&C^FcV7*H7Z*nMI z`8R0jHRyyH+F?issm?it-IuGj5A*+Tf7>l|5}0Y|T?RryzrWS`b|tc=q;__Q4w z;rr!tbxrgMeEaIV28csB!fwI<7nf%2-Wi!u)xaWkot&KLYHh3h8+6}agXM{_;AyTG 
zJv(Ue07PRsPd;)6lz!D}j8-G7vtjr{z-Vq`PUOT_{T#;vZ1z}EY{p9S=ibkch-M#0 zvMardUsVGQ@+U^_%9AJKp0sb*uI9&&AAw@t(`H&-uTO?EPjPvPsxx}rsY6292Dm=B;52T+gn_y% z*@&*7yA5pyMXk`Z!s><;NmfKCQXJ8EGh4aN@NFHnBz#oIU5^{$I&?x2;ae}UjYC!X z>kIB7NTcPKYC$;^%{8o^mwn_}vLbXoc{T7#YwJPZ-Z|}U)iAT?@8VKc`}&!Xe#=BXWyZS6KwASTLnAxkA-G0%B0Vp}Mg4EVwQ3>UTn+J@p zd%Sw@!jkzxr=LM;XQ1Z+td`=0hx7=(_Do*a?aE%a-?xnPn#+!;-+8JoBLQpJhRp-I zZrp2ut^!I?K`l?gD-_I=!9_aB^i4o#;e);E7(0S*AhEsNz*oJWkhG=%QYw}aaP8!| zbL0AS!x^-zho|*8MIuE*a?*O^M_<5_lIQzQ_xxBMA*DCEt&Lh#AzYx28_3 zk_t#v2eu{}b#y#hb6iVj==Y{1K~N|w>ifJ0Av-|0!YQ@8C{MWS;pYK(>iL2k2-3s{0$i*~eY%{;5sS&{iEf)TWxa#yQ;y>8#-Fw{PDrcyg-M zf(1RCHl<@h>96Rpw}YXh+WN(&TBflIF2<@{u3QU8Z+Jj%Rq4>7!^gdY$Iqp#7kC2^ zj~|TK>|V7(vtGS=MW#+YaO}BlpLB@0`TXC1t3e_HPa-dP5F1EAR~M00+neW9fy$4a zJ)u{B7t<)y+&68q>1venE>8JjPj;A@>?1goHS5=pi0NV&bLCx183f?Fhc$(+Ci!gN zk(-I;>P~;aj5x|uDMPrvXhBDDsP%u*xAOz*{jqe zrN%%)gL~>SI;@W9goX3xG57;29(rkDkWq2?35)g?T7i1p6?lbX#W6m6)b`F*K(eHArc^w-IzaVy;q%KR zL06v4l?URJSiC&#%*0WnYV$=`uU%W63hVQ=G0v(iS*^e^R0w3oZKL>V^ycaLh1H=c ztd@fr+qbU5T|gRxR4co2rv?R`cGl2FZEfv?r}e33QoMe6OH5RJGFQ)s$$Ah6rl zkI%%c1gqRHx06X_B54G}mFAe6Eo1c1$jfa)o^ew>#wHV=O+Vk5i^e}V*f!a?c2Ig3 zh;-pPdDYiI5}MnFd0A`kP1tnSYvIB{G)DWg?U5ZRxD8QqQ9p1GS3-2Q5e81W;vS3M z*ALJa>sUv|Cf4-Pj$C~2!2*S8WaQ|7{_>#Kpa5GV;&Jm|6bf}9r8s@ycuzOHekjA& zmg|82I-+~i8uhNiCsUe<>Nvf512jx_8TA`B6l8;?gYZcUmMip}NdfJ4#-ftCj7&&tgSjBbk-$qoCoAk%dF zp6F-cmX{tzO#rmR@vOa3hjd_*yLRvHgl5TySUxTl zq~X!tbQW@zgChG_bZhf*A-dZlG6megS-$61l&?WUG|(wx<9nM^jT<+f2VeqT`<$rq zW{h&ERlWM!#b<{B22rPlMnveCROUCOH*XjELr`a8%;|PU&oMYWws^bEPV<&6wH3Id z?DC3$EVLB>4|z-uEF8-*lyTN$g7dj~D<{}*8uM~@7XzTRHQI>ywpq?JWMi$7Uf%xT zPzY$1D)z)*Pfm9KYH}75M`S$i?p6L2BHg;xWst(Tr6o#5b8d6_?wZ?gb<4;)1{o9J ze*}qw(z6*hY#?w|XqG1({2kDxdAtO_dg(Gh<5@4u@##;TXR{7LI1%Y(m!C9F_zyt?vtyEoZw z<1YO)tGNZVHh{iea>cBb7r2(VYBE%PB29@w(Dt(~w2+e9q%tWwlk)bpBnzF#Ghm!$ ztIWZgS08z9`xdim5HeSRUgH^=4Rt;2%HONQa zPrkWsf%LmytlOUX6a{h83zLcF9JNN<|0XHx`bu0iwj}wRw}x1Um1Ra5|rW;W!u| zpsP3cU3ozm2QRNxr%uh^KWt!~nbmpsyjYS@FM7U*gs>Soa<#s7qEG2N_Li2yE7AE_ 
z&)}h&4#Np)V(6w#evDmHbQrP+B)m2b0V{TA*G3zlJ^q4M`;BwTj)HZ_N%W%1s>w>< zARYeWgt^U7YsDig2Io4r&c2R;-2fz49f`xxi0{g_4sb5ttkmX2;|061IM4Z%p5#(0 zTttDBhsHqY1!{-6rwg;rSP(`O0yZXfyh6Wz{Tqm8pHeo-O+JM#$ouPiI5cZCJn}BM zncnpN_F#4*B@O~Ly&*%cpWHB9Z3?I;M<8AceIug)P-;b~Z`_FC!v(1WNjre9ru6K= z=9$wlT8L(mEqtsy6xpKTrRPt$E{1UQix@QD)FVwuXp1pp#(Zb@3;ra^Tx(skmMt~h z3#P~K9sSBT)6zhz2r}Q;!&aRm@+ZaQ88d>Iky>}jT0Mv&9!-5z6eR|$0yb(;(>j;(MWUAK;rXI6h1vZbyQ8D!ghDX&lkN%>T?ZIIuSt}e23q6| zh(4Eue*M-m^EB&HmNkEP5Sm@TpH=1n{|Y1~_5P!{W*mu@#^KRdF?a>jX5{e+c#oZh z$ji%bE-9zbT8cBk?iiW6vuzPX*Pi2?)4=}947gW46rsEfaCa7cT&9!h(N05GexS+ila zMNUG=MoN!^gSDxtGK4Q9e4e!~ZfFH7&i~Aru1K5afix=oVA_j+(h!PbA*G_cH*b%m z`uJ(L8X|?9@fL~LJ@nN;+iS_A)xP0UAGdK!DIugTKm6?4zHXRXNsn@T! z$}KO6BQn;SRsx@y=1@Y3MdZb+N^Y4;AtDgO(8^B>&Vq8D_@p+>;|2yq6qlTIzd6~% zb@YAQb#mpAGS%UvNh3;(~Aj@~*VjpYY=GGCW|w$0p;ZmzB!nLC4) zb^s`px)s`FVUVdhZA2o_+D1i5{|{1G1mKSstvb!%tKt2XkLWdEgQXnF z51R+Iag{wGHN^jrElIBr926+$z0hc#pqDB$KSX2z&p2q2=dR>4wvHIsR>|t&edzW z1d)~5YaKqxh#_Xm zP#zU(%o<4B_LQ(|mcH!6r6_GGMu6|~hS=bUZ3ecFcuAeeA>2w5q*jiy`Z5R1;2H&% zT#JyXO>ahY=-`zNIirPmnmy7|ZhNRkqzksghE@9Hb?E*Pj>qEN4v)(t&YnGcZh2WZ z(W)kIQXQ0N=@CP)z8X*7zq5Rply?YYa-j#_cZOje?och#_%XnlLYnPQ2YeN!+PXMPJcw9E9&!= zA837#JoHh${R%X}ZEXtJ$r3to;)E47hh>ws(N@CipWQyXo~Eg_**z9iPwyS!56nRD z(CGKs)(G$Tf1;I-=g}qHu4|Ub)DoOb`zQi`%cVhvTM(L+_IHq)C-rN+jt8}ebUBzv zMo(^J8`eAk>h(H@Xih2MFMrK?`m|;Qn%^9-SiPUnu*wN@bFZQL{G60~e&Ou=g)4ZI zA~MMx+v^J_x)kmK`q3Q9+ud6#k8wMK;5VmrhYec((ivM)4EjwyTII8@1?4Fj5X(ZD@_|9%~n)2v>Gq3KVu zvs?U@(Ol=phA&%;Jb_!TmZ(&vT*FLUBX{|?CUCHuLPJ+l=0OmSdNPXL+qJ|XDC9kT z)wZ7VTM{N+Yu~4`Fq%q^CR2W-R5@ZXu&_ANdH7KE30}_ZUfQ8qVo73$PyTn+hVa1y zei?>$O`EkuFRs=r?_XnQ&ePVKX<<@S&BP$Q(!+-jDXzp&bYk*=_-xTnqbIsV{ZlQk zVvb5oR!9;+=Z%dIzJ07x7BM8?Ip-%jt1Da-ALL?2E=^U%a-|$-e1kgYq)+Xuv@St_XGv$0%xuwDHRPd< zDM-#yWwEApWQU+s$!YEN4#4ZT#`H#d`qBe3a^nO-1fQE_bqqq6(nedjq?G?GeBgHQ z(wmT|nQoWI$_?>0Oo-PVQBj3kxhdR9^v|BCPC!epB2NIDtX{8PJ+50% z^IFPBh{``P4Iz)WW;1;FKa0V<7Kuj!z%7uIMLXN+$e5aRV^jCG6oJNF@JUIb^#q^u$MFT(tWZ$JUEy~cP`L9~Ml4xj8d(vRaBG%h{yHbq5zDeK 
zj=8<$V8{l(X0&tb5C430>WjjXtmZ1mT~hYUZO~#vs+k9#j2;XV+7I{+bGIz?>#mIm zFQ8eAz8AT{;LjU_mas0ZzF$g8e_=%+-`eCltvem3jxm%D1UljXF3{gtqUo7LGGf)~%O&yGq*RNZ57yoe$@F#G+my`TJWG_>g(PywoL+&OThsfJ^oB=`j2NEaBTOQifwor(|?%TA9RXnR-0Fh2BmHv zj4hKvC;q6Ch<8ey8m(c;*X-W?tan9ClgflM?cY<|3jJg_rv%6TWf~Jei1dVur(P^| zp><&eP7!7_>-MUkAhisE5oue$?|GFPGphCR2byqydP6~uIXE2$T5CRRMXlcm<}TL; z6_47K2c#@Ly6;;~;IPs<1~WHay{h7@k-rI$t8_z~;~&i19oIJqorO6|AC#HrEP8sx z;^+*dTEHwM-YpISWh5AXepw>#HL$70dZ#idN>lr;6G!N9??*0Pmi_qjstulP8-*_x zu`?)yh~mg+@#!)#cTjklok2L3#@H!9A@#e@X^c-LKx!9|5cn_qN!f)@6AwJ!qEc0O z@Y}4Bas?+uj6RKj{}3dsaW;okc*;g=UGB}g-bXDb6{4OFvU$KkPoL?9VsXa?EA=$? zb^ydjO%{5)qJ1^%UWkdDK}jYovS{!50%=?TU7LXLDume0kd<-B$T{#b(4 zK50$Hj!@g-O%c%of0311ZNzfhM0#S>*{2OR1lgSW^EZokeL>~O&f>iRsj>=-GVDLr zA?Qd$>8|o&v_Aek-+?VR8(%o)I=hFdhhvsU8;UM^ zb8YGtD&d&c#-X!3@4yQ3c+PPV3Z;{7JxL_~umenH*vHGb`=H&A`nR}cj##t|9iRW| zl{D*uxu6w8kfQZ!ucIUIdyNL2D>Kn6Q$5^NPG%h!%>`0&Ek3h#G@ri9{MW;|t;Jr) z*}zq@Y&wEjUC}V%US0*xy|wK)`>M9VK?NSTWVKvea_ws>^Oskj*~RhcUJLA``)LpA zcl)b8@K9CC*YGdH)Pi!dL_@F1CCvE_R|1Wx}BVzMre~zmLD4mAz81mk~ufZw*_{o#}WuF*v zwo)DvuZpifWX_!SqL4=*BQ&-DUSK1-p;aLAN}VDyU~~vHm7e{dTes{Iz4A@Jr8$<` zOo(|~?mWP*N8H2<-QajubL_{S_Kge^At&--25w+)JbnLHPmC zPo}!VZsq||dw(e~08HKT3*Ge;c*6p^jVCKw|JMDhqVfrs7aOWHeyR_6?L^s5#Sqvf z%4W8ZOPoET3lrD>zdcm53~&^}$HDL9(W0b(bNMxN8q&Dw)2G*H)U5_JafWI*6vUd# zTPrPJemC=7|9Zv|#jQ-0jY0-IqL95#%QJ;AGHmd(Og8`Bk>Wu5FS)UZs;F{_D0qrq{O-TZ#JZVePd$32D5(iUcGj7IUi#!V`Y+`EZ2hzzsj{lHCY4G z@(9tNxq3f(8v1DC{NvROLccC4dQk<%1~wW@DEpVAap|$3Kp!(oaOZR~#*%BMJpTUf08B%Ea3JzAK=h(H=ioU}Spc@0&Lrl3^&X(LS?z)^Y87 zS{WMH_4LB-LGx$dJFS~V?5DGldZ8`1@7yVxVDJ`=6(1z+D&^n)0|)%!uj-cW_{-fr zkpIQp)AICwf<-I1Q!=;I6ja8yK}3zP>2pzk1OP|_LFUfnG?RP$pS}H?31j~0 zC$+OEfqIap<5e{{Jlt3UP)@EJp>8YPElrnrQ3xiwbosJ1M6`M&b}1?Ve_;r*oN_(G zzu5O=r=tNCgQ@28J{`5Ey!!hO9~?W^AEmnza>m%q)IN!YC|?TAPlh~?C%}vf`6HYJ zOX9N_gw&XRDUH3rIZ?|!zL9sL*End#PX!q*@FR{mxIH*zt;0oblhaRuvT9IXyBCdW zxmobuZ9P0qF)DuE-N?uYIC+gv;73q2LHo>eKJScfD}X0IoH|Lgx0X%T*tENR`SJ1t~**JFxji 
z5z)(m4YvHcC_}asew5H*)cjhS5+Yp-a7&IW(f%eU_*Sf-sLVq_E{-lln*ypx;zF3& zrK?vf)0_5Y_~)(Vn{3}zGdVHyUQ0`6WQtmW8-UMs@ZYg2CE}2-tQBP!(azE?-X#=O z%(W1o*J!4?Ho=1s!q?OTjXrVq?2323?DYX832gZgppVATsp%WT*HO11vjp;?N|;>! zvlKc$sxFOU6~$C{_weW>*JY|f{o@#QT1mm_w54N{&{?{{SKb9h@9GVlNthk1mmdy= zdWp{{>O;40ANPKdY6tN=y?&BvlD<>T4iKjlUd42~&+<}HOOIyw;fTTJ%CmU!+KxZ1 z%M&C{f#e%MXacxf`zUuKjbfp#e-tg~ChB2Vc75rzA#IN5ve5=p6$u`S&C?oBh(o8~ zAe%H0fD@?nk2)tB(s4sCueosH!XY<`PqWdK$>nYTn<3)L%5U_#@^Q#`-A0mG2`2}t0 zn{*ZCS*8cf5L!&9_+ zBC5bV!TH$$CvAl%j9FoyR)83k5u8MK@7){7&CeXwI&f%pOMAD`S-j~ir>c?g*(Q}m z^iAJgH_>DAlSCZi>Sh=^~u7g_7lQ5>jYXc#^T!bKtgFpmn7 zT6+9L$TA-D+tl9(v3bt0O*RiC?ZXET(Kl&*-}Q7<1NI1vOLcdGumXzK@%KUt`u~%a zb$+aWw7KnzeY0ani^&PxU`*nzfQX2Cj1T>Jdl66W3d{`%gqBHVPF5io@{6FZ#j4CD zU67m7Um{I!ZyfMtjD}b2MlKcWK@^h%sY>Frg+oNpFU%j|B=?abmB4KT2&uy9cHkegD!P03ZRN2Qzz%ii3;tz#*+vSOOFU(~ zs{<(5g|q_9L#|pGh!&pOYQ~J{;-4g-x?#&M9TWPL){bcxaIivJ3h&^*H>~{Kg04;c zAXzy%4*&F@U{KE8fJD6M>QcJtj3QYA3COk_rj7| zk;TXP9thVv3!=qCl5$nIMhz81#EN%8_B}`2^qn3&e8PDBnd#ZblP-m@#{KL==sUMb3s_Fjx(EVz}VA!sQCxj`IOH%2_oC-9wgKRfIvbU?VjEr$Gk zng6KL(!^DET+*PaGq2Gr6tQEBFZ?2;ZwY7!Yd`q$cd`EM;%#wba2g_DqC$=p1! 
z%OqP1es;IqJ8_d1i{0ts9*03)-LBOT5kS?dRmou3bLgeM!LK)cx!VL+wz9Gc&6cVP zlK7>nVdm0ZPXI7>96#P3hT^x7+T3G^daLwhFxaA&4>J>N>N^RGc^^OSnX5hsbqib* z1YaRH7SeaoM9K-zgMQ9ug=5%LwSP6c=SIt{hO;NO3Vq_m+0o zj0+P}Ry0v~&mE6qS+|yJ*t&g$#T}tCNhg$i(ka}82(9C*)~RFM zL~@gkj~hF-dCQiEt`2M5&-;@VNcY(vvgpdHjHfu|szmL)eE%DO^XmooW9o=pmGY-1 zWv9q@L_V|=$gU-8K+q)dL@t{4eLW%T8Kr5@B3A8mF z92^!7Hn;DM8kGx6PEyKg+%bxM7vGqeMi$Rz1OLX7HH2Fe5g=!vvd694+IOU_tq8Bs zbEizVPdxbfqXV^=6AB3=4DDT#*AZ)MbmRj@nt3SEbxp)U5Z%TkbQYN{v`8|t=vd&K zKK`BF(8`>= zMm^^3t4d>>ae*sQ1kY_3I3!-vY|9c@YzU?SG_Q5rI|l(}aKFPENcx3I<)vQ=_G6De z2z|JU&&g?+IQ9b$t{RlS65k-UHnFsUWO(fVjBC=r_H zmc`AnMLKSmr>8@&wFaB9G!p`K;Afe2YiQI^{;l-U1Uh@Un=4AE&C7oBL=Ir4r9F=& zwPO6f9yd$gpo^t>=a-_q@W&+TD1hV*!-f=WN zw{`Z^>C@e)P(*B}kc0~EfObc2sM6{JsiMdYKewFY(UUbLUVP>7K~dxRglU*&IvFH6(S}(#&%w#~aUN?2CJGp?=Jc;VlPp zTuGK2M>cJ)`DM+w1XBFYJlanHZ|M0@*SF}=qdxV?V6<{}*nqhbzP>b&>z369L~uajeF=Lx*80TBBrau|)gp1x8*ObM=avR_^kg>zl_=>UMaT zx;{+b8fGtlFy2}@zXAdR^0=_6{nFk3$Rfm;E88PIeW=JHHV6>=O_cRdL3y}3SLti> zh+9-stm_#0%ov{ap7{Wh z%!eytLn_$Nh*|;ZfKm9EA>{j#FKXiJIH$eSp29gQhmQF`%LPIYnM2l)Y{DjSh5tJhY1{zvgu3 zI!VsK*KiP0s1{yaY}feCty^g{81m@;+^Fsb;nAvqs~5aXW3A|NYdC!9mzGrO3M-nOj3Z7R!b`+MPVuB;ttL_;K7zje}LWIgbT1MvrA*k9t|53HiAav|;9_ z&HBc-FJ00C`bJ!4jUZvz{UIz!3=7=d$6qWUCWTXE9Yj{o9&wy>^1G-62;thm8D1|@ z{#i^X6c7dmk_rcA)w*yZeN@g%Ss?&==#_CMLpo^K5{bo z``vA)9m#|d?b4So3p7nz&-sUnAM;m2#|HuWT06Cd?d2%F+P~tb!{hE*l4`MBCq1j-E7~(uG72*^I?F-zB&{V;`Sgqa>~jb<<*f9h%SNVy!)7KF(5w_ z{v8SNL?@M$F-CVKywpp{K{#?w<^BR39GU+W@FQ*&Ex&C0O<$|muC1d$52Nt3*hyjv@MP}SX4Sw@=v z7ijk_E~;W3aM2&`npRLS)TZ}nWLYr(!C-4L2FTfrvp*U|fc7m@^qH(di zt?hHh#y-)@{g1K^k<6>MFx89TbVnj-KBru$BrZ*k12`f~(J=e&zd zky_&-9aq;50|$3M`i#pcU~;}M_f2~I3!#G)2`plPWvFh*3bKnQY)0EESm)n1S#5rp zXjHPH50@=$=47HuHg`~;x&H3mFT8fIndBok=q0)l5>cX_*0~F1K^F&BU7c{>7tl>TU-79?`Px>doNoSu}~Fh58{4_1>;FsjvVC* z(2xdX3^QAYL|FU{lTINN4i=u3&x;dDA}i$}z-(6&mg9z^G$FOdYFdY+S{J zh|Hxgt2%fdwbXkHU1s?P4qjRX>|D+TJ*jrD@}n6OR*C9(A3U)8woTDfB`b{YEwVM? 
z`Olm=BaxiLd0Pwz6%9N+g0}amRQgj7DFlec!gBGtZ;$W^$lN3VL^Lge(9+RJK$;h# zdTZoZ$UFWBB=I#SdG6s!puv#fNO_~5N=ni=-_d(XCe?f7`g948Qd>1wHlz!X$gK3e zjk?YX;8J~w!5&ej*ZvQ(evnsC2~sg_OL-G@hGSPBOAfL&)B=Ve85UmIY)CoUbhXe- zX`&OF2xX}ZNoW_rRUK0fp8r8?+&+{b-b|YbJ^mb{!d2{&=l*IO-UyAba1zWWq(0hOQ;byC7Dd}wd7KofuQkU3G?wSFKxx= zMDI{*`0iegi6L5cgUAqw^O-ny|Apq@+7eb`Ke@jut0h@Se3i-yj$k(wKRs6b>>&zM zoZ1)9cmOEwkBh6VaBdfNdORJ8mI4dccLa)Pp2kEi0^<=74^l>~OUuci63n*Kp3GtdzTZ5Cm>4k-bxxS$n0Uk=ctE-6jH80o}L9_nA! zZa-!0mfCeuDZ|Af052k@B@)^}d}C5-viobcGGCe?(lWx(26%aO_WSAYAS!S6lPnvK zBTb}?t6})Ih=@nM@BB-C9BVm91l06#^pSlTJ*4g6aL+W(`O}YnCsbM1@k6CHU+et; z%B`3TQXR~fDu9ZpvXj#z(}V|)9*KxsvW9?5G-0FSb)@3;{_&-aTxKYxMHmnL@3-Oh z&d*cKZ7;)pp1>d`#uIB@aHpilzSV=$8KI|&D#YAAl?AzkN)f(G;Fen&m&u=;Oo(X3lGv9)wXC#nio{hSRc3MEu)9l2K7 zP6y~w8jc*UKF3bChPf31P5fEZoPyPDmxiQT_#J`M{}a*WGK2FM&zQYhj$rABul z0D$Vl2b})2{>LW|9RGp3Na2T`UGTZIIc)Of&WT=CXzL{UvKMNK?*u?}#FHuB4$i6i zT>X^!(z^O=u1TV=Y8XFAw7n&7{YCJlIl-a9R5DSMn+JbJ7L+j+X=VzA#tjlcGpy1g_! zqJ)(`1*+DKlH_sHvhMUDM|{qaDi{oVFD@IhL5z`OZh-57J-5E{Z*ciWeQm<{@TqkJ zO**ja$uG#inIt>Kd|I)p{2ecHHP(JM@L`px|7h%Gh{Ow2Setk3(96qDT4F0nOMb`O z-)tN8i|mFF#G-O^e5X|qsH(uKZ<>_{)>VY&DeE>H8t>f{();}u-sSzvSy|&fPW2{N zS<_R^`gd>iX$Bdr=j{`$SP#OZ?IIabDv*mvyMRo@i0A$i*zxh1+4V$W#QM)K3rVTX zQlW|JEumy12AOFqgjf+ofI}nsn{mn!X$N7qq?p{cb7xA%B+p!M%0?WOhlh8@Taoc0 zH>H`#odoxWczT?bRX6bi7q@sD6cQ;>B$g+OmUc0%lh-g1s6OFs(FF9r!16&xIlG-wT15E^)-VG}5^Mkab`*)2&m5%^*Jq1G?& z8r2S4JHLHsHv+b+`WY4$F%7J%B5#kM`s0(VtjolM2+YXsFMNjvjI?xx&tEz07D^XJ zde;&Spuk-Sq+2^Z9{qG@z?wBJX!H92`cg+M!?4iuTse6k&J*oY2zd2nWaqRf!Uu}= zgk6|?Y9h^upwYHrSYK1EyU*;3xmhwUun@IblufD64+c2k);?lFn1LWe9K;0-3=qe9 zWdZG`T)lc#oRSoNL(sC|P7K|=*&*tzwJtq(bWf9vY)atxjw33@(S9LwyNZzzRSYz` zA~p27dJ16Hu?gM9X-p=?cV1>ZFNHwWr310MOD54z?j2cULZ{P*jv4o5>&<*Q?GWV- zA2C8+G+TLgkFE&Cg-+{886&%fn@PSbFu$I2W;gC^s)fsZZ4uAl2Fc?`b||~6N)w`x z7n8^O6EO|sEa5~5;{nx2=Ool|RBza@f!nvGCUN=xg2s{Bo-`iHZ`=rSw@XEiwfymi z#EV_11eqpUpE6j&2B=JM3$1>t%7M{?~Z3VOPd>9VH4SJd7pb%jiGh;@oPUVK!Q=2-X6fF5-=o!*Tff#e(5n&^uiV(bs< 
zEm*jaCsIT&q!`(-CDI))63qjU&`h$df?fIAyz~{P=ZzWL{!kLb^ z+QMEOJA7&Gr$Bn27M7M@j#VlGR<8o=PE3OK^wY|m*;r=jKzi2MWciV60zj4V7cpq| zBg4`m){sSL{O!xCOuXXKmBFBLx>(Quqxs$HTE9lCt&T2&g=j0KH3 zcMl5K#(GNdO52dIev@xWx35g2xdPypZfa^5{CB<=(0_d#tz7Thybk3ZNx2EQo`!8f zz9M^JHtzZDi4a~$I#;rX-b^rzsD00-ksznQxWuA~Lx*oxy>EB6H<-dDe%I%A-~O64 zOAic|66E!Sw{#N?#DlT<4ruD2`LvWGOAEk? zCG2@uShzRt?k^e~iNuw)qmYPJO1ag9K2uC^ddh4J&h*$qd^dLS>;y41)2zsW4Z5(2 z(&dk91yw@FjvenV5OG({-)s{%IL{bIyr^zW?L#S+BpOtlX>z-Q_mH%10M3EhNXnAG zJUlYEkboZi%MREjO2+Wq2inBtasRksxgl%y!Gb=f>oXx*Nc_jlR79Gvn1gy58ekEi zih8I-@#6h1kp@mNsn)bu5GrotJ#$??Qh;5?TFqrC1)ZWfppnap(t}GcQ?mp^`P%ku zPUh+9W}1(VQ9$Dc_k6jUi`XDkubRnSrXtFzOK!b7mOgmiqD8BbMsP`psK)}`lpAt^ z9UBG&z>rsxxY2!aaV`Z*C@yIw9c!ghg*hi%e>*}(_p!$R6ute&Qv<;$8))`7|F*Ft z2GgUPT3Q-0MDDK0fJ`H`oAmMCbZQ_|E<&LV;kGC%lS(e~mQ2x)rQXz9Hg`htQ*<{Z zj9%e3@MM%g_-k|f1lX__NPW(&_|;2}>aAPh0qcN2w04^G7Do1vn<2QMZw<}&6&)XS z!*x)bPD~9J^jAgzGVFp+(4DyqDs?iU1eH>IC+wi$9G%S5?gat-<>6;O_&S<~mvo#5 z*W0LDYFDZx+_H%h$K-qwbVD><$;xU7TqfftZgw=W`bi3Rj^>Ao`KaFANF7wrKlB}vKJ z1Ux{b#CLVq_Z`%btT+hQQ*Ikvy|UNzT^ zoGX)nLvH#rH5Kef1hf+2!M+bDuD#T5@KhkZX|P<{-aR|7;B>>q(AKU$H>s!_+0|m% zLNbrpAfMr=dO#GO=xWdc4uDJR(4lZQin8e-a^)pLLJ(t% z0BkQ4LqFYKq47;p8^=~37Ue1#79gr8@4i6?$^7SPXgLC9M)ze9NvVJdiQw6YcpnPf zp+U=5%3Z@-UO`JPtcXq6mbi}52M+k7{So3)q_xn3)a2qD0rgYf*=(kX;3Q-pvgDzm z?_P8JdXD|;P1oA}=OVv(1{dPpkcN%3~lc9YJHrniFwP`7EZhvQ6uYK^4wiM5(jOk$7E3w$o zj~O4oQX0d;IB{|CAP#V9&GirmES9NAcsJ3=Br~20TpjdHY}~Nt16X=DUk}tzN~1uv zY_ihEjmI46C0JnkBk>u+Z(D9#ZPOReB1jl-XPEkZKdwrdpZ43BcL6`ULI$$nAv`$r z$6s_i)R-Hfe)F@tY_`c}?b2kmAW%bbmuD*g43wxwVY|eOKBAyth7|VjK@`osJQpwa zeV4f(27El8LwSNgp;bteM6Mv|XkgyfG@Q`qhZ=7}C239plfQd8P{@SR^Q}*%23zLS z3}C&Wx<}vHVax=o5wX3KKqv90B1rI-k$ygXX(6fd4n1BDV&F!(C<+WIJ{28Wy$}d2 z75swHCYGz^oPT6A?D^SQ4?+Uxm_~Y8c|M!-@aXoTWlW3jiMV(JMSsj`7gNU{w3i5Q zYSP|vG0RXffQz8uU}@{M6lA}YW(iz|_WLT>1DmO;$h-KWnJOO+%_v;wdC11n-z^4) zM?SG-S(0*^F7m`C@L?-vyTF@6AUKr&^FnuK7qlzVwUc9GJ}Yjd!u> ze+SeMq~_q;x%{V?@W2?wGs=Z62_J)I;we72z*3_@Lwv8W${$oGGDOuWJK$zcST{%; 
z(om-*DBJv4R`eopXc>kiKsGY^`<8Rz>^oBSG5$L8BRWe3f72S;n#l~V6$W;(8ry3H zT{=ac4hp^eU#DM`j}J4GU?xhe@2vdXClNT;fCCj5owpz2E_hpgq_!X!Mu-2i^7svP0ZB?c2)2F|r z##W@7pRuuEm5N*(s7d*uJDJJ~X+Xoj4vQKZl3VsD!sJm#X-bs@@Cl3xp9W_mFRcy9 zcLjgAfhG_nyb(a4ApV>s!WYqSb2qq>uTgq?vzcvAKga(X{gH6;^y#ZC&!JC~$+S}r z9~d=><1qkwSk5KXLk%gg)W?s1>qeJ>_F@RpR}@}Xm(|VG<6gkrJ<5%T6aHmf($a;( zd!ngGFGr!=MRUA-kO+3xc@eW-1hnDMqppm!U$U@v$|G7Cpi!4_>@}=Z2?#_tL(c=m zXXiLP(vTd`hRh@I{QP=E_}69pr>;pz*Ovj&N11T1r2u>RoHTdp)F_`{6bjZltx#kv z6NE=bl%PUUo+jW%GJ8p)VLaXAH{TcXzlK2hZ=zcS0hr9u=iVS3otGPDC-r8Yd{`Jm zk(yAs>LB=3p1#F#d1CSGW|>=>AMT>WgKN@Uz@@L@rn#GF3mxr>y*5L5p$9re%2^Kzsh~9r_owYXRG=ZxrjI9(JHJt7O$BBa zoL#0GHWPs?FGDi0S$NTR7ePTjh6hqKrwZj%?XdYGS;Q?zjk?o)&tWb$N>o(8uhxGy z&UFgZ2lt^u8yH&N1fyWv)*%_O?H4)4*jGudFuwPnYyVbGPQToU4x0GO{R#b477WEi zh!DGBi25)7dheHgkxp=bu=`Bg4Qd}Cf+LibJQN8bwMg+Zm?`9(NTAul5)8$WwCcbC zx8Sng#8skvP+atxmw|7`age!vBD+kI5;+$C{+7&}10{hIjmPOBAmH5h=5%69>FH6h zqiEUU{#T?cYB;_BF8HSzx6)l>(= z!^2^Tf!Ni&@BV;qnJc(syk*bp4T%4?_r+R!+v%x>Gk3VgoO5dP_1}*u!9|Di5GPGK zmJgCSud-{U(&iG-(H8Y7Ef|{KHWhUhnz9D(e9qrkK$grcN4(Rz`m;d~Ky%PF z*Ww!Z6N$sUjbonLgHufA8zNaFkPI@p~eUu2YT7BN7YQfGby^Kyn17 zel~5F(2O#gf_n<=WBRHPsKey%FV=Rp8Kw4;SiwDenz9+==jQffF59>z@6i_xVRQ(* zf^g1Mc6A>Q!6(@ft>wv(;p)FkOiV~Vx|1uVP=eS371*jr9!vyvlV?oZRZQL%wMxzz zBU8IUXs!q6@7q95r??KU1P2Yh&M*!EI;CpnW`eDH4=o~=8orhUfr~|pri2b+HQGYJ zIFY(BE(p@8()Sd{KERoxGrPhyx4GHcg58oa2zK?Hk}I13PIi;?0hmT|VhLw5ew@sH z8+&0MkgVpQZy$5IUKJ?P!Oo{q`Z)&=EepF`%fnGm0oGS zQl=Ln4~TO>ZMX>=jvN@(5G&8)Q`2ihVFcl@S9p)R$iacParpiZMwlBFn`j6$LH;bq z9|P;eNPehZ-)XaFuVsD7pfssHPtKspT`oW~_f3Z@Q7ZGzEMA3_jCRWyc$QZYk^?`W ziWNMXQ+^x*4h%MbJ`+_P0Mg3cyLSsCq6QS^junaQ6j~MNj>?Ms%vW&^M(4F;dM~uX z6$F9%4*nwCfiE!yL|8)%+m`6{K0;=_$djL_qPjN`%UzSuDfxsqf z?Ce0`e<6b|^LwfO`;#{s(zuqUS&JpYdFmr*Blx5s$s`7?Mqazqa@u zZP&J~XzF2XlxG3qO~jBss*~NIspS8`L!CQ!u5~U2fWxFKqBN8nnmK7&;6hA@ACY~RN)U~3_k$5%8qq>j=a~sD2;YhkHXmx!7vOqjv zz&1BZ{`GZ2{rdHrTUu_pxzy0Wu_ITf0EraRSUtgKYE-XY-I9?jKmj{ArQ1rqz$F37 z7$VInKSLOv|Cm#zZ_UgQrAzPsGzJ0jy=14PW3@)*Sa?ix$=U@EyN# 
zDejO{8JWOOKpYbAD~j{*z(W;MnFNznpYI(L@?{gcGT3aCt{ zwP0al$9mzlaZCLUWI!%O+!~WLqq~y7M^@Sn zx@?s(D#J6yCGRSQPU00-sX336=_H{9TGS&n&&IP0mzq=-ojNeUe2bo$vouh81T2Yw z4G8n*{)~T!)B-vcZ4)Sdc;PmEZIRk?yN&(S@))E-U!N}-L$Hz#qFj?ir~Uj*SUr7>iR|2%sdqze~CX^OX3OtMzbO;D_h*goNY8rAz2VC0&us()@3Xxulyvfd`_duU_P3 zO)NzYz?=-u&2RROu)Zmv$Rb1G91VX%h^35ymS8#ttHt`H5~{nHzTDBFC^*NqXtE5l zf^xCDT|PVObQ@7&($X6*S#VIa zQYJ%>0HPmbUXAfh?(cYtR-|Q>Zn)&F_+=Ns#DdqicUMQ<()kw8hNUm#H-T74bP43A zAl0cCb4*{G5ig_XTl3FMen>ch z_80P0X}`tyA`s(g@CR3})RKa&{8w4ZsfqtJH(1w1CcG@5x~XAU$Jk}SvV(25mbPu* zzS$5}R8K_85|Kt1M$5SJwRm_X3O9jpn51)Xw9OCVqUg#FsQn)!SFk zpKGBRgBF~AyRnG6pghS{e13TE3X~1Nb#16&qw)BeL!dByugkQ5QhCy}0r1j!uU@HN zoyFm}t+Trx*$WF71|B|q_{S>4aP4nHTOj-q*CA#JV{2WR7>TSXjppIg?M19{$2so& z5HO04?&(`PBT`ukY)@mml8r%8-L=V@=&OGM5L`bjEA5rD7bi#P`I3L{>5z^zm<$~B z32Clp!p*9&(?r=Pf=Ql2*MnxtCYqoP<$A7ig%#4NwT=Z<{jfiZ1{U*uD1nO}miXL% z$8e!4+E-k{Q5K@+x3PWP=z7wZ?dEbm1t7YWPefa8*~C9uEje$E7t~Z>v|R~P%IREd z{`~psAb zWVhjeg-3B}c9%6FLd?2JYljt$P6Q~4T{kFdjQC-SiUvroGVwiy-&`muzz>J;14M(1 z=V5loHg*|FG=jfFAMv4yt9cAUEujfy;2m-$;|ocW(8_e3lkxEbC_;$e@}py=V~@Mx z0flDJu%VtwWmu#8+rpxejcBkU2r)K0&wC#Dg6o+iu41Hbh@-&J9&C{);DUo-j%dI0 z*QLI&f10pmYFyXIlP$?jU^*3|Vq+QQL82ngX(F17Q=qs}oVbCQh&5E^7tuG)?A0ry zcbjS5JQr%m7Rca9x}Phgd~*(z9)F%z3eawOm`$SaqkfQ}gh9!63bSz&QLUot%uHT|B;*5(-BXu#MTb^9|+}cN=GeePn0UUSmp}~F7|9ThM_T7 z=V4&30>hDM$p-Ai!~&Us*3iV$yNN-#8}J|ptd2<4Yz-N*rbio2DJBHg++BMaV2}NV z!7k4^q!oT3w$A}#e?|_@C%~VWiCV0s4b&F-x@D1II!^O@L7FzgLdqYxMB^=nd`gNh zx4TJ_1-|A$^vU8y<-lw0&+*s7){y%T>Y%Dfg{_e*3I~IRur4J#ViSnxUV=f=6iHMM zIR26qR8N%7(f@9=R=1|{1r(7SFEa7`K&wP4{8ZEhq#{sbNF~Kwz=6m?flEl)l|Tb& z?$D`h*_`rQW+<827KL~4rLk#$D-8=jV48^sY%Q;U)j5=;F&aPm~x~%!^_;-t{Hn=IZ~!Io}tZZ z%iKkfuoDRhcbqC){*N?1G}+aud;=*|SA2i2gOGdVJ?{5eY*n$q|EG#s9W|~w{IDqX zu)PQ9i%y-dce6PY6~H>hAH;`V1?K@2PvVeGK5gMj-o(p__{F()b`uL9rGC@;chCSM zgDsD%Xf3NW;XP~2C_K8;wXpgQmZdE-wncZ!FsicN&h5Hcw-fta3#$yWv%B=Ty5pvt z)hlxQ`%GV9*rw7y9V>mjGyZ7TfBL>TF}+>mHD$XZnOQ`A7)i(}>T8NSaa6}8|InjW z2ze8rA9s3L;?x 
z69vmdL#HJ8p?XO4+GC{t=fmsh^!f~N;w)9|-SVmF7${9R<$L`Eail3^ZpY_N^8)V%ETj1Ya% z{~Z_$RcGt+Q?DORP(Kj+zdRT4cCKj2W5Wc;IMaIc5^+Y|^z`&(2#ln=fd&E{UZoPM zf*P@AvR-5ukHNUq$L@kr>wzo$p8yZZ*#{A?+_KTeIHatvlX=|>w)aqUuOjhVqS)Z8 zTFopb%~E?{1-59~0b4zAi22z^SZ=9#MF+wOCHfKc$GvE$Vlu-|sbbAK41SZJ zFY0SS1vIg2Lpc2e$VAqQia+=D%Hi)UUv1;U_ONhwhNeKZV66bUMem>_o>G@?WN(58WcHa=t z=$cGwvmoW7p;_hJs43xbBp&%0 z$*M*W8V>6;Mwmm7R)XO%?MqBE(qmHlsVL7hw^2}mu@tBVGZVQM72XFwO~;h$z3X>2 zuhEkfaZRGGWa%A%hcG@3#w?9EO}30A2@d=i!Hp+g0y(q9VgN*V`Ps83ihZ2G5;Vqf zP)Pxtu~i;Y*JbaHB}iN$(S1hp7+8~7Z~X1c29an8mIoX!+H}^Wg-JVujCbvjzFd7L zmKx0U63Q+|q5KXQURfm2OBPASDaVc-Q@)TCCM-!#(JA`c0taFX;*gXGUdUAAgMUi( zEVQi}KFQizu3DIW(HcB((j}42rE4orO6ncNacI0Q-2DtdpzgEc$CsJ$X>*pO!0eO+ zn#Jy8%~VoIkc-;xl{?`#R$Pmo3}S)8cd&b3)sIvZ$$FORMAYttMyS-M$^FU`*p+VO zZo*s_DM6UY#pUaJ(1WrP!dSwfa3ePX8kbZcWq(V9jM>pJ2Se)}#gf5GCfq%~^RF5v zl__0s)}feGPe#ErH06=SKzIQ8b6owzQXM_%8LX5VIkqkq`t=a&l*I@BS&q}^U*GSl z*lE+JuYoy)ncIK&Gi8cnA506K!QMtstIh)!y3uI>NB z*qaB`yuSP2A9FJ7Mz#!FDH@EKhfT7gGM2Rp5h9c^W9CpOV_4>?RBD-ah$P7nTWk@T zLMfT2vdu%!>stB#zUMrDJkM|aan3#twLZgr-`Dk?uG3Th!6XVU-yOWS(m)`Z-QQ~! zvNxbW79S>W8Md{mXvtZJA~EUIX-M&xp0KE3RUJFl0n20tswHcOm_3F|xv2lom$a4Ou1H7V3#0jWN#S~;hTc7{OQ1!%)9Xd2@ z+Vqvy$`^w})V_Ty7B4O@rH+Ii5CZ5K{|~&MxyyMd0`-^)(T;9FoFP;D4w-XexJRXl zkToS$8rknblw{wankDlG20c!S`203mrF{*Y@7}-G{1Y7{AvyT;rfy9pHwty?*RLjs z-T6xd5@^rgy0!7`ag{3PKAofLTR6FN=@F>-dI%; zF%YV=y_+|Dl^u|#=kKg68GM(Tn#z}63``zaVq+p5mL?Z+Ad&fC8Gnpr|GLQWe;qmv z-I){hEq$r|K?trpy%u%0l(%MP>L+RItnI$CmS1XLmN+BZb_sJdv4eGo@iZrd+* zamub$v}4>5b+>*)Y$iT_`qcZ`w@tI!e14l0F<|E2idHjhv&K#)?R1DJrRY5T&W2Vc zCZ#9V=64Tf};&i8;6^rt9T-}P(=+V|PDq402bTFKC=V(PE{`8HE zcQgoJs4?*{|A@p={!Q8R1lizaSV4WPE(i}K)KT_z6g0@`qYLh_8OfMh0A{n?;p?o? 
zTo%%59NPcAads`CZMeT+6>tME_XR_dg!OS9+x<0nna!O>sGBsDlAxFnizST^pkqw#;A0tnSu|9cBpux$d-qB)7 zylN=MHO)O<1*k*jRNz^ne-BJx>kz#VPqEtk_=+Kt-~+WQXFY1F6|ZE(I?8{PheQ5h zXGT(4MQ`^4u$62iOw0o(>|*JN^A;(jSC{rvAOGzMzT=vt!&tj+9*TLLapFS)X6heU?DAUr| zVc@~_9Zm}v-Kq+EPTbHRML!-FrtXJmk)xYie0j@i*S3&A(e?*uhzMPlad9HX^{6DL zH>JX$V-xB~a1Uxx&u{G2BT$uZ?l^5sWar;gQ!_e7Hb!@(5D(@;CxF64=3kL2roN#k z!7}&rSnIZ%kdB@}GPQ$!4HnT`6t3^zwL8RB+s&XWk2x=t-S;a6%FDkrYBHbYJ~jFc;Q4j-uEKGKBW?#1^R(GIM%uS;xMB_^YT!`v$zRWRhxQ* z-+aOuA!@r~Lh(Fv(~2+Dm2@4Fs|HlP1QSR#GM9?CJ&^9ysb{G~+$WTtF8%Qvct(YH zFP?7Qw9C|41nh)|Y`=FHHTUzl{<{u#IR1_By7O$u`=;kn*GRd?{wrhY;P;|lvTdoj zk>ZkrzSi_=;%VV6Z+;v{vM?;@KQK1CaD1{gRbi5oq_1p20D@Y<0H2qnWXV(5v}teh z!M%IsZ;08S^>~Kbkbxuz&YV}N+JIwcRR6qba>d%3&o!}u)26-StjqaPUtlo;_8 z=F2C(FCZH=a|QkUs)IYp-R~FP3ABBh65mw)F);mwMW;{bH zSZvZUiQfxQE`7zPckrW90$B9xuMPspJWG9QR$5uoYpE4v>IN;8NN~rDY5c3xXS$DF z*O%}I%mQWN!?1IQ7Q4=_RH;((!qTp^J7}E`exC1jfz}qRIDR^%_F_2nP@`lHcd*xp ziQh_elYm6zSm6z6dQ*JdNXRZCrf;Zc8DxUXjI@eqj53&JA=yT@Z|Ymrtt&Z%Vxget z{f)ZyS0_rPByImB!`S_tzJ>B+B1C`B@O|;upQ8|o~A8U87QK}&!81Q<#}ae zX_KK|?BKcx7xhSD!Zs74MXg|V`bOsWO+QLD1_6d-KuqTs6nIt*NAWU=@?p@CF{}Jm zlz>j3JAeMMb|%}Tc!FinS7WsGS6*-c9d#I zGhxSgjz`{=Xu)dDx5W?T21~%MXvl!0{NJ<)9nfcvRrUs+p45Y6b$5@_JS?@*1CwTB z=brv>%ZD%2H*#VYRu5w4Agbg_69@2GwvqQk#P$4Z!v~$4UcLOOVU_B?hjc^Bp*@l& zR5dG2NoV%wfCBd*s~Lsuo~YDC<{G8x%Pu=h%r1uCEMi#-_5;jfmgTmtA}$euKV?EA zOdApf(X|7!h#8zTj}~nLL>J4JExv!)s5Zr(%k3p?hk8>OgrhSS29Q`3%~<5vlZ>Bz6DItQI6EB6Vp2G&lJ12gx6(?-8CE zrms|jE3DMjMfeL)qY7F-h;R$C4QOHU)eTJeF_{jZt&aRM=9p##BSH^NOIu)*T0T0$ z4S1iYO18lo>fw#!OH907BjYX|e?0-QIjltQ3FGO;XoO`n4UYT(8yjoST|807ru6Ca z7m`oM4?oEJ63hv$j>t#9@9#gvfh*?M_we%CSa1SNPdu}nhVdRtj_1a|SbON^v7<+u zChnN$_=m#Qwwp@B=GZKPIs_^(H62*Y*q0a+c7+Vix**(@L&-S<)|4Rbo;@q`n@0o$ zgoEry@h_!frN7$u{BM=}qrH>a$K-BoV|5f#>KHkvqtJnWXGrq$>>q@V%A|I&lL&MO zOC@p~=A%|);d_mm8Xdt5p;~gTQp70!d9h1eeg2+o(VlU&2_F#^pJkGo*^lMA!=$GZ z`uF0+kXHvQXMnVICrV@r8ASbtS{?W(TQDFGL!Qytfc;6@f~XOPKJIL*Jy!UD?`HlQ zL}&Qjs~=BO%v8wF80|%5bpsh%MRDKlP3>kw)N#(AF5?zAItD(v0dYH*a zU*leHxyMk_wc 
z{EUB&fEer>eTDzogAIbT#)#IpmLHA$zVfz!&m4*HkzD#sbn;U6v3*PDMQ&L1^gIy} z@Ut-#zsTv0F3J9Hyn~|b9(-QiOqK)PlA>JM`KNwnDpau9ZBr8ygclLc`%-#4jMy6& zR{#Qt3xJb6@G>lZEGiIy7KzbqQ$NggqLlD~jac|BHpFE8!^iqsP^*)yC;KE5TYSPv zn6e32wbor{xtZ0Md*-hO^(Ri`SUg4Myd8 zkXDLqORA*M+xMA;D$>Da%c{^|0J{m+hSdm12lWMXwdY0N;`r$KpaeHdtpH;O@z;A| zN|(`El8PzfTTL}dP-m@&a~5SB8MZ-Fc(&Oy{5UqXj7=Vm(9_5{e%`kB$Y({? z&d8?NLX!kfntPFMkM{Qu=wuj~DT9Gez387d#GZ*d=VzAGy{NjE$rBT4#DZ_H+oS8i z-7oB=d>0O%kz>bJp%P~gw&D@+TH{o+mNWBo-mdu{>okd7=T(;)LHyX9xS}MX+F_P_ z^i;{d6g%LYf5!nu$W$31eD%tr7GhKDNy-oqxvfuYKrXOd+VU$a^;vNe%SiO+I*n*X*MJ2 zWaBpi-cn?WiDVIz)*sb#mG{&4;7^jnG3e+xV<50{iiq0LxQ`e;TAgq={O3Lp1IjW% zdXOw>+EI~DW9(!FQ1veJzpDLtE3$b;*8!w?E+Zg6Qb+dD+`GJQ%gu~)=X$*Ewj@59 z7i<|g_Tb_ZC#~VD&LfIz1&!qH+EbkpA0O|T3$`tFo(oDfCVi~{RM`ECq?%K^<{Lk! zd)DBdG+5`5n@WHdNCxLe18T*PlPBj!ej5C9JnbCn&Nd;nePR}()Ok^ZvEnxC7RKE@ zy|A0KAzbDjP+A!dLPP`ybHcy#gBDD()Xyx{1VBqkmq$~_`A*~QK`rk2EjBLB54cz$ zfKVeG8JgVlap4S7FQQk(nej~;8XMci@-Bs?ZL+Q^jUNi3%WZ0owGg<) z9=Y$-Q_T{8**gY*InT{ z>~ZUrrJZjKTChs)f+HtYK$#djthjZJQ#JKV_#dv2o`HVe)_dXXZa7#q7p!z^VI$a< z#&@dh5xc&2(%}23qknz;A&*p=tpp*`+hw-xK{BQAF#7t3yc3VRiGTMxv=8u5uY9s` zadFXPW+f&i9oVo-gt>e)wWTSFJ&}@901AKzQJS82jE!=5FpI_&zZW+PcuOsog$E|Hp-HXNL01z{^AI49gck&@?#CagS91~PdM)mva6 ziXN3ZmG76K>f--yvt_nEF<#pQi4c+&jvCwfH~Rj(b(;vE&tJZr;bH7B7VQ|9yj4QR zMI%H-`|_nv+^9q$RPg)acoRfLB7^lm|6f5CM6E9IB|;!EJXibX@~EY*+n$f>q!@d+ zumZ_p`VC))_k_KJ?rlQP$e11yap-aRXfIKRlv8L0{OzY#5NH*JiK3up(9CWpPcB-f z1B>8BWINm;%O*{FC476$DL@&dL)6^>Xg`75 z?%%&pi&T|9kx7eIbAF`GYdy$dQF?m1)gjIKg%|ZBv67JN^XL)sY&dZU<(tp(3o!g5 z;nG=R%r^EIiCpUf-Ez$5hX8rb2E6#ZL@-DEhm2YDxLM1YA1O%p)`-5p`1-`-$1k!2 zo99t12?PVb0?&7Qgrk*J1YY`A>BV`YnmoR39~~bgcV69$vsPhf#i^+uHho&ZriXfm zxtW<$uVvd-m>37R^QMTWNof&rzxT$b!j%_8ADsi7(Z3vu6KJPIVoRe#$tr+3+i6Qu z()(sK@igofVC0cBX40gZ6dDq|a#ZD(>#Lpdt?8t_NNiMF-<3q{Em`}?;c~|s9zqEy+&%xxWx5q5lG4qRhhX-iGX5I!cPE=96Gl3xI4vP z|BN+;U-~>c?Au7!oMH+%f^R0~W5-X!+2k?p6NklNn8TogKX)>yNOUYYw>WVyzz6nI zsawfpF;sIPOqX+pKRm>1pwERK7}w_vMK05fwoU!ef*lzMF^E9qoXU+z2#x&X!x$Pu 
zWW@FlCUd|u)9Ng5sNfjPLjQwm*3-LBwBg9IL^S&_WGE*N8ecDc)Vy*`UrrIwe01rG z!dV=;XHTKSYZ zNLbXuWpJ-jbqQSo4zl}A zva0Cp#4F+P?o=+)I9-EQ!;%}@XFhV@mD2T6>POU_F8ih`W;+F-AB>7FJC!U)jxH4cmDeYfqQvZyqKamB+Hj`AcT;;;+so8YBJgU5VTs zE`$;nz-IcEuXBQfsc$RsVUJ#d_8LMEedUE{V}Hg8r!vM;i_%Im-I4zx7s zD@wP|{qRB)mo_==OI$&>4}cr{I@Dnkhz$Ib@e>M(-3RYBR4EO*1c#Ju8}d#eL-f)O z2k1{6PElD&p#qVU>&43cYD=la0C%dntz*8(GBy?&pF%oMPvGRJV;s5sH-FMvf}k29 zxd1RDR2f9;`Zb4!_74Q4qflc`reInOoV5+|PAxt7Y-8<|xfl-zM`8doVB=AIU#-hsLl{km~(|n>bW*-fHztXoF2VVzVZ~#|TELB^w{;~F4#`f;ep#SEU-D#sa z@FL6)^Jc0zr!|hvBRerl^*4&*vmCyPf;fawIq@9`V2XNyTgJ$(As!7kyd5FK$#%|m z8^Q!9Sp<}#@Nanj+e@y0KJdVtqMyBGAu;Ay;P~_v&YviQ1fc7yYGM_=NjxNkghgDs zF%|ny#6{#WVJA<1bC~}J8I`EA!5IMP-^8nmM5Cw)D0w{)P>Y?V>yiUapZ4b&&-wE0 zn;xQJEhSErcm*s+XRcnYnK$o!4JacoX9=K-6E>pc43zRFu^E96nx4 z4JO*DCo#AvawgEbdR^L#6PZ~4p6q3l?k7jBYZZRe1r>y}ON=-37+3c_l9n#>J`X7V zu`o;bhvR4i`t8`>M| z7s92igH2!RCUur!KL}j&eqv zcNiO8uEl~wL?=U09sGAti&CF)xGH26ns&6c&frU}C)@;R1Y~yNYM(HnsVH?0b1O2K z03s)>L{nfM7I+Ul{Ls9mty;E}s(|2>%<>I29nqBw|1R{IAV~xcgn*!6Mq`_sOM{2F zdE3%~G-^Y|vx0!7QIjTBh=^`F-Rv0)Oyqw`I!bcpbPSO}Lqu4J37}q~Q0#Wy0ivjI2Mibx0@dOCn*TPcSFgJA zS7PEK=F~MVFtK~hNcA(U70b{N_90@^fg&Wal`O|UxEj%0@Iy$|i2!i4pJe-%z;s+t zpXqVh{%~Aw7P0NGs4J?~o1Q}{6FwA-BuPf&{f=_Ys)0usn_PJX#BfhGfYE_!3MZZC z^JLYFn|QtGaw|}_Gs5;9BoT0TIhGsUcojMSp2rwPXUZW-H!h(k!5?4M`API6)J`Fl zUeoD}9_>g#wT#?E(T;t0P?XGX?N~$9bCpE zqbgzT>f>3mvb-l8iC&qS+9hKPSbV#{K5j7t*3CpfODz5ylIOSa z@IL`Y9`)A7%^9Q-#RLKjWPigqbb>ZVq)!PpHZt^RKk{1?B0QsS9)ld98TAMp+M~xg z=R#AB4b+w1&@;*?A&h}W(jLruIJ*i589_C%1s2sLwY*uwhP&^K$Hz1dXk2hiv(!y_@1O61*raN3J!bB=}@HC?&(7G!Z9u2^C^ zV!bQAU>YMNB&@2$-3D7x6(z_+`s3&ThE1AbXw|Lu%qA#Xvvmr;*;Zs`ibU~VE zKYq8cI9oPk03RN8UmNBXi;9g`fF>-A@?8|d1?4MRO+4G91{YVi>%3$?mzO*&L49Mt z4&w$5u=EGzV=73DJ(Ydd%kZGW5$wF(M4 z{bEEn+5`zZHoxd$`>j-oY%Icfh7IxPSEB*YZ2o@1+u3=s`^8mj)>PSjKY}PN-8`&@ z>43$w>(yZ3zO!`X_0^cds`uiOuQR>8R!V~VBXs~|fEbb(h#~q+RzKU~1(rlB(gZ&?wdpP+~VacR7DxRNp;z06!ptR~`iib8fOH>Wa8*8Kky|K`~c&xRkeva&AHDfOO!_=`Z}umv@xad9gH;e>Pmp_CE6 
zT$T*4qheP9wvQmP=@(82`3lTd{@uc-_24%QXfm#ko)_&Yv99SWUU%8l%qPxvy2E7- z6mHTvZby#s8dn)fMEL&jH`;k63s5WLok6fKsRKyWmePJoLkdD`-rzm8vTNVIVMfk2 zg~pntayvm(B(-$KM2ViH=QW(1T3i{koOHHoKg^?-nmE|aRo}Y&c>VUw8~X9V+T>Lo zjWx6RO_*4zwCXFVpL;OttqTE@fQT26ECq4SaXPpL)d)`faEmU%%qPzzz~PJT4e;2e z@lQ@^wdP9BBE=|>Z%W+o%BP;t__nPN^?UObt)G;B zM&;=@rfvBudJiNSGAo18sQl2_ooIITU|3zS6ez4v)JD6!}Z^wLeD?OAg{34?>tYA9b7Pf2Brc(kA;CZ+kDG8i!UL zS(K+HQKFSoB$?^+1MndqWO0SB*TqRq!j1#EQu`5bWy6wZ58l z^TNinah&*?molST`ilBSbW&8*WG;os$;pKZ`ig0Gkl!wukqj&@wr~q{7xW_OckUz$ z3DO@KjHEd5f;c1<9jJHQ&}L#APJ-kUC(0@KePrIR@Wxb^Dvr7E`R_|grZ8km{)~Sk zGEnNqY15YM-8+ZAT8tmgM@7zA!QuhwwpzLWZ2Rgv4s3e4F_^2>7H z$xORZ6d}Q9Tdq8DqNAW=^lr>lUds5EGg^*V)sU-1c~Q@c<_216#F#O0)AJ;AkVVKo z%(g-;p>74iq=Z0571AL{v^|blaS^k&&OniX57DB_*gwt#{OvV392p42-yemuKlbN# zFk{`fSR-=>@C|TA`bl`{XtnxhRp zMf;AnwlX_X2Hj9O1;$14SAX$YAaXcuh9<^NBfnGfeQ2;Uj2%kC>q%|S*=2U}o>9QQ z|4$yB0(_%|2|_dDQxXqssX9@*al*}bW3;;On6Y8rq>(3~DB??i?E4`PiX(j_!OdXh zt37`J%?V!QkoHH*5g{p7CYsIj9IkC*3=CBVJ78+IukY{1&JC%TrQd$>6bN@K!lvdt z1~vj*Zc=I}MrLL)<8F}7lu?MMrT~^eAAgJcT5usb6|XUADRXr{HQRZhC>SLZO{%;* zw>mt4HMOI4-lVb%UW2R9+r!hg$h^{N}OR3SVWi7cc3R(E>CBSQODId+A$YOCbYBc!dM zNSIpeg5hwOk~}PQs2=XyO_Je@)$VG`*ok@jUnON9Thcgt0}rr5aL}#d%j+ZEJ(+Qa zGHLh1(x0eB&H*!FYS3NXtn&Aw9}odIUM6R#L$j>fAAWwzd&5AU%ke?Dxxo*w|O^2l%y&3ddy$%W_+OIKqj*gL;5>$Np$H%%4 z=YJZ8ghl3ENbLJVC_`r-gZe0!Nfk;aj7Elp;t^F^Ty^$6JUSdJyIq)1aP~_g8_dYA z2)I$1ITG%lgd0R@vX~dOQKwE83l=Q+e&2b>C9?9+0m>v8jvvI?LMh^arx+8=lh~44 zoHN%D|0SoSM6)4nMQ=KWgH9#J&29^Q5B|irs|-HhTD6gPOV{cD&{IH|j!nA@bmk-x6;=WTJvYpSI3CQ5@(;^mWBkdn*rPrMu9b|*HRfC{kf74??6<_g=RQinmr$@~=2f^(5||0NAuK#$|cmqG7K{2Lf@ zJ!#pAr9(g-g+NF3Z|SA1Mpv&~Y5H&K_g7vDu6U2cA4Rzj8{60<5R4zl^y>8M*Nw^c zby~0>jTxT_8DIod)r!LZt|EfZ;5E(r`T1FiR9TY#6fxg%km~+)@R*HCueu`h(GLEE za#2D^W6}CUD;%B+fy-#;@(MhWL@fkyStj44-ew1P!QsNKz$-_rjeVo$9w(;MoyNbi z|C_Hu5Dr8Ur4L!m}oBzE3zs?P~l5w%(cr$^d+)xDnNy@8!bgc8M^ zXvUj?Hx(n05|=IXpxAtYsb!Fw%pLo$u93P2C}Aa_fE8P^-aQTDz5l+8Dmt@aUS!<< zeJMqce_-!cZ)D5Qya>S!=-UG|6tu}ZMiWMZ7KlU!#@c+B51phiiL|H=U$+uj0dBgr 
zTK|}e`U7LRs{@a_5Df()8W+|i648<5T+?4dhRb}Dre8g@1q7$kG~|P7hV7h8&&wtS zsIZ9MT&P>1o`9%!ve{7lfOeoX^S{v8-V}Z8`HT#^nT`#02c;zA{}@RxNQMig0xMg1 z`xO&fbPF_4L}mrpdGv6!iMBT&g%R_dp=73hShH$XCFSQr*q%z_JmAcf0o5v=Iy-6@+sJS-K28A@&5)Vd$sa{d%|Ge^1jVM1UsyQs)$b&v z#mAgi`M1Z5LY9}Mt*rr#GF6mJhVsJGDAy3drkYRFA5lMme<)TUw!?G3^yp-Wgc@yK z-6zIO(r6I1v~6n+;BkiWOdH;u1Os7YJ+Zemh`Lrbkbo{+h<>aswRVSDZ|X_s$b-n} zH-|<-&=qXh<**C&y=dcU0Y1s4qCM$tUw0p8iICVig3t(DKOho*<;<&~g>S=YKFqh|&sFqefc!=yfulhU-UxIorQ?78T1#T3b=80LXoHCmB%2RiTRa z_8J;JwHO&2r~5l(5fXaLONMkebbhO0ocQ<;0r{01M9m+Qfg} zx&B`&wEi+G2O9Q`MbDhdWmh!m@!yg5gUeY-^PX&IVw~`hwJ#;OOVml`IEdOr66X+P zq-Lj~i;}1pFt;h_r`V*SQTK4T1sASrZ{P9H$P%J|d|tJseS^)G(WEpuy}(<^jT1Kn zRm@ZEwAApUkrdv-tV)t4awcc=kCN2{2splz1y5$tk|h;nrC^(3oT)_X$b2eUr}S&5 zvSru^AsJ_Q_7VtTS&??afNUxP6WA_qSJhvc#wj4Tqp4;FZCRN)mn!Chn2<+}D)h^J z39d>0C2a&&fI=B0hI&>=O*)K2srjlvTG=X4=G_Sg%glgER-}FttbohS;7ySfSlOj` z?m|%oHa*zJ67e7r@0C!9a-bMfDTu~XKo9!;#ivf4@_QP~mLLdndg+hX_t?mp?yXv1 z%uf2O?HSoMA~z14`7#K?25%`PqO>*62AEhjB}c4v_0h+Qi)U2w5YMB-L=PyLM08G; zG!e+8{1Uevy#Yd*^`huVOSKZ;WGeKnyrzp^?_O>)1S8iE%G3bzZU>O$Y8km*OyI?l6f6M zfpL23glg=0k(~^}duJ?aW*Lcj?oGRDKa*~p+{9q)`F!7kP0U)MNqXR+b_`VNdySZtX<+{2IC)KVzm-RHJ z8NE_53>G2*0C@?4gLboj-+^T(T@mJha^kyj;{Tzkzsa>3>yTr*rYP>2mu!1%7(yT! z#tC-iWIs$Pw9{ z1(skpru?f7e4q!=>#vF*MElQcXFn?v6QozGnvusG0pW3X5Tl`&DJENN(`}<=C?6aY znn|L1rl@^h^?WaCABuhuxe(H884>0G?8OTiB*2V3X#}~~;9hGG!7#k%Y-Xk*MrZGN zjbWiO+cHHoDeoz+yDnXZ9zd6sst4hEY$=#Axc$ETC$-c&5Y{54G#7h|K-XXZ(d^1H zqAmvcbt5)RCe}&y?|pi~d|@5z*#Fr0;FFvfL~!#$Zmti($!f{Em3O=hL?H;sH>y}- ztiZxY&})JH_n;we;^d(nHGcf%=yoWG3+7LnvqSILT)b=7uDC`<8aL!xlJ_Y<3m$JI z?w^fOME{#;QuZs2&I+&5221Ll39@5b?<@4JP>p;Usm7ulmKCUjHQmR)TE*12EU zPR=t6vXUZESNdH*J@UTqcHdcgazN63QU2%-ORxZ}99wh`E-FGMYSL>-D3{a`i0Af9 z&y(rwty&oa`2F5(L8`u>rn{3KT!PU&JUoV)yQpG{9`D-q65%wPk9Or<0AJxb*!Bsr zOL4zI^%{LUm-Pe^<+_K50c!l@aqjOK85sc`s(KVdMXteiTqwpNQIb;Cu(OfFdGt5#BokY-? 
z3(_pgVbQQ~Is8;s_985HP_WXdfB}#1)L5F_F~7bads}4%MbWJ(Cea1hi>h)9q}hIl zXRrXI<19PH^+UtEN!`fMkN@h4&tl4~0tGP?nCkqla|cejuICgGS5N`X ztAxy<5F8*dm&k17-0l_A4%3lzg{UrMjyHHF4W{_#+_mf0mCw(}oTOQ|X4CfV`?G_> zZ`z>Z4=1h5ANR=3l6MNipe@DzREclBoNB3%Gn62E;(qH713y2nYhYzJPeY>TF(}=s7D_lL0v`3Tz0N0-?!CDwTQeQ8OZxO#Ob#E-D=k6=LYF%^>0`9RQ`?atl?j3gd@Qu#$lj_ zeAv~wD{j1uPL3Wvxbeg(m#1aV6g>~6RrKdHP)+%!_`Th?-;qotdOy07M`UgaXzW0f zlt?z7#vcFcLPhwh|{hnJsXj)IF)f143r2|hWQy2HEnk+i%MS1Ndm{LvmW^{ zb@sE|<1L!Zqft}iz>wVj932_0ZFEl7~lMKq$ixlW)R>Labll?yeC8gk74yc&^tU^VMz%;mdvdn-8|# z)3ia2UiVgJkRK6k?{wFB7A1#x8vqG*gO3W-Em;gtQ0v50=iFr<^gPK%DlePZs&iG;^k@{>0s zT@E)>(>*W7sr+xzn#%IyJi0@St5kzPr<>-cvc*WXtSD?Q6qLO*9^vXY2xC!2@&KsZ z{a^S%Z~^&<3rO zTs!mH%YJ2c54qU*IZ|XPrl#u+KeTVza=_rm(Ge0G$eCHyvLc1gJ7yG0wg=#?m#&oi zAUPz=d^M+x0SE))fSl1x!x$8Kdd;TegLbT2=Q&Q|ZLDTI!ZgW8XU3rvmUcWTsoBRN zE~#CZW20$KzA23_sj5*!%Y-@c!%iN<%c9*TxIN+D2z&%YVo8BU$beBAnClerZ6-gg z2T~Tw$Y>fZ@f%AX6q)2~2@drf(-$(|8MgBV>o3~>^AeyeJU_bl7p z)m1(7sau7aW|V|m;#{JIT>9Oa`=#|Nge78h`qqE9B6j;a)5*RZ2~o;<`NnlzCW;K zM~}O0dCgFkr9+N^yix;+k5PQ9aO)vEcGM^HpCGqzeh_@_IgEorK|v924f!Wdl%dkhJueqNA3?1Fp$!O5~ zY4f3s*zH7OAro@xWzBQfk3|>8Y)LrFhy%MxIBq%ZNgq7#<%2y2SZ8}leaI9%f(QuS z3u6}-4GWK8F*AM&TDt-FLP|<{E_2`p1({C7CA*oB4H!v}Eh5=1Q?}{niq*5y%9G?I zlflUudHZb4JNy2edyL{=hBQ$u**CIh4LiXLL0?rNLQg8s^nIP**7^N+iBH(NyqDS@ zB+d1u=E6r?6y#sO>V7;mGQS8ziY-IVf`9G-;D~IE7-l@oGPXo`MwAZK%$4CYSts*8 z5bIN;Mi}Ww(h=|=0-u>1+4WjyExQjyx4qizIu>kN^iU%KkjZq2>YBbHydK<~cIOs6 z-;Lc&{zrh$&5tb2i26p!hHIC*&V}e<2h~q|U(-NSFY&|FO9KoRQ4$yKnDy`sdk#K~ zzI`)n^a8M$504#ub6mQ%-s0lKj^?=+=;C*jd)KaC?>=J0ZolY(ZW)slmgVoAXExb+;UlFaXY&KTRzkc09lI?EwR*F(JO;ue@TPuqhYu07h|gAQml8f^;Mv zt+Gh7^z3XCN$sZ4AW0?pWI4K(lY}gjJ1awn*+9z$e>gIm9isSZ=eHwx;(sM2<*52Z zJBpI`Xanz5){n=Ge9|&#??Dck`sh?VFCb%5e#VIaabk4uUQXOn(7SPm{Y*8kkk_)H z+0q};tREW6fZeX*i=o9n^YEc>g{q>m zty`|wqN^=PS;Idk>$9`AZx`)~`Q6jB z&IaTZfH&sUUgx?B9li`LeNNUipPXADqJljjF1A9J>2&?+lr^}(P z9u8`Wz5pYZ89hwy(k2ja1%(^v-b>vICnGa&$h~*?_xt6H(?D2eKCk-q?OW;HLWlE= 
zje-9NseSq_=UvljrD#WBEz0FTMxj4(J*SYg+FQ1mZ{3eDY3p&ckN6fIQHv&V%l&-M<+#O3i#X&uEeww=Kh zF(WzrJ4}CO#F0sMYg<%mweC&C)A`#kC=;vsVq83D@O&-_r)HciqlH6_!rsK*BtQ-b ze+dKE2&Xu3X1eM{h8~Kll}Y9jMpMSCtl5aA*ter@xq8Qh4bw6;eU=8l?S~| zPo4c9+tuLk=i!{$egWUlP1e%|3OnFG`QoA*$AcG|w7peg(b1>+JDH6*7KGEh98ga5@M z)e;tP9xVbX9rd#E<8F)68UXhRYLL%QwoL|2J>q?kn$rNZp)+5ShO<0Jprl1eM;A4& ziJJ*1Kjp8yQLpZwc&|NMHL~5{(+f^Tuk{8Emz)$1-0Ct_`}sJPeIebn6x59yR|cpE zqpnqCLcruxA8JAJEMZ+%=eGf_E(P2qZJ6L&L)L9Z2Z`&8^OY`1GwOdtFDx9jv0XX0 zhs|k6Nh$bEa&o~-&SYf7uJbgdp2dBLwUeD3Yp23Dm$|p>yjxdeO%=E!X&o3M3O7VJ z)iF|2-a7pf;-DM}pnJBz%ihxpD0aVrrO(3M`mkEiO7=KM@s^h-`oR@C_x=)dZ9~2DVmYgaoUr7PB*ky8L$TL$TP3i zQ6^Xta_ZpV=n;GQ(xq|=*v9w{RSB{Cm9KELVE*sH#h+rheG((jLP@`JMbf@Wx$MPB z*Z^e_N({-{x-Q`n|2tq|xo>ZI5jh*hN0A!*+i%sYt9XUQ4@o7KPO=l*aY5i@-Aww? z=s-H;IwUE|Q%0gQ>|iVYv@Zkm(xqEw>vAb~a^$(CHW7HCy2b3sn^WapTuZMtUI0R$WdE)w-l z)Oa+idUCPP7SCr(i!aNPMY6O$_v^rLR7nA-ZRtrbC8mzwMXs_;qNIv3F+ zd9zzR7xti&H3dnPG1Q?(5yg~$e_vt(fp#>QA1^>UyOtDK90hi@E{IG6C5q%zNDIwP zJIDPYM$n>P-xv!$VpjXVb>WP(;U2kDPNn{69T^9>Ur4n??M=4Q!k=H7aKU?%7f3ok z1t6!Y7(k^>f`r3(V%EtN&w6z<7NSO!bYTG^6jlX}<$z{iRIzXY0Ai7ISdZ~{#&fr1 z@P!nwA|m4{>t`bG8Gd>JSWz#V#x}Yx;1N<{>}@|y%V;HakNfO&YNZO29#v8|@FfyAl;d^^m%!r>&TKR{R{S6AQ;?m6~dWiCbp zCWusIF_2NU1-jZe(u7L?UFNhBpXx7XOlQIlpt)e1+t+$+p|VS1G>}&b4V@y`AuLg(Fn=lYo4>blt{-!nNlmo z&EfAR6KOmp14R+-2ivK7ULMcMH(G zgPg3|T40B#*f%1wi`0j}PRM8N+|0LSP~`6>;tlmiZSmj9h+JAcdsnKE*x4t3|C6XL z?aD70RBRjYbN7wz0|d|kC#J~cg!QC4XayuH))a<;{UvczA-MZLr1V{r*QA0+Zt9=7*Ry{+{>TB5dVF(hUdaNa8l zek?+QhS`jwpRNx8iZHQImV z@T0Xj=GC#BZi-XX#LMNJdOa;|caVc}aa7bJhxv8e28JRzY6!PXTY;SB!lkhT(!PK5 z*^qtERHG4KmPf&`iB(^-2PP)owikIjRizkj?`~G< zq9bw`DWvYmIf5&4`gC$_(%dh!f$;&37j5D1yI^0vu}X+2{K~Fwf1rq1#s&it8wNI< zG4>k>W{thp_6_vk|58>B+-i1ItL+!8U%%c1Z>H5Z5l3@^v$sXzbz`QcX33v0q^rh;SFb>WAw$Zk$8&-4ceL0#k!r!_Y^NLYHway+oMAQxl&;%WHM|?YtDxFuj^}{)XZ-a0g9%G zmPJ>AH7HdR8Xa|0{v_Vr;Z5~;&JT&#XLX2us2EGqnaoVxfNivJmdM@#_Tar7N11A$ zvFp71__6uNIj*kD3xeEUM+EkdaU&Hzo9ReNd{O(`NhR)m`h>z}z&t1lE=|dL1xEE6 
zFktts2a#;aH`}$Nh%}awESexrTSu@<}>5J}%NN>yEOQNDeRN5J8GSaLsQ^c_xY@ z06_ZlbK)|2{5ypRFX;aLf2(2(X8yzK@)F*U+vE8(*EGQgtWIP-P~z^z^}6K&-Sdg; zBn_BmD~Vh(d<8Lw%)7YO7Vj}~hzg>z<%>lAI9XO&nX=7jjW@-sTy`|;PZPD0b%GIc zIGjI@$tsvRu~QySXBc@H4S*?Y*86TS`wrRnZBya--RObBqiwR+?EVfVUuZrnQSAKKm7;K|P&SobS zr0dIF&GoE_mDPb+#PW!!T<}r!6vVj-s`~Gn2X$$jemb0-%ZWcG`Q*t)>}=EvY%qi^ zE`_&IZl|vL_9tc%i&VZ(>=3fs3Wkp45@llBorh?}!8evzyM-M)uRY|{iZOA} z&mi;r)2^xPNsXzXU8y&CB)h{3waW175 z3k~hE$KXosPQM2hTZxKAf*$1)9k_a#zy1ctc3k)~wV)W)uY1$!7cX4!qudFLxGxMd z=8_SF%kh)-6IW%Zh~x`V{;1zpYpB~@VwSRE>$C1L2RK6H-V}W6EI*RBNhfd9kQOpo z{!@#|v%ckxH}|;LE`@h7NJ_JdEFt^E11w1dw9(f-UGMeBNYc_H-j>U!`3Lv20y-Cx zP{gw2v<>V%GdhB_O0{l>wVSj>w9|i4K#6=14u))YAIB=G+{VkwdrV@^W@Dln28B?OXMe^}zdtpA( zj!Zm5cV2fU=ePvR%?oJ}^uw{-C)9@f8fTKtkfQ+;e5m&jC;*$9S?eYtdXRa$WrCq; z8BGJ)qR?U>ffcS}i3Q^R{g2;*4Z-#0-Fx?Hot$$r2K7PAd8T~UEKW5f3Kn~VBHk$TwB(c9%70pX(Dgr1TDZ~CbOYOu zeAH#Bgd%-MttA347($A$fhli!E)q1$K#P3ZK|~Cmqdf5?htLI&A|P2-Cn}+q2=)X9 zhrHm&2|vMjkLvjux=GB5^}NV5UMo~y4MlajK4o}$j}!9jpC6++m1V_|#xb%t_#us+ zuy?(PF(`Z0?3eMX>9T=JaGh2`?zg7Fu(&kfD{;8<3P@TP7>mvhnc^kV0`jr(pTkQB zSI7plgd1)a*hrkcIDXHtsE5BG+vttD%bS}C>oZ%MCS*dynUB5h@UrOr3S$3-kdWBl zj7$34!vD}UuHUyxLo1!Z>_z8k%>hO12j;cUn`cVxsDyyn>!SzE$I=RmUDn#o{$Cpa zvA|pV!?96pOnG?$NM{PpG5wsAE41;aN`F4w^wB2N>B!kl)}F+t`MIgOtH=EFPPCpN zCr{o~Dtnn!susdn>arh?J=zHYUC&jAPsO5iJ;N9rwCoUbWdm9>@u$mJLFwB0b6h-L zW+%RJtirm;NWj?DqLmW0h9cAk`Hk_NJ9(Vo-rGHp!If|oOJI;t3+HU6%g}$~UzlWH zzC4cW-wbys(n6dH&^PweO8KcG;i9M);0w8#u}1**Q7Y?DBTi}C8||_vS&(HZ@1i@z zlk<3RGcs}$nxv%Cm15=jF}AD*@yNM=iAWtwYV&4OOYJJ7^4z}W z-ot5j%=dDJ`HO?%%Ghh0%;kK(XGvz7aG2s4!X8J2oA754G~~uOF?T_hjBwcMSe$Xy zKqbKL9lT5F9TX?V>1gUmU9vp>-oU3lgI+X`F(sQ=rW=9v1B-Smnv=E3n&| z>;KCx-Qvc_B#Jii4;hs&Gfc}PQ=bBgcG08y|45B3Z^w+O`ZB_$tj0#fOeH9qMM|_0 zgxR})dO+=oEe4bTRt`^~e64YC`M`MIx&v^W&<#1;m%DbiT*n)P4}6cX@EaNg$s$uiqM|Gb0gu2Exbfaw_84*Q^=XW_UAziG zK_0Vc4y@*^+CThs1AWQ}8&DKa`OwiV*Bk}`RvK?HB+)OdpqT6d zhPqx_IH#)RhEBgt<6R*net6h>IVrT(D7KVq2?Luvdp6~i8GjO>AM%vhx_x`;Q9-Pa~8zpZj{> zp|rCI+r&4@`y?lUE&^Cm^{=6*^pPJilLa9jANTVCvsLP5y 
zcWEe`Aw3gN#aCjgql&!nl)uhHHy9&L{uKCzLY{%&Gqr3 zInsFnfGjX=6}c%5F|`ko1G3Q%EgoomVih@p|pOW8h23 zQIC5Tju}1L%mSQbx$7`miQ#r*danzG1w#F>DL`6k{vSKaPMB~P- z@$g{c%8L{0Y>#}EyB%*Ozs|k|%Y#$OdC;KE*$b!FsasdZ4@qhW8&(kAFJ4kzM@dzU z`(G^ND#zN@!i@ZV1Ds<5A>kzFo;nquAe1J=fmW;4gDQ+&m4>EQL%H{cvbPt_XYzrb z?=5JWfJ^TjVloNd16*41x;+sj`+ZMYC{u7g%S~z1l(I#TNty{|!~7hB$YWw%kZDR# zPV}#&^(UMiMYf0dwLu&v=6Y4K)4wQB%klttDc#Fjb8c!rYIuq@!b{m0J4+_8LU2Wk z#L1MS!eD~?SV0@^wSpZJ=HVWSJCH}Q$#P%Kh&WaBg?Z3 zL*aeGE|!(s05@eRe|^)Y`sj0aYz-e|PR0C0n=F}|GtToDZ4R(|L*w$5IMM|pm#MD$ zsS4MZXYPkKLD1BWr1Jk5gvFckDOmU!kXi~el&)g&PWg+g)PFRa%QkZr!)bv#gM#dC zw|h)$QAvTc-KTdSocTZW7Qp+aut~bZ=hM+P-sUseV@U~l3TfD4lb`*{bIdAopYYmqie zT@Aq6^>cm&8G8m`v1rAL-b82LPhKR$8c1=$QKAp=mU;BrwL5uFpJ`}uouRqJY6#r? zYwQEC7`D2l#Mq!f-T$0&MfLC2vImW_Rw0XL(uuEV_K48#+gGEva|zj~NX{3@Se`X_ zs3I7lEbmDf1Q`l+lSmsOy8-Qk3)_-VyT8}1+lWQR;V!B76ar>XZ(sJF?lXAsNXDZ> z!rXj(S+=#?13;K-weVeunA|fr<;01R@E3SyX4qKED9VQbRzWxP3N|3C?*y>s`jkXNFiW6oNQe3#;{l91px)8la^GCP%YJV8|YOIYy? zif3(~8qJrHgiq{DX1hGxMSUo@7@tA?SuvDi(eC2F&;+p0{nFVpo>~)^(+`(GIvXXW z3f=Uxc z)g*-Fnyz0BQ1l7EFX4R{mj2&LEg2UgN-ZRQE%oUE2kFA4mQh+XE=+=*!GzKX#UgWY z*qufYB_NaAc}tK1$m}i}j(whv-1_S;-CQ$Yb?c#uiDkn;0QQbmA)0oC5zq2YcT?ow zvRxpL<2gli1ud2KTolrl6)BiPrQJD^LJe=9B^gQp2~zw(3-v^1Es4LcWtMo^coHxN zJ%s{1K0?wX(C1N){8vru`Hi|BdQu8eMlvm^TmfTW$b}g+c(CL(i!57iA4FW=UNCu^HXMCq)V}Nh(RC43 zK)-&?XC-zb39}Wv8ThZh*orVJPc705Zli7eFbJl6gkeC}`n|byI7dVz1L?(Kg7~>@ zpzXrC9q}d6V{*zMWJUJO-;_WQJ^|%`ddf~`M#~)fzfz9s-3Nq<@QE3T^+zoPO~Y@}^vbZ*3mv15T0(7Q@7T^{A*FT zw+1u$U@iod#5R#~txrMe0!%aL*Zv^G@C^B)GD7M3GwMp)nGbEBKare7(6k2Nd9neE zToq@6VmAAb%g;&*FN*jWWuBNskS>q+l$21-Y>v&dlwTrDG9v*%82hm1o7r`y*E2gB7FVOT&AcHQ7U;0EZkusN6gOcIlT_uN#q<39tnh(~I7qZ6=_~47 zlGcazFKX{z@8@5_86d!2l%a9lv@^U|*HdRtD{EC{(LtGBJT-cGynAlS3n2x1Wq07DfvOy(^%_;*q;cTRsr_EQyQYz!9u3h^} zdJ>3W&3FjoU!UxnZS_OV1%y`=*rcKtZ?TedY*@8!Jy6THh+he89&bCT!hkLtt*cUH z3Ozal5Y=#%yzj)2nSy9X{d6`!|7W*)t?WCVwe{p4<$nWu?#sjOul+N8Gvn7FS8BgXq{pg{hAl`uHkgu|6@iv}rvibM-J@=Hn4xQD4HB7m#d;8z3rdT3`- 
z*$BPlY#_PBG%4OGZ>gzUB8Na+f(=??=vSe6DEsJ^Zt|Xa5t9HDgFQYICk%u9DE!!xcQ6HY`H7Cjb#Jmt|Y=?wMB5|HU`qE!%&HhEd1w?S)RG4KCCmiPRrRV8MI<2sB6?i@0_n_Wfgg}Gev z0uUyQgmQ`)&ub(PT2BO~N0>Xv{HC^P<+!LR90%Q_WE4K_YzsUE}P)eMbJuHjV7a2}o#$1{N`weMNL%rt%8wBpDN}DD~#l53{CES5fcMh4`MhMXW2o z2^(kPVtvP}Z|B9ruP+AdOW612E)k~X-oFn!xdc~ZHt_^1@CbaD? zJwNnbvBPbQ9p%2~88VU7DmtP}Rj|CxB&sJRgz{T5J6sMbcHtC*RQDp(HfE?UZD&#m zdV+Eaf5>(tCSGC~Wz65g?}-K9Fcz7m?A*0$4M-H>zeED$RATK%-xh@qpO~N$%>x{9 zy&zFD@8@M^8Y*jG?@^ueIcO8Xssz z`G`9p<%u(Wgz44&u$J&9#k#pXyg5U|$Sotd63IwKslfIWT8=d+%GGt%pDVNY#TO0#BH?Q=zg(xgveiVZo9KKt|H5M9Ro<{F1a^-VqkHOEW^>M0;$eV4DzWsn_tAiXfJ zbtZMTsO5+0jnRO0BRbpHXrTQU4j$ZVKnFdx;f!irvSv0>U{_h9-^XUqQRp1`xl3j4 z0W|eySyuI23GA4~(&|A7;XI(7E)TRUF#;ee(6E=h=a^3PqF1cOslBU*$3?KDgIs2kV}Dj^FF=_n>udm9^=K|j^1hJyY@fzrf7Zd(bKzr zfos>r#vQ*ONlUMezkT1NmHAg5t$e;2*M4x9KgJci?J*u#e@oe@#Hy+r2l{R?|KaA% zF7Y2PMrCh!XKj~f^xHqXhAs-R(HTD6ntdLqH~ox1HluardWGWo*-={?Qvn8maAR5W z2>a)MC{IEYC`jRkDQE=l&367_1ndL_oteFW*1#v z8mtpj@BS7NH4Aqz`o&K5d-uRze*jim*k+s0PWwlAT-qXfAX0PhJmM_>9H)lG=tms7 zj3MNUpC}YROorp5O~N_Ngy8Gf15xlaJhmQ5BVCQGx6SkCA&x%SLN4s6OobrD8L!5T zt!959OS_StK8-62w2TPc-?Bbkz}-UIyH2F=&O28|Qr>ZRMf&V<@Ze(I7}RJsv2Sk= z@yt|*v(lP%9BdBbGZ(dR>>y^F*ZwVq#*H`Gf#l zTWTp*TCtYPXlH8$9MfmwRN~#~WK89`2j&zYtwwa_P?kN^jBxS@6tWBUpE!V<_B0GG>Ng#nm4QYq{~Y*%L*f; zYCE|oHy7Qjx{t};k3}()Df~-vxD*l+qSPHQ;KF~*>MM%UnD`nXoGty-%qd>?wA(gs zX5*p>IL`F=s`6gxIK4p&>T_z}L~edKSODvA0=0~8yLZn3VU2s+^y-aw4phJz&pY|* z>$xpKYP4IoJ`I;WG{MI6vAXo}BP%s7xSI-&CqB$o;W~=l-_ej-dKjEcS_y?Sv}x=1 z?fTujk3lR_`tIFyu)K8IBlNdT`TOCjdOWh23=*^c#EeXV&9LwJwt@elpk9yH^_=>m z(X@EE`EQx75Q^yA9zHL6NY)|ew22MXXd&DoG zSKRw6r59Q4l|@gSKYv&wk7C=rm&eu@>4u+CxLOUQW{B);Jhmq`xDvGOTo(66artN^ zLFZ~JaoTP>0i0a$d-LI@p!Cm+ik7#{DJ%>k9tb4xJluQUW_s842WZ3d6auc?C7;Ws zDB>DzOB5yz5|=9m{1b5cn0ov6?MjM`CbnaKpyR9iCzusDo|}=%jw@$1jZd68fTGfo zmDvtNDxR?y!5wC<{->Uz^6*a!w*7kBD*$RXE8`P;z^aHdE1uq#yQ-%4M!9KIb^T=G zs(U=d{%AXWeSB^_c`^t0FuH9^o)&v|nWXmQW`^TE=$bQ|3}iapX3AcL-O=ma`)=q! 
zbFjAljOcPHZu_7A%r#ohkN3W+*@)+U5);F|&p2}k@Xx-(zDIFc+zK@uIyrv$!kg-o zJHO@0KWKK@QF^_BK=hf|Hz{NSkBI?OwCsr>K`gyA0|Ew*(Bo?F=LH2O51J_k^n6?0 zdEv%4yjgMiHQ?-FWdK>C#qCFQDRJMYR@-|O78RXk6D9VWxa|J96E8=xT>jwe>}t#G zUep(9^wtAzQ6=gOf>u4vdMTjtV;Wai8hdBa@eO{s*h78+sB|rg8Y<3B1eR8N6mLJu z2P(9@PMuodnK`iNLB~#=jul?w1mo4x`?TL*{IzOCBfCbdk91;C4AdAuTPCQPZJT!^ zHTA9Mu@{s&RrI5wkV^e~gH77?T@A;pRQKS}l&Wcg=I8C%2#U+^_V=1&%|l?@-?7i; zJK0O)YVNFlLM8a2S~+^`*wz+fcC}*E^A%p-d_}i_neGFj#>aMYQa;L}ZC%Gf zD~;7Oj-C=evYx^?U@W&?;W|IJxcGR^k`8{h6pqX$yvbblIJ0-tW^MaCpOhnu%h#G) zC>slaRxha%201Y)X*?#Z@i*VxMOGcm{I{91y@>L3^z_`CpTZT+A6awqMTcJ)9=)?0 zA1p2WBNpF#Hfg?`bWa*pjLC=kjwN0n?DGB`p^Q>@fqoQ`=3=t5i?mz;Q}M>(20?@% zmQCXpP%~$gv;hopsqb3OVbbL4rN^75e8oSP|ISGA3l69r_swf_kxo=i%F4FSYlEIg z4Jjh3b;1UJy!+_yTk~?AH3VzvKB|^Xjk2Y&?cqK)Zn>5WyF|GZ0H2c=HGiK+NtX-& zL|bgnHSM+vg@>8PW8QjhFi;Y900+5*^Jch|*1Yg#irAe?TQ%hwNQB&3|oSKIcLqCU$ zzqKlh=D}s4LyLw`#jzsU%V;XTcl2S5ibAIuB(F`IZOVMCTM7w;x!;6C_3vx~rYxr) z&W+wp8#ZdxdfEhD>iH9v#I<}`R@4-hbJ+AVGrRQa71ANWz%qOs^v%%;skh!0lAC8B z>e?}|AMaS%I_Zo|+J_ zAR{PUYlRA1aV+9Tc3-P@?ewsId3gtN$_W=N+!J=T z-DeYj86YIcrLt7*1Hi9sBe%@kJYY_IocKUeobl>sIXUHw>|KI*Z2BaY!=B$l+h3j? 
z0GeG|QIRQS(=@Osi~>KQk+r3?-ulU}fg;PGXU|D$1K;vwgx&foJ*x|62Csg(C)e^& z9~&@>zF0P_(h2DI?dqGR{)D#0CFDt@^6`0;Et0+_w8AL0r;4ja5R^zu6 zrhus7pc4|osI@wDKnCDkV$AwG*O<+{)|U!6^cBmgC%Po+Cbn=8G(O=Fo;n2`K{pj3 z=wX^N(|97-tBt}W5-U7TO{4U3bT(GEfuyDbs~zx|vQVO$gvuJf9Y}@tz3mOT118ZR zw01;0^4)xPzTN^57fAcV>xnn90=7RtXy$DWpBo_~+@|@P{$+o=chgv+VA)OD`v3}T zDI52UjXFK?h#ypPq3;3f6bXir>0=#H^U~+^r$<>NqAy9~)Y6fxwPuTWhnJR=+yjwG z54IYpGDCNZtvHEXaVqiN^hsC;lP25(Uullkf9_=kGd?YEvpR_bC*YvMSPHI6gn_nT zh};j!0l3XGCQR5VQKVH!>^n<$hs-k;3yhWncGP^?A5`|MR-Iq^;{jV>Jxh8MYfr65 zy0%3U8DM^MNc}Wa@x#z0m_S0k$wI19o}Zc!y4oq)KG&f7`&OMgorPm;t@>4QIauNi zv~vbvqlf%#E7RFMh8Cg9q=F8ICZtLtqqI45zvS-E&9&DqmEDxakkm_6Z;w~|GaW|V z=U6G5R_D9#GQLwN8lERJuQ4s3b2?XxIQX|0Teodn!p$Y&^itl$)ruq*{GlAHxCFEb zx4Q>V5=b7jrI>B;AHv#`)ZO`EPp6_*3S{pBpLrI($K@xrd_tCE)an@*qF6rrMd0{` z>F~K+^VD9kWopc+*)IY7DYVZmtIq)q6=$T8n%p7$4o$&^Pdhuy=!p^YBTRviv|F_3 zn%Zw#w7rcXp%KRYxF*MQesx~9G)bm6(Q&KtNAg*G#S%;)cYf{K86ev$Y>UvbMzG|^ zTMQ@Wz2eT3o=cQ)x%<%48$XKu#b)@Ui^v;Ng$Za!hjEILY!3V3S|XrFC00tJiTguc zrSca-kx+x*1u@VZuIk;x1Poxc;cG~`dOSJ5>2vu3RN+BTgG7w7GuaP!rmr4v#y#Y; zn9r$S1+e344pVbY-iP8srJmQu$$bc=5UknLbS_q<;Ppb-f=LMpJrYWQc+@^{2Ce2> z*1cJw)n5}KZq~u9(sj4grT#~f-*!XSo%4-a&SYn{vl2HqD-W7)ocjK~dI}RyjJ_?1 zADx^IvTe0HcMd5lD|5+X_dp%aA17(P$0UMOH)Nr2;(edLQ((D_=Xpz(C=rdf_YWQM z@U%*pLhcYY>&f{A+sEu|a{?ur%8Zb$j3RAf?jP@bJT_?6D0Qj9-}MyN7V#L{4RJg} zi)cHe1C_fM4?J76>g|X#ZMStv?e}(eJqe~R2S<~WQG@1<45zw-Ti9o|heYr({KdwVmAw1c(V^9cJmYBoCk`2rzs z5~=Vj{p!iCcujV5u{iFdJ_#p=AfePbC za4Q4sL{TITml9#mpQuh06MoV0b)W>`^rQNDThl2nE|+AyU`IN8@`C9u67^nw{12v> zhqha3sZe zdEL|B8ijBC>@0DXSYDz>r9en1UiN_Bo@K|BGlH4RO_gaV_>?g`>y+4vZ2i=JYoIg) zyMPi>_ZhxwD3-vb;@#H@tzeSb@oH4U&x?!acpFiBdYI`S;P~aF7&pma@rKiUP4>I> z5gck|rd8C+6QFt6ovQMXP5Bhi-&%79)QLj-lf?QA?VRS!kl%7zaWRU}_woq(_bak` zUR`)fTQNWuDHU*fbK+b)-%^*8#$vE@8ujMafjymg_-i-t&TAfR6nl=aaTx_MOYj^b zE5keqrwF1aZ=0t>Tk(YW%$#S>Bz)@Dy?a4$WOfR>en;QInk&pt4>}-21{-VZy`-Yd z^Z3;jpDKr901~2sFjA=k&?jsq{+}d$^lrbzQ+Uj4>!28rl$4}zbG!}~zxPI7N^ zFFubxdi1EZOUY=Grqc(fHc;5xvv5>xt<17+P*q{?h3GS97Of+g&@`QOkC7YQT7z76 
zyM25_FU>wo?J#1co(frq(J>1B^Td!-c$s(cS_d&K^4V*B>0fM%mggbGNAv*-CI!%; z^B|u<@U#_!*n^MAbkEcOi zY;3Hd^hLiJ@hWb0zS^hy^C>P@;rb}=*L;z;7C4tdxA{`)0pi(1*d`R41bK~p>rCHU8`M#W$@ zM#B+2oPA4w?OwVS!GG|I#!YHcj1%}H%Q5w{U$P}5*%MAX-#Us{`0PU zMPh03PD`xm%0*Pk4<~?u$lwj|8c8LDmJVJYK_~J>x-+kh=j&~}(yejr`B7u5`*?O4 zL3o{e>G$7%r|CggRb5Ifi6)hRtgJ?YH+(a0e1FtIR-I+y;_y>a1y++Ha!WcCNuV-;>c4pv`3RCeTBf z9=cNvGQm&ND7wL#41IL$H=-PK)GidGpLyoils9EX&>ih01st=s&0GA}=7xBueYp?W zF9JCx-FZRowvRTkEn2ojiLA?#!?+j?9=wF0CXm{q{n-ShI?Xf*v1wOSu6kH$3;=QB zO?yM~P#N4Mi}A=Nmo+2^%whwx=rv?WtDdsd8eeTWdFTmL=WD1qM`gIt(+-i1X1j*; z-BEn&c=~IYJQSZvdJ}p1=&hlcWyJuQXY_y0u@xSA1QTh};XJ;&>XAghbQGF^G4rzt z>dK-z`HKJ?nj^}P?BBEbDK2p~bU=nXkKN?^|COUp{kitl{O8{seG*fb%@9GTeP{rg zmS|i18m&f}cWbr(A6P@QJ0g%EI%(*r$n@@1#K{#R>G|PmZv$URhB;hg+G`e(iC1-1 zMZDmoO|R;hJ}^}I*|u6Gmh)**AD&y+>UIr3J9XkmM{rWjI~B&d{T5tr+IHI%pE?WXvW+9G=GaS zfb5Oz#pS|JWPcwLMpT`^B`-9prgX)QB_g>vQ1^~*y)>sWRxN0=t%zqw?3VCw3cp+a zE#KqFI~q3@ke}iBod4O{+P(usatCC@!xQYVAL&rM{V)w0$dUJ$o;*5ygC)Zi1SvSS z@q;Z}(_#ol&^YeZnT`rY(chFDbXVz3_1lS?P`}2UWtJn=B4#A&B6tx*#~H$nB4F~> z2wDu%y3Rl9RLvclnNCMBqV8^*6m$)Z-X7p5vE=t6`@K*o4h&z}^y*#OHUx4d8TP?Yy&zv6N?R=65+d)CE#w9qI`w3BJ^p+# z*t+0sTw3W?QET$_GdFazu6I zZrJ!UUSOI(G>`|>s&nUF0wyeV&xCl!fANNXX1_VE#O1aOqYWDp*z z8MrWIJrBbc?S`;9jmNR@G#Ww4FVdtFB-KHDB}qZRq|S7d5NbZi+tS1H+NgcP!^4+m z0yImm&Lab&$Q|^AtWW*Ed0)*3xVERGL?Yn=woNj40prQKCjr?q>_^sb0a#iaEQ z*-jHu!yU=eOcRAKuoX+?ao(#k87{%&*s-3f=ptQc4L#i)-4&YiD;8#+9NlE%@!_-K z9Pd)a7r^|;nE+v=qQq`|m;KNQMg&u752p~Y>gLRSjSuKi6kA~}TL_WZ3~)vSeTJFL z2edN+-CX1TmN{_-s?xGDgNNPdQb zBne!JlY-h1PH?kJACKzl5afnc(Hg6iZ`)8>Q-20AlNCs;xUkLFNe5VFK_FRkQw5(z z1~v;<^MQDn?!{k2J+gzQk}tpwN<8S>JNh2=bigoOh(JiO^?e$-ru@0-8`)7y$J^V7 zG8H?-)m6b9+^JM{sH-Z%Ht-P2AwS>tX3gk0%CqOTruRe36Z`qyBPdDCZR2LmB0eQ# z46DdG#X~Skh?Z}c8%1z&P2E~y62pgD4iOXOTK{3p+7P*;Y*3uL$%i?Ay{5N|AX^}8 z>2NtZs6y?=;DdZU=9MmP<{ERE-6s9xgdY-8#YbKYYv~yox%NT{Foo<76l1wWAo%J4 zFv)AOP&vGRF(Pf}PW zx7N%eZvU?Ha08XSqodGxJM{529L9g*JY~X@F5L(>eJa_W>+)hmM?!3?4Ij#;pqs)W@F488p1HrN|C|Nn 
z8e5%5@W|R5%^ado%xr(Wvc_uoJ_dU!hf#0+VS}!ufm5JLHTQ)zf5yMlS2tXi5EFX) z^WAmQxl`RBFDFf(RO6ydFQLm@@LKZewIDvm0gh0?D)oT_2lnr)If<66K)bR05cQ)h zx9cDk4zaCTw?2?p-W~!?hfo2hd5?-Tpbf#GOn!~hd(=h)53+uU#dQn(-WLe1R zTk%(xuWhcQK?bF(1yNX`d*7OC;Vs_l-Ymp$?n_;?y6CyXkjcZ!wMN%EZ@&62*;;*t zLm2kg1{w#Fm!klOJR1IFeVc7vP@xLF4~JEd9UB95F!Pr`|Gg6Pnb$kGPE4Bh(52@7 z2S()7U8TbN;C%~jYL@Qsum*|?cbIPlzLp6KxWcZAEjumNc@4#er}hBtX%3wL&B^Fi zqE_xVw8}_SDHIvkw;!+ht7{5T6sCsux=LC855XQFse`@bF4r8_ezUn%?|wK|=-S_Z z`y)uudaN*Y<8F>?Q5|3V8lBMsx`dMwR;dka6Bbvv&jiB_*rS0R*P1@myGt`;N7;rq>GxefF((2xSQjCtpa zO%*fwcgx8iKT z9+p{H^G?OeAH{t1I^9rjW_W$Y(j-doyV<;osf&&q3dZMmzHE>ptK)}?;uV^WC~sZb zbbVdusrk|UXhiK7)|_}{5DSohk-l!K3P9$=Sn>tZaDxF!zyfcNE^4!lj?b1&Ddp50 zGixK?osgo10QantnB`7qZ4dWpdg>dqx@G+*D=urlq#_^@V9R<)lt4%&ZXq4XefDhr z>vam(4jn*4_<1TH1|zlB<=6AG_wcYZN6~PML{U53l_i6U>2VN(6VNp@G)uSH0uzbW9%Sqk;P`D0dxX?@n z;cnyM!8DW|ul_VvT}C3h1h@+iO2-q67Zj=H3uXbcYJOp7z^J^dlSZW_o)6q;m0psO}ylwp19GJOVXP-ei#jKkD7}V$}=0 z<_v%bClxfq+G#X+rbdS#A7T9MD3->HT^)?fV8+RKbvL2@5@-o{*F^LXl6#Q7%8wvT znYh$m+){kWAAfwq+fovXxjriQ_zkxUC3(>0w*c2=fIfk&Zi0k#nvS~l?pE`ST-6%H z-zvs2Sez_7Avf0-r)rCXK*@B%Cb|?oh(AHOGa|wWy2Fk?WY6SIFq+%#O6n=9=TH}@ zw0UHC<#y4g5of!1@BU&?Cef#GxVDEasi@p*L+&SsX{g3@A?ygseNXs24RWkWzYzAS z=(-F;bjK<#Uj)BWsz`J#ZUbCdxg$e5T=)rQWV~8p=CAT=PNnepMVylKORIJQPeM_hlUo$7n;9U#M~N_Pq0yWZqZ%fapb&L9x``l8l>)Et_5(SP9Ik57 zO-zh&sSH&H@NFae$77$>RWsBOsyW23duTu+IM^W}aHV z(0m#_U{8WFSX{w=Ze;%4e3%k>Gk0z=R@J#MIMN_9o0&`KDpUlUh&D1&^4Z5G@FZtX zsPnip+iUDHY+qFFdrG_0QTsIMC*4S#>BDUp0=i)R@&RFV-m6z<=tj60lPu9v=q&v? zahG66Pe8#X^-Ep~7|cEXK%2!g-`72&{cXCpe_a3EDjZ5G=Y_CV{uL1n`}F22Qr9p?q1ENMCJ96G zkoE7*NVbeMgef87P@x^4iYYNjdS-obwF**Jz0j!z!>Qb)0f8tqgL1)xYmUpo8ZKCx zOjydjburh{x{Hkn4^H`CcQk7vQNV_0Kq=|C;-Qz| z#n}6jZPPABP>njcf^?UCDPcM_Si3|?1)wk!dQlg4qeTm~%`0dgAtu4TNsOi+9+xw@rdWg+}f5Iuj#si{a1o?4s(Sac{DP1Vpm$~mp! 
zbJRYxpVM-hUVHj(8bpBNA0Kc@z5e~vOSK|V--vJ)y|<Qfxz?!elxW;c-B$f55r>2B*{kJjxm%iL0(!w zIJKWR&t}4sC4r30^9Rq+1GUDhh=(RooqO#$=8>_r2Q*ZL^RqvDEO**SWFCQdlDS9$ z=T6lH2q%Q(m>9K6RmxeG5J+3NEmzsETzMBIXn&hTj}Cc|cV}yuMub;o7hYhzHL8pz zeT5<7aBZPU#F<-#wwc)ik}(`x(5P8EAAaCVfewTZdR9+SWIBKTN#)Jtg!*%pjep~wD)Vs@k^QS7Yt%l3&szWct*u-4 z$lqi$ZeyPWo7)W(9!p7QJ+QJM#fEo~x2K`(Bq{_aoAdsk+7I;@CvIl5?P0ZYNX%v| zYMU{h%ydx)vl`VtusW#j{_%XlJjy)5qLulyD>Wz+hFcDk?nHqkZL|oG(l>8R*_k`~)P7CZ7Pw>HGadS%zeE9mG_-=I$SD(8JcM?QOBW#g zlL6APWhvtYwXo6NJurLS&#b*`9skQx+I^~jt{yxWiypwvl*s>W$9N|FAch+cBuz`g zh9BAy&c&o2#8J(KCM?;#=_UAk#L}@{R6NuvEJZ>k1Op$n7$AJgsVy9b>^c3LZuT35&~+NXsr=v>HrcOdF{E}8D)f01YcX9wN;SnTr*R`m z5R?~xt2wE{W1YsHDHN1Q&8QK(Ydx<0TjRfPKa?HeO8jJ^21;C(tAT|0)zXG6PEm+C zG%o^=l&nm1%+=Q*_(<9;Bh;+5jh~_-PSyNLaqfH87HIKBbjah?lh6W#y;vC4nG@5!ewuh71{g z3PeMW9|YBq1xX-jm>~K*l0h&;<=C-fQFYyT@WANF$2JKQ*;o4|8&qh{P1Lzob9asZ+>^QaXM-`dQQy;FYriI;yarE@SI=F5 z)vA4){OF*;qQCsY)oSz?CV^j`Gt!)*?$1Xw_~D-8s2F~AdYzNAuP|cBs+O3KUk%U9M=_g*=?^(}85ZAXp1s1=o> zWD*WKbeB`DR8Q;WMgp%)c;Y7gf2_F(SM{zQWa&FE4L@=<&rxyN@l!>7bvjXw7!aaS z!uoER=No?hxBNO~Vd412G*)!ou9+$hs$@Z--vlsQ^wlF+5%4VD-Q2%YUaG=D~tS3J7-wL-^9vLgv+Sfu0|0+j#u0=_=i`L*OP1j1#H zR>X@g1Sj0y7?#Sbb`D~p80CBZR2#n(7TX>qK@Zg=;%&ns&#NwF06&^2b1v-3rXEQuZp za9E%k4-JzCHvJinX|%_8j9aYdv`-lPR{@QYOcw2NV-R}1c{qf zZ06~czEa%Sh2w!*U!Bz1BysYK&)kqE<#Dy2Zqidg07D`p!qXe?%rAid6FNMn7lki{ zOY$2=f9-UDmqX;a>&ZbxsJ}ZMemw?taue(ysnFTcq>nI&5Nej58}-_abK3 zj({om{f=I7Iv(rAnR(_&`Ay=m+C|{u!UkOWkmY80wRagg&C5S~96RUO0n7(PQ=ILM zeiNle2WjoaQy(Q`2O9+M76<`#UgI7*HCN!;g2(PUkuhSPN|gAQ1wm`byzrBBHX5e} z!Vr}sZ!ft}6ai2h`=KC+Tu$9@{@>paKx$cj39B^0!1RbE(^O`FRsvc!UV>;jkakh{ zELMy4x_=sq){{!Yo_9a7Lu{!E7QW15&mPD@$l49fIuF0}2eFps&`~w*`*unj#krBf zXCiTs6)bFQ25M#@R4U&Q1dJYyHQBnI@-tUR`&)ITm)1fS&g9N-2VL!Z_w<~#W$N_6-6(POQ;H87Z z-rv5tYXJl^`i1i3flvGi1FxxT_07gN|LXGskP2Q&YK7RoxEA; z+b6W!c7j^UQ4FxGJzP)zqv4cJmwveQ-EpGP+9PU+=1v0>X1Le=PNC4Bi>%|z;hL;T z0lxgKSdP~9-=8kO&}2t-zgYU+|NP@1f#tRQ{x5`Unlrdw!L2_%dyWu$SH~gK32`F4 z`l8qzvvX*_|pY-pQSJ@mCJXc>gFyuPYBA6SxK*s+)=f?R*{U?s6FPkZ@SN~ 
zk!!Q@6cBDkx#qPIY7s(fH30hS$1yV?8zt948zM4*h~ z(dwfyyAB~mY|({AG-+62G|9vyan)HC7Bg2+T**|?8ikAIEv}=`?ma!r41>6h~-s< zxBt5$J$%`d4XC3H_>MkmIaPha3E(G8skP7j=bw467uS7?BC6T{3>2?kjQ#RA zh2jVc@ZTT*4?!jFR68lNq@tpiXULa(;5uXxh@Rz;uN2R(!4eUiRc3xmCRZBrPSinb|&7l-NKzFL& zOSr5+i!eXIyYyxAxDvzkTKp9NW=T2RpQJmE)*;w`AmV}kpj4`lTfwQ z9IjVT30?LhAu>^?F^H4GkS8fw+y#L-aQ3K(d%CTwd$mHRt0wdy1_IjvesZ*WOT)b- z(Zc-K97`L?+G<59rbp}#qA6WzJ?)pe^DDIK)orFpJKGPOgus<3Nv&jx;~IL<5_unB&?41< zR7m?{d!%gQF3+AHYsgbr_|wAUe_=q7D@~Vcc7l2ll`J82H?&XPXi@mdt{4eu8u@s4 z%1WO7@$VxT1&rWUCs14Jvtd-I0(|bX?kHd_KP<%uBRcuwN3-JS>e1a<6NRifpl~i9A9y@_ZctI z+|?YB)M8(r-3tW)ODTg~JsfANYTK}47tq*(UH@e*CbG1OOt7Y;!*WRsB=)BuGO1D+ z56q$LM$!7y4RCUDCLq)zCAl2Th7@4U47{>I^uP*7`ADUaqA{8+7~kpN6B*T>qNF z>B12wTH1q%1)!OdN*m;=PT^zPkFwWSTaXb@nZ4!Z_-)&Fp9J?7`g4 z;L_44o!6(Js`fBmCD@Du%UDW4aXDLtMSWROCc2hg6$7sIZrZI|w|v;t^uBk7p2BxR zI%W&^TYH$^lH6yDSQHi&Ten87#0D13i~pU$rP;DvOeRl`bTqGfn?kF>)k~`y%u?w5 zB{g9Zu%Zq@x&%H#bOEw61WX6lO6sqn1m<7+*Re%u^Q}yLks{QIqUhr#f|S~0 zX;V^gvtk^PTq*wv?)YW(DH{GO80F>Xj5K8db?>C+;)|q3es{kEx9rQ|nk=P*UylFR z{wOspa(Gx%M)WY$W``rLRk&@oGxfeGlspI*0Rw|!TVL|Drcj!6reo{1(FDU7(_Tr@ z0Jn2^hv8ip5~ZXP)O@*lc{2`{XwEzErwOV3f;(#Loase+!4w=>5qD2Cw*LP9^^Ct2td(4IZs9 zU5dz21ka327^gD2>{M|0Sh`|6p%`<$GKsytnfm;r@n7_f4@~)D<$ZQmMcTb)> zc*9w4Kt4s5dw1`ie5``5fuaLfs&JRnjE!A2Ee%AtDQa*jo*~E;V}m-jjEa&)(e&fK zh(N`Mrb1H?6ilam6^S}tV*IW)KaCu82~v}u@LD%1~Om!*kTd&g3G8y1w^OAPMz#zJDqkAj|M}-VQW2WWydM&`0=R~bTfqc-`4Dk~;aMpT6W$6; zf*3Ym%2(;2UOId5Fu+UL=DkW%e^D-}edx)#1j6_OpQzQURd-Npp?48BWw06vdqT4o zfwz9(dRL=FmKs88l|VW(xb8IUo|W?^X~z#Jq5((o+(hw6OwD1G%WwR+doiW8B31HT zFZs7VquNN?DHh$gQE)t1c!&Mzd$8`ea9RYFiT7-W24=pqhDE{ zoBNw=TnN+L!Ha-CJ$w9ym?037sSty3XVsZ`ZIA;7^0Yh)KQHeYL+ygKJaP&(jNG9` zGwniRJ#%fJAHntevt**PbEKDvQ~*Kz@5_ril2k+%VMikmLfYG_E`daD{8TcwKb6Oi zB6x!>Je}HSeNnW`6-*qsh=s;xTA=%oUaZGo|K6zKnw;VoJ_zx=1UvautExRphM|wx zO>d%uIiK3cw`Qi$f#b*LT==PVFKcs(b}7t?aCu8t)&j7?8xH+{=OVCk6A$(2cIV0_ z3N)n|2bntgq^NNS4WAFLV)tcW8IG*TmWWokAR0lGc9?;#azn|r)=RkgaTT(YT`!3Y zki+|i2ovmI<>X9Nj;^2s9bLwTCfQ*_@Trq%^JB&~p{GoKg3smjIaNA 
zNZ3oql`AEZMa5H5$g4~ku9VT+3*PtoQ zhrV&89D8qZX8gL6lTwV!gm#+v2}L8_WN06Q1;;;+S=ZFH!V41j zNX?jRJ2XE4*gcaz0XO?YiiwbiydxM|W&GYFM?}-^Rz!jnND|*L(b81l0|Qv}Uq)<7pI8UP~^RT`x`*!LfjjovYt(2Srf7zT=3V1U6c!l7hMh&4=#E zJ1#R_7PCM^@#0bPn|q^9>x`vZmM^Q_s+Au*TaQ{n7QZtYRX77ZNJpzK#2=95P{V{N zbnGRv%8ddL*}AcsBxI59MYQ7~B)ZbZ!qWi{?VNXXt;meWtGVVN{6-L_+$sAbrb(!E zp6Lk|co5}do_3P^Qq(3T4XU|vzYmgLNTXWW z#oWdI^xYD;?n!QVH``C|OGL9cyz=?JrM?cPrhBCN93cgWM1rTz{X(WPP zkj<&3vCo_h<@@DQ(9Po8hCQi}x^B_UF%Tmo zMzMu-;z)W=jd111kA>*Y_bNBgdkXKl=tAzDZn~^>QTh44q%GkNzez`F47S8({Tyyw zDnz~XED$&H{)8sV((%7i-OE@Wxnj9JKxNV@v<{8Sv!z@H*wG+lec8(sM7zK0Zs``l zBNyw76a3Yvg2k-Q%$YkMQ3Ds!RwbRg5~}rB4H$j0ZQo!0HHe+0j?zy<6el6^Rf8Yd zXCiHpmIqyt(5X@|O9qoE%_7Mz;A%BbqF5SFNK*lj!y`ErIb#IpAWup^J3FSs4pb0; z9Ap`1L^iUmvTkk`D!pu1)Tyl*-SHF{$(F>_L< zC!nf$orbra^f3|hE8iGX3L%B@S?^-e)Anb4HHwOUnWY!y`vQ9U&K-fzM7Zl&96Qy_ zOyqjR1~!l4AJ=FMHASVmIX}0MHic4V_nGyIUPCwM&O1VZrIleDXiC~~=(^qsAsPxjq8O8TKhYQ-WDmhhwWb2}+j!^${ZdQnt#Y}xLY=y`i(c1_L9 z%nV%}4Ji(t7&m8x!|vj&qI{w=S!veAK-AP@E{dy2PI9f+z#yUFgCSOGcVuNrFSJC-N$g;fI;5_c*q$>zzUZh=>ljh#;E0H|%u0@`SoQ@#r{BhYu|I z@JCC&HljzJ8&X-p8l}V!_SV2gpz@~|NSkHPyLrUKtoO`>KG^D9ean7^I`_}~PfQR^ zF>~x}WUst)9`Ap9&CTKobp7$n*=&${EV^H~&(H(;x6?>kToM+#Cw#_CM9%)ehH5+ zymGH$@9r;Xn7FP-wsWCjQp*g{w^7Ty1{3nQbc66=v_M-z;a{bg={O6j3Y7R}v zdcP>*hfNn&d7$BV0G zO)Az$-33FKN`Bd7b^L_r{V4s3;p=cOtySI4Q3ZU2xNg9I9c>Wzt%5MBq)cIK;N~=#~59!k7FMF>?aSsQ^ zteUlvZpR-twa$3I`Np!zxkqv<;&5!Xx)+;WHKm1-G(%j1g^}h6pd+V=9xXT8^qClD zJSV>QlaC|`2J4a1zP;la8|}NZ$0@e?QJbZ!oOtMl#SS>*=;0%eAE987z^IQ+q9_10 zMOic)6u=t#-?MlVe%8j?nWlpk7p@;q-+X4mp5vQco@$n({S_Ed4%e=kq+=*=CYfC? 
zP2HM*eRIY2XG3}obdPa5RIb*c`>t_Lsq>uLnlHnUMahSK5qai1hio36Jlh;Ao_>D8 zSQ5gC63bz~(n+ILDM$zB>q^<;S=!sh-?taVCbq|il3Byle&q=NY)Qo$9$ufH(oKVw zi(ZK=G>{lH+CB1J@yL-`??>*G{*sSczLyI%^+2bm`N#9+24!>_k%0)>4`cPs{pT8e z*Q}Z0T`zd-neMTdE;TkBKP9?ExA0Qd`>GWi=RjD88*H78eHyxBxo%;_4NK`LW<*lB z%01y)rTJPv$(cPXs_0l5BxtwsTavhCP9||gn@+7Bo85dr?1qRra|d`B{tK!8%5J8EB6(j;>MW~T<1oS zwpV5sUhftjoGari=yIS`N$y4&63u)z*-AdBBq10^xODAU5`9gn`Z|B9iliF7@%7Ka zbhtlI{Qg#jg&O~vX8(zs+Sr8P3x{KkMIB^YGoZl%K^xa*xZj)pDkRk_v!lS z1(7gfAhyq)c8k#O@x}fLmEF=Q5+mpK$~#DmmrvETXC^68lKn7;YCGL(8wNi` zk4d?bi!T#tSKwH*PDxxF9 z5Zp}wU6WEnN38K2=teLtz-+Wyt7Xem$Wj(sJe-yI{@msG>dz}A(If{SUABDrL}KUk6F)bi zxx>swMNMXdvEgrT9lj?j3}Wr^diTxVie4X~zEWzqB-POF*zu^%gr!T*h(|G;dCvRq zPo17GVt#^@{QKHL@JtfB2t=N=Z=KEK*hfJhYH#ko4f&zLqkYGX46PnxeMA8uT%S^cL*ZYM zP4<8&7tOPv1!k`C@1N}eOJ2V>?#Y!>bZ7+xvI1yHZK~8G!>6IMyvxikbT4~smO{#f zKE4VwE7TXwzw}j7|JKRn%}5#?j>3USS_z(aibPtKLS?@X&p2Xc_MD9p)L~qIH5$ku zHhtg9)pKI7&Zs8YyF&PeWomkK1Bpx!isCXCW{nBZ4(3ZOFU zEydm|nM;w+j?W}+5j}cMZHyTurP8HS`m0em!GsNhxk3ny3tInNIJb*?inMn}tqDCN z*d9q{A*ak>Q7v!^?GGRY_7{CSh~9Go?ob13kRT_ByA&JSfBn0q-Fo!Ur+6j7CWX!< zvD1^FtQd1&vFl(!Hwk&ZZa6#{u?Ua;gj{4?W!l&C8A) z*t*uyQF{JKDqRNQ7KAf=#&>{%`6?2~4UvjX#&M5X-&L`O&=wA;27!C&6s1X__2#Tj1Lh{cKq z)SAfodDIhZ+O#S5&6Oq^_?y_j&pd+HWI98sHc*80fkHM!g1W*#A{b=tWbl}$v2N$D8{1G?ppauNp{tp?PvQ!~ zWc|~LPsx%Pw>qVoTiv){Hc`Gb$8{!~6x@|sAyfEW(dOmkoMLU769t+R8kT-~Ggc*1 zC>nN~FeE6Ai;@Mmb<38Q>=;ImPL-$+m`TVa#D%|8v$NRZYnn)q!ET>-m@=T6Dvuw6 za9Xpm>id)e7m==?v+r8?)5NJFni8p)bYXLT^H-x_FAYsQci9&%WX}Ptla*T$SW+OU zt;h~JbiS?SR*nCB2a9ImzMY0B5TOS(q=>0mZAk6f!j8HMmfP=2Y5sR-=v%Ce2G}=g zTt{svZj^!Q6tZ3DzI;(w0RfZF3UokUujvzNQghXp&s#-6vVrIE;5Hu842&ssR9Cqq zV^y7M*Tjc-OJr)IK^7p8A7@I_39Qyv|3!GnFW7Td^Q5Tm-y=)!{|lz)|8Jk^Ro!U# Y!GEHrEjP)NTca91>6f#k=5GFf09v5oEC2ui diff --git a/sdk/ai/azure-ai-assistants/samples/assistant-6Q824dJfHkRzsy46hPatQA_image_file.png b/sdk/ai/azure-ai-assistants/samples/assistant-6Q824dJfHkRzsy46hPatQA_image_file.png deleted file mode 100644 index 
02f9bcd170a6abbd4ac83d0be8852797baa3899e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 181757 zcmeFZ2UnHX7B)&WcF|Z7P>4!X5m7*-Yp?;*dlwau4$^xx3c&)1NRcK=kuJT1f{K9B z`=*F=HcepD!TT(8#`oPZ?q9fP3{Mh8c=vkOn)7LM`6$R;q2J21m5z>%Uh3*aB|5q- zGIVqsxi@db?+6@`%f&y0>@I28DO(xYIU3wCq?0wUvo^P~GdH>Mr-R`gTN5iwKAy8Y zeB6KDwzIRg6+U&!;{SXBkJTOHQ?;}b8otRE>#Lf!baX6b{rXuOB>aDDEAyDI)%uW*VoqJ=w``*wZ9rcFtIJ=khm_wsI4;4$UfjP{ zKD&>rr-tKK@@R8XlHEu{q@`&|gs8L972ku3c6M0-0RgwtHB>TgNj-S*AVDoftDx75 zS+wh-&Jg$hN4_j#iG3A*)UozVX=&+zw6rwoAnt_Ys)=vpB80bY+csmzCrh0jX(TK4 zU>k$o;y!T<6Yd7*+?V{~I3vE=h zwt4FjXX9`0?h3oizKI^Izrx!5M)CEuYMRA8J@SylDY@Nu@=J1tUZpc`BSGB_*2^lsUUxCyNu6u$S2!oB zr6l5~40pRus$vOevP#BEB*|}HdbGc?r-!EPkfNR2(wwB8()fU;@a0EeMfUCbBPn{t zS~fN|`8E29D#rxO+Y|(Q+%mBUv6r7P4dkU^7wqK^h^3?ZTYYIDSbr8j@i+&^r-a4E z#>Q;hA-VC+eC5V?`BuJK(btw;1^cW|C?4?mx$1c7@@1`f5xe1+OEjwM+e_AJ($Wu= zl$7k&R-DPrCNegtZ3JVb?Nq0wc?fO>I@C;4F!>(KVMy4Tb^tg z!S_hQF4<5ZCx0S&{w!yOA8RZ5+jt-4u>;uMv6H1g^?E#i7Mr%FrqsW_lH|TTx1mXz z&bq6}d4_Cs>g@WRq6)YCnm=0)RPEEvzjJA*CZh5EKbx{|Rql3Q==HXgP^Xl7mg>F= zWJ}RV|Lf@G$6WqlKd^cY{)Z)1lGIM-wnjwUtneMdK5FPLcGcCIXi88PwjT}jSR7!G zYVea-8_tOg+4|jWWo2>Z#W|Z-YH51ovqGBY=E?iHHD2PtCCUWzxU4M>*p8o$ESK^_+aQFqUd4%CkG+KEJ$elZ+`2JE==u4ipQP+lsy92X%Y?bPuk{myZ%p zY0oq^o9N6hnD2HKaGAZ0;!_wa7B3&w^8MqZ=2B13+$ME@tTw+z2fzFBcy0l9dSkV2 ziJK-R$D%REg2Io|M1M$F<@FUmo_9enb`JeTM;Ee3@F%6>0@^12~bCdsTdHBs@kS9ELP%ap2cpRDgIh>Z^Jfzo9`bEy82B0_@Rg`5*Qhoib8a*$j7Pw^DeRZPTSFr z?4(amnAQKZ*g&W5KHuf&rKd^`nB^ZX(O+z{Wy8*zG{i_bPi){yN*Bon-YZo78~-Yjn^t|QloX^h+cParaB*EVF)@*rl1g_yDJG`7je(~r zPBv^76)Zuo*u~PMHCW#xp>VQ9`?ueIvrI_)lZ~Byq>?jA<>JMQIWwfz^71P9`1nZ6 z$W%L&+*{*0bF}6G9UU*juS45|N%@5;$aEv_0qK2)xFCV?)Yv-~^%9U{K=ch+i4Ehb1H=q!@h;lDe_Bx}0rX%R=_OFiuiQhr)rOzS5s-g`a#6nFZ@8 z=9p`u?8{=!)y6xrQ+zgT*|J60VJxI2S@X4OlG+F=M2jJhUeTNQNEhk8PfyT2BBnDc z1J1a(+ca%c$5yO~5Q!Iaox9yr;_lVTqhFHDpyzZU)9C9Nw)MVD0#zdhk@FK>NmyRy zDjlC?G~&Sp7l$ogN}oI;{Qdnc{G<%40&NmbRtKLFob_@jrxzT)yNSix#revzt_Qcq 
z+w-WGCA!WU8X*Oald1S&R4-;rg~|YScEd55pDQzdq^PYbqhy-}V~vx1_Hge1Jhdaw!@Q@Yt5KFK1LcRD5s?b#9IKwx zR(;Pd-Yg@OUXE)aFUc3%?b-HqqN|X;(S2#wW@%#|gT9+0-O1y^(=*+JttjNfOliz#_vmj0c;rXTT9+Wwh|+oMYQ_&5tr?&q{=z>SD=R80 zEIqIhf&Pwhb6>*-Ysst?Pa^AE|LT%fv5S3VheF*@->zM|TyToS%)<|G{>ZeGn;MRu5?m6r)iJ8 zX!Mpn9Jg^UTUyg}@?RN8I$zE-u2p-&Brx6eDXnx(3*G(X+eq`7zT6%UzJu~?2{=?P z=yUsa@BVHb3N-$}+xvv5sLoD_mA5rf5~R+UzQ4EOLER1Dm~fjbJF)j3kRFP&=as>1 znRfx--ywu+;3^8^K|MN3k#nX|ZRB`P&N^#dKouGaVxms|9k1yp%z~re-v7g5`h5A2 zghwauSO2FIeF^gpt4p*c92a^+tndp_=L}S%=N7L5k7k=TH!S?d6|>C`|+T-LW#kOmUO>LoBMv8uh{vp$ok{ITfu~klUzXmPtKo zsEJ*ZZHliikCZG=*oJUDj|j6YkV-3_dl7U>KNX!fzc4gl+C$u9q=9{PNaANYvzHU< zvdb#1^$>@2`_>&hT27=Dy&8&itx<>!3X1!=G9=-3U)a$Ml%-_h`?f-sH#j?%t<*rq zHjV>9Ji1!zH|^m6a+6v8(tA=A2g9tGTnuP4GmRpXMX&KvJ{i0UOsT6=Af41f>?S)K z+i+~KUX=~k_}IeYBBRUTBR{`Z;4hujcwRTW9j}oBd+KPPpG3`B#j2;%1J!0z-`;B# zIi+vhe&E~SelU$ZuUp6H=oI?V6Quc!zVHT;2B92xP4V{aH!}8yJjSk1-LP(6yfiuC`7Bm{$0Bi`%|ro0O}khHA`k2WC*&c zw*Q55>R)?DkDITU$HfY6JYY;l;OL27q^mN>B2mz|Vr>m)zXlBbXxh(9ym8 z^C|ZRI=Y8NBaQJV*x5fS%;6~p7t5~UDQ5AnOjY2nRez}ey~U2&@^xaPf+EwA(?UY+ zyyMb->pq8ac2lfMSJg;W3#4a0dkw$7ICkL>69_n?i!(IQ6f46}m{BxWl6m9f)^dMW z!A|RcNNtG}b7dN|b^Km|2c{p|Qs_9@GWa#DOU+XaR8ioXph?5UwY9Yk89a_Q18$M0j1Ocf4t6Q(zpk>-oG`jjV$=_ ziJ5=~NoU1TT~pu}g2JN5TC}ZHOFdSnb%q|DD!JYIs3Tm+7Cf3gn99U^{cnSXNz|`K znx4m4+i8&BuL}EFSSF%dAR^>>=X+|RHT9F1NZOyI=FH&NW&i-UW(u~#>O!AiN4{-* z_82-=J?Ku~9V=DM*qE5xAY!Ds6CMxFr}{?W0!m!9da*Bl^yQ~dpFVu_sPW??234?j zvY%&rboB|NVC}PEu8YFE++#T$rU2 zs!`|LeGh8!gYY)_KN#8B*+q^*EFg@={`-e5*ws=cB_+r8ON!7qCUk;}oqoKU1JY<~ z)A(3Xk>vE_`*H1@_}^Ef3}{`Be71wdsGPhNvA z4C8U{j4n^0JqV-ypopJhWo3PSN?#94cfx-%V`0IGn$CUfm=th2v<>A)`%iW@XnS+| zIX|!&t4#Xh&IeS>z@y{;@rGi1hM{V#^gaQLj;52#gQS+#O8guKE|tD^Ef9b1vzvYI z?%l7j;?z&fR{Yza+ZN~9jRZJMcBee!ls_&cq>19J_VMAq=2w><9ch;|zz&%kYk$pW zTr-T~Kw4{}O2Vr!tCIEn0&S+~|8e#9iF4;k{5yGk<>kiBFF&0WR+8>#z;eNGI5INFe+BBB?E+FZ`;yznc_VI z07y2&54CsKZ`wZNd*JjD`JrbbgpqByC={30{$LZkH4UeWSJ-z 
zUd&kmgsdam3(Ox|h>GXhx^=4(peMMx#X~A5$WI5DH>97}d3onz#nz$Z@G@{@A&^pSr!gZhpyXrPid+4=tOH;RPQ=#7ncIHRGrYi2mh&id{J-7rZxH! z5)Bz;7%$bK8hmaP?W+ZfP%;DG%#p)~FW_u9CaNacegEKNK9r=DowzhNp0qqsC=YTc z*IaBjqEx&%7}hcZ3O4dYu-l*Q&^hb#=t*@@iwx$SRZ?}@4KkW~%e`sTw9-+$lP1Uy z7P&fPn>1SFcE#ZUXq?o_`kb9&RrJHF>+ADtRwavZ1|sH$U8%_#%jTK3V5?O8rEIxV zni&^?73)EFvY^O(4duIHToZ1SXO8bBU^lEl8>a}$3+~>%XODpG;I)&w1u78eb_|(7 zrot(V15=W!8je0mwGJxxqHn}Y*VS`0czcMWvHq+j@bT0K7o3jd;_`@dHh<~6{dmxl zs+*Lgp4M?PtnpH*fLY7!?jq-oT&t+AXSt!|{LPJ1%-Y@>5KQh%31}=ievA@7k!dL( z2R*cGDgx(-zi_qXtZybzx2CTJPV=3+;&&|v=g*!xYuR~Qu;?;q%S`y#bA3W9v7u8? zYL5vBH1~=U)eabopVasS<%*F4+oiPy;7am6$GZw!doxh_8_^<1-g52+JBadJ(*!oF z$*J%M`5b=0pjQVmtzSjkcFFSGo&JC0u7$n=$rlpk*syV9BJ>%T=}J!31SR(Pmf(ax z{hT%T&81fys_?}+>&^_-s)IE9E*k;b(mw2w9|q~MPFN%52`iulbxkfxJfg2iTwI(8 z$6C3T5XW5heO3rgEio2~@I*5*0k6UDN0hsx_{pPgwGP+6-o~xi?GRNx3a(OV(@w}o zEVIS({A6pENn;iiE$=OF@g|jXExY*d^j{_TZp6i6->mouz4^0PP$Zqyn}p}-1-rSq zZ5NZKjkP!Wa}P{>@Wb{pL%nEeBQ=l+kKPqYD{o~>NF~*#p5m$r_lu8>HNleohhlTP z9JEVXJpnJ@phI|d$DpY`pS+md`TK?qjBzs=Rt#(u+n!{}$}am>U7|WQG&HaovgH9H z>RT>~iV|Y#P;AoWLQNrMsf0Swk`gAq*uSUTx*Ot^y^yAKz|!oL;e z1~$mC?jYSkI4Qx@I|&I19a$#NBAvc(DF!NWDCf~T^}Vl8;Kql| zvLU>ODPhe@pDtB`(+s1c<}wR&3zE(8?(TX49CPzMu`!Q-&oz#6R|6ZdU9#odMp>{P z*z%KXeUykOyu*R1d-@JHZrtEUF?e!FM9p<>tnzH=m8z@0@GuhaobjQ2#=^xrtW;Hc zv?+bhKq!fhHavb6wU-b?yi60E#rE!n;4=ZzWG9gd{({%w9?eLL$XM=>l9|L@>~egR zls?fvO}|+&3UZijHjnL1gJRTwE{k9U+VE>XQ*>Zw%cQ`kD*R4U*HiRM#VL~?AZMoN zz z7>)k7j~KFv6f76D=Qsvp2k0?uQO1!1o}>8RIx-M>?%P%hGYue3?c&Azlu^eIhn5{K z*aTh=v1i?}Ws4#fl)k!u_WFBTS^awL3F+^fH*c2Qv7GpTMcf)HbJ0XqB9PLgogt5VT+GoqY-&x zrt|g9xFS8l-{{_%|MGG!scC7w=%Q}}951vOe)0F;bcK460C?1fuYF|Y(f=LFPoP`~ z9V{rnk$xK_gGs`psBw$({zsR<#Ol%QEL$J>1ls&l0)K5sE5n{WuZc~D#jw5e=rJLZ zsK;OZJHPey5t(+m@S zPSXf@T~1+}I&j#*sPhs4&MfY;_2zrTGad~!HL2i{WTC;d*yon+4|=uIJ<*X}&FkLx zL@?33J>x-#7vQbf+Cra$k9H^?bYv>TdAvj5q)r~Mz7$`S`&=m8nVVeOHg6_IS^44+ zyc$-Ig$qiTl^Sq9#(iZkT)42qN;)n+p88x}uO71KbW;mz<1n7;EZgNT9JLqd0Ac*91{5{v>B*D~*EsIv!k 
z*o{wITu)w%7TC4uD7W=y)a-uLoOIXoADSbjK>thyn_OD-P=P-9wWMe>S9L}#{6J?s zP$P*_IymcCnrZIJC55AvlveVhFTH$wxDacu*xXe{v9-KMuM5m;OFNH&TJqd2eCyg( z=`+6OIrCMJl3jb&7vIjfpgUb>;I@RVv6W_S=bcB&R6D#+G2*6B?MzJ*O#(8R`EOr$ z+;RHmr|sk2#f9%$Bd|!|-g00CHcrJUI2r9MGmemgv8KXB8v`QpZHG*uYP9cQebKlS zQ^wdWcJicbTG8~wlBLlT-aa}YTD5aJ9u>ZaT%a@Cqg{)ph@|~$7e{?{K#D&TgRpB) z51Nu4Rha3l`R!rZQPeD9kCnW}@;$`Y03#2GdHhIT(rddSz)CWM0O@I6+fD09&blZ`hj;BZjQtiD5 z!7@I}ec53Z=YT+i>=gn)iBAAoq0^uk50}yHq=E9;`$-+ls~N(rIYmXGyy-EsV2#Vs zQfw?HQKz?-h2v+bPadGPn?P+Nd+)~0n<>?#lB2;nrV8!{I+>07xl%E4!x5l~G*Wnm ze_@e>!JD8+!Q@H-8sGv)0?)`QRJ^GMJnv8(Z8&cDrwBM5sU9(pnX4dXH=%2$0L8u0 zNWTGt@1d`64)r7l76lJq)b+6y`<@!@&7y=QCk{8bE$PINPm@a4mdwiQ_+)~(6+vNV z(S$m(Pf$yaO9936&2FDZ%|CarJ0AXfzGdc7*%$ov@M_o%**u|~cTT++rw>fdC_E6R zerde;>nMaGhvK%p<%Pg7&?eGOy?3&n4Qx%;REEc>TIevYf`cX1fSs1*Fs}9M-S*%6 z8YW16?I+#<3GI7B!Ow-hov6tAN}uX1u}kU?Id&QxDj*sZF@E@+r*DA^y)Wp*K};p2 z4m~)N$FosZaHQ9pwFUIRNHza8DCo5 z6_^bI=#>Ii-8v9Vq~+yXVOi49H%9QCb83Q7?Q^?bWMDSfFNME+>FX?XZ1|?UUO4nK zn8{g(gVwKKZ%=pzF@0WNev&NZMu;7XIKNf5X!(;$Si=dhZH0Ceui(j;S`>?%Y-rS0 zdGruf?4s@Ee{jNXf{a5b+F7ga@FBM8GV6sY17Z%~CBg!1`un%_c4YGcD^JsC&p+O% zCl85rVDc)JM(N12>Fex1(L>1BEBGW1hN(OFN??Lht+h*ZNUB{vOD?Ddmjr*C@crl~E~1CJ0X+p8xVArBRD z27BD+6YxOLdNZ_J26$>B{U{u#mzS3d`M+3MqLv(mV^gJ$v?9<*J$xVfo@yvm z4ckr?y#%BE6uW8tv`g3Tbaa`&Je|FozL5cm+PSIPt-WtbU~CDwKNXxAtSO%FP72m@ zshoJ_LXL3TO+T{GOlVrs+vaM;m#FF^a1pRa4<6UN*{*PuXqN~u2AZ0 zBpM)`y_kT&4&*6AK4QsPlx~9LcUXC4{bxP|2L52YB^zYgo)Jqa7;yXuagI&uf24X` z?-Ya$m-&elie;fJ(OVDSy#-exzx(YPC=Gq}!otGsv9@%FEslghYG%fH?PN_wz~T0X ze|J6f@kyUZGRie=KF-e0&RBIuf*O8{BNaev1XU$N`x9sq5L^p*#`XaKu`K46Y1G07 ztbk#j!Wgg=Sp+0k$a@2~Gt{sI5i{*7$VR;Bdh1-W1u<>6S7%%GbSI44so&111Pha9=fLpggvy(j=sXES zI3Y*Kf#Ev3bcOoJ5oZRH#_f4A-T%3KsEDmc{X}mR=fiP)iPT7=%d^3!S6J$az%7_1 z4~2)IH!S>!=g;M+Y&X1GM2}Qpl|)#Yp?AbQ4*QMR7hMWI9v&VABXMDNsE}xFaxT3O z7?K5z5XhlHwlPHw5b8d6eE6oG+}5SGzC-{zVsM;hi*15Cw}<1J0(8&u!JUwmkf~~l zmfYkHiQ3CH63$;%#fiKiYR9j+k8dwSz*uek6=EKs;Np0(+U6!zK+L zZj&}TqKofHgS*hd?!L6J@CJa~WhlZ?1qPd>-#(&e1CJ1UFgH6MtJd-pg0w~6g*KuV 
z&ldzZ?`VZGEgODL5e}_j(ZtYDJj6lGXB@H($Oy!u(rM{yBM1e-Apf8%?F6Z1;plZe zZZl7!lYy%pBs~W>s6$+aoga8KGh({j@E%T0eekPbalM8vm_C=~v@~l%Bx$B|)-Sb3 z(-flbEzM7+0XWwvctB~BjTF^{cBzKFj^@9o3oA=fO`|O-)TvxO1DiRw&bfIP{ApK5 znEsFNA5}ArWF>Fp@kyo*e#GOAc{rl#QRwRjkNNK2UT8ORLr_2c#z#A>foX5qgO&_@ zjb3Y|QchHUVQoGdb|RGTa+@P*1RDA*0$XK88sC~zIA>6H-z`_56t&n$Fxh}eb*N6r zRFQBVaUz)nEt9dG697mS<*tSC8TxZh8+(@mV+QUov^3Y&` zj_&-^=h>U?Uz=e0{<;1^6nwjUl`Hu0v*?9G3$r%RVTd?TZcTJw2ZpZ}gjIWO)12?| zEG4g6=TxNk{rkg^9?pu!7D1jUWVw2$51WHj+drl(8D$`z}eyzfxA ziNdby3?v+p+6P){BYa^H<=bJ>4ABAF7rxs5kORA5fE`ngJK%DIU<-A9_W?VRM+L9e zx`l=1Zq0pHIe_IG_j(6&`9Zf+-;;w@O2ii=Kcs8JYSHXPb5RxW&1&sq@HmIjU75g2y}ZFzaBRAg|jK=K^*Z1vKcagRaL07r;29-3?flP zh;~s?u+1!Lb|Vog!I#IqHn57j<#jz72yf#Mk@j`{{PfIj3B;#NxiV$5s?q@PsDST$ zfdmygv&6MLMS|ma1lrzGFv@lvz5xr(Qx9+PeILjtjI#_Q^yHNl-7bBU@oh3e~NO9^0?F$L3YCvGut9pY5+z)eB|o_fDP z@Zmhkzy3PNA#FH-Eeo-%exh&zEkVJ}t*F|kC3*T) zqF&RCDj=0&(Up7!UHQ&m8^K2j*3O9xMIrwi&WOU5qUiV11X{mUMTHG~8F;Gyi zj3|$#%8>!~1pQJE9@@93`ozpPZ~+T>kTbDDjIcl5N_xgK7xAyv_Tu{b z`p`}(3PxUG>8UQG3GsIhmT@S<()#6JYs!h;-!~}kT&$gcC)Q)3cT-_0K-*;!|AEUE zgFV%L=n?jcWw7gYBDeL5C4-&$7I(ID6zsqFy#kMCz7T#B-4lLQVagiPBBXB0&3m9& zsgm#~(HqE4A$~`gpp}ERYewJbV&3?of1ziRNMhsjo>8zQ7vH&YLfv7vxb#20LX<-Y z79ZYrqBNu;TQgL+I`Iwhk@(-IP~9lBwXq4d^0FIYTB_8BsD{z@;&bB_^A& z1%iY?+HTDwJS3X~f)Vb)`uf$YU!g;I1PY|WA+2*R;@T2E!GRldg#Q2=gjW`;Qe zQFx62$}_pd-MI_|J&R}3u(~?dnco;YFj+jW7d%*-#%xtoU$F$w|DG ziH0YCi%jH6V#A-_PG9IeV-)W?cns!mWBHtFidHl4{eVE5yWQ=aIaUQD9OZU{f25}l zBlDx|b1M{jVX^hW7rS{e4K(sZMw-I`Ao5ccSWv;8C2XK=NAkJy%sB zr!^suf79_<_uR%Fn3U6PrZY%C*^f9Gg>^cvEZEGSee?J8W6Ga;6_nadT9QLcR_C;- zc5)Q$kmY_F;b%NwSPicaYH>C~*t#AR) zBf*voET~ysvN*_$!dYwK!w3tIq-L7&CV`NuExD4z55oN;r+OlZSnp<~Y(B{bxJJ0kt;@hg%KoAlsW9P_u>? zM3N4^=|-1*UCtc;*Vd9E%;$Zp#t-L!ikMxl+)w;J3CnfaQ1+ntBmy+55^Na6t@#Qv z%em}M#EEA~91X<<3X5B6=^^tpllHmqLyr;8X|(QaLs9{m;ah?|$lPo%_66*!ANjqx zEDP^ZQlvWYSf7f#cIa^ce=Npk=W?&a`t-Yha;LewW#$&KnHoU8^Ggx#ls|{V! 
zVayow6`8O$aR+=OTEce0i#TRfUd^hr=?PFEcv57l@}AnD!f9CXunOI|sGh20lPKs9 z)i+OcMNbZDBNqn_V#{ikh zrouPXss?$u`Qko{{131B99~~}q4e<$Rn7CQzyZicoMBTeXvkE7FTP#!Mn7QV7k}>O zG$~i(ps&XZC#%BqT$16S76Xu__{W%<+^yh8~;)S^XT%B);xeG&K6!Dy| z_94pZgu#F^HUS=CVnGIVf6(m0v**yE6v%JYz4n{r#K`No6z&;5Po>vGdjzU24I{f{h5PVBa zZ$zV&^HOe_0XSf0#WZ5ajvWprpZl-nR8Yn|GTT^Y@c5=L?pGw6Q+ncLnSuDD{C3fjzmxc=8)_lV>mZoJ#)%)r29DKrLT4t>4eNl8m* zFmJsMDGhZmt(scx^4l_A15!D*rp#SWx$Kd_1Q@wSghuicg1t|sYj7jA@VW78NZFvd zk~{~c_FerS>ya{V2EBuJ+mDNui^eO8@k^M<34%(r51j| zLKapA6VJMD;gymQE<&^4rvJ#wk-RHl+H@6iO_hBP1BfK#4 zcKcv%RgIy}oQmC!G(9{jmqmNV$E7YZmSWLWpbDd{PPD8!(xu_s|?bCg1GKy$275$KQNAK7ZZ8CpJf{Z-D)D5^r0i za$cXK4yIY;!p}ux9}N{GC;hS)L`dNN{L_6*Os4AlSy?Ar>~D>a98rF-bzit%cQdn= zZ;(xs7ZNVz57?!a07P~4f{#MS%A#tPJw=V|Hxom($P`~5z5UpEBBm-^@XAog=0(Fc(vp z0stJJQ>lj86na5UTgp#S`!6TnKv@3p@grj#W~J06twf!l>zWgMLBjJi07|9~Ue7f* z5<>zm#z2%W84m*;9}d#Ctenr6HL#}feYJY*q~$T+8=jND?=oX6&VqM# zZX$m5RyNh!)_{&q$6)QpRg}o-&-EXlplsyZAMzc|EWR7Q zhZ0YTFmSbLsK4~&UeT7mv8m0A&Zdr_ev} zI2B41WtEt;ND#Gvzo$X@Q-+0KJ0rVAea~m}o7`!WetbX1EQ)$TnB4Bf8po}?E#CEw zN)3yjG^5N5iixcjP@jHce31UrdHTOT%Jt^{r?DDsgvJDbk<7iLugS}+L&_$DQzVLz zS~$@F3&l+E+N1W65x(N?eRJZPO}``YgEmhTQKOo0A|WyA^<*0R$?UXU!DNQl%@E8H z0R9ZlPUjSl+~V-aQ^x9$9Ghf&*@JBuN)*aXtzZP4X@ZN3EXG0a;)5iz6}yBnuFg5P zVI;R z?1$?vV5_UZ0VH!e&yTB0BWgQ6FBsVX#h}Ye??{m@U?zQ|bC+Por(J)q90O)2c}L`8 zG^$C*+?lgEvh4Ka&%I6gGIB#FQ8gNg6bAsT?Yc<_%Tn&#*wBFd%w>ztJVj(o>)|H5 z=+aVuYjwU6#a#ufUNJ(AZtgJUqTP z_>d6Bf^-kICaKq+v^OT=;>S;)P(-C#GT^O9l2Qz*e|(N6RDBj>Tf-K@5@K>N0Gh^6 zE7zf07(0C)N2p8{7>44qcE@hVWXZ;rXh@MnTEQ^^b`;Wh`P zdDz)6VUwqe?AY5ca1!aIYiu%vP$q$Qn|9`<8+`T#XC!&;kzSTN82G*MMmqpf-VMj2 ze4IpAtmMv5cVE|0Z!O|mb=IOI=H0vV!7T$@05b)y;p51&y3amsl-zGLeuH&R{^DTv z5QC$HvuAPUlhcmHX|3h{Pd9wk?xqkP?aysLJFJ-7omc5Q?pOx2Y=_y%;3(-FchA+8 z7156Mx)+WOuln8W{+WKLxV?EAVyM~)n^o9>r_R@yag&kzXkLMdkOTw9EvD4_D> zHbvRLDZ7RmU@g-XO0tz0EYW1WP9g)yn2spP8yJ1VBck7qz&H{Jj1qNj!05|d=OXgT z4d^o4Vsw4If-+_K`5zHEi41y~IIgN0IEm5yom=3cREvc9^h-byuMx~JGp=^+iN}Aww(0mTKfY-?E?CiSy1jA~@GirLi 
zmvVZJLLNIFQ0|G@z5WD-1IE712WK9e&woX#+Ar%UG*$eqW(4TpkO=@N+{Ep>KA#V* zLK%RGc9mK2+?Ndb{p?`MNR4GhbI-(i%8@Y?IO@6Npq6)$^=wYnr3go0)&%sRY@BLy z3AAynN_2I33bsy;4BMTde18jFXlJnOLMfxQRhTx#t1OD<=HZcpw|~N)HW2r6_jYz& z$BGibz7E>E{?t`&oDIu2<>aJ8oUoBu4TZCpNR*W-#E%|PGvrIrNy*Z`M5xba~%>-wo+gZ5R(Y<_Imq|3s1~Lf;)p;10t|bHUp7$r^ zck!Sb@Qy?7gpHS{QUPE4kX~MmMQAdN6f;4za6=UkQLLgy^^v1TN$xbbMYYZMpwNwl zWSrUW4SU)!0I=cC(E%4CPm@U_Oft4)U}q-6(35L@`r?J@fdV>4hW9ve$;)B?ndi+J zhBCpULckmb1vqRFt!Py^v%oz`@J+n7{Un3vkd4>YjVEyYmzD;A$__$C`Q5;Lg~R7v zS1Mk;`V*cll9UP{AK`k)SyWnb30^5rxBkMH2=YSw-U#2vk7uu&t;vH^|MI>aC%Yj{ z+@G85v2wRS9dGTjzGxk(385N}Vh$Q>|Q-L{{* z4iZL6Ben?f9j8^)9f*~I%qxliwPfHhwbMUP4>?HV9UIZ}CRQA&_3=?289|t)(jg)| zrea^D72t{n%lWrgAXjb8;!J_=MFzS%-WvRqZH}aF0fuVFcc0$+!s4C*CWBZ!SAK{# z+fRMFa8j?RO}H4dC{S;|7cj2l!A?d#B8klOoLolCn9MkIS(noj>vR1Yt?tUfoK|8j{b3Z@7|IihQC&B6Vo|#0X zn`gujVRF(sBG=6K9K^(%;-AYm&>Xk#{HTKw9LwS6vOwBlrel4gAC$|>4eg0=kz!`* zsPsaG891JdEK|f5Lyy>&teYdrnF>$;yvsz@WWN8nRVn}!$+X7`BTjD?Vu{a~Q(1-E z672u$=T?YzILP~hB`V@^oD(O}9Z2LlIvJrhm+wBDI;m#h+dTG@#UMT99|M8)jYR_g zwg_kgizeVSn4%G4F2PBO=eI?6@OCRDm=EDZlMo(hNhU)SpfCd$)JQpryCH}HrIjot z{`tbbEA!BxEsOy|1skg|$y-}+XW*fI2OJnU@prVBr>mc(D>n5|t`UJ0muFaUAr5mn zGV+g{eay^Bj@`}`y=D-AG35|V!W+8tIDF$?y_qSQT$&iwlM$35E<*V%>lskF)&2 znJ>vO*9h6NU&2R-qg;Un~cF#FdOE86pz)V2i-fa;7u5|ip-$2U`ilMv{eXc zL1G>#v;eg=Lqe;y7bcFMDSBwMIkLkKmfVNnB}j;w!ESA%&8i-dyi%x)#U2kZAW;oB z_jpgtEDC$B6-ApPSh7KQb{{*1cuxwDm7o2aZK`%|9I%Aonc{nK+pn zW0LG6Z_EPzbK9-Yl`E0kps9SStAQF+~hh@RoX{K_nDsDr||M~5_Szi zxRRPWI^iU`alq>{BPNsTP$I{89*RK=a2_oqN<498lo{MG^2P5sp(CjY?wfH)JQ^F# zD?U^icPdn!NXuJk(p1}EZ*W|>g;^(y#rJb%cNDFuDY;B@ zwHSXV!zDgCu zCLwKXH$=~=$_Xxi_3k-KDIAqJ;0eYm1l5PE^~jd_($mvp`EMfX#31!RV3)Iup6k}P zt1MY!-sfvjQqWUo6lB7cLzZK5ZJG+3paqRtF2vzt3Jg~Wkn$;@PfbPU;f0`;CD(~J zAN)X=bPru}ozipmJqh0co7m2d?$3=illdg=(u|P_ z$#(eN7!I6lnkE(UaJ$5xfBte?o5 zNCDN21*Gew8&>bfSeJOkHXZc!dXhI9zEOOuGQgp$l0q-!~|Q-teAEFFVnqf>fWyb zR#N&hihF|*^ufl0@o1Y%17fRi5ResAwesHQ_V=hPvRQOs2HJlamsFt}8r)$UH 
zwV4fDa3JOi2zzI7r4QV%88aJkWZI{fim1)6;cB21Azfo|LTKVQxXd6{%8XAt_0X+7vcA=ydh!zqlMhtgttMY!PBk$%%b?ibxju z5IoTolzzC2k$?U5S7&?wL6`+?mIrD{JV3!q;MBjrOCIZc{|_L7c06Q?ahLehh0yIr z@$&=%W07aazmW8BI8zM|s2*_qD`L7yI^*gkavx2hp5(SNq&9Cm4IdU4FHs1H#Yj;W zrsqfofegMuW*t#lHwDPV4>!AT-Wm0_oky9p*d8g247I?#)nVE%2v$t)1j4rpxv5nD z@2;cUTgAx08*(s(n+|6@3lAp;n!+A$G~jC+jhhjg>LkD8cX0j9@Ils?tLW3@rkNg( z<*y3j*Wdm9*Q&?w7vV9~f2C6~k^hf}hM8r`IQgm%Gr=dld66CO( zjy5!MrHqD#hA%A_d1;a)5?`5;G;0UmB?d9M9RdQq3NfP@DA4=}FeUdZbih%=t#?-TkvEIW zRp96|v)8k+8$pcDlUD_X0m^txim@jbZ>{lHW4JYjU~qy&bl_eq5I1H2DLr$ipR1U_#iyopt$bZ_F<_nfk_ts)37I?^V);}YDctAM z@ATs%1DS*+^KH--6yT;LR~*Jap4?-BDG_1k=@=sYfStGaUnBhVxAhzMPKhXGWg-8`n>C3=BDry5dmvSM zw)0<%ebsNh*_Ir>m((AyGBU|En%%0G`HOj#u1r6&-)ur*YsTDIzG2ib0&zs6F}ruc z==+rmY<~cGsX(=`U-~U#>;oVJG9aYVB0)^JDtSPRS}4MENa@j5Pt$8g<;D*`OJ@CA zZeCU+11P%kxFQW-T#Oby_KMy~QcoMU?QdiE$kV>SeqbYR-$TbGSIA|X<0;~#3v4e#i6cYD zl787_utv-^ha6}S`8NQzVGD`b=+wm_9HHb z7)ILuWFV~__bWAG^aerUGl)N^B6OyVE!l)n!2>TZrrn>&{4&bF37Rt@s$G+T(5nUu zc9Lthh^GZvA;l1rBMnfb?NE@R)v6I59$i6$*E0+S8dre%6NL;lM_9n_4t|1w!~YfpBEaGl`@BA#H?R~)*k(KJ z;^oVUu;rM9?cR{qQ>^9$p$}%+CDfQ4=8ZW(=MLo`*U^2m`0v8M*XaKwg-vdKDUAQ~ zCoUo*BccIkcskK^PLQjyx(^9EP9&q)@lgz>!hhh}1(HrKz~nXNI^RIFm#)e?;EcsE z_tqw{5TsKln}osF#6cy^kT+`?$EA(+I3*;WDWwF3nOu;7aI_HZqTvNHyhD5ta#3DO z2GAop-k2_c#1b1A9&X-R1F#O`#3nzQ|{_-C(D z-}moX*1dmWoUBgR3Y+se=o(&mzf*XI`yHVZxVnLS5M0I9^7QDH03YN)_<_mOmSXGsCv7Y!9;sz#fzy(=Tv0h1aJ%EpxNzAXtHM5zb_EKjd^@v+t#fC zh|#0_X1aHHBE0U^>-)5jj9=p|H$#R?HY7nQ1m}mW*9iXUt5j0zoqX2aPg*JFc8S1h z)=tKe5Jpi(H}$%;r&3IcRq3+)<(w_O8$uUOLlZ*ZYCpOB5wi34QeAdUKL(7^A{A0) zBZkZOK07#R$Wt+TaPOagXrMg@T%HwJ_ueB09@X)Z(QX;J%AF{OB>H6szly>9b!L5$ zvn{zp2v@b`2K~DA4j^ZH`NRA$i+jCbK_S%qe=~^>E}4P5)9z!yDgpGMFm|o%41hOm z)Z0M#UNcFZ!7B3q(8uW9;|!D$*+yHIJ67;ovMan z-c<<|Ng#J*rt>T9y(Hov1c_nC3g}ckf)Td*cKc+@@98LXob!bZYHI5Wl+vm0m*G|cSW(cMJ% zjds9`+-dY2_nh7)5meAW6?i$`GZ0v0;c!>YZM1_@h72z~nMl^hl<(!VKgf_XJU99Q z(6${`Ys(7@!@#1gWW-6SR-DW=0r{z9o2q)J>4O2{(g7j8&D@ZtkEmS-#k_72@! 
z)nhgr8UG*l-UO`1we25X88fr7O_gY&$y_wcY)MITX)qMcqCusUd8}9z8Whblr9mWf zlLnel6sc6wKoY9&cP^oxZFrvdd*1Imj{ouR*!!s6rnS27>%Ok@{7vV=DL1~fC_p`Z z_eK4ad)DB=s9Yp}g2P+Cm;f@J!Q{94Mp}JKgkV~ddl@bsp+B&W*PGWsuYsLsll!35 zh0knGftT~Hw_1w`2^2aSiLy(v%B5sxk=^E|D;FKmvWeTqgkYO^jRSDLjL^2rS`-m; zXyGctw}{Wg)0358iB2_UAk@eC5K|YSd^!}n^?W4y3`3tW6*V&-nxMECcA4Z-gZqm?}1<(8=r8Oig3BjT^gq&vCT)Y z3jPor`DSpV@ImBk_*ox8Vf0`96K`OPe>!b^%4X;%K;0%uH?D_3wzO{c8vKrrAd!fK zxJRdDMVF0&ha9wFgHs!d&p{p)kmp?vvDcC>bFJW>^ILl8GO9$Es-ox zH~ZZP899DDwQ}{S=){L^^ixNaVq%TuIdrbIYCG~_*KUPN+h|wL98_^AMjzNsvfkxU zwtdzsFe&I!)}np0eSD1KW1#4!x|Xb*_%&x5SO7l}4A@G|tS9RH3%F&*P|xXH-gE#v z^A4w}4^SDPOjC!ev@mex2&DcdN6&;|ppb}ZG%HI8ibe(WrW!gzbv(CJR+7kLZT;1T z#8Rr_0gJ5$enUn54(FFK*vToc(FOu;l2x~~&Gs!w$J(a2O>ar-5R`tt4fmQL*x*Fd z!Mx)g`-&I+1vygIp<^WA6Bl|daDEh$$~$)KI9bPg6YmpZ7!3g0QO{oZ!=49PhmJGs zVI0#7dOcGJ&03d~L)ka9)v&>lZ^Af_G+YSUXRRB8;wlBHDg<6oy+=Y8iE`|i(I{q(DIgH4 zMUW@#`ZjIcW?Q&T>TV63{NO|NgdI4(C2>C#i6g-6Ahe%q141{`cP-3OTnznE6w;+= zog&49Aut}BRqou{Amp89O^Ok$U2H`G``b=VzEsGk4>>4G60+P?(20xU9 zLqupoj517iwoxd+T1ZYbapMZXRMteeM(0mR<;U6|nm*#W(SlA=3<=urs=^&~iT>Jn-% z&;Xt=(^Zer)dp8iYf9$4@uSeSlgUQh^i2B%#V?_@s|Dm&{4NwJPj^n#NYKAh&nrJg$=6uk^iWaW(=|H_VryQg z+E}Ggx|-D#L=U$Ugfwuyp(XXifFvl5m}0Haw|$GX!Wn+Kyn5fl8%cOEiVnRP7-$RS zeK2xT5$J^HRRxMJ()A@xo1>2g6$XaxU@QLa5H9@M8d%K3Qbootr`%ZdzEK5OD*5nC zY&<&yeth~nu!h52Lt&*91^o0C9xD>C@-l5nx;1N#qwt$%G_;M_%=*MW?c1sGMff#^F5{&_JJf*2*a{EEft9!#$@=!k7zd6L*vh!60A^C# zL#E|0Io$T|-_KJ66LJ(GbExj0D?bO$frci9-)GRbzpA|9i|jMbe;n8f^RZ(?LWx?G z)?C_Ku_>!!2c(V&33GyA!vVF>5pWvP9<2pHQK%nwa(1Yu_s{vz_2>NVjO)rsR>0{< z)Ii}ExFk5XI9%Jm&F_5svy8zbJ@)s-f9YR@&zQV43fU-+BCwZz0y95`k56BICjEcT zhak^;$`NGPAHjBSinm7FG`fdi!Ylw9uLJJ^!p8>dgv3Mik|?*s2t}eKShOG2wISbs zTJiy!hG3sEs{5yZ{iK}FsbGJ6&sCG3B4U4h{CB?5)MnGS!jGta)I||c2@=8(>ht~G z(d*5Yj~gTN5_BCL`X6lOyDmXU#}pR!=yQkG_px8NY1Et%FGtN8h|l!zx39ZDTa7sQ zqSsHW82k0UeE&@<#?r4kcpWm;LVXzWcc*Tcepg__aD1kJ`MSUE{#?N)_xG`TFKbua z{JbBTrczQms0Rro{MYw1(#vhgIBrPW(%2U7T2xEe=->V z#y2V&YX>k~7ovu4wi(ShjJLJ_H*fJy`licTGS9f}{hOMs0ECLb=ZUJP$p+IsetcB) 
z|LIy9OnDUmNl}sCtRHpE)nTNEtK`UIoPWQ4{q6Iv6Ik&F1hG});5u(PprZBg@|!Y; zSKj2oXZrWs*WW(N{^KV955B?K4w|OpA!_;GzZj5LLb4+J?_7+JyHdCq6^b~nWMTB5 z`Zw47DSTYk{C{))eaHXx>u;aiIQ9CEyTQ=k!`UEzeCOZz#;x*#VGPDL70BJ!F645> z1z+=ri|@Jw5soiwr!z)g<~}>9nfq*Ce5QW~c7NUdIe8%Yw|aau4aV*U6>nu_?cer? zUfdC2vI;^MzOz!ez7Nmc-#4SCPMx>uviAH?cnOZ9Y4J_{~Pme_I4jPO>*Fev7>7smw-5e+G=^Whezmo zM)Ks95H}9^6uZobxB++xrKhDzuBpCC49u zw}^J#Cb-D?(-kV~0BVo}nXqTHNezh>y&l!Ue`Tsj3SvMG#4nL3D}+&y14O3eCdz6> zIf-HkkdqvZ)saI^nr{6NRG_G>to>qvohl_X0wm4{yt~OS2ABrTcmZTU0lsLCPcx|r zB4Gv)AOV*y-$gBCpMn)n6PanuaBAI36QiM-i46-Aq&^Yg53GenAlHZ$Kp8Jtr2l>0 z;ES85R(Zue>~40!o)n=hdTn){cwKyzk6Sd2MNJo3{LocZY3(E~7cT9jE7kz?4=xJ_ zAY6w`Z9>OE9ss4lLZIdz&UJlClYl7R$HEU)HNnE->nQAkXc>}*`cX|uh(BFdS~L_0 z1VMd;mqo9QYA}22efRm7$t6L73Jcb)W|J)tw?-Fgo6BA~e$q*Eytj(){$gL>1>IsX zwA-euPP2n>H(!GkNTT6Lig*i8iY1Y4k5M%1*r=x!oxvC!TqeY#T#xtobJS$9gTN)4 zVNJF_=j(Z)0s>BodaeX=P7VlLM-bldcvDsbC>%C*8x#leB-cm08QjBSx@33SL7Ebf zQ~#cVXn=c4_$I*tKq1ezL`B+dAHry~eX%59mM-^us%FLkrMfAPg01iGcJYHRvJat^ zU*ENM%^J}wtuZ;eTXGOuZEZhnX;hWF^H*Pn5Qkl{^rDZq^c&2aRiu zFBr-rk?*Zb(Llsrf`+hhSkKrNzhSX69b`eDT8}KhDf+v!?6tP=WC%VCJJPT%97udl zNk5-lz%1|ZO9R_b+y!l93`FnO91zE3-mwTV_rPByi=P1gHp^3mmxhzG*wFL(GGvaN z4{i}e^+>rlN^O7$N{oo8$J<=KY#H?z_!)q4ebFa`5CZ8PaJm+G2*q7K2$87&0^lUE z(;vJASc(Wpt=c2vaXV?)6*eCuJv1gCuFe{0f32y>nDxk=cO#$Wpo(V~1923I9{6ArWA(1IvHDXZ)8fYKIaUx|P%6~2!r?AY->C*-NZlB9# z&-eS&Nb)hzFhHU{l``maA3={EQ&Caj6fJ@SfI=`x@eJYJYHV9}5**~N0I0K45wcR2 zo5iUiv(UX2-rrlbQu8Gv3MPQUDnL-@4&P9a3Jwu8y6pQ$#!5(3XaJA&^fkDUl&Xa{1PKj{raCWvhStf=c^~=w6TXDO2 z{iT`Kntr(w=tM2AR(bK%XahRc19pTQXr>8Rb4l~{cpRr0y?^a_ZljMn_8%H`!a92N z=#H~ZgaDXnSfWy-7?5f>Yz{(2!ejxy7@l+ieK1lY)ZjTs9m$(pS!*#-?n_s7duz!= zDMztaU1y}9hx8v(SC8TZ#Y5DdzP#HC-q%hjMf~6O6uX4-kRjvxBy3oGf5Cx6E7$MR z%YsRCWVH6F%Zhp_%fjs}K3hD!k;+a84cXIIKs_<4qsGw7!nE-9^;vqtmVUDamom?_ zS-+ItYmsH?GjH_a`K1P95;OlUy}LkfcB$bckL4SEj=rq9yI^dGMV8v>#BjmgO6IB$ z%R?SiZbIhy$tNLeU_9RBbA7Xqd7kRA+Y6p-@_8ImztIQS_30BQHfWZ#URPY+{Pj=F z8yjpPeAIt*soMJ_A+v!(%Z+^St-K=^!e=eAbno-7^2%BaTu96?VSUfmUNj8DK 
zQW)z5dZ9<)53qlJG~czCbdn^Au`*7D`!Wpc036hr>Ri7_P^MX%E)#}A=pwg+}cZpNt1MZ4+E>L2hIDK+Y5?Dt9ybW3* zHKX%_&BWLZ6CB5OOJ9NoT@QrfiW02acf-#IfcOsv&&py$)7E7|tY1leVR&Fr5R*mk zzX_i2VaQr$jP0z+_N*|%}k;A z8i6sQ?p7H}TSBf$&(G(2HSBlKoUB6CViai8m3+hCpMb|-yI4Pj{Carf7?MHra-e+z zsvd`t4YbSRh|ZhiVKG7-8=ulN1*3-c%yqLhA?__U6xzD$NLIAcRv;Vfq5We)IUc5l z`@Ur$m;K+rdlQ)|?M-mq5rL9|34c)HO-6#X2ChzywKIM+Vb}@K&9NSlrDx$<;Elry zfLkd!PXVY{{3$K49a<*J%;%&C`kHRDwC!^#X^SGKnK9%&HNmw8h2t-q&yS8NTqW^1 z)57pr7uN#$thttprJ~*CaqL+9E?KDB0kT|H0*@eEA_dtN$sFo!utmM84%fSu*xH3{ zUb?6lOC|!@ML#~~jAjS72${Wn1d^deuN?SYRM#2It9c=#J^B=78Jqc`ph@U#1muX* zr|McRYAT9sc+tX$0~UEY#pXy0=P*SX8z%Ut!<1eOv*JFbNguqJ?HvZoT(#ACs7Kd# zyP)A0Kmuu!G~QjK-&u$d(v) zSb~{C{QUC3?B_nP227^A%Q%Y=W*TT$OtViSKt>F&0q=bky46`M(w2k2j*H%j&73So zOd*U->^oqnl>~)ty+^90R_G#9;pDALBvl3Onoj7Up%d5*pL*5B@%-At$0(3i*71El zNCDv0RpvF)09yGRUr*s^aNk9Z3jLat(VYim-Z!x*Xjt9+*OJN#G%~7OW>JHparSkkkO; z`115U06TTa4wk6&u)7A24){>b6O`Ox1H0|+1-y_F7*J`yqj_6lHl)pjA0e>uW^b%{#HB?ev>jk@u1P6@@dU^nmVyy{YhOG%OF?;)rX zLAi#uhsH6!sWyXUkkroP*nT>g|IUX7PZ@wTY+-faH1XZo3}H3j@R@I)+nO#L+KukV zdW1TOteKD-DZrHR2*YAXMZ}trxeh=S5E!>YTurb_s@VCS6hi?_6TKi_G$!>QBulsj zHZN}XhjK@|9&(g(6r_gYo$CErN$t=WK$!8a`_WqgEFIQSw$2sf3y`dZp>CX2;(%v_ zVl4=YC9N_zTOn+5q0cm z%V&}$YD*E%i zd~%SXKTB9x;BVx|$&`*{06F~YS9Hfu(YGGg;wv2Y`)_pP)tt_MG-K7^q0B#Sg&2Xb zc443AGtf=Ltp#wc0Dc&mXFRJALu@mI(N@Vl@W*RraKa#e{5J&V$o>cziADxf=bmyN z9S%N*UjI*zj_Z=bIHTh_iN`sV3#I9w9slRLs>f@Syq%IqdVKsl-*BPvK7n&dY@DNPN)SA_`|Cn z`cL8bp?7*bc_<*i!tur*o4AKf=6k_`;~|?FH3}QbfImB&Mt)hj!s=jI7zre#5aLf zG~}pi!JqMa-K{VOQrrn-L9YRYk)jaNOpLN~ChQ(qf(BXw-I#H`eupvaU~?3OE7CeX z_}nQ0pf?8Z0KGWD!;}a{KpE_Am?d7LltSGM61Z6q-OoLZRCghO6TpR&NQnt#i!&sm zpn;}~h;M(n3;_!hvpjgEL!dILrIb&aLxSPW&~Zqs*O0S={tjn2AUR)13yX4@B(4;l zOMqBPbkFzdIs*{zJ}?|)9j7Sa9dI5I;o18FoIJoM7I2TihAc(ge z79u@=p$e03X8vp@hS!XNSpdrDILkH?Q+CEDlwir-2A)cBx8r-a_8>($UDiy;E)T07FVi_l^d0?rl# zaUct0(C~U5a1^#@Z-BbLHsy0%-IMrGur@PUbINl~l4p<82P5EXE_0Dl zE2U(PP9%jK#9lN5(e53&dC=-!8s4!G48XJP>AX)8i$<}79~?3i8nc||MUUI0-A-7F 
z2;|tD6ya2OBM!YICBC;HiHa~&N}^QCz`0eu{nHjFCeVkB#9R;$>qiHZM+U-k!}T>w zN5~pBO!+zUP%#I535D^Zo6#bouim_5#LY1D3TBNe44D}ir}^o9e@5Qa-w@{L-Y1h2 zyJOMZxes5+&UYS?iT!oXQma|RHo2PQ}%o*x^A6Br4l&kf0as&ixzCLH6k1m!N1oX12Z|@@gN!E( zdjwoPMI^$H4`Rf!0tuz`mwP7l4F2tF4iJ1^1crhUb}zz#b6+hkMPPY-7fqqQ<3KqB zR-JDQ^|rxe3{=&-QHo+C;5(Jj?L{HY5kMFpG9ZJXCN{xxPpOZ&hTU)`i)5=D6KJ~! zVmUe%p|eD4$<-Kv`V)>2caRDEMJu4XdMgKZC112TIdJC~1_a&>iL%Hs(Mo&sbaii8 z%Y$5;v~%J?wS^;BRWcV7!)K6N7qn*Lt_IR%b}e1H)K6zCbb@c{GQ3}(PEgXUiw5v3 z-V8ukE=qU<1cN)brTD5mZBaT^Jb&?J+OO@aDRwMZ0}ai5KqeKcN&*akSUWh9nFW`6twmXYcXitfFO>>9K6!w)9@#&@22M#`XQO#vPAiY~~1g@Z_sd|Fz?KADyPM+;2bL^ekP1){NIr;fAI730N z>p%q%bbRP_poKKV?_fI}2?%yr$+p34CU}yKt`_^@K>_fl6$)8(I8=Q)p-6s-26(L# zil^kI+FE6D2cQOGvbsK(caaz4PCYsN5J%250bB_q+U6Kr*m16T&oes83XMq>7zrm@3+3P8Np79N3RCx&W+bb}zJ;y%o4g_A!M1CFH(AU%)n zk}iG6ZgA4BKS85)ueJNJys~s+EqKovoy$~lFo_J5@_SKVuP|@S_0?EwY(rkt;jUW^ z?Phu>d^TpB|0Y=bnRx95&>-DBa#GQ2vuPr%F(9ymB|z@YIu5$$9?sO{iax%2%UP4l*&it0WrXsNE=k zjfx*7G~)cbQNISRMoKlB1^dvCkF%pdfs7eFdUHp)onQ(A#^ArXEtLv3LbtvYNN8&p z1p5-cp+L|bV&d^_gdkj^Ljb~>3n+G~wQ7I>l2TY`vvmB2OZ|WRXwX&k!;x(h3K~-< zl(#j8g39Z=z*%KnzClhjFag8QM?AqLzdnZt^pw**%>|g9l)6Gq|#}C_( zKWgzrp4%_yjkX$R+pquf31@C?KR01{$5r?9>)fUHiqyU48hg+68voHrbGS_>W-v@| zGPwHi@(1lXae&eKS>V+9{ZSi&>!)>e=#DE1uiIT8T_bL-I;Q4=u(ULsHmw@b)}4)A zow10D=V!GzrlR`U=~hX$8<6O$+4yz%PEaLwAu1_19?7`-+hHj|BEuT1 zE=(5SmXoYg2K|Jm#(Qkp5K6vn^vRQq(cX5=QQ;xX~$`IHvAddU& z92qA%JPPi2#8J4Nhk&K zYoe}KDI}}p*$f+ z#q&C}BaVoBnMTJOg(;(a22n91J5EhmIe@qgI6%X8lwGYk9|?v2I{Yc|-1Qn7AtEu! 
zOQ5+KzT58hpc$`&PARFFn1qu9v8t-7e_CX0c=&CaA`0V1zcyJu4<%3uWX=aOL6ejB zmHXK}HF3m@LDT5Vw;?GCgm6X&gFnwODQS=_{4o5$>Jb3_d&EnZd3+u#G4<|bdS!T0 z@zRH)Mtt7E`D3;p=piOnTM@4|4Npi3XoZ4`W_W%c(WGHiI!f?LM_RwZL&q`A(A%Eu>1L$vizhi=-^U@H)P^oNnr@QhY(K>x(rp4G5=-vVFjs z;TJ^B9ZGEDeBVPCwXckKpwi#6D#RItyVnqm@s%g#)>3RS};5gCSaz>Y`w$Un&JXkRXOV*{guM9p+O&@ zLmUwBYqYQ05>Y;MY@vBI1_w+XSW8RG7_g4y+Cz&1>aN$ ze|it%WQ0tUYoK)^=%$6w?E@)|b7f7hEwSvc7k0fEJ;unvc;TMACijqkeB-1g(0~_A zY3W>qhpnPK>udcOU5kHtHx3$=CN|iO;$Ea5tGPSP0P9W8Kd)g2e`Z`+48saB%RgRE z-sn0RN`HLFYXv}#zeI56%#|i4l9p$<_NV&#@>IRTeed@Qnkwy{_x;;4*qRR&^_0TY zUGmSa4dm=ed*tQ!$lq+3$2ct0Cxri4&&{ZNZ>kvT-5bo@-~T`V#(`ByLl~}B?2{)e z>;^HI)0lXhV}AWMwa~d+j@#2z+3Hx;_PMIr{?)tovRMP!qS+0D88y8h!TV;I1JvV# z^Y-}nuW-wH6t+&Ab77*QM2(D2{`SHwOVML6_MiIr@vkHWkUK&9MR7U4?#;AS?*plg zt0Vb!!Ab0!@yE6BkHztiZ+u-XhMixS^pZkCM}GT~cj7Ec?XGd}pB!5vl2$(3BVE8h zuf|UDgrT;jCPTtQqzBF$Fi*cXM8_}6x$k1TokH$=3zPfblU%Tx))gY12}=6pe2HGH<%x7gcxrj*&WnbKZNmi>aTAzeqrtl z>jLA%Ox3(fjTjqei-;&_YhzpG`ns+D>{b48pZ^=*@IrH2x}5)u{f{G7E-8BtGkeFi zJ~th7djc$;&HtkA=iE2$3$I?evQPEpJP-2x0rLD6cF=0@ajE{|pa?4M&ybgxut0Pr z8k%#=_n|a$ z(6a7xV&1&J%4T8l?(g^SEZ#Mj;ZlEwk;9nzaRD4T(Av*BrG)RDi zg@q$*A0QrwV(}<6XPEbV0fD82F!_m-WET~)D%{Bv#cD3}coxb><#27~-Li<+O08mW zkFG52_zVSgwx6F~^g3bWr~zP5K~NeA_m3YRN^*Mk6~oNG^z2_8aD>vnXhacU~ba9qDc|K4vB>G6HIbqBg*X|O`&Qm2%w}NsvV&pVFy92 z51dyE?Pf8s(klpxpyVz|NlA*gqm(0PwIUa{_EN=LzRXndqzF>7$l6k!jG@yhuh!>TA6h;922s!zj@*@ zD^SlmNBk2|*J+*$Pm#9%=y93rH>A&_PdG0+b|MCav)GRcRCQyRGFAtcTExX=#h+y_ z`I{^0B&QVTRA}8NQ};pTI|@bT^jgeFM6?SnB-=ts~Hjkc1%>*6*)f+*1s)5cxeA>ye+%aifb6M7 z`>2M^gUOjOjR=LlB7JTzb#gj_Kn?GL_DO32~&g3|Ok~EjDiVtX3m)JcgBL(uGPOEiJybh9}P;xh-6mK

`YBf zQ!5coGNvpyC#MKaPNW;sS+`qB#F^oy z``D$s(W+P2LCHJO<^cqc2?!;j;X!!ZRMB$Vz~4ZrQAp%izOPyfH`og4mCsw<+w5g1 zhYI}+pUjONfdwZYXwQwg*c{Q)-1CjQ9&z{H5{`#%H5+?W?T%?xX!;^$6e%WZnABVB zT}$2k)9>G}gVMI7yz|3CBspHlm5hsz-&YEj!y?~d^ytz1(7$`cUcxF8@Q%Mt*`5KL zU9A?n>}KOA0@}aqZ&Vb&T;SW)#1tBy#1pS}+DKb{qD|R* zdE%$SSrx{YEcNJMJTYe6xEgdDo!!OzPb!DwD@?p-Mx$AWCHi37-3Hp)+UHP05bukM zD>F_M=|VTd=Xq#>?0_bGQdt@RR`rW%?VbxicA3!2Ee9rRc&GbD~T%?SHI{VJ@h}Te+oJk`pK0!145|*`CS5J64}q zB#hm^|LoOxY4MmTK-K^?2@46W)6mdx+gzq!@UgjzcfQMRl}q7UzkKvAIeq^8#l3sS z-%CGZTo~qEg8Ht?Y_-yG-uS*+G1~nym=$x+`PQq`_ZEv=o?$b zjSQI?H6jKD=&g==#CBS5QhafB`fiU6icv?zFHbMN>#a4~Y||h9BP7s8acsK8g>~M+ z=N;>Ww7t|DtT7Ad+&S%fmrI{82wfLv@@LPY4k`RXDY)YMK9AG0IXMeq@=_j|85zoi zKRG`~eh5VlfG&PllUG841#I?T{rhlH$_5Pwj^MR^-8CLl2q&uTL|}`V-%>VMZ8M(r zXR?NE63-bFkTQDh1I?0{U3=Y@Gs;f-`_E<(0+j8^pr%OpZNX?v0*mR#y=`pC@;L5f zk42$ffy-s>IT=NpYI>-8Ys!wsrE(Ev!t0T&7Io$_d$48Y#moavbntLb6Qs< zmIhoQ7HQPH;0~F#MX0j;b04puRu?8cHQ)r+zO9e^A?{VA} z?^@HJ(=f&&QO*R3Vz%y8;!YRVEp-u?xcFWHTll=w{`*y5#tv6pe$OrS6z^+>`_Nyv zHf}A3;n?t_pKc}Vf{}I|V&rzWT`Mn=K4hd#PEM2$kOSCc356DZyb__DnVnq#SZJS{ z3e>UQ81lZ^O@%E65R0_VSFc`W2T@42PP`gonIhgSe1AmAGaZ3paZlSk7fG`vM?^W5E%iz#Z1va40 zGp3tCfbVOxI~@cDbfNLvtV&ZynJsVwS?z-N!u`RZp8Mok=2W#AG{t%M=hE`JiJ7BEKN_(P$)9QIekIOb2ByuF$4>CfY|Y3(oirO;fczk+_(mF z7lvDwd@#KRDu|LE*g;?kiljr!8)I9>gps*HVk<}~Ar|uMqO5yspqV%mrJka0qa3wQ z@M!_5?R*R8UrFW+|FW=}=&~@iQnOAPJBsXad8+oZAG!+^y(`|W=D%+H41ejTicV~}_0fG@v5&kYI43yh_i)bsH{wn!PoeBWzDKJLLd%&# z*ea=*Lg|tpR>LXGPM#c9wpLLwdV3jcZevHS>kQ1!&GpKwiL`!>VHp)4w72@HSDtXqGd@8R6#v?E$ZA7@gaXu|F` zGqv+2COFXZF)EN&M4pteSB%Nwih5&t;(Hg z_bJPj>mn@u=BSmVXTX~39 z9HnPlq@Ixb8Ij1q7!{3-jMzbozPuPgu7V;U#}#LxfH6a~9X!7$nDk3vT7pWnwU?)7 z?eY1!`G}I+n}E2i$U3NytRBF^Xp1cBdF^Cu!EV3C09N{oq7did&D=Tttemr>XGu1y z*jj9|B}g7C52(c44XuXwy?gtR8VUvkuk^}?gJ$>!z|2QBOq|>>!Z0f z@HCBkT}pc^4U%O&y4H9j67`M>#u%y~s+Q(Q%`~_nBSC90e^F<%hg8k+qema%dPQMQ zix!z0!03?&jYb0l+l9NP2oB?Y!*swRQ8@LE$8N}@|MB5EiKk&lczvtATYH^MC>;S$ zFlsp!9`o{wN;6ZhhzE^r*^O-_pPW&=zr}o7%vK9oTr&ytUd&62`;L$axOj0Y&Ob5; 
zKTW2j)N@&{V>FUJ6!4QlzrE`J0V7i(pRo z+x@AQhL?#$gH%;r9T66`3YI7p^nkXR)6Tp`HzOCN$&cA425?*)AlVyi>&tL);cVE0 zfHIP-z+Y{E_$4(k2qW4S_2^y^K{xTazeXn74U)mFtXZR^qycgkbKq)UU-v{kRtInV zh#Ty@K^UfmSlCK2%QA`d>V7{iID=94$~aza`OKA8B=MCRIVDJUb3Z`x>c4SUdm!mjR(QJww zrJQT=`zBmB?SUS1z_B$$wfn94HTAM0kYHI;uYdOhi_#Xx@E$KCXf);2LVK+t*MJk^ z3N$C4nYp>e*kZ0=b{t$dBp)CwN=d4gi8S+P4K2Mcwb-bbue}$QTogNsLnFF)JIFp7 zFdB2UdLpI!?jPIhiW{3Q>6~~2Xb|e`S_)7{Ph(yh(|V@7v4o_oXxTDr-W^C$>-AEK zt5*4owqg$ZdQIK!2TDIRgq1F9uHN{bRyHK5oc0hI$=f7bRZj}QQ2!~ z&z~1?gun*Gao83pE;Qi)NC5h1h~-10GkPKLyQ@!1!!|z zK*V~yvga^{dsePH@o{X}qX5j^boq6gy75V(#i5tW03W0vlhRPz%U!a1Jf&pQS(chk zZx=wuRZzwJ*>PefkC;#c%r8$_g8HG!quKAtNY)hE=WBDm&;dnUdq;V z-^5^MIQAAw0ygXjJ+&FLl-yK$1Jofi4Rcg*HlbmbsyeM&hzoQ_>I01U#X_OJQ%e(( zgAlPIVkBdtfsT$lx-<@g11v2o^UnwlEEP-^u*@%`cn!Ae-jiWii2=C9_}qV=C&=&) zhJJv_!U|bw^97QLhzC6ga@@y50(k#$h@G}51-9AFc_kFxexkk1oAPJrCS!>ShkxPq z#ye`>$om;(sxbhWI8hOaUfd^x#~qIx0h6~HfQ{-Qxx{v#`}$<73mBBETq)CY^)T|4 zPvVjT4vvAW#thmo8ahmkxu0#ZHiV+5yAxabr(+>)gvod!mf}B_*_F z%AP|R?%5futY`uK^%u70!M{b_8f)&H%ctFm*qxx&+xvMR&)0j=+y= zf?8ID9W}Uj?_O43o+cmy&%9~1)Ei>ViF{yh>P6u1xRNGyz&iVA2O#w_bsCg8iyKrZ zVF_(@3d9){kEwiS5W{U#VVEdO#nJI7?PfDYB4v&V2|-0L>DRUBlxtfkR8j zVykIr{2ig%YYUWD&d|TJvo1W8*O?If!1V)8-bLD(-kieWC z3Ha!_bHDGA@ZhWiuxWJ-x>axwG;=TjOPVI;k+K~Ny3Q{>PL!g(-L!>!z8}}=g18|V z_W+dOR>x7CHMGXzehhpri34O_cOAE6UmDxQsWSacSNR$J9ZBw18%x1vb3V#O#%&P_ zzy73l)s0^JiK~GKelpaJ!-~Rtz9J*Q8+!sZL2Ca}HJFO}w3XlL{}YV`zs%kAGppF3 z8NZ?QNIQ=oKl;asJ?xhA=E}Zod8*L5F#?F24sD~q1G467A^;tIrl(9$JwyD@seq8p8% z0Ak?i*2C~x9G|99^B6Y;zldzSIrK~RkngniO<9B##<=yAY~&s`KF)&Vh3527c5qg9 z_9=_f2>Vildv-XwDZ~N)C-TyzOLMarjLPE^J%hn+b50E4h*?=!%-_XnyH?!f(VfH* z5`E(*9t!5DQ1U+JF5}=a($;8tcl4kT3zc!AMLqhX0+Kt~%Ph;%MgI3rNgKFXwsjN*vuN6%U7DnRq z5(Fnx6d_YcK;TYoua=C#pV7P}9M4-uoG4LBU=3~Ep@oY?`45O!Ny)vSw>U=r1c89B z_CuNt!BYX2N6d`#_7|TwlhE$@?#~t!k|-@*nyIp3&Nn5;Xs3x2%8WQVj=U$FhkxYr z+ib*lF-zW>0AU2%n| z;9BpEGbogfX%Tn{2B@BQo3b5sCB?r>bqFrN2QrNGaAVXvIc0Tq3| 
zUHNVe&D}|}9(D`+eD635cu&NWN-xE|_9y+@N3)OKdubBQi`xfuklc+?6$s^azw;@7%~qK85H;=0`Z-kbZw?9O9m6bTLf#gZYg3=+$f1MD2WwH(g7qPiC>HAxq1hlI;?% zb}i)`Ov6d+M0!mmPVKB3PEDHmkfL`WEu=JvI-p(qsvl%zr%*(NpgTp_@9(&dx4j zMK}yIz)cn4EP0x`D{0=md3)FH*NHPcG7EOCRHWS_( zkOw*4lf-D3&UgM;q=lHnroJupf0y!puG6dkPI@S+zK;U`sW&3N8eq3(`&)po#B`$L z41M`>6XYt-oZjWokUvxBqGyn218xUj{0U%k4DOHyzP=?%hN_Or$zA!VfoZ7?fDt?s zi`}ffyu36VRu@uuF%snD9t*0^tpREkyQr%@?^Yg}h_Efj7VG%izZ?V+EsOnoDtsw) z#V44kluZzgs7}2XFgZtfFi3sO!cZ4KjIGzM_wD(XU4`&yFyU-Gqi1)HaOcLy*13`^ zLw$Fww(26VZ+jWqxfno9t2m~t3AqjsPe(wE&&k&-k*YzIk2peQDmt;^)(5vo z#Ql@*mhHmuhJB@Epv3hti3cVv#2l!~vzS;S zn(Pb-ir|6;iN$31BRxt#Vw4Idrjr>;%DyRpieGKS*@VUC!iUm7sV2uqv+^wISimrg z^Qo5mQrJBjmO7ph0lIc}zH#xDoZ^6p$53V;t8v|h|Ic=isn0;YZqe4T`V0bJ+`S#( z+rCnOr4c)GX*xH?ld>t#G5NWqs;cTFv8@N7$sk)S<~rzl&h^X#*eB|du3F??)P7BN}j3%1}G7k7_!ORrpqcGOI!b5)Iujjp0~uj+sL1?@U+K$jX0 z6APfbiF5H! zf-~JpH2PR(XJ)<_8|OOZTchxk^K@~1;V2Tt?~8q|fa#4Gs2kyie0y$73_8$(DWx@c z5`yMnbnf5QcaOYhBSzRjs{dD6?3cS?DQbK>UxP+|ijgQQtIZE%umvfuZ9DsTvwLf? zRysBGsSy3mz6l-G&4+>*xra>5VQ=AheFk1HW`QLx-K_>pvKx>$8Horm37J~2Qb8Y( zaCw+td`QY}#c0oTULDP)sOc1Gg;}qu>M9u`kW14u%lt>Dt-KWSwD`2Yzd>+k$fb~_Ah~U> zMAhcYEI1%8e$Rijndv99i2T#Vg=1WlEMvo4oHr+zn~4>BE6L1VWN)sjt=eDJMd=IQ zEpOBH1s7bDEC!oNjR8Zx@{TviXmv~^w1^3GP*iNLx$Av^-FWq)8O4PXW)Twi2gzvH&4GHi4*b&aw*~-LQ!rv3q&8;Im$~EEZIbj} z*tJdI-i5A8X`8eQ?~27k)e?%cHrK0MT^to6+3SQk^;KxS{mNt1-Ybrezku7vVk=3! 
z=tnka^vkwxmDKangl=}0fp!w|*$4Q+S}yk#iRa8(VwfFj{KkEm1+W7tt~;z;zH`>k z|BIY;1H>@DlJ#V~D z+5C&M{-u4PP8vvEize^nQKK$^dyhmJN3Lfq(?D3;wWoaYNdfrk#wRQFSP+_%Ac&?4 zpF)3y-F>L#{H~9t*S+S)(FD*ZFNwH`G}}* zOnxzdx;gjUU18YYD5r~N(gn4&iGh??1qy4+Hx8cYvgh$zJy4OL&{wUQi<1ha$I!Q$ zy9Zt(kII1Z20<3QYTNQB3VTS~GxtvUAM_K27yM)ATe;9F?pp}L(051S$lZC{aBDc(p-^2@;>{#a~maiLI0)}7t# z9^UpC0ITMca-^H4#y-f-$%(dhKbi-9AFprx6ydz}1-|Hg!2895XFPa)@JQEJ?;J=7 znKdbf>+GIx0`F&~IoN8PuU!(lKV9bP>-+{FXH@wFt`IQsYfWk!;$kPtz!*g=Y4nHNEetW*)*}y z7?pq&X6a0_aROR_q{#p*0M;Yu59=+2k$+`mRa)6x_|ZEJRQmv*VC=u@+jw`UrDdrG zVCCra=|Q=y5#u*5k=Y|8t4VGn;@Hd z6xtdqRvg5@HYrapuR4h7)S*S9BDEu}tF4O$BRxYn#MfRMRm6}5BPA~c1uY{^3P>P- z_?On5pjA^q#J#%Xy%k8_tp=*UpEo03VvywsJ{g?6A|A0h^eC00kCnpIqB`IC($dn8 zD*&LE$KraGtQT>O=>dnkcPt|&91&(>lUI)D+;b5`2hSR3?z`=f2SKwI|M;Am1ov0cLAbQ=c zmHynW)PmS563mjivi&XOnkt|-n6*G5J{Q^YAr*nfpV9OzmI^{!$v_1cQp4fuNkHc? zu2GbQC+iHTuX8%-7=Nb?IRTqVgP1T_Mclm}a9J*x$%;WD}`1j8BTm4)EKCqX+kx9B^pT$8h(nitA&5HU71 zD4c@#Azj5{3WBOF91>X4f!koBT6SF2fIQ@^xH#jw(_umbxQS-OcqI&P(j@QpqAn*C z0V>#?WTJGqR=um4aaFGm!*gLrpE-Sh)84>J(V*%HvsRDww=HVg&Bgx$Mc2;xRO=eJ!kA-Jk+-k7F-mb* z+i}iH*dL06c~tV@vtJw@V30if@)$%l!@FD2_Ndd~g^TPdir zW=JowL_U*LShR?VM(Jwz?>cFut&{4QKoi91GNP!f!;oRLj_SI)QMhQPm`}iwD~7*( zq1n~g)v<`ALRr^A8F5?sgQ~xvGn~P55}X3EZ&C}>t5&-L$k{q9E%gDujHuykHf@-6b)oX=1!?&j*d6xGcjb zIH*OlB^9+aZOMR%u0Rljlu%$-s~~$;4 zPwRQb&=T)pDyC@s9rlfML~z(tfpw;&5J z3SfTasJzATV_XA^AQff@ozbtW+%ZkwK08Y|ZE=C3SlFl6ed6vrrCu2m)pe|P;oKr; zJjRF~QugbWTqA!LTNw?kgdP~SXNsgCxs6(AFCPG7`Tn|P2*Jj~4wU0!Z^BVNLmQs7 z3li&45D^6m>&xrQ>3l=(32poi`}Z<%&KBdiefedq$`6Me+_i%9o8DH{?=w0M_2g`DuX|{`7v&hG z6KI$m#7Ra0)j&N*nDJ%D8TTIfWAzw+5~{#e;;-iSTsjOZ9mp2!CyrsP`l(##XHQ9X z^+P4AJ^#YkM!ff06Oj4UR3&lf^9nI>{8f6Tk>~&Uw3B_7drEA{?AVev&rd zCNsO|A7F^>56n?Ps#&;bHk*la8h54mnm3Ah;wK%b`YMdGa|jJdMmM` zHrHGa!qUkOLWz~4JBYz^_4x6A95?wCgo}iH^LGF2eMA37OQN{(%WfCHa!oizrlSXx zw61=Qb^yv33V~BK>LZ`g?!-qwF|!hKqVGzFpFI9V4u4%j5g)Evl#+x7J;2Vp)&|f9 zr-g|1`dv3hJk-Clwe-VAXZ^|9aWrt3Br9LUjksJfv>Ot8h`BiA)jw@>ZO2_D|AmPU 
zgpmY|4fMT8?!cWOEeFBMsn%LvG~5a8VmJc-NS}>S^=0F$@N0qjPe$E``S$4j`v0cz z1N~@<_OytjHe}Xe)j&K(pK^H)YkqW}w*p8_9 zTQRw~7CiPacWn@}XlR2GDZ(ODWH<00fI%d+(A0MbrlGo6DhDGBd)+O?RX7%&BjyKO z5-JMWFHLt+D)tT;Giu$t+Vz-0K%|W~EH-qsVV5NPC_I8TVOhQIZ2nz3QNdt(l2Ifk zt01UuJ_>c&rkj0nT+v|%bZJuLvfeMPg_j2N0*^9kKH7!3;N` z`}a87CDtFFXgbi*yyK#!Jyi`BUbqsT;Dq}qwjsL+W@a|FE!02B$j|tq{toqeTbTWS3Ws+C0^Apy04j0>YC8(jQozk71uG7S}#+x|j~rQejAKY8>? zT!>~L;zCZho3~b~HmKqQq;JZVm!CX&0>JHrkv8V+8s8P>3fS)U9|~UR3P7y2N_fOJ zATmwwMk`4y2B?JFh%hKOWMp2%XoP-g9;h@|A*w2{eM;Yo`{Q@=b!ca4X9a`D@ftU2LO4LN8xPH7F+L(LF7EWX za}&PMO7A}@ky>P2%mFzhrv<0mbSc+`tkF<84&zG$FP{T&89840z){t8bcB%_5?d0f zFgsNZ`8u*u?-U-PBlpAMBy}g9!owa2>jhpSy&_{Ay~28}oV{y2;aU%_;vD zF|%Z`hioBjbv!(@fmcAfaXeR8kHL^4u-Fh$Qe+_wGE)Jp9at<$`JmvdVbZoNd?$ga zDMDnx+~=k{mS*V@7m3X_<@Lp5?;mgW2#2C#n71`d`QCFj?uF|h(&;>m`d|*70X==f zfcJ0$uR!zdG@se?d6M4@$m=u{=v8QOBe|(zTLLen{|L_9eTphxMg(=Gz>f=m#)%cN z+p!kiiUlEn5Ycn24Q_QeL*2fwdKV10$Y*+0u^W1f9%)jyc6#mxVh-m^K5k7+Mn;Ou zE4s?C?}y@{RbZourqC>C-t?h*Ae9JUHspnpA-WdAX=F;7D5v&(!>&GPdDQ(0`=Sjh z(fr9p0MWQ~VW6lX>(=oH*&d zQX&-ODil^VNN!XwMfRL?Mw&9W2eE^IyM;$ZtpS>>F6RP`B(eG|6^uLHl6U@DKs)6z zK&iEwP4n~swB??Z%9C!CLCLj*O$mmnO}B@LyYGT`-WHIWg0M?m5grUv3%OkoGPc5V zu$t{I`$3DqoTF}T(F-{FXK*o1a%wH7WGOrZ9hd6bdW2gXvK5_ps7LIFt936Sl@L^* zDWD2$khD;HaFQkWf+uGwM*%{xHSyrKHUz3+!UC){@EoJ0(m)a6%M6EHu6TMZ>}$Ac z>u+^nXHUJqLWso)$=dNMp<;{dT>*YFj)9GruZ6Ah{G6S7E@@JyvIYc`#xLIZdvY z#&P!pY6n{06^z&5xSy-DXyZZyiYt$_f(X?G+U1a*SxJkFCp>RPw>sEztD#}ol`B`C zl8b9#QmTF|j@f$Oc;u(vnz0tt7lhD(&!rfv?i{RwEtrSnPJ!BO#|}uJf7h@OAPX*+ zSQ&^0!jw#W@j#eCDwXx%K_NC9O611wz(Je9WTg&UJ(dib6#&sw>v@T%@1HaRk$b?g z$R+zx-Fn6H;E=JSG0ww9_=aoGl97YmOTT?Xi08c-n^->(~E756ovb7b??-g z4RO$2H$tMKiMEySJfsN7sD;X!_Upq%QYNY`BgAI$MqmtH)^5T*#atlci13Y!aX^Rq zI>UF^cBGd>C>ES+(DVJfj$ZP?kRzvYY0oxz5Bptx@%n8=h&T}2aMM-U@3}gq%25>F z{9K8pGyH7W;0>ZOGFCxMyA-f+0phc1SUpSF<#qw7Ajv{Ss+a{O(2GTW^_w5esvPcD znV;wD9X|!jLZh*3INa>(8{)gY<`K1iQLv07aqXm9=dxJ1Jxw=*f`@gK8UE#2jJUYd zG$s?k^(rzxkHKI9{8>Wc83eHq_-EDHqUEwJ?)m3G<2y3PRxCDLsMA+V 
z)jleGiK>q3|3loHz*D)d@8gR!sWfUpX;2|4MTUf(q=jV6R-{OSp%ltYwVM=b8Ip($ znJOD4WynsYWGhoD63JXqkuiSPy=WiWo%Z>j-}m?b`@QH=-Sga+{d3#N}Sq`PR3@8w2lG zlhA(sY++`h<7l+L@5nXzy`Znov*nDrcK!N!Pfxx835HiRNZXi%s#HXnICiWzVg|!2 z{<>ciCKq~xxdZ25<`HEP3G+NAL6D1NB@g8O+Or87wDVBf}OBL)y?zKP*I({18)2XbN65+IF<92NQJ@UT z`iLi|<{jkP$8~yuAU|Q&-$J|G^@Z{KBajOoZ;Ow;T@$eRu-68f=|v!o_mXWLtv}8+ zT(@SQ)T)}ePdhdLd@+HzaSN?Vz6{B4Y*r%`k9>I{T|I-+uR7LA$7#GSu72Vqqqyu>wR5? zW~RZKHSaU5NQGDYPS1WOd#(iMYm@iN56Ic|Z1PZQM2?*0Fgw*9+b#y86ksJ1&VuIb zGqCQj!zh~GhcNJXfzK%C8=>bqdc`5k{IA;?yl-uG?-7O;IDkJ=Quz^mBfz*IcFJ6k z4GM7UuCR$jqM;Q@V(=`EeJO$GnL-*+M7e;}3Z+I7&txEa(D}7jG?YYcu#L_Uvt~8Q zYO5R7G@#@tf3dO`qCJu}m06W;Ai@>L8!W*+(M0e1C6ZS}mM!m%0My?Bzvh@zlM9&9 zFvIk#%*1dv8bNX7ykT@p2dccbEosOd*3;9NJFn@3PdB2g9gWJslbH>!(eN^k%~H&* zql_u6e3FMgEx0-Q&&st1_3N5{_c{K#UxxDN7q*y|q;{3a;Y3VvP=$#GTJj*ou8>I&-6v*f%fdz(%?3~k!)fbY2nX;UoA%+hI;csvk-AIb zF>VSKyMZ}NG-M4h??LLneLYY5BrUEI=e5ZyAda{tCFmShW?Gja9b-MI(*>o4+T-9n zAv03KiGyiHe`wc`WKIZp5A3`teQm-etVwnQ0MbCkU|(Yr>oQOe)Poz)elOmQ&fjy} z+PbD=zX4#OBP&Gk=#;SUj!%2aw_o&UT$f%litjvzNk80HJ4(n5! 
ze&f+2kte1)lMK1a*Rmy9CAr$YynLJ=JbiO6Ex`R) zU>U6UW@46u$ie`aN>1npK%b^r;5_D_h-OZ^8El5Chrv<}V3y5tsN_(svjR(mL}p(( zRC{^nkNGVPq)b-Fsl8d&kR6x=A={q~-AtSqH<=~_*s{|kZF8K^9Kj_Rse*gv@kK)h zK_>8^<3Vxhagx|qi16yu*nD?KYN`yA#N!374XNXlWB`QgVb)&gQT0cGf{y{9dRdhn zxH4ptn^8p=3mB-ZNEVAVPtFa;6RWrTH<7{Njv*sJ!(u3tsr1Jdo%JO##hRq;(H_H8 zlm;8w!L}v&SH0f4NsJvo1E+BSQ@M=+_q70siV*^ev8+Z2u7tV<+%I%7hmBKWVmz0} zgPPAXU&OEDkZD?HGG=TRk|egU+7A)O7nU8={b$x}I7v&I&m|aR%&+@;4^mVn$bm0H ze#k|wH^K*@(ilZ-Zi+P*@bEF+&^N>m*J zaJP&{O!z{Ojlar2Bm>(Ms(d4fe?bWoj-zX+Z1q6RXTLjLdX7BcL->fNp)(!l;+7|o zQxA&RIP+VK%G4i<*8T|dUw0wuwp~LwX)U9%o`J%PP00dtfTh>#xr#~rm&4Ua{h;U) zS>Fd)^kU>WS^-1Sqp;DVaU(@ZMwtYg30Z)K#>15No7~B;XYvA#r=*T(<}JIR_`Am4 z>ebK_;KouSp>ca~ON23m2uTTqMLN%cn!wapHDrIT2)V|igYaSP$srFUXu3gA)Zv9b zr)hMuw8=hs$Z}?E%POHBoAdyu1AyW`VEf2;|4|=vXa{PcL8ur=yTYMQqJd3`fc6TC zAH_5j@Z=JX@qZe`-xDx|$rECi}F5hn#qxMy`1->ryCqWCsg zP6uMthvY|3f1~CD&Hfr6d^Nu;V0!RK zhWEz*wCj8tphE)cK^BRM%`EuHo1>5EdesQ*c!;;9Qh33G*3*HkSuYt z_hSn@2fqPBU_H?bXow)`3MDcWnJQq#K#X{)S!QJ^Lc0MD2g&R+&8YibOr2kPkKN_1 zLtbIM8WoUzD-=uGUz|NXq!ZwA`_}^}cQ+1A18}BwOXsnYp z{c7Fc2q7wyhggU+q*YSc+U0M5BJljf)yZx)j^huQ#{}jrb8rmDT_a6R$EiS*%NKJo z9J$8+GL>WbkLa*}lX$Mo^^JlZ06?n+;F}DHk)Vm8i<}*t>dLjuAvY)}6+44w7)XnZ z#v-xPH|1a%tp_$)2rVqK7}oM1Qgw!%v~1lGi+qqOoTnHuT*!o~Om2ICxqhb=A}gvw z-wMaNnKao(b{GJTpFa>Ea;KCgPb&!Ybq8`U*ZnS;I5YZ|tT?UGIx1QqFBSzd(eDzE zxjfE@{6tIaM>j+)EoD!X3EA-6t~$mKo5DXTiy|Ph8H;>99-RgNYuY$!N>JaDAThXP zIvY0z(2yaCk)oy_Hw((8%ZYs^hXHH;2))#-rnX$nLRNo#SoissF%MW7WxR}h%i#X` z;EO}Bk684-k!ILI`i~gc*ymKVLKw;JKCeLxz`?dzCLl`^N z?Bi*5}FH8s;!dX&s z*q?6LXwuhpPAC>DmCEy(=Rb#^5+{`dAtpHM2LJ*M{;InFafb+``X48~ZF7R^N6kZG z1;b$m&j%Qr{SW?mEUuQ#eU13MKuBmE3sA+(kRb|EN)9LNobG~HK^-dGWP7^`a88&k zvkmc_4C(DVbBN5!1oZw5`ePf-{D7`e<&dlgm>NCz7!pUWyBe5)7eaNyxYT-o8k2p% zQLXfshSCp0$k-f{?1ShxA~(jFsR1L2v$A8jxnb5onmH>!$fyog+nTUNU~hmN(DM0A z*shdfPVU{qVDc9P;cy94uKM(Hf%B6YOTx^D`pPa<{0yv`MLK0#{$7O(*v z4qXTbllG=C1YofOAQMdDOK29g5>N%yh30022!*0G+X@6E4r#^T-s35VJS;oZn8a5u 
z4ejgMNi#sAd)*+6PTq!)I1J|ViQjdGba|f884mC$Gf{0whB;vb9|-B9d+jK1BWU$BrE>r6hVy0_g)?T-PI=yB9Z(fbs_M^y9mXGt@8}li{GxFOK!HP-t>>u1}nN}QZ3ul)3Yh372?b|nbU7$ zwxDv;qR)dm%&L+jgZs5Q`aM@F1wQZ>6eB618?EF3mHOk)Z&(_Hc zSn9lvgds%Q-=_YlbEE#ptb$nuM)W~esKM8F7l!}&Buv+|bmN3*0QOXhO)t!JqJ}?Q zQK1b1pD;**#!0Gu?R?_?jIwe(l#zNa>r66v=`Pxl#G5G z?v7dFCj?G8dB5d5!b+f^@(a09;b-~~+Rrt))1EbKJXbq4c?l1at0Q$JENdp4vw+Fc zt7=G%zSc_LDL{UU361$on)$&N(b_pwXLIzf0J$}I9WdYOpSxhIVX*ovdsd%H?W<^SM>q0iaSRbI$%;hV1Nt{<5B4Ey(+tny zZUo0PU@Mja#AmU1`|#`^Y)OId8y1r4Qc0MW7=BaLi*NqpB#W6Bz{Ddkn5`nRp)%#k&ZR?sK3gK}2O5M5Cd^`met7{PycaL{4s7KoLcK$%Lq{FQDb zl%F$}bcHhY_Yz!{27?UM`LRdeyZbW zRsX}iC1D%qzJ!a5%nAG`fro`01*6IUk@4QkCA52-DAApx6Crm<@NLr?R;K0F4xz8 zQj8YgQXci_O6=iS#f2n$f8%Py)9we~gaIV4>_(kOecc<*4+f3(ql?0d1g!#Z`xhC( zU30yr=ALCF9^j^VC3fF4y?v)A&lR{Zx zw}wC9PZ>V>wqjjECWU~i>jRXy)*eW{i&PE^HTdaj7GRuOrNA;FrTSR` z-w*RUj5C>s0mAWnnsHqIl0yxhXR;PA_tvN5agW(!kgk}N;yC_l)w;SN(y$tr3?w&n+Y*vbkYa*&7d~(H}CxpGS7sq8&NHV}q3>5wGH8-I%KK5Czl;rhi zK|?F3Z!efOcW!RqWN7e#tyk)t2WsJwGNe6M_G#2SAcgpu&w|M`WdHt82^VeA;w1k| z%O8e4(0&?A6GG8e5Mm;tpDk^=`Z2X;FwvYLT&(OKl0{ku%i)l=Vu87;GfhTSWEUBUngVL;nwK16KO<9ljNktuAaKm{; zL8X0*H}8Px*VyQ6hS3sem(G|mL-RPiw!jSBk4{(}W+h@sA&|`QAo#U^V#_!zc*BUb zUacU{zUc0OaqUy|<~Leh017}e-DFnXegmmMlBKqPDu9(utC{ z6`84Hs1FFj=z zn1${yt7seJ8i|ib3gMG-bb`ld#FdvN3r#}{_9tCBvj2N7^PV>_G$gH_FGUJ#ngBNX z>uv|M8el%L;p9G~(wXpxP)qMcu1SrBO$bS_tRc~=CQ#=~K&^-ay&NT+8XW&B-sgDn zN5j1)WZ8pRtDNd-fsxA$vyjBh&_!QoeA`ZY-MSN0qu_2k-fZsg;vUldkc_QB?0=HU zhvDpQG&gBr7J84D5Lp`mI+S7`ngbvfnLBqi*wia)6oxDfU5Ju-f{3{uP_)SBuSh9q zu6zC#MaLb{b7>=kS$dWHvO?W^Zwc)Nfgc6C<;nLXSaGMPW=W0b=e_RT7r+g@1Y-h9 zli@6M?}|PPOhE>5X}Nh6zUEMuvI&iso3oti=b$qp0!J>OQ{suQ=vGiYQe3-qaDtY# zMpC_#39Xm52mE_?VZEUkF9G$%kHred3XBKo3xAy>n+T*)=bS?W%o|A2fmFG`F{Aqc zkNS;!_FTK0olOlqNH@gqd~%W36HTQ|8oY<}l@STZ1NSThB8q{~d9-(-6T}vN!>b## z_6_OXWJyS>`0eJj+zmia${J=@DiAW~Adi79Ezm}bs^A?cJCmc*S&uAOvkuDa zT@zLn%w~8n4s!388+`j1jFyoY+sg6@5104!{3J7TM5Pmo-&x~tjSBQ|f!e1yPTOE8 
z)RFWezIUYmdzqMxWprpqYl<`$i`;Km$|zL_NUX;FAN}J;K4AbW1DVKQNTpO9?$rI?+ek)FpBXyf-P~Kne8}_b6nkN_M4|pe%vx-@!ODVY?hBxyC(Gi znFvy;TR@+mXUSJ_SXXfofv9o*z>+^`ca04INPa|hi7#(#gC zk`7p>7R`Z~)GSSLOw|2 zVJgR&)gD8`X&%is6(^LkW#_R!-iLe3T!ws3_Qz_8sLGb!)zLDJ)=&k}wYJf=`lqcn z_=WzP)$pB*tIhDAMUF!&ixWZ@Gbs$oOgb2L_mbF)7e{YR=Tj5s)SSm&Lc-(o`4S#r^U4|*_E z55h>ic^Zt+iA$l9C4D#q9Mtm24WU9*nH-blA5{sR7(Aq@Q){heGjJ;={JWK$_kDy7 zWP7^`AH|t*XysVJEgj7l_R8@e-~2oUmoe~FDfXjJ{_Usqx6SZ?s$t)sUB!?!W&c42 zM}Olp@qA(*Bpqz`aJvaMp+Gntt*!3Or>!QPCPRiXkO$-h{ynH|1@Q-;47 z_pos;`|bYSwu%v^BaC*{MO5-R%Ho_0=%%s%;4Jw52C@%YoLwp*a#8F<59yT}bIvr5 z$+Yv?e{j3Mj{^S-%V@$oJIH=47(IHM=5e^-{`zYm;&aCm${OiUBks_586?{~TW(b9 z2C(EIEQ5TJtO4z0V4kh1%IKKI%aipYY{lFO+WZ{B<1BE9*$*7`rZa|)^IpqZ=Hj*# z*^JlBl_{&uC?z5z2%|FS9QX{RO%&WFO$x{O)dnI1hW&D+7#JCMPP%LN0a8;UrzI+w zkd)FR6BSh>X=r4T+EuQcXK+5Ugj_$bA9H*jNBLrr*rn4pU&peI#z*_N_cTvT|L9EF z{cDH_OZrZf#v&C5g))G2OfbZi0nbz#=mwE)@Kr-r~ z20AkOLR9*T8XkIN@yEXlgD-5i~DgS0&j##eVX^3`-U5UOy$bmFeGa;Y8qPjr8 z-IcQQ_8X_fZ?B`y0H7mMSs~pvB(swjY7zk&f^4=gL%T7?yBia&6?IirRg`A0UcH*= z1vn4YS=45L*(*v7XKtQa#s1S`U@Hash)+{&0xJ|0pBKp$Pr$Lx@Y1<-=~9rqV-j$! 
zO4q)QkJapHVRYcsSu`97W@H=?XVhS*b;OqWCGb;h_nn4rb*u^x@odzhdB_ zpxXhhDR6UFCt{L}Miu&tsyld4z(H45n+-{xdXDah6@|3xM!-pOLyveA-KKIyy@*Zwu zVOe_2^8HI2cI7EAZ&X!<(8T$ZO-N;&mIMwcqRzHEf*GQ@vKEr}#XgF?!c1Biy&$am zKd&WrZ;QgIE8y;9c#?bcXxaBNS?}MxnHx;W&52Qz+)yq}f9!vhSNny|RB&MzQD~t` z(|?lzEs7IBpHXOCboeE&VUol)H#fHsZ+?p%$UYap?A!vKf{Gk~&P(hskYCAdS3}L7 zTL5IBdR>fWtINxMgYO%a8lRHsu9N*pY%|%Bkevt#5$q_^!^^irQ`2h-AD`39P{%hc zCu}nk;pJOlRqy&9EjJ9>o@sXk+^?DT1UNw)^v3Atk#~123X1Y5I(72oIU#^5531HN z4xaK^l6QZFH4d)b>|IBv!rxB>2!D)tf$jc?YHZ zo|CS@&&BNDC@e~PjeqNWAqsIKSE_b9WU^4oDb%ch|FLX@>#zlJCW>VZ6BsczhMO7v zCjp%ZKRgXS`69ZIJuQ)?LB*a%?m!i5FF_g25_`HLswK0tmLEGx4P)%yfV@55*MaI& zkyCSCKkoW_&GO?ypafmq-~H)b`&(B(15A2g(#<8;Iuk?DlM;ybud~e;BjGx+iM;kj zPV%!5H~gM25ECO0=QA)so$vp$g?HAh5610MnA9nLQv|maR5wf(jC!;hB@jN=A`ax{ z=YKiS9dtk=`*`2p3&>xEB_!V6NjT@G@wxT}tWRUu$yBFPlJsja`O8Qu09HIQLn_x+ zv6!E?1{K?UACWn8G(e_t(8zFJ2-?A0iL1u^Fb2G9=6UkulD0QVB00q=?|+%8V3gf0 zKfxRLv+bLMTseBrBzUE|r`{JFm1DQe`;l~xUWr??{{7cAXhn(Hm&LhgEO6IUiOp^v z<=ybsOmxz^oGB+leT#DJ-Z+}+cV-ww#`TCVi94mOebGbq^4!aAnmyi?fw^**eV_0T zs|wV7AhAzFzbX(x)eahOle*(_>Gvn+y!HO}T9b38bl}ytFF3ScLxr=!_D!H@U7+WK z4y6BXHkiVt(VaEbzCtQ%*NQG%MoxEyh?P{mGYcU%)&|VC9yPKdqiU=dbBHNa&hxcnNm-@Eu4fxO!9IiPXN=hbo4T5 zid%$9Jbu`_o+|nv4ZH$q%_jwu>$}dWjfJ- zw&}eSMcyKl`g2GuXPP#RE)Ov8=*dPRlr_`y!|BDhAM^U3;y#`<#r}48(y*C_j=!uI zEMD6ez9L+#c&*>a8;PH^r+qzj`-JYL@6yCT!`#^jrDRNwjV;V1K{U9%pOPAbef79I zH9c82B>mON?lX}z!TUT@{!g8SAK2?xOKZZPT7z#!%b1|3u|?<&>nCh62AIZ*M3k9g zi9L2Fv;8Ac%x^rwl}&SYd!wM{>;}%Ospa@{^QELpx>M!LhMfrz+g(Kkdvj18BUqIx zboyib_iFvAn0X)pH^8fCqEiSU5U;c1vSo$c9SA!$LBlODY+I4DZQC}B=f7`DZ0DD< zh;(XeY=FQuTRCyc=9C0U1Zs*#?VSxNBek$a@(>12EHKFu`bqp*d!~pZEE2euqhM(I z_AOAnJbaj2aRnw7&{r(_uoswEMGp7@;`SFNUkGu-isOEt3~5A+=hp1b8GNr8n&F1I zOF`h4+yC}NyQ71HLt?wKvNEAd&7T2}bzbi_y0t&5eIG_Z)L+ptB46!#0J1N0=FEwn zIZO$XZ%2-ee_cr>G4E@_ zfty5+^$5cAaOTVeWH3y$W zVk_!S4hCLQlV47A=?{;MrAv$~make>T(z$J5QWIa&inu>!mSV=!&Y=nyD`!H(u#2T z7%zXH+w~^JD#6@;2QmeIN%nJ0t@lz|b99)^iWsj^Yl_!0b{=_oLP^<}-4M$PIULNc 
zx81&4HHX|~1Y>Q!&*0{WLgZzMQ$OPP1c4GU3q;_I|&Fk7F#AH>tEYEwP#5kv}CYpEu_urT9 z9L9l@U@RN6{lRIlbf}QQ{{TQ|2BqGk>Lx2$nSbV-?eYvtmfy`)xTo%b}py3exAxnmo;1C;yOnW;>Z;gw`tlV6|V5OARpQV zaSI5X9Q0MmEtWl!;U?+QF@gZp6kC}|S+7u^QlA;1JhyG4@yh)ZZ6?v8>pW4kEkL?& zHd1WE3LDX2@K1ngz25qx;5g_lesHh2Ru0I~nJ0=x&$BsRl|7~pXxYwcn=xevcvRo;=qKJidy1BV0sD5ePWx1F zC(cxXHTZiY_ZQrGh*Pk$-b7zQ@R15#R=jBK+@6Ii)0PS;4ynKd7lDyDN`DO}5NG(B-I9ZEM=;+iqr{jB@iJ z1-OCxPVY3R#aMd+lGiTsoug#DjJRBm42i8-Gm?LF1NEl? z;&UhH-#7AmsLC}9m!PVjr@DXQp%~m$1_0!VCt|z~o%&JBs#0|j{lVv74Y5Ier>)+9 zwF4@h5QwGwVe6uWOq`{%)T{j$qCU8%W(zz)MC7CkSMXYE+`I2rDq+w|i#}H8v_b!* z0emQZ7ngJ%=oTSwj770Slp6AT4f|zSboLNRP_EYw5x?Kfa(GCD9C{AoUz~t+KvM-& z_Gr>rCxJl0-jM3G@31MS@zrcipv^dx)+_w(#a-wsRiJTA{`y|~80B))G4gk2+MGXZeF!Z$k zAbX3ePoH2q;a~0KdEG!dweo?i}SYxDlqK^oMG3CzZrR`nb~HcbB- z=L0JeaFyu7g{nGLdD@_QDb{=e@`TI%q@b8sz^(KGnF+Q2so63M&C3B!z~BazKz}=B z@N43QoCV48hnt5-;e8;i3G%ufaAW-^4H*%tTqLOuY8}SzT+6|9-0!u!F!E%(#L(e@ zrLP*I#~)qC?-*ZvBM=E?i+gq=eb&xkF?faXK@6P>w&A zs=fDKQ@~_mY;0^}`K`|)H#?z_rJfEghn38u>o=jYiF)wt@_K$I9vN7ylw^Pj#!rIKkdK z)&N2--MjaDU*?uB2>_&P5xjv@ZkCXU5`-Go)LFVnuhHLlD-jia96}>NW@o+6p6h2<5t<_0`R>+pjR7m3{Vx6k9X9Q* znwkQ-XO-j@_$o#h7UnnD-kt2!m zxIkl6uo)|_H#jb6vhBw;hk9i@y92!iM6PVha2lS^zSy`pt1@G{o7WFme{>o9n^_!p zsILD0zONs?2KP8>T)K2rQCLXG#yd97Ra5_ulkV}i+ic_BnTd9-lv=O=-^TUn>g&C9 zGz)Xb4K8|A=XJJLStx?ku zb8*|Bz4ol*=g3w#dValo=cd}{+6zbfpiA4vZ(|P*ps#o-`?PZ>ES# zdD_h>GYt1%xM3Lu=r>0~;wY}xQ&n zh}pIwPt-J_He!NnHnk)8q2X zXxC7W)T#Z0>srO{GM*0p$1RtB!|+K1j`OF~p(BA3L}fV)S0E9I2S#Q>HXStkgSNQL zOi`_xd(^8%+pyY3$Rq9SUsRwW3GuhJghMkbPCMclz)kRr-`fc*DZy&-z(3!fiEqIM z82nW^{(p||3>5BAz958s4UhsH=4>|Hpv?LkAC+!ROGJNh|E>NbHrC+W{@dE7B4E&0 zKN(LE3-<_mQfzekpYO|{-)HeU3K0L5O z5B{yR`e(l3-#*YkRuKP8Kl?@XxV)PemYJ(1*1?K+ zpYS9GNB|W%0p=7_dDad~HL$X>N~~L0yMTd~T0c+m9RlL>&u9tDO&RTh0OKsUIUouF zS9bci$}K3w1dMcti#|xQM%g}!VS`>s5ER#&KVyJvCH}=8l}_j?i9H4>^*BJNZ#1GA znmsH1YsY384nS(`7eQgf$ePf4jgz4zaD#|D3zDoa+|lP4cIG&{%_I9zCCueloR?#> z1jRjOAaD|j*?WpecLT(`KY*3Y0L{AT+Yb?=IQ27F0Q(NQQGoZ-lcVTb&tOD 
z(tEcgQoX>V!pUHFlw?ycbL)*Anqf1H<~#%(IQT0|@NZn)e|+Yve=p)-DCtk+=WjX} zIvJtyT<9=8bF>GpwuWvq48X3{Yfu~+lZ!FYhI66RUcA#I5GESY{}>p;@TA@2-@6=U zJ`}O>PZ*2a%R#+MKUQ(rD6Zazs!aH0AB^f7%YLzM z^snWT=!X@$k8qFXtkJ#r^4FK8dQo1ojw>H0K~h5OF`j-j0nvX*>1T~4o*b`ML2qIB zUp~BwhuBC18e6PLpg|Zqj>Z3{Bo(@zm*EvY(3D{jhYJ)a z`q%z^iW7u$SNeaZG~Ftkocfsck^ct=vjabPpJ$I9ixQrs1o+bB2+tzZXKN)3Q6vL zZtQ{{gc-0`+MX(p-woCf4eg(__QA)cS0VWXpa-x>7?}ai8I5AjJRvYj9@7Iw#Qf=K z+17^a?1}v6(=_o^`-O%yFft#D86UVtXf$WEUzCGDan6ZDS&){GC8r`x#Oh&`qK&NV zi0)HcjZ=abLywpw-E3%RNFpcH^P(2`p@)aElnIPSz8HuR@SCy`kYJ$sm1J2n2)C2P z7@}qCy~!KuFFJqNC+9MIE9`ny-MhFqz18)8l- zZsXM;!MP(i4bWhLa{4@QOrhq_CU9+vyO&~3eLC>GOnM`U zK!cl5gnu|Dw%H-zEbKOY~A}W_5$(P!GL@Xu4l$KF4E_ z%YVAagbuV?D8h88qQA~in*A$2GMQW8^g&ftA#8v-W2s$Fm5A;GUd?qhok=3lUjV-f zA@-|yBQnDv$)a1C9xU$Y*bte0;mC>?AHzw&ibijk6FEOnyA}2xUNHkr=A18^H3+qSjD#x3<&X`)MhF#4PA{FlRy(Sp5*2Ek%bPu z@m0@H80&01{h%5BrVeg25J-@Md!$686#w2 zACt8ld_xU(y#us(1I>Nh*_M_THO~n)#}Q9Sr2ww!bX6MXR( z53H>|^?>*N$gJ}aNj;A@1Wf?&mewVH5GS+QEfs&OXRzk4ph)B&et(R_uh#Uv2HJgUwE zd6k6KX80oX5^hBZPzGxqJmLqq%`-?{1Wywsdp!Z3PoG1$Q3XWIbLNK7U{RT`GAdOS z*ynuS3JoWWr8)0xRt&j*U6KG`0B!fb46Do=y0Cy8r~8;VR1IH`m;#}QXaQfuJOdL~ zeQ;#O>=haLc@Tb}{m+EJ>6M=(OCudoV)1CQwABq1irW|DQvw}aR)`7)HOuA`L4ve1MM(|NG1u#AwukP!|j z&abN?7@XN8wt%DA@v}CIIAZkcm{Y58(a_XKS5&-wj^IH`x%cQIG~fnLABa{`kYGf{{2>&4m8jbgfL_Sza}8 zI?2;}-yhEvv^TSJEp*q4KwL{t@ZTu7p+}AwXXJWY0?WTU_eOEaVPWxHi{q4BPSw0gv5Bc-*Z`GBb{M?mZ&eU6vuj>+&AuKzQK z21G$B(?)1P(zFOYAu3;SGgZuyR#Dav_1SFmVO8iVHM)%kObmj@=L9{icdo8tEh^DQF#!nL>@Jp z3##P7H4Wn$dmdxLC@74~Co|)X*b0l}-N5x@o5zaC$-O*xiR0RGtSY7sJSNF4Z=*vG z@yJLA2Sp_0Wiolj+jy~04>SbqpsK6q(1Y_AI`m>>*Ac%&d6?1h{$h<0mWugZrfB-C zO?nF#nF3!z><{R#Z{9YwU>tKLExE~r3r!e>K34=}C>x=(BNy?JWIO{U%vzC2chbR& zx-=RvU;hcT7d%0_xo}o+5tvaaPGcF^Hf>lM!37wzyHP{qw9mX@swEqoleCtCROQ(hnlG^zU-2 zlSXVa*JChxkKZO8p~)W)iZU4YQBq)BNvV^~;K>^8sVOA(c)!df2HM8`2{%K_$jvQN zNJJ*kkzh|P49#+QaDe77%8WxG%|6Rmr>^cxX+s&2$R9(JTj=O-zk%|=7zp{wnA#PJ zMM&pS<=l!hiiK39XdvD66GTC~)q|}a4e`__Lh&dLVrChV*i>~DAtOk^PzIBz!UEo0 
zHqrwxn}a7S&csZ`wd6w#@tatx6bn(4VPV+;qbd38Eo^-`m4-v7C@HU9e%2F_*NWQs zM3~W`Di6yKq1qU<{7ml2Hb+HtFZ*>?Am!Vi+rB~1&xZzSC>|RIfG7Sge(50(13T%x zyK(tW?0;~I2$7_5jEqBF{MJ2#ZJgHzZrq`TU?Q5hw)wmDd{o7cksZsyh_7`VE_g|7KzVo>m&KFTQbnwN@Mc6Rm+IBf5D z%E&!AkP{{oP?xV{=z4kQOLq&y007V`5mRhNxP!`0&<=Agg)HAbZ)p_eOLdtPJH+^so2~5KQP>6E$z3+4m>5t~tXf0kQ4`(ky#fxi2 z!Z`8y9Z{sn5Ihv(mP?Q^+t)v>I^Fz|Q67hrfBq+wHmmUJ z=(LKL8lPNPXNuz!Li~2_u{w6C!#WV>}jNZ}(BtY%IE{ixHR>oq z_E4hDVc;Pm5Y^Ucc)W*GI5d1EZxdQp*F+*X0@+10cpzpt_gFN^ACDO#9@Sx$RxZdR z*dk)P^8zLC9X(^bGeK)T6zi8NL1~hlz_3pUzZw>%PF z-IT%M7m$;&e?bMX3B^4dNe^cVYbc_G6^T)#{4|Z5+TX!-A05bj*f5~RPM85=AdPnC zvx`#H^O+MyjNMkYqRz`B`&;BNp;~zGmxtp2KS=c75LPxvNpC(0EBSq`t*zad>vl+K zDps3F_n#25^7{&)@h(Bv~`OWb%1Bs8vXH<9|+;OKTJBv?c@RnZRtqV>{ zKgqQJ@TslbtCls^U+Tg*56^E2s~7=woCHM@kZOsfgFT`N!R^0BG^x%&?+&DrEcf;2 zm))na0$jy!C}Tg1UHGzk@FQi2Px#;H?)-B%;cw6I|HV>bH<{@2lxogHhEOOLxXkU^ zAWKnsH%MrpN9QX<@KAK(z`FLh^KCI4YD9_*A8k_U@k1qu;3_wBlE-AL@>FeT_7tEk zNSYePuyX;&WWSu)?AbjH8{7-e%uyzfla({ME1eKT$jD+Kb|Jlj6jKB#0wV!KR_Z@t z4nCPy3Z_ys(7U{Ut;z@ynAC9flXt9-&K?$*et!AR#+0%>xC-F^nwSKGRvm`O-vBf`6AtNUKz3Fq9$ZG9e~>D`^VT#PkBnG-w-A z#V+!ZMpbH`geKsS)OWg~G@Eb+fN;eLCW>T-0~x-kqB$Jb`r(W@OfoIOA(f_cb2B=~ zD5DcFLf>o&dy6hWrk5aC(lvi$SezNAhg&Y87B|Ev+6b1HP=npq7{DuqAX{N1W(T}d z3}`R}iY`??T8BJtID9lE7{5YxcvZv_Uk<B19T-Sl&SwhN@P*VcYQ1Bta z-^6acNoMK0^>*&0`C+Un$P7lUP{%`EPva+=nwp6(Q^9%za%$Ywey8s$LEg))!qndI zb2D1bQt4U$^pc!gPbw@!kzK-RmPb(G3W!3>atP7D`<4=b1Zb}9!>ckM?O})N;GGa4 z1dMcjsqsPMlkHF*PM>yt@naIVCBq#w185Wss=aisQ&0bfMM-W#;;=(+X)*;t3EhB2 zvA@4C#r6X>ejZA(VN^jOnVtK@L$ngGCF|a#g&yPCq&CREL+982p^vs`2iXBlxn$XF?jdxjJcML}&NwzH3;r zdo_!n|U~7>^m|arKd8@ zSg+Bl8=+4d+zfn!SO*x;E$H5Y;zSDDfmw+9axK>TRE4hkd1M(wW)W~~$&hxhgLV@P za!HJnPkPT4$k*v&i5BAwY_yHZA))>F+!7=?G^I!S^3+;D&!-R( z`38n_NKlZlG>F_b*3}=1I~u=~Q8^56rVYtMd@{o6@KI9A&G@mpfLdumBDxD;&msx6 zteJ#u8E!nEToUZC^MOh4r*o#$s>u=qX%kK3Z9?cZKhDi1kDM4xEgJYC30>e!aBqyq z-g3No5+eIhEV}6Oi8*fiQY9>19n{|;WB6j;pdf$jvKRcoXqUbVz{x+GURXT@m5okT8t`euJjUCO|}i`0N?( 
z32M~BTI)0GDt@xjqQ_%18$w1%QFEm=RFTuy-1qg4X*2bpFsAzyQ4`qQvD`6}Y);bM zAmb{sHgpAVG-&xwZ_I^mMnAvbWzhPi~T>m&KlMe<>QlP~|dIHzWZ;e!^J%lu?p0GmN=}crR}51p-GoAqG`fy*f~4&6d!{3ml&} zk~15Mu=9EI#q@*1U)(uz_^=&pe5N?#L*(m1T?0gtYU4x0lwPpMy*U2>dXIAj#3;|@ zn}(&v#8zWfvw;x^88f4n+&Tl7YoK{>s{-7}B#$@QiXBipJ(8%ENc_ZQi!i_|`ObipwwY!6DOKQUN^Id3VNo^<-FcBdG@}v4$k{KDrf*jZs-|z{mwp8{tD`+C#u#)P#A} z7bSI&5fccU8md>y;MvZv*qpq>3z0t^=u9vrXUJlpS-Tc-g&rYxy+7D5Who>Bjag0- zUM4$HDG2D{y-vKi?4f%C-PwhyC^^y_N$K25qJPfc~O8{bj{T$j?e0ca6sLBg`Mc=fdS(3ICHB)e4 zJ|izd(C%=&9pF(Nr?(16ICRP{BR4R5L&8Ov+FTc4?s)F?O2dNRc%r1{4iok!e%2v? zf~1g#F=jO4Vqzn%m}fKmUJsvU6vpn(JiDsv=H{08A|kV^aS=|~4{GvYvI)haZujRG z_Amv+&&$Ubhl`ogeK)Q?94*DPR%jnK0+}tKCMNc}s7zqdjLPl0mo-G#cT*@9IvKP{ zdyt&>5kQ9JUu(RA&to~@EMWw3bS<$YqgR`uDvtofV~KO93m3gRfHPkQ9eaOB?jg%- zK<07(+I<&vAu4jrjk7<3wcjh9J;fsz)*;r^?Ln$Z>$?q|`YCBM{ zs*f;VVb2hb1;S6_75Erx={Ip&;X*&f?Ye&&=e)kAhZ18OWvGYHszAd6>~aGK_%@O} z2M#W5f0NM#93Kk7C}HY=!C2wcw79v6PA}NLf?~QF0VlEeBj_db1vwNSJ$_t-6ZjH6 zD|{;mL_%a&O(FRii1vIkHeuwv)@-TjEl_}o-bfs}Mb6VCJajE_m*CwLhx4Aj!jLin zYeO+)D>o&p1o--VCQ%xm>~evYhEQzrc^iga9y&pbnP7}RgB%O=grSX2XfieIVeF37 z>^6i1)Mxg=`Y9LqA-cP6p3`k^LNPSs%S%g0*b=OJ1OAr)8!~o)L3YaY33?F`=m;B= z>U+_pVfqgs9}U^csj*my4kk;`9mdTSdX$##ynE8*$@8giwPn64x1MXc=QxJK(h|s%7S=;E8#D(_U1plG#%_theIeikoC^{36mk5WlT+zN z=`ett;F-xySXIdbj%Cm)=M)7DVK?ifs4QQHxB-N7*TXY&2(!vaR$Xk8X+(YyP-vh6 z1ty#5uvh}h@t!$SOCNt{J4ZJ^N*3pA3B`NvM$h48+Q9_U&;jaqzu^qS0N&hwkvix0 zEb$@zfwJ))%co7A908Dx7UHH^+L|0NK@I=6Blibb^~i~Ej1t*NZd7s_s_HB>SAiRfhKQ&+!y3k zH2yT7i2&SU@KDMBKKtZC+3HWsWVCV`>7S5$2u=!ZI^}HhVD;1rdmvJo#u@_KrI@;> zoe~zaZC0TtgB<7L7S}d?)Jl#f#D5@iTchzbq;ZJ2=ky%z79b10bAJq164i@XM6@;! 
zGLMFjc}@8YL`o@n#sL)&e=>qReWBgz)=@PwK!GDyL`LRUB*Et5zl_OJBpMjTXEe}G zFNQvoq+Puhs>;ozD}vf064kF0|KSbM*a7|mizlR#jXaKUbx@z2;0_EQf!6(X6$5;M z=9ym^`4uQ@qlxQ-AQVSi5EXwM38#W{F$?&rsdT{t9dH{N(QuqX}PV~TS2b6`WEk$W}lZZJBF4ba_$wp&a2P> z1U!&RS(Z)?vZ*KC9axZY#nTTdZGE8va}%45NY*J2X|{Vul2rNz#!&#DNPQL+9_=z1 z@Q}g|xks_y8b9tP1{mQy@?5hk-`3SxVmXjx14P2Z@lgD3y+@oUijq2!>#w4QC@|T^ z$os&~ow z3MAUpjaZnTQro92UZSTB3;}t6=GbvI{%B+BgS`S0|6!f3AXvMEg;3a1wb+>egceRzFWkEGFSv;DUaxt<5Ju@dL?3)^k!1HqDskK^9 z&nJxPg_d#y!b{a^el1NZk&)jMc!A~ixWR@|Dwnw8V!5RL#A3zDcO>ykK8J}$>5xPs zF)KWa7<8V0fC3FZ?mZF5#tU9U z810l$1FDvtVlB11a19mD(3ROBo3>e{=T2XHpB;oBDw2s#VJdB=r) zf|GFT=^ZT2N@r^Ad@+#W`#7fZx+82MWuoI-Dc>?j`E zR!+FI5FQY@=Usuf3MC5=!qrT(MrWq=;XI!hq&{vZ#@ZZ*)_e^7j%m@7${qkxqb3M{ z>!N#1iw#{bxtY+zpg|l4W0Wwga>>qvJKAP1)_iOk`IZxCgQ3#)8(?dEtrqOlLh1H( z(jqj8lYpa95{P-5+g*+EZGh8^kq~MgAI|WYc;MahOJom%PB!WHU{p^ZIU}-vEfj($ zL85cps6K&1*cincMR#B&w5-^h51m(p;171w8NE#;MviKQA`&36i1KQ}Q6Q@sgYy>U zpw|4gqqs(}N5VO3u)v>T0pKzHAI{!9EXVeH`@Tv@p+U*4E@dc`IWxJ^U@T=!kuhTt zk|~5HQc8wQDJnx{%v@2K-NKDbB_U%-LU=wa_wRY$_uZbqUVnVIFL%Rr9_MkawXc2O z*E$%=JZ8pG!JBe1O!12suP>edgc;(sTz7rp&#$>(OYY?EnpjOS+-u+9tx5;EbY1RN zthz9pM_GWi>MeM96t52WJ@)W281a1_2UJ&N=pjGGHPrIX5C{*HWB8Y~lq8(X7*o0g zXW%vMJXEdj4}e?e<&}Ow14_G8n@Nkf{2I(HNDwhtnqJLY*&Fw(V))gsQu=>f=JxM+ zBe_V9QisdBP33q9!$JHweD-Xw-j$^nDcDaD>E4HLR8u6nz>t3a_pFk)_TP-|HRW)t zirvW;Kk^c(AlUN_Jv6J;6`$-e!iO{kkvl|_7aCa{st$R?%0c5bnv_=!-g~zA)egf( zJy|rdd__talNoSTx`aj)zSKdH!d3nM;-oS*F zcmKncJ+XCbc_(m8EPD4I&orN*>(tYqf)`C4*p@S>3D@Lwo%!!z{vy=~d&;6kFXP?v z2BIfGMGE6qg_I@ucB{JTPh)ghre+ zTv54j?=3Z7MokeP#Gx$L62oNNOzb|1(-dvcw4g3bfeZvs7>yfQ$dX?$9Af_Yw47&L zz=iBG2}{OIR>S)vEgjO=B$N7lTL-)qnjQ<`ePGZbL=frgm3+&3y!D+Pa3Gc$OBg(a z{XrM;K8FX7&Bu((*I_wW5VE-%MdteB8d2AwW-k~Z?mTK{zmXnqgnHdU*mrc^y68Do z`mM?!;c0YGHJ80kS@u~thX9<=a=W6G={sja?N`E}&S3H}xa3yX~0j^q1q zN{p<4R-HQS?w_YpvgqVuyj%2mu`?|i*jRD1JDwqFpnF$k+hmMkxKdr+NS>{Q5H+A& z(;eSqGTY8pk3EN=T?g!w*;-$(?c>wPxc;a6EELmPa+xoSFj64)acD5< z5oXVi^0wNqRN#HZ*nQ~a(`2rI)PJ>Jc#6Mv{g=PCpP0TqJJd+v$qxvj>02nHMXjc` 
zlQYNcMzMg)nd^cx&+|Uss*wbAF645XHMR6kT3h-93)t@Ep2n&IL@efte zeQ_ROStEi{ecn#zy?Csxu3>t9k3!LWG#t!dvjVsr`(8rd%0ivf_^iI$G$@skW{B{2 zmekA%Mg+sQU_zxie)-4AK&;KCa;t)Kk%sycxe$fHjBFO@o!7Wf<$l_3xl97yOXtCo z7}t&QGY_8{C@Ujm2@2oGh0^ZF$?CkMe3nqAk@151v9bR^2WT4HRMUWLT=x4=4|1yf z^U!q~>0`aR?dWLHp}<4k#T(krM;r2)chiOI>h*X?EieG8b zeXMb+7>c-#Dvw|q4S>XpIWPPP!oq(BZ6Qm-^fhZ&8PgBaWDrT`^?0e?r9Qc+r}kk1 z7Do$&0ZMb}vKa!f8!$RK_6O%?FbD;+T@CS0&GhY+b3_Zus%r(vaZ| zJ<2AwNI4rhuuaCqZcltS5rd2!L_L-PY6q78`oW&v3^ai&fCT&IynoLPCJwXPTJjID zzJ2ZLYHSuxIQgF^!xZMvc&S~cTs2ts`AVz(Zu7dDnyOAd^*=x*)&E278cX9}x7-fG zdcAqv9NgBlbiC$ji#$AMAD+Nn%tu>}s<3$g9f|wTFkgqHrBEN@w8_?K$7F}W8Gq)S zDz483%!8Pvb{jpes?9|@H%LX-nrzy+^RO%@c#PTzx%{x)7~OYZ9hV_hJK`?MfJ5kl z(oz-enRPJ~gd=0)=sFZnt>KbqN80wLPEOeHieO#aI;v2$yj4=LCjnX1ADcw zhk@eHS6&m1iZT%IvX`a*v}x03-i+-obuHLFzVV#&%wbOIE9DAuav@m^GL!%PK|Oq& z?x5y&HPVN9E!b!osHT{6j{&Ei>n|x{@fF9ds4QyE5X5v-uPc#q`}hGyAMb{7d`pfQ zMQV)(6Tqo@@tAUpDJ)dUlU#zs_zwx4)Vhi3b!h>nsk^ZKr>56w;K@po4!6M5xV-yH zDD2I|>(kFeEF7D5qsQW7(SgIKlbt7NsE_B=9tWS!ew=>0WVph9>}lU5xvB#)=wnPK zz)A*vvKjUfBddD2xm{cMUgDsG+zrwNhn5%+*wrzc)+t;TR}IP(h=02x&d2trn)lSv zJ#-WS&%vR^m`sNLgGMhOM(9v82QI7^VhL(y{F`#y`->qm<%Y%Wb538Go2Kr%b;|r} zwFyP=8KdMzR!UqZf$ztYwts-STMCH%+0rlT`Oyim`iXz*_Z3^Oy@iqRBK&a3htqr_ zN9;$p3C0y9K28Kx~IHVqV2_X&Q5^C(%{c+PNd?<-3Z8TKfB zHbT`rEnWl9Uq*MyylG1B`T}PVt;Lrm@5H>l+q`L2|t;(9&9X&T8x{li-xc4oFvJhxW-9>HlVA27M!1c8!EjhU^AnSV9tsPl}w zt+DPsHVsxVs_s2DOPoAg&s(NN$^@Sg7H^V?!a9hf$fF61*@)dWJQj#`TsLu3q34zLB;(d zV|M@tFMZSdMcYVSewn(w%ilXH->j_J>>tV+znp6nQM#?g`zyT=kyY%^$7dcgmiA`% zS54Mo@12AhL2j-w#H=qaWg|E5{-<)~4pZp@plB66R{ZzTMy+)VM2%h7%pV~l3B$Ii zFuc~GJ=@c&PnW1%jW=bC_~IA98viiyq;xJK+uuefH2pQV%^qb!mq8AB)pUAQX-F_x zoQ-H)$Gt9GqIs9DKzWjW$Tg-{h-vV9vf((5GWo~vg6@g72i$&QO+I{mxSFCgl;<*9 z;_hqbZ7ZOoM(vEPB`|_uo4YXiADbKbSV}`PZ1)4eIh6R~@8f!4{F&ke zJPo7GRqi7_f3hJ<*rWpNqIT|V?;v`Tj6#n(9oSN#aJ~IM%^16|)ZTmZ^xPSte+7kK zpDGLD#hpULQX$)LBvm%-TA+n;`=x#G)|i;wMMjE%fU0-ZPspETYHno*j>maueOY5w zumtr+-okN)#DBc946ic}H)>W-#4Rc{2gcxxM4;NCX{6b}_ITQ{mPsM8tD+W6Y|`&L 
z6O7-$&xvq}gqW=rzM`UV9YDG*_UG0nCVTX08k>aER%}H4toMf^&>lzN0un9aztF@a z?0;dLFk4#>R=$7uM1aKD4y9<$xy-_pg@#zhZ%#ZJG@6mv8$vbZOGNhqGy8xQFaOk)8Noz^H6!uDdbhjqz7q?~&7|CrbIs z6xvRWJUX}U-1%Bve&u>{?-)ayn#D{a=+=UW|ym~+J zHsuQ!N>z0l;r))I0DMDYI{3nVWyW#teyf_WNX;qJ)Ct_Kg=v)K>SKE*K$8PhK9iOfTqtQpiciP$C zupjhq-4X$YvmX%*^XRFNe8S-p7`}hjW+8SMcN|UBX8t=}oKXWHq|Sv8o0HM^)48Ml zC-wFA2TpPHkI)=&V@J#x(v?{F z*OZ_o!@uZy0ElmLsaP|Dno+9X?uK1;z3D}hc%D|*kPwNGfJ?=ef?dlGz z5k31+KF4uNU`<89GqhAE!Oee9WV02OKI74kwn6htKa+GDO_|b$Dj8KEkC%-g)|cch z*PXM^=ILzlINFBR_k#404TtpA3?^G} zSO`s0ylA-qq-MCfW^Ias7*lphX;@Axx#d;3kg7$fs_kCBpbRnZ)rTSRkz2BBBnFJ= zG60dQqGqS6clsG1=^&F|<`2A99ddhdq-l6=(H#!Dw+K)?jGIW zWQ4B|q|`$61?MU2G!+3JONrM{dzPcsPoY=uchn;`^H|y2QnAWAmxC-g13de))db@J z;SA)0zY9(lN)`$>nO0gHLWe%CuI6THVv^bOs3+*gyw`codSpB?wxUO8W8?P^kG=|f zTCHH5n53gg4gDo=O}!Z_V*x}f{lP`&l!*nt)B9zZkL_9rrs+-V?1&4^doH|AOjQ&l z@=?sWn*+}B%g|TLPydl61;jX$u_3=o%5c^e)H^ULWwR+4z9dpg7dN{P11-EOYHIVP zr7IWF(&q2(+?E#(PB|~3?Qwg|T+%Qo-J0`ncF&c5wsw$KGSgt*z@ko_I+^`S@zTn8NJCH_ zo;Yc5p26`H5{r8DT9<@lQ^pJ~s#{?XLm=IFQhLjJOD`{IjxMzOXNC}C7hExmNyO(a zE+?6tGvQLdd!43Sjq<#e8`f~d%O%r_F8xwZ+yeK=ts?rJ9CuVypZk|&cK3WI&W_-E zt)w*G)@|-inRNZIP+hGZRu&-#ASnbl3;#!_-ghq%wuT8yLtq28R~d+I*w5US6)2VZ)9HJ1vhW)Ibnp<=mS?l1#qto7!;l!=9D^5dw9EBWL z_5dhg;nZuFN|x9(4g8UNCyqF=`Jd&$wj>isl^6~B6Q0JK!x zOlEJ*etP!Q-7{foE|2BL&sc7_54eOrJd&0ecJ3Tm}-n3u%hT9(<$fH69m+SR4kW|3Cck&>7EnEQ@&fW95jda@ZKeg9qjF&=H6{EWOW{@=Xhofu$%hEz|4 zB3I9-eg!hR0=SCe4##v@<`7UfY@(Io1nIT7i&`yC@(X`{pIL~4BmQ@w(ef}xo3J+iKyBX_m-Tx8D!U{71mecO zvw;ysA*49VYex*4H5`nD|Je%QD*aKQmpU?(-(66;~)?dvlJy$UUa4T0ZeSP3|O_ z_pyj;(`gFsZtEYu0^S&2EE%kHxAM*RM8tn0w^X z!Go7~L~9Qjai?=$lc0d~?C5I4HTqv%Ic^^`OU)O?k1zDC^f_;w7}n`U?ENpFNl49S%hCvu>?dsa%p$ zM~4L2*CO8L1`2N|?)roDT(NT+$}K;B{{GLY?Y$Q) z$fm^BFfuZ_Ts$w^XV}hO{pZc=$Y6*xGJhr7TgbBcuWq`wJ=9NCjDfA8;#N$jt&Ay2 z%GWe0W%rm^SZGr(TmqTg2A?YTRtCSm2XDI$*T0(ISLgAOrW46gsmG38qRs7|VR%rG zeG%QDVZ6lWh{+qTOt$LMrMluQpwQdfyPvObXDahjcJ>5RsgQ;jZ|&$=Q%6U~KJwDT zpkS9vH*ePEBwwThxb*ey7TjZR+@eLbmMvR?c50BylS+bAV`t(RV6aJzIZw|9b7NuI 
z+xt4evTSydVea5qYdt_n`n+PT7xPYedT&_0x|;NOC%sm=*k|_Kn04);zSEV!ON$=_ z1({#JeOr&MGyVSIrdU}2p_dOIWETGEYzsJp8eZ%>iN#DNBBTX~^`A3&HxqAKj z^=t3c)6+|m41D{{qPdzTsdJw6$X0Y!So@c?mK2%pare$0zOc^DKFg9v*BBgWx@F6j z0s>k>v$l3@pN8@=X;!hFKD~87j7~4AZPyQ%0T(e<}czTdOIM4?jU7eEp!0#J`OA6VA^%&O%4P?udG% zsbQN)eVf)N3@eYaDPVWUw6={m4R7AJhJqck|8jY<>z}#v9%b#Vr`^fQsvoG**g>BQ z+4dX>Q|0I6=Gt{wmCB8E)_$eNWXK@7$*n=pg>9+s|4amySbG;J(1Gg|EcS^D4-aoS zsa^@buc;HDM>RDyTaWrlq?W&&bvH}i%?6wV^}!&s*%1xbs%2cc;?QA9l&QnzYu5%i zI%-kC8$M#*ZYu0XeMddLh<^d>H3)XW==ZzfO|p0Hs=quZpQ}Fn7B8Miz1cQOnU@OT z0Pi>f@lquHrSTs6m?vxQaSEUNzpmwwt*u*@3^I3QudS!K~Zl3f)@$) za7cT;?rJwbmZD?ki*N3~EU6=lbg2pEjsvkb{;Sn<&9(B5P>!#?(8|Di4PRA#k=1pPJ zRG_ZZ4+a*Tm(dJP+nIb2@-yV3d&UOSHAM>+Ell=l<`%|Jpk`nmg~-G}z%KB8p|Z%^~68H5Hd9cf7a8 zG_NB9^J-mnPKF&85ckCg_wH>05?)ErG6u*FsYQq|Ux%wRjzznC>z3Wvv5m^g$|g*i z@-?rRogH+{iTKq)bpKrAys_2S3|SL%qV3s~vrU>dE&TAoJz~15$I5JHUhF?)7?|0b zKr@lNI$+&#KKbP2+~yRP)l4)v(+9JnRHr8bLlrx3O+5-u^z3wE>y|AUD1`NH2l+Zu z^hIph62!qXeE5_JOWCi8lAdhP?EIFkTD9oat4YR}I18ouj#b5$9$oj|4aLR~EDwUH zza}ZjevFo-#Sqn-J^T09Y}~jppEvogSp7aU(l+4y$+J?CzX_~-2Vs$4o? 
z(^;jfjs#ve=7ee9ZUAQy2RMECuO4{c4Z6K19Snqv-xw7Y$eG(cY*!z$jq9;%*RKbz zTUV3neYE{xmw|*8U7INXF=IxI7-8E}>Hh^LU#;T8v-Wy=PE&M^jK1aDJd1QUh`IYR zyGUzl(FBtYPT2>?ovhnpU|2ucfX+?V*zY&Ee$ZpfjvYpW zmu?hp5*IQq-CHt1?L~_gy~{tY&3@muK^1?;Z+Q!6ottfKRq>_(flNlnd4GJ`+deXo z`lkpRFqWqlEePvK2-+;R>#;WJ1z!K`R$Kn(*P`_=TIO|y%NsawV2x>4jPoRd?jdyB z4;{KTCB>4xZ1@v7E09lS?*1d^(j_??X%`AAt?BUbrSaqaLpdjyQ{E>0RJLg0aq2=U z(daS3r`52XDq>}zZm(|ouK%D$`2aQ82sP>HYet~Gs8lHpdjO1QhIgO$LX)?nIOSHi zXV0GL>FK*6sZx(0ckMM|$l$>Q4Gr~b)#&=yW^y?IC5~ciMn!#9z9s2y+RfNZtR1CT zJoZotvXyKp4EF5LJ=HDfZub5AMj2neHBrTnwej>)>P6)BT3VsotXVT_pXcN8J3Gk)Akey^cm0um1H(RVG_AFE z?OMr0@7}++h>frxb*|#4fz59Anj|V5%i+nc-Oq2a-+|n^Qhu(D=JnX_^$4=dia%Oe zTIyh(%f>5i;-HTYLbd@DwDbCW`0zoqR;~WUqTCAyl+qhoNx$2i#{s(P)LZ`V zKiO!u#W&E{_^q>|)^{4N7kv|iGU##3 zz(;HI*s^{5Uf``+GoxnBHsBI6k&}A} zVsT5~6@C26-z9B6sETeHnm{VEw;(3hy-@iX=YAGKj};w95Vu;$BkOS1?kfU!wAqnt zS3d7lpSX}VZ=JUi_Gn+uqIN4NDq79HY;(fstZvsXU9x#0q`wW6^fOwUMJ%LMO=r&I z^p&_&3VAV>^MBsz^RxEKl`EO$*hX}YRe>ts-<69sG&E$2jbGX82+t*%i}Nxv6nM2) zZr|RR)@JQx%a#qXHOte2VrbCYzjL52ZB^zBTR-EO>vm>g9bDJ_hYqdl-u7$a*yH9> zL*ZLUvgqiPkS*vznP(2=W$}51L*&&i>K>X-lV5O&{q5~HFJ2r_yyD`uYlDhcthOJ= zR5yKv6`tn7TdewD^a@HeWpw+46SEuRIS(b8wLW3=cm!^^+5qh}2M$=YZP%`g;U@c; zlwY+wZmrER9DUZ$XX~RC*!ShzuHU%f^Yx9Zp{Z${It<}OD6@i(AH!iU3P=SZ%c{S^ z9yLg1XE)Pa88LG7vFUnLHUI)vWdkwSpyx|Lf5bL2gsB7e%|>icgQE!;NvaX`R?<>- zx~QJ3dE3oOrGGV*t!*LaP!Yfz_Ohq*X zj2T)pXU>$D%)zzgw03T~Nl=Zi+rouid0=gld@QB?K4}44I)k3)HwKgvAI)r zShL=Zc!ieZ_Yctm_ zO#^}wJGR-9wvVkm`oi+w($thtb_Pn$Z^@DY!-j=}wH!0XddGDeM82S%{gtQXHpl0$ zUkC9GQYp=_My-go7a8uBadg;=sCo#ZXq?C*_Eu`Y&HTu_)I33-;zgiAQ zuLEn|cKm4ragq7|QMoKfpWC*9i~5MEwGm4i<3|{@U}w)Id)Ht5&Y;A2B9O`|xb0Abx_~pU7mNif4a*uOlqQ&2a6ru$6fo!m%79sG_R> z0_~EKpa0|ddrYNPjM<}&V1T9?U(_YuOsPm;@V36Xy1K5eZUE1w7~Zz}K$T(4wryI9 zL{>WZCplV8;qUM7?c>wtND6oYLB1NxcB|Mc*m32e*(%i(m(mIEAQ>tk=sQfz(qL9X zW7lMt+uj|O%D8UV2&{$0#jcaLQ-Y%nD#nc)_aAzp=4Oj#toZW_7=PDqEmjh%n`6Wqe!bZO*qvGV$7dzBerE1?$-$J|;{QP(!S7;wxj}P+mvmQKX0G7u`nn98&efO}F 
z;?|1oyzWeTirjnrtf3d}F>+*mhV5ytSg}H*X3Z7=oqo5~lPK9PW@lGFeE6{C_>N&g zR{@_Igjq(hWtY=9OKsECMl<{K63dRl6TwnNzckIOA;HkRCa^^)AairAj4zHKXcL%( zNq~5>f9ge2**P^ja>DqunkEw07z0s${HRs_qen!Erd3Y@&XJG7B~Llvt$R`1Si9ql zX7~jDc+Oc>xSuoV8PmzcS%=brFgJ-cO!xZ~N9y{R!h4n^#ZdB4jMYXn|6m9yKuhaC ze7G*=VNHc*?b?&m44m4)lIp^swqgSZL-gB3n!b4XQrWU)4QNu^4ege?A3Jd(Cof@8 z;k$PSKU`LgWeR2URzqX^_3NjA&)c|TN1ffT=Q1$1Hh?XVgv5}89m+v>^_{YrlvVHK z+-Er;`;hlNeVzwC|8gxPVP~(!>II8$8|s~FBx(O@=D+{eUs3s~COp?_g4qDgmALa| zq|}x^%X%~q(mR()AgAi4&fC{uN`KW=qb5yiE3CS9l@B%OoZF$DJDUMS!>GWG(aCS? zP(^m#(tUnQfJ=>n??<#?ItIA8HGyxdWuhT4<@)u(V7&qm*5w;FU_c<~8d!T9dkoZT z<1~770|d|wEWOc^h6h2JQ0y}GciqvW^OFuw0YBcRyC$~RVtsI&`Hi9Rv#GDVpI@pa ze8huO^IH+Z-N#zFA%sBe)SzN%@AKV6X?{lAsC~w>XU(aeZK>e{;;K8tLE2LN6})*9 z!rr(@n}z@u76YwfKjJ_a6>Cy>9IA()pt>C{a`pFpMdRr3p zC#4zaIdw02`*sq1%In9cI>`(K+QUrPJItPdK+_P5#?#b#=hQH5r_FxU6>6-20B5^z zR~JYuqyX5_Zs}*v?Crn0Mx()w@#8-{yP#vD0m7}JtsUC&_;L6>VvG-rIYpI+tXjE&n;2m5#i&Q#X1w%rIMe4Z&Ri1|8@F z!OH9m5}p&oKdr5;cax{vHf!m81e6zTeZWhosh2O;!uNd{Uo&Ex;9O*;8VZ1w8aBN; zFbt`zMYi#F7|*>FEZO-}r%fy3wCLK%&zdoGsLOXDt`k?exw-uV<1v_7LZ1SDB09uyXKiP%&C6lJ1Ogyrs_C?SjH zooWKYHXrp9b+Ppi6QE&@^XJcp+~RGm+p*(`MK}dRF4JDLh#+B&x@h6Ju_V?}Q`c~{ zGs~L+M4umFltp@b{p3tHmZK(V5&k~<%*T*pW^sS+K>2mXCwiv?v`yyXG~Ox|sh~x{ zD|PlO^Ln$v?Fbh>g;~{!Q4=_Pw&mI;R-j$|N6p~)_=uzg%tUp(g3Y@KWF zT`J?&bIh>-TH&>n5mFaRf_#VZHfZkJAS`;$NBOE{WIq9+@?VH8$=y3dlr#OJ zk0R>lH{7lyYb?=u9f^isdEdXhQtP!sP5RrNPcFf@m&RBI_ydaYLVE3zL+s-_nL5~T znsHCmj_tjqK7ny9$8t^R4(V>L`0>LNz8UX~N)_)va^%Pg{1$28D57~0oVVv5gNPcm zK-7YlaXx#db;W*8H%+=~AV{+l7E9~=AVMZuvB4xk{4kQ|Pz|}xdv}qqR ziOp)g;&(6V@0^Dng6{T^EFn#& zd3vf-ae6Ry=xDWOt#m?}@me-kycew*ZtJ#S~NHl{7*KR-lbo}X!svZ+y zQ3R^0$_7r{V{m)B-LPRgERpxZh3)Ts;u~V0L?wp#mA6njuS6q}aZUJ>8&UYwt%8>{ z+O5LuGVyMrQ=_5w@#iWrU+wZ3+1fRd4X9Aa;0N2jWP0(Co)q7qG>8k5dh;BJ$p9B8Jocw zO7kwv(vKNs92auR|EsxTH~A}}eU!NsML{JpYWkv&JL8%w&6`7bC^C>|Qwc;&h^`GM zXvbDJ9OT)kabx*;n9S3?3)R8QIu4N&rcV#TYfwnMOQ-YK;!j=ejlnzz4U6p$dtBJS 
zel&deo1bsdhkUaeqmgWp8*-Be^k8j&cwY}}am*-D_@Mq&a4S0{I9<5b+0cs#_;SgC-e(Y0w^BRTq5iqi2-OpxP9s}?I( zW>pWh{BS_CZbvi!E+(I5Yv?&8{eRTDT`@81u*)f_(!|Z|+AGPe7{y^`=Fbbp{7L^`$5exn+5NUF)+c%8KQy6MeP% z?$@(1?%MUlrDcYnJTItRnoN_8z1Q3|IpfZqdH}a9z~RP-2sMYujf`w5nXVc;i7;ag z15F1H(>UwYthvtlfCfmn*o;Y!$f?hTeHtycNtNe)@CWv_7qNPcSdslMexzH!0#7TdEOhqu}k^P%3|Q1%NOdMnuUr`SIh&j9bGvuFX=m9U8c` z(?eaSlCv}VG`_iDKzy^q9?%3zJ7M0 zZ`gXc*=YOyPttv=HSRh~Xk|gIy?gh@sG#5{iEJWv7)1Bzorrq9diRDLJ$0%(-5xs3 z;3zu&9*b!QhU@XCpXN$p_5^ZVvW{^5ssOQOj%72dYP!{{N~ZVa z`?ZV)sr93L=4YnCsw!glZMGi+8l@L<1M8Iq$YZRJ5Qoz2{O$o9MaB2;6VVz`PA_5R zP6ga-*|u$lad+bS417?cPL+Q7auK*1!BDFQqlQ~gfd-bUQ689~GtT=~FptZBMEdE| z%C>E5+h|te^z)uK&wRlnj<_cu9o$MFp^P3i(V3UWA7{hAT+Y&^y%5W;*A|!h*b6_u zeY7J_Dnw~^d>dnLWa!?4q8@u0xq*cZ>3DJ%wIiprI-eTueh}f|*mdZa(8>#Vt1O?4 z(p&S>OxDMLNAkA1AOuzXtd!*HmR`+g<_5cbwz~Yiz3&j|+zj1+y8-n59cOIh&VkeYk2NP~VxbeMnl9 zvOf;gT*+?G|K@rr^q0Oo^0~Ty+3y1Y*cm<4^$h2M$t!7iL7s27Xrhw;diMY&O~so7 zDZtx8_fnRu~37Swfk_eYXFW~ZPuc5pmI-5hdtqL(n zC@I{22{SALGYBNEu^S;3x{Cn8mQR2}7r^vq7srJjs5(&9zZ7l__l5;0mVv<1=$he) z3(=7wzSilkvP=tdOe#H|sH+a4YWCqkkkL=GxFbF!!3h(L54)cWf#+gxHcObZ)_tCS~RUE3=4lzk8qk+WDl z31+iq&yJ1?vF}+ccvvst-h%9}NbKh|S!lIQc1c=Ash{=qX)|{B0D5VgpOgy0HQQ9F z5XKwZf-v5E4gfsL$JGl|n%p7M01YTUX^Sd)6;*v)#&FG*R5qSJ&Ew*i2PeK08xqsm z>wptW%gYCncsw4pgvt?+2O}vZolIdeX~X&T7lEu#pFW0-T3>wY5*IQeXW1mRdiClF zvCS#Sz&3`Y6Bo5S_*nI-m(3HD2#zMbz-ijq+4205d$$HXwSw7_D8R7?M%LXuePt9{ zI1~dBel50!sHDp%cBIcXePUeNCe&V9%htt59+KHHft2C99iz>q)Fbd<;`exA=FHn? 
ztrWChR{Z=abP}m>)&BjhShl##xorLTUc8`e=p;kiKj5}Cp`qFc_qD*ZVECBRmN$P4 zbLnvP^xU??hxpeF0ruW$I_6@iM7h?$gUmlrtjhDTWg|9jRPcN*{a5bg(*m}kGv}kPBu9}DIH;woKNx*;4Ls6( zcfFFxEK47*9|}j*%^qZGqJii!an`Ib>J->|i<_Hp8zI$j(EQ&&E10*H+_Y7SEK1I` zy6WxQw;z4lw@vLjbpW0At>oIAPu-CSiFQ7{XUv|xp6+R>%1Q5dHEakn0k7`ws!^kc zR68K_wLs${J}}vHYh2%I)v7_1R)=mI^)W{2KR^r#lCVHlX+UW%564(UW}wSs9i*-I zr9Mv8g^j16CNbndsdR3-&VE0AdMyYX!pzPq4uP4?Rl|tGtIKAy496fk8?eKXHTdb0sYH00MymAB{_Fr2UyLv-1rg^Q0lm*Fd==hMtp( zSuIQSM5l;Zvt5yqtMG^Fqq#YS)jx6kcs9DjzMBtJW2Z4k&wAmz1~+&14yDs$ z^tf>|d+0~pqz%m+4qXi{sqEvk<|6Up)7S)J5mt=k=+#3jVkmyiJH8sqAAc}eL_43g z43s`WeSN)>eTaX?JQ;VeS3McNvRVo`5d?=Z`%bX6Ra9TN7&d?g%tI0aULDbSKUNIIz$eaGjDoi*_SvhCTa?T`PJqd?L&p> zVa1w1-KuxQLS35z3==lg zQ9cD3Y9zWMGydIbL%Po2$28?zg|7+lSc9%B2=(ns3%e? zM-c-M&n@z6nNWNuKDcF85oDgjexx$)$A^(toGNPwXIjBJ;n$0~R&4vD&Go!IeFX}~ z(Ruq?;vmsjydh!9Z=|@W5`bf{IM7WC!zkeKf{?dkhOP8aH z_F&H!6M4VF4oJeFy z#}T&l-KY@38{kD5!Z45Gz^RF@A4*kMm*l?i4}| z*q#`5XsjWl461|c>-O$FG~FRyivn2EiLlm+1oMslPE|E3K+Y@Bt$i*I)WTP<)?sxw zYS_-rd&9BR)C>kf6+-euOHDU7r}l5m@CQ9My9!m}eC6jspobB6B7&a!aXo2D@0K96 zr%#{aHG%5uhbvHQaO|+CAB*d1HwZTWKYDxy(s7&iE$6ekV4rvZ@x?U z^U34K?EUWCVAzDh*=eC~oa4GGBOuKD8qA?QHA2%Ko-|=j<9hX0Q%KurR{4Eobc5C= z{Svv&bny1Bb%1sujh8q?mQs9XVk(jzZ*L5>$7pg?>J(j_|JI~R#*2Ic@h?UwP-p}! 
zA~Fjx_`-=Dy6QMNT?W`Fp8%)py?b<0=lAd5)=VIY_%b=Oi`<&V@@S13HA*;ZG!XM0 zxCh$wx#0KPoQm4d-8AnYM(Ntk`!oR(J~%o}N5(*w*sJDt?%DH9$2t=>nFd#T8q`@N z#0qe;;h>xT_+1fvQ)Zt8u?|M-1~KW{Oki3n1kI>-T>0LgpiG^$hT#tr@J%OIHc~}b zMd7Fkgj=jD1F6;Y^!1%zmWwrk`bqRini8sP)pP+`iCzXWs8(^obN#CA%W$^|GRHA= z$3>+)BvTut2QB2!r~@PGNVP*$nCJzW?wTBRT6zTzD3uX6Pr+M}MO_bjJ=Sg(^(C`g z#0*n2Gbt%pB9#7ZAOjV9bz7w483~I0gS4U07Y2SaUFQu0z?>#N@wTYCuki_Qua*jTeRO zar&MYlU1iJU=MNQXF?Xj=x>=lXU+zARH0Zz(nBCaG+a+pNnK>b4D{Ctvu7VFe4)xO zBzUTqR`!3$fss2jr9QScjQBurdJfjxjW|1!$2`@#b;(|pJPIVrjj4u0oS`okaON^H z`T<(ZHw2>%VKG_$qoh^#=M#7kQhKcWR5>udDQ#jKUca7>bTIOc6UR-4o|5~_tHT*c zE!}`&y&s;Q(^hEe=-9y`CglTlno~`gJGZW`V0Vo$7y@P*p0qXC+@i|rs6h_yI&c2` z4%B5*vJsIWWj)zjR-HRvLJJYA@=44!MdG1n2ArWzCQ4oq(%&8uj3R)1Ewuv#afneC zkvA|V#zkJ9hQahq}?u)2C05_+!*(#kutV zP8)6?Pq<*RMHY%|m!|6ggkB`tKuTy(>+ZX=;muYOM1A^MJGJqdTIG!fZm<#&n>LL& zTwXlePL)mbS$#(A_n$MTJ-UssH*|YvT)epV@^o?iNE%_IWkS$?Dk~Xb+N#wg;2e`z z)#cKH1`QfaFWs)|I;ZO7?nX8h^STvf3`J~zO+hsWN2jSPB*tZTSmRL+ZQl$BT`u=w ztS0(S;^u}52xM&QH5b^m%3I9h+u@IpS{bcHY4EuB<|*PlbZRmo4Nrn^90IF@u1UL!UDK2>UZ|aMd8+ zAz=O$Jh}*(MsZ8K654Zovbbc|f_%gcOM8!;0HXU!6a@3V`g%Jpb-41d4TYXKZfRcO zuDcxJIk@noS_S=`7@D}qGXdP7QkmaQ_nCx`)%^D^h{&yjYhZEiv)3fq_}u;GnEz7l zkaiGx7^$Re(`jr8-k+Y^Vk4ptP<;IDsJfte`2CBg>~UF1Wj6`$oY-l^;tlkA zpWmg6sac*oB>{M;xV>phWif}7l8iXoZiLTylzZWkdDq5dL?lp=c3`lxn1m!RF0664 zy2L>!vp`b^Y#$sOalgQ1?k!2=5gN@$2xpX~CA!Tv&qquZ>5RG(WWTI!+qT0A5a7f% zYE{(hDv|N2O)=44M05HC#*R7to~;)V6H^nf39~E2{hkR$#@z#}4jh=dH6KO&B5W-K zP;#V5p>aT-x(+--4wy?1e&)1ll9hly|!(^|AtDi1ktp3-3#l_yhe;O0p& z^QVxGK4CF9VA@3gRRMjYQ`*v+b_VU%J zHzBw1`&qV|ad1>hGerS?1m=$R@#BEdsW7#;Ckt5IkoRk};}IV<$>^k-`WAxA^XgM# zVWt03VHC4>_U*Xp_aeb%+1HnG0~E%J%_+Za<70>)am z_M_fO3zE1uP?6-UK-=Qp_-k6QcaV`nQXn%3z$j2RC^YmUeiDy|`wAvzpnC_^s@F2H zMN7(#u?EYnr=suC06Kvt0g3ty5w3%|%(-`b(V*6GAxmh{EX7L(G@hr62K91Ut8MU5s|!z%XhCf9kw?@xmD924{uX zzi1r`7XcrtW?Rr;my*(2wTZ;-MyDzmYLtoD5ybF4f4dHWZ7I511#{=`MhV~9+h)Lk z(6jx_{kq43L}*coO4d0asC3>ym@XtJP%QXIyeDI<+q$(O?24H2aA0-@;9Tw?DQ(`o 
zj)A(Mp5fDGDG!-C=tCvQ2>0|;rzWDqwN;ts1&~9ta&y;Nt`F|~Zc3-^`VpUfa$3b* z6=fBVE>X(w{nr_pzlGKy5we1pg#DO2IetT;Dtg;SKRbI>yf3%4h<}FuvX$6_dM*3m zl6LFzWld>Yg5ef67@cN@6&f6Wsw?OeYbPDQ315E_;3g3fs7MKb{we5YlKl8DAt{m* zb>x1xWtVbuFVf^dOM~N3yZD|`KT`-{^{3t$&aEN;SuKefU@Xp%@-{l^!>kmSdU50ZhS;=;zn)^kjj;OrKzpG+SK9f zP_yI!#4%|D2r+U%VOS%6^+^(_^-S2WFhbbOC|*j-$}XX~yLbP2-qhj2F|T^kddL=_ zC6X}BrJ*C2Wd@s@w7+Plim%Z&QkmEG-?jCUnrp`fJ#9jjj7zvbKP7F?^0!E1b^5x5jrR97=G!KtQ=oCB>Zoun1dKxKGaX6E4&eiyjzJ@5ltNY&l z^ey-bf*@}xmmR+`Hnuja3}et-N*;jCy)9ApCyCP9tQL{d&e>T%;Q5+AK;65l&dNid zDn3mDQQ$sn(6FIbkGgt>`i&ch0uN07FH!FQ&(5)8D2PNV{E@f&IV#^z zoHAt%`TnN!fU+79|Hs;8UGuzO`|A{slP8+Bt%uvYQmvfs^C#g8FRN&_}rNr<8SxkPW1dju<3xw<&# z(U}rkm`4m18wV{U-k)Dw@w{-wC^Vs2FTVg$)X}}!A#`G#@3B|kD}nwI^?s+ce`@C& zmJRsN{p&2V|DGS!jk|2HdCmWbR$iMZyG$_EWB;`uG-$xz<3|~XY+cj$N$>|KAKb8ON+>%kC_`e=i z$2RTT*R|2Kk92vLvwMPvZPZ;X91&f`%*{0&(h+&CYzs>wz2wri1;=`u`2$^An&!;` zTi8%4h|7~Z)iZA1917Lr>xjyLzAd|u&!^tn-B_d*wqVwc8+9<@6`nJR&^pDA#1@f> z1;7dinlv%V_io;`t2yVDehexSG^{`w!2iIfWy_XAgK-E1Ctl3Wtw~d4Q$$gb%80oW zk&zo!)KKMLgu|iBCiM9qXwZQ;La@E$#QHDv`}H}Mx&w$pdrcT$rbNK%+k&9_wxu^WLLb`w{ z&#{i_)~~--$O9}4_q^vR-;YXgG!*l#_~B4Ar2`>*@LW~AJCy^r42vl9D~y*arKKb0 zfsS7Ecp?NZO;aHiIv&_hltrExXQ8&ASUjTVq%rMriVRo!Rir6Y5kPpBi*!gcZC%e0 z`h^lBvsP1BckiCmwnfB=g6lYW5`00;^WL`r>AFh)Bh<7(r%wNopReyG)gvz%Tx&z3 zltx+cPgj-2S$y4uNt0HSMQ8k&sq5-57<=8!NK+FOez=$l9_;r1Va-7ndzchS0=@6; ztbAXJBU#crVW#GA-+zWsT>pbDS-y(yL#Z5HIDIdRi@)YiqPAHB|26@JT)O@cy8>aH zGPAPc?jAj}*w4?UGas!GoVXZBxmH#da%*)92Yo>WY~|tr5uC>@?hLqLc2IJM?h)mu zQ0pL!h@QH3f!nxrbcb^=WIISN%u9}FBS&VbI`uIF9 zTe>Xtf$7nx@84vE0qk-%`F@Y!)OuOQidrLyd1ZH1 zLPk})80z0?eqRfm#E9k~OIl`9W{-f`79H$AqI=_}O{+O5V&s@%KREszA}5{Q!^P_i zb(Dj8DIGFg&}7>o7fd7dLjfVBKh>n@XOi zcf&Bp&U`T-{_5RwHbE98xA3*N{G}KQChb>sON$)OiVNp1i3JiOq~8sJJ+I#Gef3@IGUd9Tjg304M)$+z zf7?j;c%$ZZR6Zer)>GX2XmqVgKTIB3Jad|IN<(BE2Tv_?Aa%hrBL*~Wx}MXku8=t? 
ztoi}-mJ$D2_^s{4zG4c4Pqt6T8> zqC)B18$5xte*&@9dj8AWBzTz=lig7Z64Y$yx1p+R8Dd_+e$#8xBus?Jse{_4*QfSW z0nwzJm9pMw&YU^2v}Iqvc8Qh#qjTs-8d&Q!s#l+QHL4AMx0)y^-|`pI+9qT-lpSG9 zo1=gE=FPe~&HG$pI>^<>UFlq>@+5ZQgf+LfQ{i}cO=GFtfmqU7HMYvd>bQC1#sE^D z64=k{_51vy7KdAkTtX&fwus@7h~P3HEXK>7w+ophe&cx zp{kz92b8c$(I#Vj2qn?!VZ&$5YRht_Qt3{fI@J?`a5Q%W{TNWsYpc*gSk=W+OFpe> zXP3*M2x=8Nj|{&>f;j=A>3ECb6xOe&c8KfND5ek-btL%FlZzn5@K3#d?eWfQ%LOJi z?BUxQv@CNPkZ~aOH=mwOuND3j>06Qm%EOosc2rq1kag{a3w`Or3ZfNGQE=Qm;s~2L zgVK_AV*zFn%PeoM<+P34Sq=Mf9?jJ*NgG0gf&ySeVs)5cK<9`yJ%&=cLMgJmik-VG z>eQ*jO+iZ(w{GnbCF+Keah@7ur&au3<^qS4icnnor7<~nb74>X0&-UgVZ%{}-3mK3iF2+>`i>IVh0qlS<6cJ(Hg*1CP$g|2fhgU=zTu;HKFP*x z5A8#|gdlY_iVQLksfPMZn)861ijejmaGXjH7`xnJpw_M)3$&oK-h>Q^A3>{$WHHvu?ZWc$Y3E8O ztzy??P!eJ7NpwGVx}~SSms}JfNuR1zdfhj|>nd#BE4567frVQ4WTX~i^?es6B$%~n zcZ zHW{xb{#;zzgrXwJQR2aAwz*-01_C&xPZg|)Uu+d%leIEt2cR>qV~2;;=ElAh=Hz-z zuNbH66+?jdA2oHw@3YSf7Z%!81EH_O!X<+jXnmA^bqB7J4xnZr^snJb4Ihs3?$ZVE zOIGWm7oOXqM43is3S|}s_-E^&3Y|*rgN|#-B?Z#_X%!ezz z$bbOx#38D9A*vhdlW;g@(g|z3+2r|vxp8^j7rZf`pG4b4L)wpcw-?GDFgCC3{UamN zdg~*#w-7DqD&M$m+t|sqtbmBP@SNK{Tg`iULgZV=QWjAkUcP$O{KPY+SwUl5L}gzq zLOzDobG1Q-jwxktZOH3`DBi0nM3{bPOcsIp7*xx_gBf(ZOAH_%95gil6h=} zqQOupiLgkE5Xl%J)1qW3Wge2ELdlQ@C9JY!nG!`&rWGY)A>;mB<$3q>9PfVjKK43} z`##p~M*sis_x)bOd7jsKDGIMunk+gdiZFUj3$Snm*P|Cs4&)0CMep;7n?ZLY1eY*p zG;5r?+Cd%QI^d?=n-QG3bf|;i$?-BZ00W2|2+3_uYf36|O)!5<|0fhMa^$)Zoq+C* zk-IeIa|;xJyV%laFUMefL62wi$Kv@57RZ#S=>0HB8hN>6RDswf0K^L51|89%TQ^7A zT$6P&agp^tCURMId3CPm@_+zkpt(r`6o(LVGCB7F@JEA_I~2JcoMsalY5facEn)W? 
zc?E$`?M971am`LQ5!fmmagiosxJv*L81rn{@O+a&ZA2DRvXR3(m_}>DfgoEc2{3t$)Yst+XBvN!F32(09n|%>f9$3P{K=< z`mg$Nbop)S^bOaoe);WV=HF-0iH?2U>PYG4fLpl0>gqn5A-{%Hy$ ztOUYFN1|WxdL;>x!ULy~-Mb%e8*S+{a~R)8KTj8+RKd&Vd+rnNYrWE%W6m+g-OQHc zbli5uWarim%CC$mq5zP{s91$X7bLR^{gSY<{Iye9YY|*B0JMO4U>Tm>+kaua%lY4^ zlf|v)6O^Nv}L3_g?Go=NPi0}&)?CfGSM>?aS3$L&|UX6IuHkr zMlIlx$7}K!X@!T>A}F%*^I7>3G&wta&1wmfrzj|S*P_hp4yVhv-YmSH%oTsxYlvg= z+f<;B#LBb3=a{IetqE2);2cqy3q;$n19zPsP#^;uRu$3n1_d?3v12RFT~uFU-$sKA zmx}UKm>wvhUf+6cFxf}?MDif+C(8Fk$E4D;pQ2V*GJz(_LJ2yOLebBFK)B6Xr0+vk zZP;Ng*WyLb+OKR!-G6x0K8XeRBJm|E zDMkgyWd`))X1a*80boZ;Kuve3MA}B`svq|kF9T|M|3g$402>vzxz9F4NusAD2JirA z<)UU9SzIn+U-Eof=WOB_WU%A%j=lFLu!gfy<^vGfr9 zL5f?xJG-(R&a`W+nemWTU6gjh5_e{S&GK3(+hil}K_A^On$yCQUK*%zJYMePuQavX zvva3Rek4|6Gx!}pIZRL;XHL?ec$!i!yG&dWkwYZ%>S(~^CukS@{gh`+`HYyEvVYy2;^>OuFjzmciAvx*`F z(zRjHo4|6k4s;%|Gcw$g{}%DOl0`r4-o+-m9-&9XgxrTXy}c2qt5k<~Zh;xwV~PPj zw2C5Lq#~i9HVpnIr87TUyX-72?e@N3T&76fPsm)`TwBk;%G!$it%zj?Lp`3m?(@zs zGNezYV#xQL$zx6L7!I_I6$dZNgdLs=2E%2%Z(7v4ZQDZ-J|}h{VqNx0}C6uG8Z2j&>X!R}S?;efZn2 zBB*mRruWxW9FcqphHkt(+52{VpsFn7(pV72gbdn~FBw*3 zb-dB`f^Ao(!Gm8W3N_c(Rxhn-=-I5<7B{nXlD<35EKaeYNJKc*0KZ3MfKx84R+kY$ zC~7^+fLNpdBsj-3#*6V(!G~0)sI$xR{uyFpo%jJB_$Oo2rp@-a% za)9Mx4gG1Lzy%l#|Fe9YyZaQxZz2pynp{DAC|0b$DwoGtXP6&7frycT}2WifpQ3a~D7mLa@LC%I^r) zZSH%dEodb3{amLcmIKJ_D2e^9bpXwZ-vp-g8BB@q+e|zQX_dkZ1~)vo(-rQq^^Cme zE2dwNU$qWczFhl9nZ}f;V4y8|-Vj2rV)=-$B)s)T+gXPAN6-$z>d}2&0F5-wFX6vs zoF1r5!1I)qKP&-Yjz^!CfFDz5KAOdpf)f5cYUhUh#NVe`>}aEJvX3HOxiNja^WoLs zk;Wg(UlFtbc4NIV@VB$FWiWPpb@h|OEF%zCF-R;;LXbR4(M(|mK#y-i9VNr>eEHyI z=Ts^gMn?66VA=jLt8LR|o1V`LH2b74@l?DLGFs8pmhz5BvdY$No!*38E z7-PB&th$9L|Ady8Ai`%`g6;M1fBZ2veg&p!0_3YL$&Bu*=~|2-g0KM$;WA$wB_f_V zUpSrsEsXQ=4C7_aNOVtH2T&<-VGzZNWt+_pQJmPHmp7ABTca5*StY9yfpJ_^Xm1gG zt1F{gk94u8{NO(Zqv7|3k`KjN~U~k zDFc_%hj;6?m9EFWiWem^f4JhIQ>W_V+DdJ=o+_|qzfoZZ*;#uji-s_b{mZ-{lR-Qm z5<*mQ_W?&`M~i z;2YF@E4&ZBp_*OClXwJ@7_VU$?J0;N%x@{|X0Q+Hj})yc)Kub3`t<43NH{9Qy<4Cn znF9>LA4?2$1ziCP`U`5^0>$ux%KOzZJ~HV*e<=PavA9X_`^G&@+rDAaE1GbQ48hF4 
z@+=@7_>>I`EuCf7&)Kl938x-=%e6p6GV4@18L%4I`;TrA><{Vj7a^g zJ>+*3G}@bnNB@G`_Zl%X?a)Dm?$HWL&!o7FkgKVqgMf_td3-GbvTF#)M6-#6KeUpw z6MO^h`*QCjmkTY<tku@EXk|_X#W3=qYAKtPd_g|nYv8GcL*f$~icqy9&(C59I`;kO>W9Aag*LnM_kp9fHoEKP{qX?HK%`QLy4rLJuS zR2bX6)lRo zB-5MCpNubqY+zdjpn}! zS}vjm+C*2kY5K>tAV5lMTI?izb)EkOdR38O`NVt=vdivT1b}W-t2X#tsMtQ@SN&=h zX>5yHV!2u!e&ux9A}aT`SV&%=+Tb4Z;R2E1XCbc~HA>KA%qu>hBL zZn%g3@d$)F9qQpEkbCDLqmT;5H8EIQ^G~!V*uBHvr1R%+CJ4W)!#hZ;Q`Vf$WvES2 z2z8{(JHBo7AAp){1bz@`SeQ_Jt42eqPG=r1p}l7BK6 z9Do~=s+I36V?DvEHxo4?5u=LEz0I~3-ifv8rPc-G>a_0454WB&u8aYJ$mSU5>!mB0 z-Jv=*aeNG=2D6&2As7-#=>3mCYzs0(9kBU4@U@Tvk>kd{wozZ_tl*5X4khI8Y{g^T z#&Nu0W?{Gs$h6Qmai8^>#xz0F8mJ$G*ADSi{F8=t*n z*RD+jOGi6l|6mcWgyKKkTopLad5S$B?%BKQ>aOp8Fp!e5V6u(YE8ISkIcfNSM0LmE z{4#Ce8VWOy%z)OY=aX*mP>I*#B4#nCxG&kOR<_vD;QAR6I@0|Yp7ZYkofR4u<~(~P zLK?9o6B{_7qEq!-a0m#0Ph>$wXs9OeQp$)%0=q)=hzF7F|AHQ2c~Ns{0PF)9h2HhA zdafzCgeWO(FHQe&N+?A^O2Ak*Qgqr$oBQA#Q)<<~vM_+typEE&Q>H2=-@8DYjJJE~ z{C7JKrf~N$>V?$#Em^!HIFi*BhZ!yR4zI4N>bBzR6860}lfyJ?Y*cP=ZP^tL@_;<< zG0G)O`#vnbpdLb+6~2FUQ+-2!LKG){tq5@c-r4~#`{S5)3N(&YxxhN2km0CXGar`Z z$>6roAHOs8DShyr_`Tt~ZQXIKfAr!726q73Z`d&;%6zsgHWTS;hk7!)`}FBFs0&Z& z#JCCIL~EOiYd%11)QiNy+@_h_WCSxU)pZ`aX&P#`WoLuySiZh^+E`ovWm?iB)3)I% zCR~57n+I_9KsnJ|Xq!^t%GoX{#k*DnLY(HCc)SV@ z{x;JBRTn${B;!ckki{dTg2{Vx_SdEFYg3Dz*in%MjLjyAtX2f)R9|A${Wt}@Nwj`~ zB2fovHM@w1co6X9OcTq{yGBd;Zs(PA;En$HXU4;G{iJ{6nC@8WvaWmk`@CCk_>g+C zT?SMs+BP&C=!Fn;Xvxh*KfW)AkyQr3CZcbfAFh3VF~4M79E2u}FBU~XEWI)F!uwY+?kvR~ zV>Odr3JZ4urWY|F)F1$zbk-Dmbbg0leX<_!&}rKg4qo3CU;FYkwAKY@y)W?w+X}pbZ$|V7Q;}B6FJas<%jaJfP@hcAs7;elI zATn5JmzugSE2s|28X8GZC2mtpz7M1SZ&@nCv!Q|UBwon8sE~ExdCl4f{Gu6}EOtDC zKQ-*&hy%!M9HfA1m;0o1vyHZ&p_^cI+>%!lA^-fP7M@N><8>m3U$CId{G!#1Py>rVWiVSa z>i7@(=T*&BiQeXJ#D}6+CecnQ3?0`;>o3l6Z4HR=()HDi zZ(LpQtsmPewL78qb8g1O7;4@ppwM6`-EOj%+&(C7!|7TZf^hjukORv=!GCk`l6xqY z3-aZ(Mn!jI0bsOj~@pt*z6h(bh{nOp7U849Mkf(eunQB*Q1U4H+VwqJ{Nm~e5P*f(Y9?rfLi7iU6bX?mGAbO4z~J{gDSfWMSY4O8+8ZVy^4%C4uJr%VE91zCh?7`%luMo@*6}3mdI5GpHL+KwUfGQU>)~vuc3vFAnnSa0R4LZ=XiHHLz|^}j 
z_P=36&vFW&mR&FX&~n)9pB9+>vbniAwxP~Vg4nIb&;aza;K*WA4>};O7W<&g%texe z^_tkH$>^!bN1ktL&fyi4zV5q5B#mt7^fKjQj2=?yaVMuWf`mA{J>=(dh4AN`noq#V z^C&N}lM5!EllVi+IRG8N9TsTmFyG#g!6bMnzqmg_O<0g^WoH;ZU4y>wPK-zPg3{^%bwsOgdw59vR@Q^lzC__z#;;i3v(e~WGsSCb|_FV2ddlj>+Q_u*+r|!^cEe6{~Y40$pz|=6MQHUx)4#! z2VF;rfr4V)`t=R)R*TG8YaP+Hmfr6I41*Dz{Oyza?HNfzdrR-`$wH&O@AlbV5ps{o zUwpr3QMY7)tY|sxQb7XCR*^WzeM^u(WNK8OFPURKj%pQz#u|SHN1!f_0UNT+2d-G87GR^(_ zZ!4?|p*QLV6uY2Piv;|ZDSQefTB(CCaJ8zn6?1tmir9@hb>qF+*&0V8IyrCR?|dwt z)(qm_KP_YWQ|8QRTBqvNS+;8rqBcMt!2`t)3F*NJw(aG|yE?2XSsnWhUfOitGP@qB z5yLKMn1V=jVc-rtJ>HQQ|L$bTq3#q=&#UQ%>#<3vZeA&DWLue`R}@5-u^==j5G%{9GGQKFnMLLEsE(LMkV z+?er1H@!qGe0kEs#EC%y-GgijU@ehD+^NUSU+i8z^)O1aN5rwZl9H=A1E)_aZ8hw< zL?mcz+(;xL95=iHSFKVJ=o708CV9lIOtg9XZzylrGA0I7B$$GyZn!U?IPRspYJ-As zjmc#vi;dpy+NYm5Pr>=k`RxjKjnDq)ZC&QcSK{Up2M>Zn@K3boe?b(bDtj2LpgSL1 zzKGv)nJ6wR6%k)efq@q=EFMkUAR>dTzT>_R0qGZ=1r?%fOvKG6d-F>AMzKcXDC^V8 zpnCp9MXPPT_R__R-KcAY2m@r5saXU82MStRQ?F$h%YHFj0??9VcvQ|UP9cg@fG0>6 zUw8HRFB$!$EfGHiam#039-(~{ZX5FXJO2Fq&uW2oEoLhQmiK|7{!8;4L*7H^wtSiR zVGfugGFFxDRRpIVDZVNs<4e4D&gR>^C&+Wfuu{ye>8N--qKA{wAOt&+V4DKg5TwOh zC?w=R()Dm)$38hr2sSwn;2=A(WSAC{2M{a!2fabY3G4xD#hl61?i=X zvaUOGrk9v1)Cpdq)qZ{nL-f2#u(lW1$H;Q}v}rPuV3>TCE)hs+9>eRk6TVE5$dn(W zHk3E-WpO2n0Ga%PZc`(FU6v=rw^4x7F2m5QX6cKHLaS8kI}_hiBrN}*T4UMs{-+Ry zxwxXe$(4tBk-tloJ1KL$SP-@9KCL0gMeNv}&HcTRCxxW&``a-0>3ZKh3EFd?@S~sS+`nRYcF_`(}YF@UeWK&T##$NA%Od) zsb9qSK~$nLmse{)?GZCkUob5YI6}Fs@8XbwME|s4^NH$6<9tpg`ttZ33VI zy_Dg1zWI>1v#SrXpMr+Lh+#^}`>^J$$gjOXKG_Y=oUTXm?% z5E&r+)xo_%iCzHRy(4HEGG6m}R3O)8~RgG0Rm2r&h8^ z-0hcFg-$(jO`;-0LeyUamwot;*xrMGSk^rKW#kGpv$aE};idGQNY&Oq&N+{k0yI`c zs=~lfMK$5j>H@fDqe4YhwGnKK{G_2cGB8oa2p4H^C_j=)>Mdwe6iWj@Cx)^40w=iR z&!?kyuiZd!p>E=@FR^l0k$qV73}~w}Q@?05j$`cKBs|o14`w6A!&+p1>K@P7-wCTV zu~oSP0uM(oY8Nup#gZ17>SxK_v-lKRdr#liG0*E+YG*M!aOKg|E*hZ@)aR{It!4vz zH$+RnBe{`=#w7;ZgbgC02m`c)o=8QJ=>tr5AVdJ?zy?<{FDNmo{Ik9F+%6s7#%X9e z)y}#8KiV3y{#%4~vXBP;MqtS7OvolrzAL%U+{fIm1?5s;WMt&j9-}e+V%f4DYD*Lh 
z2vl<51sooHU^?h8s3dgifDVjGPn`CH>2cmvqXrEc2xM^ON{~rsl%UzuJJfsp__2Ml zo2K~(2kY#0fBuOW*l1QxUwt1mLr09>Y5z~a6i-wx;3h_-9tr-3QW9xKj`PUBw^D73 zu{dv9@HsR#24^-M8!jt-X*`{#e!<|PE$u!^$M|1S>a`oRR7(*ZfE+%t>D|Bop+k{4v5>Jp zu`5x#h_M;>r>%empnQE(M?~rD9keXjPOB-K+cRK+-&`ou=@VX4q8&TD>Q1aY?cEQ>$^Yq3Dn@JyxtavGX53g@|nPUG}A;w1|!VIOn|?Z^?fqZNZJXV8b>J#$EzaT40vd`?w(lZ zT4tgJLLi0{*7dm6+_eRQJTcn`aXc)uGd?_dSzQ3v#OdSel5EGsCrf!s9~11eB{%bLXQ}({ta9X3bX^>eS5qLN|haEmtD@P~2%QP9Vb17GroOpd!kyNc{OILv~2c0^n zJt=C-hDonE$hEffh;MPY;++UN%M>d=-MY%27g3lpucK%fD7LZ}!Qh}E0KC3%@1Fab zMtq5Ia~c*Ucj8T#n+cHzE;jBI5bGR6zsWAl+H;}GQ3c$sFU%2;uPl12Yx;zul%|iP zI~Y_Q(nM5-GK$b?+XdJ=p1}-u>dJU6<+lh{ZYKqqoRLacZa5DV#G;O55u#Kq4QVXf z*xFjF3&cy4gkzQPKR>-#$ZXVEwTnmDGkx1alC*4tpkz=K!JIhdD_BoTn z{bd6t^R&A+*hk+MV@?@9g>pz7l+4w@ioqIlvpZWuI88OO!vFxvs-cQRI%@8S%U}@& zg_+9-1U@cET6eZl2N52LLzT%@q3K3}DrH0F;WZ&7A7h9I^L=y}#zx-)W{iaLcWPdn z56w_AgQdwDA4>Jam(OX;>KutEcIAL&F+3`D7{H|X$59xG{mgu3US!vbjKWjeT9Lqo zIV?|~1< z@z{0A>#r9$)qJIcABd}v@D$cLAOo(tGP3=mal&X7(aUh}cF(r6?+Uh=B_IeP1VFTp^i^AZl0|@nu-K_N9L$fPZ zO=54VT?CZ5v7P8~2_pn~!a#hdflh{(YU8{3d1k%WXaW1fQ^6fdw5`&&p}y1V^C|HO zs8wz(PDU0Mno>IecZuli+~;RwF3gpkjbhP+NW)!d;)wB}3v6#ZYV>D3p;OKxMc+lV zW>^eJWskTDBu!=DZ|(K^`PBn8xb3@C|AE+gg4Ni*9dM_hU^qRQtOgWy6{r>ZTXdB; z#PyZM-zmx3LrO)yLip+Rwd;n{4isauY%(RKFMNl|iuo}Xb!c&(o|FZ=7(yKAD_!XO zJWq`qCNXreTp`b_Koy(K{tOWThKRX1t{~WNg1NIxg3uIq4biZSI4@bx!Cx(AMgLoP z2d#ZfDy8xbcGsa!*i5CR^I|?V?9Lw3o33q6p?Z<3y4Tw~8y*_fMHxI4r)E2<>lk`z z>*yRgHqN@gNb05KloIf)+ir)w7jGvcRsKcLIT1Sj`4DbB4yK7gr@XG*c$o^Lk_0j2-pLZcg8(I{vXLI}yR!Xk;K2K$ z1TMdO{AfZUuh-I2Pq0Y7D3b8p;zRPw2$%Ed5g>GHk;P7!ef-dF|Rv z`ACL(=$z~X#AQy6>EGZB$N0n%6B)S|jXGzyrT_Z3-Pe_@6K7oTOWL~?p`Pr44JKAA zSyX3}`;E~KO(?D1iSFz?Wte+~)73E^HqjNJ-)pfKM`UE0Y!|jxlnT5BC``@&sT3ET zF`ALk$A3|raz{i}2NRguDYG#{O?Ap#ZeMVhEj9hQ%1%>E^ef4)Dy21HrzkqQBsn%- zVyQomHb$Nsm;X}Rp0pJ z>)h_iF8Q5@+>Lg&i?OUeXt~_kBqpPaVYt3YQ0TtK_omKXlN4eU-g-^zfByJT$@a0i z*9)fYX`8kVik=)&DK`F6VX;2-M`|qH z?=k6#>KLV(-R2tVjRpZuP5QDmH~#y#*DH(6`nzwBA&fsqkL1FcPaJ<9Tb<<$%#A-e 
zEhxkyxrTAOIK*c<7kVfjlgatA#HXwKNo(Yx#x5N#7Qa-h*_PhKj+$J%d2`?FB-$~w z8WEIW@#uZDorXkCn*eTNaOLvl;f!+4Sv9l7zfAWVdR%)uyIW-3PycD`u?98$pqi)K z@Ps=xXwju6ePYQs@s0;q+ebV$ouBp4&n@`f$B*Y9rxgFY((WBD)aWb{`esrajL%ql z1|^JtWd{`{<@&%kS!!+i`bGkr91$vc;6M*@xGUzoG=fiGzfPn%h~x03efv=K4D9yh z-&RQ%JO8GfP|J+1zt@wY>HdCCH-uQ2j5!>_mXBCwW}c?YkJ;VbEBFnqYXaB6avMOu})qk7*3u5Wy4YUKYZ4~uw%QWM;tTk;WLu>jxNV_w(K~@sAnpcPu z#iV)qyP{_xys^NtpXs+|=+{rUC39G~TekLeVX$>MI*zm_n+NRG)$sLBHPx8xV*@bVSx~}~m=l3i(?0QMGni41i;@i55i!Xd}%~F}MT~$%C?=r7}lU}`CsprBhhT%)q^_p{5uV7}80UC&5)0Z7{iE|cJZHs$|@UorltOvWa(}ee2saaSmCIKjj5Ydran4}ZW8<8{X86`ixCK~hxNqfL)|-3WIXq{N%M-Smwhfn>0bNzwRoqR;;RjRw8J36c+4<(`IMQs`CWX{SY1(!wlkwP zrD%?kk&!L>pYw-nWS4sP!sk!sx`^Vp^ z@w(44^;VQp6QGGBG-&0dvvu*m&Hp#V;L42~H})q5wkX5Gt9wo?@4u(LicK`f6xmGy zOrQCHPG)AK5DlC>ZW(dl^)$b^*WXrGpBe3_D0^R0F@#0aHRlf5hT>F06ojg@u=OzASe!5V=tT1-yCupv;)Eh(7{ z$VJt2Jf)&hN_~#swgulez9OgJ_qxu1=dw)FTCQnmJ6fZV-2xoFP( zMeX=m31E!(GnSoY#ZYxl+qusfxVXN!lC&8S9&X?ng1YK)%2umJl4?m6CdS5B?l}dv zc!FWXT{d7F*il~Gr5<(74Dc_i;nB>*7s7vRMA~>Y7YfqvU*C`BC$fHdK$iiEViN}~ z7rb%fZDQJ9U3Fjoo(?L3`)q7%ym12oiVbCLmDzynw{MTZ_UG2h@9*2I*Q=*BWk^;p z$z);CRO|5!kuTbHo8Q7DpYynJJAGCt_+W19Xcyq!q%UI|8mDn|Il6h{mMv#>%gU}V z_Y8q|NHA>U7&V9xvKwbPNs&$tPXPdRF)C zM0>quFO!a5Po-`t`b2{?;JY1;_w~{iRmPLPc*d9RIVn4apVCqaZ*jD%VHe9m9Fh$W znRiCC#-_J5EQ>q{TvoAv*(>9y@W5HNBSwT<{uhtnU>vVXBW&4+n65J`o0?U_OQnCv21=8aT zN{S>Tf(=YWY%q+OlK@D}RuF_Ebj|vm30? z<;N6oogzX*53%_?FyEghZCF9I&f zyc$SrmxZs2@9i3`Vl>uo$HD6N?@v{nOMRbYsOIZK!i)w}Iu&E+_*cT+6V0+x_pki! 
z&z{;DB!libMuP@TM3;AJ1>eqL^Z7R=Rb1-o;`ukUU0t%g(7BAncqE@jH|_`NlpKZ2 z@;-^#{xtG;86mCW_Zs)^{UPe(@$Fu5w9dQ9wytGz*?h8Zx1EzRnBch3_76n~9?t`= z!UU$Agm4$?&&1q3F`$>Gno(cRw3 z<#6r#^}G1zMicmjKM9K_oR2pi(T%jh98@y&Rb0lJx?qOOCWQi>IR zBhNmZfhyYVt_xp3oCp?|@pN(AkDj!p$MKTMtLggv`}+ew`u6TgP%!wo{N+int5r}l zxMfcO(8V?)BOfounlE3zUAkzu)B#V@3wURt`U z!D)O~TYKV6*_UUL|8|v%4wSHwXUL`9F(DRQ&eKI)m8xrfO62)EaqlGz&=lOb7pf>J zbH;6tA?wconYw>|<#|Bfqg1Okq;j?>p&3mAjLfG{OTJLAvC+r(Q&(s$I_mB@BD0O2 zilPL5{ap5C+{2@)n=kbapcKRQD%2n~J$)8{e9g@8ut(^vh~WR$Pchd0xy$iNY_90kF%J!FZ zy_!dV-3x~!)oYg~k_#21Sop}X(@23lA zvBZ%ad9Zwb1<%Cu#yG1W*0M3FU@-l%;@c>7q*Kq~@;Asoffeet1ua#!6Vdx%|4cqH z)GQ?yqJ?%_oBB@d`MXk(NYoiHfTGC8%)}(#_WLOIFo%9+++Y52vzEXrOY*9*P%!G> ze>}6=XY`g=-3ln+-A+IR8*VX-XEgtJ`>Zo8F1>J1lRp+X5NK-K^a}Uk-Exfd^)DV= zDP;iz*fRvgItqBE<>0CE zX9F`ZA<1p^B3gSst*F@ZI*sQ*&cImJf7!AD-8$6%$WS>W z1N<3@rabCyS9bo~9MqvwJy8XlBC3ULu^&0o)x{-Tyl>jIJH|!3a z=gJM94oPR6P14D511VM?tyJ@MrEWc@4kn4$T)HQP!<_U+f`nKUXI0YnP8Wf#Xa~md4)s7~Eg)E6^^#WG*+y7;yPq=e(1TO*d%~(fvyXzF>}GzQ!*nDhBqU(s;&X2rWJN^yEm9O;L!ReS z1V&Z)V@AIKnkgV@YwPLlKZ4D{rJ+)9I67_>dXg%ppM`~@tXupjn>^1EHS>Qp{_Ffn z{Fu`5LfKP{(p#t}^va2aa2ZdVBqIn7sOc3eyFS?RWw7)lxdiFUv=AqHuMmsI4I4W0 zFJN7>M>^%S@?W|%p3>xca1*ToQYER0=76A_{B8fvow0R+34Y^7&=o~Fae%_EXlC$_ z;?=!B7N1GUKgZ|DIwt;ZZ71V>{aG0uZcfq~<23rV^cQVcuVH>=VsZ`f$ZarnTc4 z$lpZSx?_Hf;XaMDo;@9G_?B6>%+7tu?B-Wt=3&#$>QSNRfUBH#()z`w)bhTWG>oHw zRJ-RqM-MHT#=NNcOWF&{@0sLh>B>SZK9{a()^pzDzw}2n(Heb>Aje5@^m{$kvqDiOws!7*oXRBAztnPh{w>+8QO{831fHW zZ9bx-@aCJ=>^?>9;PIdOFG~sAF02cpT{=iXfDZpzdT)9=u*a)0YYJKGye=>FjBnX~-PKAeEdmO&i3VRLW;#VV9ycB{by-%CyW^~@)xov!@kSI}aU zMOtNY(KPq%kT@?>wgzsDtgQpYk|HR`i4LQF({|$`@3XnYs6&Ur#Prk4vl{S$6LdIS zVFri+J9kzI-hv5|eHaBv6;E%{!gCfmcY7|3YflxKPa)TCcUz-IP!`kEe_1Jg2+23I z9Q-Su_hJ_YI;(NIYvVwNWeC~4+_wWs#G&{wrSgmKrPJG-aH!$I1i~ zSN?*cw-IeZxZMWEB}Ex(hOKMU8G@{>3%*~o7s-x`Z1paB_>_G^53h5FmB3l=Ij;_3 z6jXcL)-&qVw7umq={s^-p{*L(=fQ$8ndOZ9t+;o9mft>10CeW`FNMBU+2Mw^au7h$ z$u16bdu=SHuHC=dZIFT5b*xmsHOD-D@3T8 
zj~o@ih=N&8Smxv0FF!n-OwKyR6EJPGTPw^-w(L&@%&=f$d8ppi|>7=y0igsO|`}pc!q2}fM90So9nbV3r=$G$>22K@E+2kjLtwOW;%(2~0 zzB8Au{OE)^so7@lRSoa7oqO&eDbSl#6BZ32I)hOigg4G;G@d7k++B}lEnWUmzj(GJ~H4C#}&UFjy5);!xmahj+RGh2*tHb+&^TzAF~wRM4^mP@IUSV!=+S30UeCT6xoS?@ zi%z?;=~W+SiQXS3`k}3`#Lt~xpmL+s7mp2=P2QX z;hb5P_NHVujC0lB9k&!3b}TLTJ6k%7hJlOcbJm~FlZbz$?!YBC=ge0%+=a+VnAZk+ zORP}76;QT?@87@T*0Ql6V=SYX#FaQ=q2}8h21X1PrR&ALH6G20;519A1rTSN2|qp;obQQYDhcXdN4?T_97RJr z8nNVxbA5_^*BD?BeLaEeIFS@Z`t9sBZJNNfsP4wr!L=@_eqI1)es}f%4Wd@YZ10q0&pP(U4Y@)@ktr|N1w_MR8bQzmum-po19lOt4(T=ec6h# zC;$E{o_%qm!riZHG^e(7ZrHHlO+;5`Sg(u{qiB1^KU>I&MS2%`Je2*B)A}v@a141v z93l3e-qmkezH!=pKtZbW9L$YLmhhMP)PNhin3e_$?9DrUc-Lql6I-)T;t6g&dnDq4 zhf6EEvT4C;yO%eb`SROGZfKv?R@+)YmHz(g9QW;vt914ZA;TQenY83l{QOp)PnknT z4sO;|ZJjLbv2$M6SUaJAGOornw%(clc7@K~U!zj!_98Fl4ikK|NcN}c(CFQVlO@i> z^Y=ncSk$jgjG`El4Nc9K$v)|s_fE!SetN6{T|3!7!JkzF&VpX}AFuZ=s5btPpxKXO zj80Sf6&Q=XukWK4k>r`v%B;THB0t^%;nIPF_4h}UHpK5X-KT9}yF?<6NtMN^W2h(n ze4Vov@=-1?5@VALN_My7h^z_-`YzL03EK@PPcFH-zrpXn z@z-(fUHtssxn54j)TiWH>(rtdEf;-R3`(+w11S}ewDJrIcDj1f$n*y9cK`|%wSxay#8%EV`Q6Kqbr*U-d$=b%!=Mqbqun~B9bYjMb@vCND z4r%RKw7d8D#5l!lE)RNlw<)V%o&T|7`fwHdwZ-PK6tJ@j{BP{-*U@;TDaJdFd(U7~ zBU&Bm2Xu)sXcYb=SSsIT^%s-e>wo};9nx}aSR7nmS%e>>TJ`f=L#e~Y&|6W|#4?64 z3;#ogVajcujNaEjhCafZ84&vDtro-Ro)%}Govir!3x-8@pbeRmUfElG*T{Hb3tm)y zG+S_cWS=;}!Z&Ze3tGQX->mfHbv~kJ2;;!!qetIRy@QA*o{UM0d0@8ApDJ1pxm2`1Ar`~PnFUq6bH(vwEzs}|D-MYffJ&~z zhW!ay8iigpmhT1Dwf!e9$pMsn5;nVNjf5cyqdlIenaS`O_{!++q=X5FTvqhLXPC&7 zM`CNI+04j-f@%SIY)ImY&&&e?h(SpcA)fKq?A zIz%Pgznh_BcU{YXaR@1#mn=!~IeKvK-n~cfg5$0H{#D<$q1Id;zYFLz#czV(bDZF% zP-q&1O29ym@+fnUK5c?76zUT;o$x{`1f#in zvI;O(T5PG8PaUZ5*abhhV!?onddt9#FrhiEGT_bC<1^mow3WJt!WNecN#_hx|}+sYIfyX<=2b5YEqvsGe+MSR`^*X zWh_I6z^$hi;4d?X8Rgym<{zd^S$-!CZ008=NeM0{050|5$3z<_g&~FNVby|v=NwQi zE$0ozNO2{3zRT4-@a1@Xlm>k$^i zV!UQCLeY!qE}?JfBUtP1DE_>Rxa*H-%CUC)NP%Y`#uAUl0l#gg zKrdNpg^^wwoqbiI=av_#W&3Y?ZW(kKkH9U%jI&nxxg`dLgmiKFM#E^ZZ?bVyt4CB2 z-h9A#oV0t-E{r4qdlLcOj@<))VD$btX*mu+VmFf~#DI-P7Nz&4??GTPf#Z@}Fm`Ba 
z;pcird!=?GM}$R_-Q``8g0~2}jdIoW?jL+e_B_k4F~WS5-vUISr4`K^)3FK2in+FL zfE&bbSTvt&CZEP_HgN4qOVQ`t!NA?ldFj$LshQVL2Y;}-auSf)&Y9XuBVg!RE;*gD z!PS#mEnEH@xFB%W6fa_YsWUWFyyQ!;LTRswQ12(EV;sg&9+A`P!P1K3O-D3J$s~>0 zhY2iCb{K}QLIKei53gRpea<{eX;8O+P4sKU_DP{>CHlBs--hKGiBU?oZhmSO82K5S_e z#}%%|NH>-%%4p4b13N+@cO`;iQGftGVJ=Wp@&h-a^gcalM(b9Ty!N`!*mGR?D*j~Q zSSNMIB}BI!1_0*IS#s6Yv2wucbBCmB0s}xPvyUY3ERY6JF^WSgIxWPw>SH&k-?4Cb zMA$#drUh0ASgfRkr@u;bK+EJde*A9PvgtO--*ECm`Gyo0KByQTK5)`p)uO}VS=%&d zS)pm3BiYs2bR(nH><}j+4>(0=;`yPcw0sxRA|I;b=HcRD7L-- z+MsDaV#I=JBeD!B+(*&d9Y+in$+F^mA3nuQy869l9EYseyvNqdp*q1>7lnV>H;Vp4 zgXms8AaP^X86w?Ypc2XLFM>MnH*%~bcL)sjU{R3|sPsblzQNmR?G21AEi(m!T<4Rl zYTsh)cGW;tG=Q5zEJRoV8nF1@JPjSQe7Z22S;BN;|JSg76yMy`Dl7ntdUSVu9B$%j zFU+N0z?;DmcPPLmFD+!T%QN?-i#XR(=5Ue-V80pjp28U2kD-w-bO$22u-zc5S{4ri z%!2Syy5Cs5Z_l1RGTtMMl=M-)zTIG&hDc5mj9pYFq%8>hF#zm<1Ng#p;o3ydmx&aH zZy>HJ9H;&RdY6X%pT!B@b!b(n#V~?>7spC2c@@MliajSJU5*SPw+nBf1V)WP9Fl$c zvT2tt$K+|zx}ThElw~8FXj5^%pyBzg1R&|Jb=jY*IPXQ%lYiXjH^+ZE_CwYy$Vqz(EIqL zsZcLjy#%e_?_;i_l_7K_WRb8xy-fdgqgw>b0kj4VZ@06B5zfbjzuKALJ_SkT9C@ER zG?5W)SDVITbb%R zoAVkLQbHGPrV{l@S9vv)YU2>S=wv@VWruJdimyBO@isI+SJEVdERj}`kS=5;C(wav z#D#!gec;E};S#}oeNW(YQT7sTaN)#Mzh~sVCQpW-{y44Mp5f1v)u}~IKt=ogs_W<{ z`!VFtbD6{U?W=s**D9zwORd@81pF`|Dg`_LFn)1AhJQX|ZF(Fe);^3{;W*hZ>yf*P z+hSTbJ8BK9gXIXo4ebOCozMx5#|vJp8?H%)cGTTzm<*nwJuFr!;3)Juxq24K3TD3(K*UW2eZok+qWO) z@375HZ=oHydOi!KM~&J{3)>MUYx-ps`(spQ5ZUuTBZlU33(bq+TrAO+h!lZP{aIz@ zaptO!6c_`S@#|9mgHu=x1O5v*xMzU!^+2()Vf;>pWsah|ifm%JuhD(}KGrjUiZ5XGlh_=K4?e3wZURGOQ;Z zv^*diZEas~Fr@;jM-aO)v7f=H>>i)drkdag9pRVDT4R-^O#I6JnDeuacQ+s+gh2LQ=iGJ z2PuW2S!E0)x|9@JAn337VM!U#x1NXC&&G3fz-?f<7?OSV7J|x$n#Cl|POGu~xtZ*^e59 zxZsr8fZ_O_1ygD6ewA9L$E?ywNdQz~ma>jy!<=Rp;6<;B2p5(hKX(=ENc?>^_adHnvWPtDM1WXl(O2z4N~e% zUhXO)bsFCo##{==RwRPDOPZxliKEcJzFDJ|4}@+z2?myajTLtj(yyt9TMM4U)!KW+ zE?)Es=*{r@nSHG-ne8S~l@=ILQVhIOMpd3eE}%BH275pKM=5X=kY!Jw(YQ6}_&7OB z&4bok)?4y5%;RA&aUT;}G}r|EKr$n$Q?XsigEKsWKjT&86VJcSnh33tkDOE>k-c=s 
zvuMV`q6L#f-c#FPk3S0#CvZ`6iFbJeCFLZwXgr7O*rz6i!jLFgF;x`>H_R4)X(J@w zl=aqDSE_`>+tlabiC*?xx8aGIpW|PpJZXxb$;6(PojVVgx{ZF|sGMK$H>v_q=rwcB zz0J`d)huN{E8#pocyQ(fWoKCf$haMs6 z0>~~9sSYsR^x;r|#xh{fZD5+ji}L0UYNuX!%19x`!OfWQ6^=>tNYW(ZnG8(ANf}Gj z-M3YbGn!rZ1)q8N95q)q_u)5kO0Tp!gnQL<%C69}XFqyfPESpJK`-tC&8)Zdc?bA< zS0aWKZgega7F<}>;utN_%m-;_PI)S3rhWU4tBoH*7)VT7@%8O+fu032C5gHMqXb;b z*3G7Sd;9+iu=vg1hS>q~yS;NS;63l`C1KyX<{|z3SaEtdn`!Zd7I&I|M zo%M}U>0fqDS9iI361?v)kMUW};-$TETJ4SrR274Uh~2wQ?8<3*54KsuyhSy|gX!`Md!RnAcIY!;PpS)S4Xn8=Yh#ro0=F22@N|gqYKfgoklwmw{SDF#o*(9)j zRyBN0q!d1U&2Pl>rnU{k+oisq0^964Yg#?WDiT{|_htv$uKGFwvGsoce_NydWtA&6 ziatb{7W#ynAgCLNeB#u9=z1mF}lTKq5lE3r10$iJJKT3kPk_GApM#FH5p0m zjN$~QY5c-I<8V%^3ENy&EOmZgV%%_F$vc0p@;Xs5C9e1^DkjZo97h@O`y!2_Uu zA_L#u{_1km=F7OJ&3r9^P2}W&&D}u^c?TiO!k2}&?j;3IJqmKl`9nHCL{vl!I?fRl zk!Y$Jm&ipED$-ORDBSCtm2e}0VmPW)dqngFSG%6|xE;XHcjJEvon!*sNXvahv$gM5 z9>MM?MKRk{u{!X*)Z6L|ZoPcjP-!R8K|u9IuZrWq$pI@p*3j3EIsfB(Izz@XKdkEG zLvI?#s|AP}@G9lfD&tjQ2bp}AQ^d8bd})NPco(Pp-sbkpURtF8QzP&Q2`bz`dc)cr z7MWG3*DS?Sz)*E@UqM1u`g@;}fKx+;4rN$Pk5>|WX1(50Tc{9c^wF}FDdSzPX-&d6 z8YJzuZB4lfgQ{*01y>0F52#HZckMKzR1{Y8c33Kr)_;E+;U>I`&;5L}`MvQ}rSZE< zRUyzq`ag}h>0w^TXL)p1wV&u|X!=(ls+eutl62N33)q(oAAQXcE1P_2M50Gl&7eLc8VHMDODwZh8c)ps$V0= za}q6y;Y(k{>=5dzoTz#WW%9y;iHqZS_~>icgS3MpMh+>*OOPwX?R!~JT3zzvyBmKN z-Aje`kEU10s=1ZHCrMdFbr&lyniN`&VnbT(X(96#4UDudLL?|s4e$q{Pf)A2VL8ev zPZhHc{Lj3ah?tkgi!*VeO24iVE~bu>Ls5tO-AN)Wwi&eaG=Y6=y(@&K(%_JY^Z>q@ zRU^p^qASU11;qE~Gv8IS&@k>i(x~G{&&v^Qdc5oL?~t5U)K(rtp|9|Z3pX%E$SQs0 zqD9FDao`Y0&qGbd9*gbnmFuxk&+|Vc+Xe3H0*V2ocN0k4d)Yst5YuYide`)qkYj=Z z(oerY5q?|Bk9-;cP@Or>#6#gBq=TOG9#U!JXPzp=RUFiIA(~QyJ(P}g??*t?o_!39 zSigS#q-B-81b1vYN;DX5f8~R-K8c{#xPlx4K_8lB9V0zlk?ZkaNka^{eEHD_jQsp9>@&9a?V%SyN|o&0HYA_#?%rCR)k0~Xe0Xd z@8tN>HP&%3<1u{Le-{-^0E|J8 zC%Hr9S$6WBA1-;)gn89Cik|BMoHE7ePvH_3vlx($bqUwLIVZrxv>CYMM25bKVT+Q;cDT;*#?E_92Jyy*g zPnt#p?KA7xi5+!f59Tj!$)}blM$%{E;7cB5W`(GvkSv%_ppnp%10v{;m-|x6?*;V6 zd07p)!Kyz2wBkyK#_8R!F*tHi?0jL}7mcbP-etCDBah#}c}j_F212Ef9Jei4(e@vZ 
zV`qV_42TvJliW?1)Oh&tp;S4r;e7jd2l>kVS-l4oS;a6*JoDKF1IxI$+O1n7cLeU7 zxjx+LN^fRG*akoxq;#A`w`ejktx9_q=ln49rsWt5-hl;;K)@z4A~+|rsC-57*fT5j z(SY+vbV&-IN>qY-kQC@YugpCJd=*1uc9ho@iwImW+&LN5V;dY%QzB=ey3Ycyx7|%W z%~luMOOzoqW`*ZAqxlpTNHPEkmJ-`nmwqFkd}3(FoY`HbRF__x&++MZ=h z?ZX)6sBEp_>y9bIQYfNoE-q4_;ZoD1#Bh%NBU^CU8-qUXML587!&uzsM<*4nNgO(Z zIIfl`<79%u%|KDwZEl;ns$xyue2jeG1T%`4u%}O+%pX~eb?l<6^-|J0?_V;6yS2k& z7$XQv=~Sz*u`%h{v*+Y$c?%+==d_se2T}82IEtuZv9Dj`w7-{+>@xclk7mezfBx#g zuTup;M_JiITRWocV_;z5)&1!$b*^hDR`XmB4pA579-S%UKZ{=OG>D_q`4FO>(vv#u z8k$}pm7AnAE}5YJBsddR8Pd(%?0U91iU%o^YNRVLR*8gh3@jDB`h4R-rx<9U-Hc01 zk5_YA*&(||MXW~enVXyI9Oqnfei~|ctDSE|p0*sDLTO1g%+L8| zcb(pY0Ua@n<1Y%6AqM(KZP#fWya{Ah_F~=zYh_RcEXP+{Zb9JGBgE#WKiVGqL(OBN zl~tCP+m^IEB%}J@Tyroe$P^*#-vSK`YggNAK*G5BXJ+A z-;w3e*9eqL{iZ#5d{#9);`85r)NI+mwA}bOwmjIo+y(NL1sUYMq7+V!F^5bBU1UCh zikk_zoS-Wz1q&%TpL6r>viP{GXNYKwWRD-2Pa3%Y4f?`f6RA(g@q2IiGh;#D_kD8# z+6Wffn1Q;R{}*R(0+(anzki>`Ff$D9F^n|^SEQO6OO%qv+~KljsYI4RN?B55BGrsB zO9+=1QY5kzMb>OJV=pb1NjF@`%FzOOW4ugJ!kiwr!F%?ZWr`fL4 zNDvgch`_Dz1cgTmMgViLhz_IDFY5OLFmZb;pB>a#hA|6XC=Ndgz%JG^ta5Mw9_Vl> zC8_hJvn^Vd3;rgfL5MU|VriP++7A6kBYRr>;+o<*8kF@tkVX+N=wBFGLm?ZuTAzADq{2J5G5-h?NJ%3n7T#y$a zbbwXSzZ&>)%Jp`hU?n>2@sxZ!P8PY=RVNN0|-T%HMneRwLMJ zcD4s}tUM)TJEaX}sHoiG3|2mVp2%00);pe|aAq5a%2E>6dcuJQkxNKW%VDjB2BgYg zdZiXfvl663O$?%@??PImR@}{KsVAdlawnPIG@Nql>+oe~M$t!rV^(#|`SjS&NmwxP%r!;WfcFUONFoZ(t(kQH8R zyP^Dqfzk+OMY#`P@)*$!amd`z&|U~%Tt`lOO|)%)te-H;n$DfK+N|ZUjH1wc_wG3Y zqK?RiUztpOaZOYzZCV=r&9Fi&QPR?y!5(hJr?PHwjuF{4<*;Gtyme#-_OQq?pPYI* z^~knHkA4?qhWyX?<94benaR7!p)p)tlL-q}j|JCXakYWr3Me>XucD%&%AYsw&ZxYF%zk4h?@<`BGP7P{-^MpHtmk$qK+c>wqrDWY z+r=1Glcr6tpy4xS^Q+bbcmfhvmY% zzX)ARO>Nw#&-knH)9!)Xh_EBZ#(x~WV6MzUkmTN#531ibrwMZbR5t(I^h%zC!4Ywn z(cjp!5{Z;nUgA_RHL%u=B>ynZ7baq@oDd>o+yKH4i;gCv-bdgtYxRLO2Tey1=&%Nt zRfeL}`^vc)N~3Dp;==Z(qCx@FaAlNp9Pbrv(9CF|RI#8YLeGPpZ6y90YDL_cR##&# zAhXON$(SP*kATF|a1cLRb?i8IbNT5vPki&ov%G^>$&ni5=tXSwqL+P~y&7j5L~hxV 
zBvie4)RSNLkzKzU>q)VCrtNl;lr%`v6SG?v({*QDxNt$1p3K@>+96^h`}&;sKq6$n?qA-`RO9=<&tTHE{E~To;OmMW!q70@d0ZM?5f0~T7Vmi;;Z}it zh^TSMX49-}29;z8fLQb#sm)Gee!(Aef_3`@p<6ILV@%*01cqdJVG(yPh zE9mV`-|q-&dj$w>m2vOj5x5DhlQHC*m$WpX(bxlc;$3#6pl`+7CB29rX)`{ z*)oorR)8dmx)b6IL-gK%vz5A$gAHJ#E75Y~0i%HUPTg%Ky!3(mrW za#T{%hOLGUa>)BMLLJ2|vV+l1=t-px6d^QgqdJ;r6Gd>?MZ?iGr_t`FR5DHkBda3! zk4F1F$nByXht?;i!^cYSV9vqFORImkfUI8Rv_X2uEdf=R0+!dO{7)kcZMR-Z?2S!K z0nj_iw4LaAiO&abm&5=~Q=NDs#ZAD|Oms;^JQ)r_?0xJjYLN8NR9|^V7ffUORV9oV zI^y{Kme<UrkLA|7Skx5tWRr)*bf6{mJuX%GvqoY3#UKHqcGjk4iP6|9i6WrSy}#C=dkqhQG5{o3Nb111zrn5ObBRW9~VVI8l}h<6sHbKYh)P z@?=o+IPxu3XRvEmCG!5)OfmPxmiCkGpD5{ao-(ZU*HDkcU|}UMNR@ix3s;>`itYN$ z_7HU!<(6ol=^Cv2Tb6WP9lq7%HBk(e840xjNvKB4JLF2GM6rl6WYm$zH9UsJR1Jh| znRC}=r?vY~scLB)yz?0u2$ygpCvP&$P8&_WUw&od{{7z;wOMe!twaDir~7W@=;tSJ z(^96Js!`4BMQiPb2&Aj(xGc9^J%<1YB5{cQ4RRByyqZMO>riQ+hQ zX8^(SFtP9fzr+m|fgH=W=|Dl&5wjaHdW+n44#F_Mkxe+3YF_c_DU_=PkdUIvD!P}t z)o@KOurPEW(vl)-h-uy0DE~%DO9q=+w1Tg!?j`;uQju1$i=2p~b7u3*F8u7z$cF1ZS9C9rz zhBJfe&e*|rDrfLt=yo$}!$wMattvf+GP;SAgh;jsMwy%n|Dy2QqhlFO7f5&Qz!w|& z`tv7ObhfKgMZaPqHbVEsw;yjs=LM zixy6wUgB7uHKh?=!_RMi^pWAM?^_JiQ2L=s^P+#w4sFON*})4lp3p*|Y)qitjDz)X z5IY5QZjaY7w-e|4LmwIV`ytYb47{*tApjr-8A{30agd=k9BC$ODlM-ChlyZ*;(WD! 
zpG`E!KnYna4@QQE{%UG2YZdpQRxO;ZAgAzroI}B%u z{MheSbcAX&5Di#%a^uB@JeI=-%l!SPEWUHA7x}+nU2{9$Q)~$4;MKjfY?Hf8dN?Mz z7OI<7&Q(o&aDeUyeJsiW&PU4rY!7W>D3yy=)S1$apSWDe|1-a1WMZTDWTKC} z**};2` z5PZj~gXQR&WpC28f;nE?b<4Vk+1n+4J)Nyvq%WKr@Z27PR!lv_=thhZD)2&+!k4nZ z+X|VRe_)GGKRxy+)eG1CE_933;YITpP&#nIlbqkGf6&|KziIYR7f9}$QU(?=ftZKN za`xuVFcq=jkwS5cpc9>#5uGE>RpWtgANO%*gmT09j|v7!WNz;BvcZez&o5QKTG?hj zRq+2+@vSJ zlJASVSP#)Qc`(mo9$-?m`t>hKs4ANE zxe}ac1qGt|=pE4CwqS`BYG%S6weu|Bb4Nc!vCVH}~ ze*__zdjVL)iJ)Shkag+OfyMsQJU82N_3kU@ou1yIYx-%V2xdjDKgRnul2peefz6Vq z=3FrWZX5ylaaDf`y^={jo8upy3O@)yT5FjdaLPiKr z=}7P#Z$C}!#6sp$1Vtv)KzicGqh;i?7L|sW75w>G1HPU>D(V-6-0eYKOnCe-6frkw>| zkOuy7uRD5I0j1S9>K4n0_$HBQ4N(aaa6Py9Qm9>HsNtWpUygxYuhP|Ud-G_Z%s6s6 z?)Rq0eo-}(a4LpV>7`V$TU)yWa?Bo@Y%j8E-(d}u;X6RfJP>L4Z8o>Hy))mJON*V= z>#O2y;yd+muFO#|u4zAXDAg68Tak3)$vEh_Xai&mtR^|x<|>X1vsSNna}i`7H0*1p zG$*z;p5U-&+^uV$D&C{e0`xv_IcvT&IH`|DCGd;s@sgeRu#1wmP~luB5kS&>!#p5U zYEy)n-oDm$>6PJY*nUd{U*vi~(a|Z?K*m~Iqv)~k%46JCeNEme_-8#%oNiGITpNbN zx>o2T_C%6ppv)41)0$>x9BelBNz8e87j?FX7XQM^#>3YE=-hDY1isg`>!aRn99qf+ z(3l-(I8~-d)UxH`<*kE?{|=goXCfddU)|e4`f!T7L!db#CfC>DO!G%>-dyB%fjOn| zGVH&j-YfLA`Od;DG3ss4JzoLHlW>6*&kM?_s~k zlP=gci2cQAdy!_zjC}Jz=tPZ?0({h096{=N%FPM)S&ng=y7`V>Bs3&72Tap(> zxXI)X5X5!rFbilal*TZ3B6Vs$8f88$hDq11gPC^m^Ap7&PqC%QBER11bJTo&cClOM zFTNoB7u?-SF!w%H2`U3i+!X;Kz|G5$ncV6HMSmso<-qM~4KE1&EtK!-UQ4k*mciq~ zf&GIMg+rF<5w_PU3hkv2w*4j|vwWGVqA<=tSwUW-uDpH!(J=jOF>F~G*wC(lOT^;B zeZzcmrew0(&uM4P+JT1i*0JZUFTmlI&iNEA&;@gAKbC~dWGy=F(lFQ;XPk+MU3ID* zfobC3N`ZK?A>D8S*uaqf6jhA+yGMnkG5G1rx3 zX^RPjPU>sx9UpstBV!lR0~2zw!D{8jialzuZ0S-tG6yB=MyUS9@4M#OkLV=wi@m^6 zv>*59{M!5c#S~F>RUi$kjXINJ>SUpUdCIXN^98hI%e+E(9!ZOg{>03>JH$Pwp&Xua zYu=T(bB^Vs#Q%Wti5oqPf(Kg%yxOn1l(i=LU%_5(E>2svW)2KY?W}%N#nl=OJ^$+9 z!8f~#i}oU{e*}zWR;JCG!u>z2n0LY!4)L(pl01J7$1>95mbWnRyP~*E9rdeZP?L-8;?0YB0RQzOKEQ7K_$aKmidlSs4I_r;x z-MlxyHTHG{?V5DVdz+jNSaf9ClG}?T5ks7t6d1KqpOs2})R&c@iy(|njMWG}1ozy# z_%KiG8Za27;&w_7j@QimN!vi#pG?yz2!tb;jo8&=CG5f2nRunJ;)PN{)2A(W4kw^K zb1c6F<_Ho7=7HDL;5 
z1P22{XX!otC(lplyucP*SP6o0)5Mm?hJn{_1^O2x=oF#|&`7@f&HX=ozopmtwkRy6 z&xhzf^)Q-pZofe>(-3|+pNI7&B(3kY)6OEotuPdVB968@5lkCm{{79j&&eumprp6H zKed(F|0$;r*D$1@`b2o21Sp$>k86slNzNEEW@M9Gt$;w^C72j(A)(7->otTd!oBvZ z$_d)){HE9W-SG9VMeGWWcrZgUguEjEM4vJJ6Dn{$-U3WIz5ei02Is&w#L|#)ycI1! zQc5O@;V7@Zj2tt#XEbo&Z2&NIHFKEv001g;EU!iQ2k&8~6}u|7nSp_Uw?9j(w$o$= z+rf{W$h1VF#nbh>jcC$*=9C{Z`(x-Llek14QG$~C2LDt!l(L7NiM+^}2))Q|Fb^oy z%>_U=yw?!2w^8nkX-KZ5mtJ&w&Y9b{Z;$QOK=J;aajF4t7jS&kRa}nLOH6k>kRmQn z1RPE4A03g$EQX(ku~vc)ww*cQz>IT)^SIy}R&W~jUf_R?JX@K4F12S;cjj>zAk1TM zR)7e>#6MJ88qHa?(kdCQR_R7q$OzuS*(rnbh7KJ%LS{kKF;l)n5AXYfpM=go`;iiB z1Q1mzBbs7;w7XzTX3CHAppor0`oit}$5hn7m5!C7ijsLqmF*YOl=5KlB3C~5*nR}O!=?S*A*C@g z##0^))6VQm9UPF=2I(|jT7M?B<3oa_7GxVEq!rZ}Qrw4X3N-3+D;kS0SGGa3_ogL? zn#B#DFL0W2WAUZuB2LD?DVY>{vwgg{ySC?%Qtn9;Br#3MDZ2eGuM6SH zh%i%rQEOYn?^ye)CsQuPN{m|Z7=Bvx47W--h8L>6Kvb}VKnhtNDpHTB`UFUF0IdDa@_@p;DZ{S9mm?l-N98Y)wC@vbl^bHbCbG$gcm_=1 zCTpfFUG}9mfjlfyL=hn}RS?x@rdU5U!XPWd?=2%AWhczA07w}m6LdSOpU%NKd*(~n zRK+Njd<^C!@@{MECj80pwvyDW86hbE1-JO|*>nn-Il#sPFnZO~%86j4@1=a+evsqJ zv30aTTJ#z5GH3_v!2%Z{7}K>!Y9;9`Yb%z)dtJ7(eT}hzV<2~-pN1cV{Y!FDL+nL! 
zTS!YnqMa zhb2Frtdxz##nTcZ2ElhLkB)kNI|B}1mX=xxhMSrI5|qoLvw)mw{||rYpb`15yxOdj z#y~tJ9^PQQu*@r3Q)KK&slT#?)Z1^*sNNyMKhe?&e}9`MQqXtNr9)X~wDryNSG6T4 zx8GwEr>~SYrnxtw4N)D7lNXP*H6D!!E{Njy+eOlmGUBfy=8JJ=w(Mrc2f5lki7+IU zI~x??Y>IZtVp=s54&|FDFW?mHFI6B#$yq_FH1`iBP2 zcgZ(=`ZLHQ{tM#PB$F)QHLcuOYay=8k_l_Ulx{!)=VV!a`$fqB;X#pT8LJ*5W!cW4 z9_T0e=!p^G$rq9|gA&Kc!)PNd)ua3D6Cp&A7FNIe=?;QE(IKP8Sp*YcVCgOMRExY3 zA%<|8Q>d`p%zk)*e>Iwa_uKQWUWLwx-66I{;(tOPSGbsR<0am7th2(!V&)b$7o6AI zPo8X;gRGdCw=f?nH7$-O`LI$lN{r^n6Iv{CUnTAnR3+WDY()b~3MG;QWB!Znobc?# ziUzGl0!AN(frtTKLxAken!|d4QdQiL*r_nNe#1WF6QldHN>ndJyU%2*=f`eIR0 zvM+QyAO$?>(@^tqB-Pvttj}X*wS2g|(hJr6c5#Xo_k_h$)IPo{5lrqigp~}e4?T+v z@1kfq^eiQgnpX)CqSujmmF&>Q$M(V}iGQZ(G6lT0#gLpn-T0pXA=#MnbX&b1paYP0 zM%sS;rz#~HwOme`4xq&SAfm0jIpis(6YDfFR=(%&Tp^N&;8b|d?=52VIphzRqUL2x zdy2SC2U@e;eT)APKEU|8t9%Sn39lmCY6Q$-KuZAN`VR^|xL=BcKOw~owd`q=3Co|X z+(j}`N6a?K?3jw?et8=*k6Jmbtzg))io^*SG8nSsn9xBYej~-5@_YxB^N;wt!NjxZ z&$k6abKsXIezAi+Y^PoF&x5gSJr5u+ikbf~FK5zhHw#ew6QT`3-Ia44)q)gJu}-sO z$;-icJq!&QeOSKyaRf`lo#B*7jacYh(gy3|>91ciw=GPkF$fkMEuSyS0V4h$zORVb z@$-m&@1sF2AVUlA&*PB@XbPReWDju0^Bxw7W%V5PSNb`y$_m=}@xz8)biB}*)xr#y zB#+PJrf2it>}qO0T;_Y_L36Za7d%qpLeS_0w-kt3AQO!9U+$pn`k*4d;q*O6 z_=$q}@y=^_Hi-QI6!&RwV1!Ag2xvVI4g94f|Q5ZqeOOx3**AvYKHtX_->=(Q25N^>BNzR=J>1t+S$fV1FD#m?jLORVz7zOnUMaA**Gg(0 zku(aeNE9*q`4=$6HBCDNWAH^e`NOh#|FC$tjOv<6b5@UKg!PP%8m^+Z10IX#p=jKQ zzh51z!wU5(0}G2t-5ww<3?cCqogFg@=_12F?`$h0p;Eqz4`xv(+!5a9)Lq9xW-M`H z3wn%+VbV1n1535Za%gk2MxNt$5z1PL{b&*eR((PhdHX5VF z2f2UB0IJMmoU4-pT6r`V1{<#K$SZcHsCPI9L0$zLWZ+OR4iwZHEer7w)?SV9&YOCq zJ6Nd|0CgZ5Aw-xSw9{f44tWOEBxN&~sr}gKIUga2z$p0BaRG zDj6U|NCoa3kk2}CP!qipIGEG_Fm6Zi#zFz~rgaj@z1V%;cjvN#@BS(u9gO5dCMsz0 zuaNu92TbGLL~0Fkh2yZv!dW0a6e3S}RbI!$NR7epA0}}+2eGjG?#$F@ zIwz*rB=23y%GzfX$HPru`K&p#wg`U&)5L8u-DZi^yXqe7Kqx~_ZeP)N*2RBO;agc_ zFzOe6BIIuP>H8`a5;m841sO}8>R;L#EN;YA!?ZD3@7AND>wN0nkHXt%;sBa2q!}ouECq zo)#1&Q-=t^j$D{IbLL?tuw+1yfdk#5y7_x#n2&Hm6=qcR79^>NUZCdLnHG@cqp%FW zf=>PJ2cVaanS16F<;64dXeFaaz&bLbYP}>$4#cE&{$|YfFb!XjzVV5e>B|8$yCE!f 
zVteleB7v8n5`0_64?rA_vQYm4r7?y~5x?{IFS(9o6A=kLeue755%i(jQTlLJ^gp_4 z($_5shfD7G?`q!BGM4eS?=jqnfoQ}E|HFHPO_wj~mrJ%nHD3`idHxk5!laVd^4CRL zin|@BblHBQL73{nfgbqw?VHU&XLxuxvkn^Z_@VJ}#&j@gz*@b^*Td<H?yl~g;7X){$9FD8 z*$q`iuXMBL7rguMCTame-t@$Sg?Vvc*PxB51+q)_4aw6QtqP@~7xK>r2a3-(w+?4dZwyTPcHz~x8f7bm$mDza1@qqGkqPwbXtDnkV>fe=)m~)>dvF4Z`f}(x7b$>{ENeRr z6mu=2y#O-orfZ-$SZF6v?TNqJdz~@5V5-_BZnI}6AOXAyrK!5za$$0y5x_od8WTb? zLz2?WhR-njBV1%w6g%Qo_JCX-Cd7m2lOGp3)8B@XbLP3Cn3l|Vjb~iphJ{nxMR7kzCt+5 zJF#TJC$u?eE4t4*8_67r8VjIagb88MDXX^?T z-+Z)+ycl@`7wR;WXRg<5@CIlzu;&mWMYnNd8=@HroH zT-|i!=*n7pGf`I2ds0mbe2T(i9MVAZ)OUAQimNWzlB15f-6xRg^8Q9MWKtMS7Mo41 zT|UaILr45xPe#Zx8-TC%XWIIsf63SJ?n1ocb>lXXP@|Qmow+wN|6NbR#ezn!Y7PBe zKqtjJMbr#B^uLliJSpyQ)I>@lF`(!v<5m}aZ8@?-4j$??1gh{wQ<^0uQk?O4oyeTx zC^tHRC-_sPiDQCBD>%TZ?P^#}Y6v{dp?(RZ01~das#c0;oJ3o*k$QSSF? z?cW%&YUP+4G0GwE)@+w%`e9oOxh>4!^*zM^GoPOn`C1TCKLWP%Xigk^$Ec!->LH0w$fv>^F52 zbfm*DozCadaY2r;S8~R)$9%v`>IZ96bFR*{A)tbT!kDaB0&+9Sc2un^O+vR)ZlNgu zS__BScIKP(1uhY*dx^r3NTZ^zMEB-FB-2(a>8J$zKy|b-p;%m}#Fm61Jh^?xvY{9z zOG=Y*MLF1qUg^~Rh(|JWLWe}0lk>I)bm=MIM$Y(fP)Hjo1})n6y?Dg)d_1D%O5!ey z>1u7+ZgQJcb#i2z6d#D)hU*AsBFfu0IHv%%hJd-ZYzF5zlq}SYKR}|1YnQ3|FFwy7WJVb?=i3PDVc|L>X8l)Z3G9LF zQ2FmRi)#%*liSqt$PBfjQz7&vS^81YA2~1VZUsel)-@!aJlSjTCUeu0f!eeGppOeB zF~|(6ZikGmV<~aWGYgK<0+k>Ec`{hkyqf~t`z)HkC#f%op9QpS^e=UWmn9K!ZXs$5 zu)O{6aQRz1jWbUhG!$PXtrW$}e})R2CE)@!>JBpJr4&=)QtGjTmZLS~h96ldc$@fg z%0W}eeh$wYTlwEMZ0|oec-4#!wu(_OX-a#w8J^MS7SfilV?)RRD7P10Fo-m$WYC-o zmWtfptW!hkQ0Y(@Tiqjm%}D%3oFj2ZQ{=g-O}|!{3~N!sU!m!`PHhs`LZ+0|m#Z9( z=%4@&(Z~5MG=yqezo>4oH?uvZ<>gDxJ%^dp?I6Dub}^)}TMLHwP)b;Urk%tgn{wKm z#(P}lj*CGcS*5kxNO|Yr4dNUpc0J(Fq#`*kNV3S`?6$sF%17}xRDv-0qi48mqtK@W zOSUvS>M~r1sAo~0|3DiXjaEdTUtv2|$#Ly9Se^z5Y%QiXAmeg67O{a9DlP~|KUDak)WWwqzxKb?SB!5sZ zAq^JK3M4YAM1A)T2kG+#B$G%;H?oh&!Xgt#Nd}0o5K|80rryI=lAc<9*)>3^Q0Qj^ z;kYUri(SC%n}NwV2E#KfIcLQjDfA5ko!~q3j_yU)b`3$UxEhLn#GwXD-$PmIjr1NU 
z^)+Azh3EJ&1LFmrp~i@0bZXv68f=dJNEX`{+J@fIxHD)G>`)S|qp_Yg2BoRxI4bHz&D07@zplikM8oA}$+@M_cxmOy>G8b^4^VR^R12yIdY&!a#E~Ptk${|yBx#CiZG>BC?Ik!O8Keba zK+RDZ;!EuL-_!}nE=x}zDeKoo1r}UKsI%v6dF%<7EIDehD?WZoNvqst2{5Ij7Iy5hOI=%*gAw;jQzo+4wa!Sn{oYd3 zJmj>Ole6AY#Tshafz2}oy(>5y?vGpu`jDf<9S+x@^kS6YF4*&{lyX|6fOFMt7)Za$ zIu|_iftuokFnST9HWbG~Z!v_x1juO*Hxc+0hC;Y*CmU79-6W7m9?*$i1VEpQ&s5za zzLo#PR=zje^P27A^i!ua3@I}naK{{|*^&*xxrmPgWH6B0Vib2k z4$h+yGv;O{g#G$?yEx%X36E%TV&|v5^TT#Dy=ziKAVkpcRK9eLxbol) zk`gmw#E5HRTn%;R>SH*w;%%0dXdj6k3}^9it{=0xcs+vL{_i7*)blji3XA1vq2XHX zdP+q!o>kFkEpkngPsTAsH1&|@0sdHq$z`7aDcHAb$2+N*Api6$pyWr|KRsPR+Lckb zP(DUEhwu0<5!Q*;6{H9TRV>2TUFDBF__@(e=}`XT<P>lNH`TbO#jsbW-! zmw;+|2Vx|=kK9@sw+xvt;MJVP?`GZn(@%}Omt$A~@!n*L2oQ9u#1xXQVG4gL;wRMu zxds|ziq9=j$s`b;*KrrnU<8iZ`0aN(P8_F%gnU%^eg}KO%G!m>2XEp511zUHE~Rp&b&Eh#J}WWuKC}=T_wb zyg?4`bg1DE^<*@U;)!T7wVc!D3`%D{U@Od^+L}@NGT!`ai_S?5BVTMQV`y?JAjFY< zl&X$$Smfgzp%aHlIp&g*E3dZ|-l7 zHMf4We`L&n!y%UtC*&E8XwWw$#dLgxTUpN5tz`CclWDcX17956Dy~|aY?m)>4w!R` z0gSxR&y;3zgpgK;c*ykmBeXy{2(C700O}WSC}Ss=TzkCtV0K1wU`=Ai!pA}&)48Ob zL~iKFlT1$-?9wFmi`zb#-=vmBdHGz`_Hbj#jCP4|*+2cez&Az3WgUuN4QXz5f1M3= ze9z3@_G4!;KAIQ$xzcRLie~+WxkUJ53v^;=gZhrGci(j`B*{q-n>>6D0V9xB0lo?w|rwx_)CsnobQrK(d3I% z*)4J35l+SO|9fSd-V|mUEi}IGxz^72i52^NW+)zQxt8U+S*Q+D89X(4InBQ`T=JIo zj``qusihGkVwg0@*<}>6myoiiGnpcIOw7I`H;w5lmi5D*N%OSxwZKOQE|euCCMEa~ zk!>r(;+3de=+8(fotG69PNh@FwyM1}CQt|6nam@K{Qs@~t-am64CFooFd@kdz-Rn7YhROK1wi z>(tymdf^a8V;o8sDN2A-Pu9RTpzpK`5%^vG4x%#H740)TBW0vc zo(=BF6UD7uQ$cYX_-sXIjpUvr;n6PRwGM%f#j-5$$r9-#U)7TDpLBn;h^{J+qr- zajVQc!!9ch-|ULzI2fe88?m<+`}XgxA+@t=E#mH=In%1my|n_)Q`Q&vDr7iKp4%qwJu%cg6JsL9Si4`J*<^ zpogqti5O}L{5r(rT{h1@K8K0((pyMK_w3u3g}g5+XZaJ~HLsd0v2)%PrM;UR3UhE0 zZ{NxEqwcL5?FZ!|LQpYF-yQuK6GrWLXYihBCKCvF6~ac6p7q*r)lQg{eL!21>}n` z8g8hits??W1x_6@>_Y>rLFH@W2FtD0#TnW9+~S1wjZB%jb5OTdXTH2*g#7kZ5VrWx zr=mN&j~sta`XT){P|`m+yw{JFrpt4h{0*atZ!Q|d%HTyu|NHC1?_Oh>M#wXra(t0X z8OpaZjW&99lk|j#cG7 zU70rR_}*#b8Y#i?W98;`n0IKb^f&;ArRbKI`Q7;8x1~8uV{G|yv}G3*8zXg;FFiOR 
zeCsu8qqVLt0U*Zc)|yUeknVnk$i6VJTG5)R^wt{z=bEKd|a_IE>*tcZQ94XpbSdpC^gHz$PkGinCf zb*r+CGgG|({>v?5n{9x(f10J+QG1?r+229+FN3OCe{tj1Rn>g0C~HQ!G?{MLaq;!B z66Jd{4|@6 zJ$l3}8dtjccJb@{qrIo62fMP;*>t0H$fSGIXJ-ILR-x3~dE`jH1z4*|qQ{2mTN=SB z7R6Fku6YzRM@*`zUg?Uiz2u2ES^SM0NUW48g=3E75Wd|PUy_4dskUM@5w5be$Ival} zpX+0Xx*=_`;v^GLu#M=Jwue0BLW0lt<|xs(ciW^5s|wUUl#2qq-wMC&cwTM>c4)2) z!PL6K>eoJGqBq6#AkRr201eWtQVsWF;#fbHr*>LhRb(Hy5Z8d`=Olz<8?{AK0ooR7 zdH45@pez^(_%ydH36`srFXEf$=v<1K`{g#LpREjY+r+w`#l)!mHQmdg4r1<5!Gf|V zZm;y+h&kZ5T4}?gF;X+>yNiLd)CX~^#!;-Ky)8AjqUiPoEO*~`IN$T~ZobuoKan!Q$^ePuHI zw?Pd9x`MKf;*~#f=9;B10of`7Dkm2wJW6=}LuZ?m1zVTDxWry6{P!OXVxP|=)A%z) zbMQ=aYq^H-H-pHtKA;Rc+1fxVd+LXPn3`}Xqgz>a%x1ie`k>4689xL@A@;M5KbHLD z&q*a$p*U=~GvUcC2EAH*kH#Q!z)R9Z)|kMm4b)#<$)oQ5isl{!`(;5z&wQoWZbM9H zIyP*$4i({j0%ayJUFzj-Z#+Fcs}|J1Sde-*#3ahnLg|}C!tHA~_<|jE+-4=X@louM zNG*nbzuu&#FfO37dy0d{#$MeR&7Yv#!E(`8hzTyfSr!{K1mwL-(nRrIiT4R7QsDzt zqsg?rOB9W;7lE^fH&?0Eo#fofnkO6{+Mk4>b%~Jl&umuZK=aJr@|!IB2z?;kDY84e zFJH4=H>Q|Yvoi8d6ytfBQG-Sz<$7xU+wwuxYlGf)vmZMlbRkVk>v#|-rSCDamlcox z0sV7=Pa5pD)x+J}bs8SmRGbvn&0IUMUz;h5#@1C2s_#Qp;dN@kubs(dqonZ*-gzLa zUy;VYK5{Nq{It}xOWik5@G}YIK)R~LY440#ObIre!5VNHhdp=ddaVwR%>#2*G*q5# za%POu2^3Y??@5#T171KIP|B48r04jT$2j{l(|59?cX|VCF-C^OO?=2Rn0J zqvU|q;O@e)sik{JoSK=`V!&PMZ#0u8)A}S)nO6WQ%C@5Q>9Tn7Vv2itu;`Fet_as(<6CsEs+)#mt7m}Rp@U)OfU45@}y(OBAtl!c!@`KSf&(BJj*|XN>)waix)SVQx;*>c-z(&veV@)KvPPD@xSLC{JR^`4z>4-F(&`D8RXKpnF&YV|ktd!=3>{SWd~MH=xN)c+ z<@IWZe5>FgFC{mpQofCEb*@Qxyl#hS=8(~>HoKW8*lHc#&D{H-U=wp2G=b{xL_dlv9M`xcqZpIOjrgg=SPR?pHKChCcVR_>N_iz&T6O-rPo&-<=ADMmC3(aP9gP^;Wgd`m9Ivy2JUC zOx#?EIIBAi96&Cgf8?fTVn-}dQ&GlNiFE|wRLS~1CSGZuPIaZw4W8LvSmd zGq!4XZXx(|fzUk&uD%N_xsXSYXwZ((7_ELdn5|qG@zJcNmbWk|K>R)9l)JQ%`oIoT zQTSsQ_2Lp@ckb+d;_;EmX1LTyva2mM586b3XR8Gwm!d(kvDJgv6)9A9_BBq@$MBj#CQfRpM<77e2aY1@DKCXa$Ui*X2jp zOkGLn$D~A;@)Mm-9l`PO4cf)?(j*7V-z7*tUw(t=KVDE6xzUjWf|#Pv-YIrdFD5os z{?PY|eVbBR#0(aFeHl^MQ~3B4(>{dLTm0OLqh+c|hK|<8RP_rb7)*D3w$ePH3*H8P zTYkLzhrm7n)cb5su$qhuNUFETm8VeVZkpVYXgAG!Yd7EI8rN@ 
zd)siiSwm%cqq=f~>dS-$+R)VO*w;kmBoPHWb)mg#HXRG*Y_!QyXot!$hPh0{kJR`> zxB2pMQY=e(F!#W@apg#)25?s?BRu}PuH<^dcpScUZ&sVmldXUF>zAFslR(nluhYiH zu$q%4K}wYxXVftbVf_lp|BAp13j(V*Gxk3y=+&9dKJ+8;AXoyqq)^-O7`ug9##McP zw}l96z}-1uXFZv1Z_b|%ij*?S8LOL~Q1jE{bf@rCGnh$vJb((h* zC7lkFWhgCpG*w6Km9>F1I?{`bjOx3mY9&dyzE=98DGzTUI@srPiqqQrX{Rb4Mv<;Q z+QZUmJ!|*keSg7*9tw`T>Hf4vd(dgQk&{Q$y9;KqW9QDDQDYk@6YX2YPiy#z$G0Gv znH=@;|JDPi~ zPK4e#sf}v=1y`fQUSi;+8-tEso?c$nDAb&unAi2nV?1S}pk?@Gi~wv-Z2cws#7;^d zUco~aA`00>i$xx(k7S{lUrK8tHhCEhKIyoQ7DPa^gxMoWZeIZDH2&7-{zc8Da@&`w z(%zd438%w;=;rL;=ll@6k? zK?4}H$A_%Jo%r=2fSj{TnO-NyJ?r_5SlnHpp>b-1r6|p~O-F8Wt7AI}xYd`Q&CPZ* zl*drp`dE4F6sxJcM>TLF8*A>7!r9HZ+4|S3b#Zg|ejnGsX)M|#5R;lG-_#}kbLOS< z1eb`OAzdS<1^ReF1m#m>mrC1g4Ot1=2qOFe@;9L_1SggK=bv*M)?$=*+&RQZsB}p(@ z9d3}HP4DTMAVF%|vL;IXRi5EqR+CkEw6ASGe{ayB%6&(tN&(eaMeTVy?xBA-^iWg` zVlSD|ar8$@Vsrah>gU96Qv>R$7dK6`DF~O`ff(jMpoD*^ZUp{L=d!l8tSHvDEZoiW zj%?x*`v!9>HGc#-tA8uFv6^i-%y81vG&a4I+lKx@1w+)22>vxrf&ew-7R{ysi#@|P z5TT0DjQRkk-)9P8_}ErbmLBe=|H#C35qZs({l>2lhHODHGeGizKqUN6iif_NB)o3Q zVgE=Mm6SUG9#`;E=}T#j$**kmeiPUYynN?-u0wmYZJO`9F?s;(;FS>^`wz~`${$*R{V62^2=*PimV+{b?jtZ$ZjUngF8 z^+1&|FsU|_(s`kccq`M)r9lykgZzV}sgx`ty{ia)jHvf`^)<4V1cq&EcWoy+eSrW- znPVmSKVlC`PL|^C(7YnQmH9UV-#qTn$C$R{voE_}Uj`8^NEX4EEniA~Jo!m``>{J9 zK&9rmMrb=9qK_DTef5bf7usj(h0nv?HbE4ZbL`u@;roDCk!2L*K4-siX!OiIFEdj{ zUI;aatkuf|V)w|BSQkIVYS5I1hK>zJJ|HivCoXkVONgzVt z8-J|MQeZa>&kK@3=Xr_ZDW+iyn{Ocib`Kt>c-Y92TXMQO=5n-EA8iUSO*sP|SsA2JAeOF8Fg3Sd-Mrg^nAm8srhu8|3pwa4?2A(S{^o5 zZI|g5A_aJr=1FQ|W0@o5<>Y7~TP&fiP%`j5h9Tob4}~#D3Lm)(k8wE^@>F?_P4&p; zoN;LwIQ?^Fs~d@Dfo=yqac#wzpyPvm$uBvj0=h`$EO(;#&7GIM!DI#SkXPS6Qr+AK z0Kbf|N_R5o@*TO8+@jKzZKhu2j;^C1>Vq8JPK1P?cj#w&p`-6esn94B*ssylUygyF zQ0E9}%)$q{*?|_r#3q^ASjt~n^%N65ksHi88>W7$ov+&dh_B4*_7=wM5Pm}tEME$o zo#5%=_g;KO2PsuPxa2xeNvFVI8)&~<)LnT@Jv5sS=B!OQ+v}i(Mg8lRvp~N`#1x_W znTt_7J+o9I+ubO&ctv%VH2Lgk+Pi%|h+XRkNS)RstB8$U&3_)Ry4m8F1`{1YhriZJ z%dzw&Vzr5>fs}j^RzaX)BXlR36(QBSy)p*$FU4}`rsc)SWe;R)(h7Mh%vsIK_S2f6!CQP(f 
z0YTPU1Rc!kH=-)J^XCoI-ET_~#@otqmv`QwusG)Pu4F|Ev&Byk!Bnw~G<(v%R0Y+9 zb_w*s87X^_GQXPeswy%qTH}xTK;`XX=tPr7y9HtN$I>=(BZJQGiUsMT>AhgqRaNOI zrfX!cJpif#eg;_`l>hczbv{kkud5p=jt8%ewwAncw#8Rstq>^%k(M+8Xtx!4-2Tq>3#=?i& z;;!d8N4(YQB$^8sIb@N%yE47CQ`oAY_6^c|q`)<~k#+^9nS3-`_6`R#3nAb#0|0ww zpC6xupa4aTT;}hkBMc&|LAYv3wUVUe)wdggNwM25cB#IX3?uK;D zbLqiT3D27c0CDl!-^9yBXf;P+{Xwjv5yTlWYMhCK-9f3L*wmQ}G(@jxVQZ=2#4eQ0g*4Z)63Qh(2r>W*&Zw>aVzYjc%JNr$fkEhqF0-enq#ph8 zx7FvUvZAC7pZ4ydbK)0YC<#y4zy5)Q9_Oh~X;9R66?`ARz7e-wjJ4=gw$RUYwI6#Q z_BLbv&`yP6`Zg!naiU7JZ8zS0inGo`C*@F@VxTTUYlot^ZQ}1XgS9TpgG$eRAI!!__Aen>t5*B8Jjeacm(>rHr;l`(|6!<{v#BE|ueXQ`ckPN+Qgr ziILV9$-TPi*-;aE$){n6fS?#ASPC(kCV7m)1xjGtrXM;`vNrNqoc{V`?R9Hug(%MR zO(0I06qkQ51EQP!kb_3Z+pI7WAmEr{=ovWJ#EO+UPp&g&E zpSc$HM>ZY1UFwzbXB!5OS%bu;yOt&X?2ode;K6_P)^9qX!o9yx@z96~cb{qS=Ff#} z-+2hSp6$8HFKI53S*s6}p@FE>>c~6&CW3*Eu5rQ(CVi66~_N5qYS*VB5_Mla!Z;c*%dHPTP0XB{Q0zP{(@1%5K+3T+C)xu z>-Aw9tk(aQ3L#JoaBt+f*}i$SVbI&ZThLEDY&o+W`cEESac%Yzk?T16%@Vlm8i3}` zP<>k6`&Y=-LI=fUuUhMO<=*&8VN)8K2Etbb4h4zh>T}7^=IEc z(CypD3xX0mj@qn|7>r#r1vCuHVG5puccSmVi`nD!Pw{B=8dW@df6K2Tp9~y2+=B^hK<$VA8tj7+f;2hQQZ6ezz{DR33geepsf@9wg@Yz~OLrODTBkiSDha`Xhf3^xj->71!Sx_F*eah>Z zxf+326rs8Sgd~BEe{7`wL}{)6iL@3QH0+%$WEWFQ`|5xAR7LXJ--6rQ&~2?vZ!lp% zlx#w^)8k?KztR(3c%lIxGqFahEzb*Qx63!>*#6NXsi~5vLp|+I`DD`f2)RmX`ffe+ zw?ArgoM~pt%`S>I=HL~0_FitPh-zTTzu9jYQ#tk3?uEy|eem#1HzatobuCCEhR zG62_swKoe+=R5S$M+cUl(*%I8$kb)mhYoBQ?07qw(Xosg==;n=b8D|mo88L(z~J~j z#qXXhd4C`5R*WOFd-A4>xp z(ZX@U)mE~5@;1GOSILT&hyUz=7$SvNg7MB@SuTC?rW}=~0}vtT5YC2`{o{iP7 z_g9a^P-BAz1^#6|@YdH}PxhZ_UIAovsz!fR?SIz4HE#0V)ykV<^YnN5rXgK7&rQ+l zOW+Q(Myby+7ziE8=wn$tEIm;)H9P)fK;_;IDr&NV=P9qg3-q0RKc0-VTtYbiZAIm` z;OxJZ-2Hkr*wy$5#PoL*j-R4Ems zpgFW{8mqs5`BECo#+mx}Av6HFqqWmq`ngflgy@fzs5|n2M)fO{Jx;qQ++7SrlNIA( zleX1Lmp_`b4g#ps`Kj-?R`$|qiV$T@v>p@km>=TvJb7NjX9r>yo^O{5ePi-dybVyPy`lt;eBg#X(NB%D@UpHE^XF=a4NS(Aa%0 z=v>mUu?eP`L=>d!{eRSNT~4!B3+BX-wGX#EjMuMZ#jZw$34pAe1YlQR`#S=9zW?|C z!JPl!CZzxTL2e43k52Z2@yO(@P_z`PSqySR+#S8;&yu8586~D6>PwHOi21jOj;VE= 
zjnTo5tP1?%;hC3m1KeElJAC*`eNAQ4^bUCStfTMcUg#$N?w#q-`E=yTY0>FrSUo@cW%a?W?@UFq0gHn1Oq|LKGafhJ;*O|l}_}&8ZGlQJ{*RblpD=DDV6SxPGLRJyNIE) zfYjRed0*5keZ3?Qm57cJr6;lP8s9oT)FpopkYmLdsf7i<#|^^s^2YZ!CU>v;z}4H4 zLZ47@qL{KYqXyh3V!%-bT_>i0`fG*zang$|}8GRI( zq=y#F^M>W0@84fqjbHaecxbd->XfH}_`Dbw;2n9jcX^tqi=;m3A7J_ZuYdgqZ@l7$ z_nBtnBT8FZe~)Tmc6cSw#*#h0?$oW|UdCE!5dP2a@&Gd&-F*JnI4G4tll zqx^Mw>hBU^1s>A}7Kvxfedy;w}Kzj@c6*ymXnMU zZ8+>;s;K+)Xq4&VK$i%SGMaH;RN0dHK3So1mxZ+B9NZ0JOToZ?%@(YB2a3w14;F<1 zuaa-ex)69(M9@*~cWwW$m1hdmI~XU#Y`&Y0`Tjl-{w^^6g0aQ30Y5-Ml=L?GGgIYb zV_m%O{1<|sYx+Tu!cVf)Qbzr1O0MZ4Rn!|Y!u-@Bt!*|RepA!=gQn(xl$Pz^-IEIW z%AgOGZQr3HgZ#|on)x>&ce`CMf9yX#>^&H-#HOlMoy<->*@)KZK5H+NJjOB) zFNp%{XNJzpX5l^H&km^q7uTpfFD&{1khMuO(>@f)9{Iwh+0}trmBA!u3!@;p5XNk@ z-|fW(ZlOVV4Eb=FTdC|G>>Yji)6%BXUJd?|W8TzPaG6SZT!!zLJSR|>)CB?!;tf|+ zn!R~YP+%z9T${Jg&eUFwJRODty&Kq%*UxA5@1W$qy*@!*Yak~AnF?_I7W`}KwzvsZ zbzcNTPVsXQqB?Tw(w?$Q@9+5Vivj8`C0$u)Q&3Irpm#H|Sb}^r1D!kSHdzbj^@a=l ziYPI@JINQLFru1yav>@$dqCxc{Pz({X|K{98-Ui$KA?DvY_#YnH|WPV9rkr=ndBBiE@4|>eTxt#i@~u z8widnjS2;07Q&(RGSflfJBHDk-qfihqb+-3gENcT=(Id2#3TqaVe3Uo5MKOYIqW?y zS9nqQM^g8F5HDhOc+2Z40nd_)nbaC2)gKJRl~(8p)K{#F6eH8g2DPwY>&fTeED^SL zbDwO>ZlW~)`9nO(Jt>b0!_sQRx{}tU@T{U-ob`d20=#4Ox=Z>O@cBA+k=A-tkk>NK`|n?zFx#0iBcm3z;+xK0QQG$W(wX{LQ>f0g~iJp~QR zx+3~+GMfy?Y+}wpbZ7AWr60cS{)cj1UFq>I!qGYA+#BvHV?z!6ZA!4YNcz2AUc{-$mWoTDP;#Em=R!GjhnEk>%` z9@tBMd6UE*r746yW#mht&8=`M5!FIq@5JT~eQT+-VtyIyd%WBx=EFy5xe=G!h;wm; zD~T=h%)%fKS?}wYAiQ7}VrulzU_H+CE;<|Ik^H&AM1i^!Y;X8=d{~vTvQ7qbj!c!V zAg3gp(zxNZbHGoaKPP$a`snC|3|Cn@N&?S&D&j)*KCz~mpN~9wFXr%Y_%2XE=*UQk z!}|t%3ScdY^&O(skp{VEbTq(gN113L`-*Fp*$e*$7~>L7l#_@_m@Xen4SD ze*ip=-eKGfyA~8IHLoH22(%&I=m9gQ>4hCMo}Y~n0uF>F1h9lZc-usGX^CCz1%y^G z%eS$=b*o}LPjf35ot>Mh>y?kbqsu$k3UB-F>IC!PYv*hlPvm) zWu6?1#_RVobvsYW#ShS97oj zpxci8N1tMISabb(p282G$=+o@injfFYF?^deG( zU%t+Ar>*d*t@@+3j3M>&`Ib1PAU#E~$%6IzBEs7AR?r7ati^mv@oxIW!9{f~@Du8w z%Y1!dNvx_qHi z;PY%#darB)p&?y2X&2JKmgh zQiN3MQaEc)iZHxp4R>5zFCp1ODgxlSm2YctE$OWd-;PI416o6gKv^5ZrR0}MmeOrJ 
z`|PeIbR3+h_=T|%YylD(`E>@^$PzI0#fS!+C<(-I$bVGgkvmKHh8EuIX`}P5+`6@*D4v4e>5~}pede`%@<+#V)T)HV z*Ws^xSvodqt@0~NnopSMtyo~gvCmBRFnt=)l8=u8@)go-c}5&P=v0O!YQ>n_6_}Wj z73S>%Etn{J3G3x~;;{m&j`sFkn(B^vz-jRC?VIGR52WrMExaw}cq{b6OP*kri%Vmj z`0V7sE2ipzmxYR4g~#@6yvrtySceG`2C*3bqDC{+7d;ew_@0;Nsco3Ah+Zy{X2dga z>%0q;R|Hl?GUyx#`$(}AGIanLi>(mLX|ZH93`@0&tqbZb96N%NM|KURH@v_I^T=YX zAa9wYv~yNFEy;k>r{Q~{?8)Cj!x)`Q&|j>Yk6D~9tOVl2Gf5MUgLkhJNgpP9z|1f( zuZ4)2eBzCd2HXjV9jDa4EJlmKC0r;_eWfhI(1yW;us3NG(knqPG4_rQ^Fu2 z8rR{D236-l$@Wmp2|;HfRB7@8zff^<9wF3AkdZIyk_0M7u? zUBUER@0XAYQAjQX(*d}@C&4CZ5Wn>_W_D6Jj2GR;~um8rh_PFkuAMEVy5ii(Hf&W z>Lgfp1d|?Y8!{0Flx$kcvlCA`?g?c$cJ&&2(kpd}g3p zDo*e7=J(~~|9rF)DuKg5L!6?~QVX+CEKswM zkJD-W7x&&dp|3uvm16uP0ws7}GF;sLPzOvHSCv7;ogv16)@AU^Kex*(e{?@zw3LY52 zLSMX}G46tjX>p*o)>(xs=BgpESf~eHz+?aus(0%b4uYf&}1xBa%trc^jF51*BlAT zRnC%kAmK0_f@ZyaTqA-Z%(Kptprsm$-*VCyAf@tmQ+9myU^J;^$_Fh2J%q7-af+-g zHQNVVgd)&c4c2}BuF-~PO)B&*K;F&sK9GpxU0rV$v<=qcFP31qLLF%Qad=5<|m zuxUs!r4nf*s-R-69oO$oJpv3IPkyZjKcjxkx6gl;u9d0cR;#h;{Io&k1fbe!ANLT; zG$G9Qp%2X9nVyRBaff10JaN#*msOa4OXuIm6+iEe6&o4#7e!PfzK3*6T??#6D*nbL z9WZi)9KIEVgI7b+v`ySIyjA#srOujKa$1DkY~WXUjp$8PA;4m;fsSYtjO)}UuVt6V|CJs}PX)k% zdfHHF)y>?{J-x{jBE5~KUj3$L0pyRVk@e*v zs(5&rK?6?P;n>TjVp_zYA9#Iv3`b%Rt8XW&18v;ahJ$07vg0AZ#fBWby`%(o#rJoH zEl<8M$Hllx73Vz;Ovc79cdPe%lOGWH)~+^d*w?isLwzM-XlN}E!6uQy-dR)Sws_`_Sw^XD7&xp zVD+fj^+uXQ%{CFsn$qt8w7CX8urF4??$Sn5M5812Fv-dP6XQUbHYCR#%x=h8V^Wy7 zVasDg8Aw-&l-zIVGEKs&4w5(j;VN)e#A65V%#~a`O;-^7=26&@>T}MS4Kf=jqv)f1 zq@y(907Ssk-+l6?yaTQ&%N23Ao|SG&vQdp{96Hii$3z>PDzhaEe+*0kc``JkuD7Wz z!z0dGTdva499Xmc+DhJbBf-xBOyrMTOO|UB-FQ=cAA;R*%^3osxm<81W z4DUj1Dp=CYIE_j-N<2?u9;R(k>A>0*J;=kRQ*iLHVHI7ZmyzX?$Y`hnl&hfs#~J}0 ziHvvyrY42|GM=Kvs^L?lJ)1g}ypCX!j!(H4gi$M9Xlid$oHM&z>ZvErit+n6Jq5!- zw(>2TR1S@-TIoUsT;3Q~j1BzWey{c2ir^vBF1He(%zhN3!FME6TxjmWK0GT-jH#K2 z80b}Xvk$=_i3}G;e3~pAhmS{B+!d*4aCGF54i@4GNn$LbP6U`H+eGZsG?4S*SQ3eh zH(fl`El#j1(Zd@?$Yjj*lyg(f4Ng(Qwaozo7%+3GY7*cK+)U}(a43eWVbpCu&cL`P 
z9ItX?9Nb*oK>iwki}AA?~^}0~c-fy-&EDS+Io7Dfy5pe~?s`d}@OFh_rX;7vmGSE?7-Cw%_U3M2# zMVd)y9%f`w7uV|0FfVZc_M4B1JE)t-H{4e7oQe8HO3WRgL0E)7Z-B)-Rt#KL;tlZ@-obp~=(j_0cl@G|r2ts^JA zK42l>&INqBS_V^Vv=SWI6|9mp(65YOGQ3u2vR0Tk!u*C^bjg@U^nwz$V1AH4vQhNn zqT8NYk zBShd|!LpvK*6_3XlJE%c;RGS$RNhk;!7>8|F$So59`m_$Q_(O)5FumhN26715l!)= zQlE1(P1>}p^mR^-;ew+h4vL~j`?zTGtHg{9NQ~+E)X*K(TU#a`oX@5XZ??Y8$tw=W zw!+TDCa1qFy1&f4j-4H`E;(&S8K6dbL;R=8zVqj?WiA>uZEtmY+T1)~$pCCYv@@P- z8JMP``P?|2G=?yP2Y&{dz>yi9 zXt*i7_s4G^#41~bL>T`y6XV@Bv%7?b;0vn&dHrs=B4Vcf)ZNRFMQK>pmHUVd99HLk zDaJ}L%QhL=Y|1TQ4L@K{K}KH_6(wu27x%8?ECg82q6JZx);nBhsO%wv`yY@_jSScQ z-*yGwWSojp-%jkSqOOE=j<3*=7rha%38o~RMtYPKkkgv$VVwO@+;y#`XZlyQi)bx@-TsPi% z6+d*@SPv);tdXo6<; diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py index 526502bca468..847ddd9e4d59 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py @@ -1,4 +1,3 @@ -# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -39,9 +38,9 @@ async def main(): async with DefaultAzureCredential( exclude_managed_identity_credential=True, exclude_environment_credential=True ) as creds: - async with AssistantsClient.from_connection_string( + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds, - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as assistants_client: storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPONT"] diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py index cb4b6a2aab4a..eb4ac5bfca82 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py @@ -31,8 +31,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - assistant_client = AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + assistant_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) async with assistant_client: diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py index 29cbdc66d7a7..b6400151054e 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py @@ -38,12 +38,13 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - assistants_client = AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + assistants_client = AssistantsClient( + 
endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) # Enable Azure Monitor tracing - application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] + application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] configure_azure_monitor(connection_string=application_insights_connection_string) # enable additional instrumentations diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py index 538407082a9d..c1cfddcd8414 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py @@ -44,9 +44,7 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] - ) as assistant_client: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistant_client: # Enable console tracing # or, if you have local OTLP endpoint running, change it to diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py index 90c8ce2eacbc..aa68d45a132f 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py @@ -22,12 +22,7 @@ import asyncio from azure.ai.assistants.aio import AssistantsClient -from azure.ai.assistants.models import ( - CodeInterpreterTool, - FilePurpose, - ListSortOrder, - MessageRole -) +from azure.ai.assistants.models import 
CodeInterpreterTool, FilePurpose, ListSortOrder, MessageRole from azure.identity.aio import DefaultAzureCredential from pathlib import Path @@ -38,9 +33,7 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] - ) as assistants_client: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( file_path="../nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS @@ -96,9 +89,7 @@ async def main() -> None: print(f"Start Index: {file_path_annotation.start_index}") print(f"End Index: {file_path_annotation.end_index}") file_name = Path(file_path_annotation.text).name - await assistants_client.save_file( - file_id=file_path_annotation.file_path.file_id, file_name=file_name - ) + await assistants_client.save_file(file_id=file_path_annotation.file_path.file_id, file_name=file_name) print(f"Saved image file to: {Path.cwd() / file_name}") await assistants_client.delete_assistant(assistant.id) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py index c3f2b74b7d6d..571ff5d79104 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py @@ -22,19 +22,14 @@ import asyncio import os from azure.ai.assistants.aio import AssistantsClient -from azure.ai.assistants.models import ( - CodeInterpreterTool, - FilePurpose, - MessageAttachment, - ListSortOrder -) +from azure.ai.assistants.models import CodeInterpreterTool, FilePurpose, MessageAttachment, 
 ListSortOrder from azure.identity.aio import DefaultAzureCredential async def main(): async with DefaultAzureCredential() as creds: -        async with AssistantsClient.from_connection_string( -            credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] +        async with AssistantsClient( +            credential=creds, endpoint=os.environ["PROJECT_ENDPOINT"] ) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py index 7a95a1df6352..442b4d588433 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py @@ -33,8 +33,8 @@ async def main(): async with DefaultAzureCredential() as credential: -    async with AssistantsClient.from_connection_string( -        credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] +    async with AssistantsClient( +        endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential ) as assistants_client: code_interpreter = CodeInterpreterTool() @@ -52,7 +52,7 @@ async def main(): print(f"Created thread, thread ID: {thread.id}") # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("../product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) # Create a message with the attachment diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py index a4791dc9c8c7..e671d6e4b348 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py @@ -31,9 +31,7 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] - ) as assistants_client: + async with AssistantsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as assistants_client: # Initialize assistant functions functions = AsyncFunctionTool(functions=user_async_functions) diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py index 34e62f9ca963..e599b8478365 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py @@ -48,9 +48,9 @@ class Planet(BaseModel): async def main(): async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds, - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as assistants_client: # [START create_assistant] diff --git 
a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py index 2473637752ac..1afe57f10980 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py @@ -29,9 +29,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds, - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as assistants_client: # Initialize assistant toolset with user functions and code interpreter diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py index ec2b570f0978..e4f7ea016922 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py @@ -61,8 +61,9 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: assistant = await assistants_client.create_assistant( model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py 
b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py index c6bf104b05fb..836690e58e4b 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py @@ -96,8 +96,9 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: # [START create_assistant_with_function_tool] @@ -123,7 +124,9 @@ async def main() -> None: print(f"Created message, message ID {message.id}") async with await assistants_client.create_stream( - thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler(functions, assistants_client) + thread_id=thread.id, + assistant_id=assistant.id, + event_handler=MyEventHandler(functions, assistants_client), ) as stream: await stream.until_done() diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py index a00f401c1ac6..65c281f399cd 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py @@ -61,8 +61,9 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, 
conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: # Initialize toolset with user functions diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py index 5d7e0398ce4e..592841c1764e 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py @@ -30,8 +30,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: assistant = await assistants_client.create_assistant( model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py index e1e5c27e47ee..d8869c8896df 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -75,8 +76,9 @@ async def get_stream_chunks(self) -> AsyncGenerator[str, None]: async def main() -> None: async with DefaultAzureCredential() as creds: -        async with AssistantsClient.from_connection_string( -            credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] +        async with AssistantsClient( +            endpoint=os.environ["PROJECT_ENDPOINT"], +            credential=creds, ) as assistants_client: assistant = await assistants_client.create_assistant( model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py index 981002f22710..d3447da92084 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py @@ -33,18 +33,17 @@ async def main(): async with DefaultAzureCredential() as credential: -    async with AssistantsClient.from_connection_string( -        credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] +    async with AssistantsClient( +        endpoint=os.environ["PROJECT_ENDPOINT"], +        credential=credential, ) as assistants_client: # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("../product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] ds = VectorStoreDataSource( asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) - vector_store = await assistants_client.create_vector_store_and_poll( - file_ids=[], name="sample_vector_store" - ) + vector_store = await assistants_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") print(f"Created vector store, vector store ID: {vector_store.id}") # Add the file to the vector store or you can supply file ids in the vector store creation diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py index 2c1eabc34e87..87478cfa4933 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py @@ -29,8 +29,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( @@ -39,9 +40,7 @@ async def main() -> None: print(f"Uploaded file, file ID: {file.id}") # Create a vector store with no file and wait for it to be processed - vector_store = await assistants_client.create_vector_store_and_poll( - file_ids=[], name="sample_vector_store" - ) + vector_store = await assistants_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") print(f"Created vector store, vector store ID: 
{vector_store.id}") # Add the file to the vector store or you can supply file ids in the vector store creation diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py index c506935c77c8..884634138616 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py @@ -27,11 +27,12 @@ async def main(): async with DefaultAzureCredential() as credential: - async with AssistantsClient.from_connection_string( - credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=credential, ) as assistants_client: # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("../product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) vector_store = await assistants_client.create_vector_store_and_poll( data_sources=[ds], name="sample_vector_store" diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py index a6ac7c88b062..97d1a3441f39 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py @@ -27,8 +27,9 @@ async def main(): async with DefaultAzureCredential() as credential: - async with AssistantsClient.from_connection_string( - credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=credential, ) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py index 294980d31c04..cdc7b1ecd666 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py @@ -31,8 +31,9 @@ async def main() -> None: async with DefaultAzureCredential() as creds: - async with AssistantsClient.from_connection_string( - credential=creds, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + async with AssistantsClient( + 
endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, ) as assistants_client: # Upload a file and wait for it to be processed file = await assistants_client.upload_file_and_poll( diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py index fbf4c296a569..2146f780b02d 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_team.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -200,7 +201,9 @@ def _set_default_team_leader(self): """ toolset = ToolSet() toolset.add(default_function_tool) - instructions = self.TEAM_LEADER_INSTRUCTIONS.format(assistant_name="TeamLeader", team_name=self.team_name) + "\n" + instructions = ( + self.TEAM_LEADER_INSTRUCTIONS.format(assistant_name="TeamLeader", team_name=self.team_name) + "\n" + ) # List all assistants (will be empty at this moment if you haven't added any, or you can append after they're added) for member in self._members: instructions += f"- {member.name}: {member.instructions}\n" @@ -331,7 +334,9 @@ def process_request(self, request: str) -> None: messages = self._assistants_client.list_messages(thread_id=self._assistant_thread.id) text_message = messages.get_last_text_message_by_role(role=MessageRole.ASSISTANT) if text_message and text_message.text: - print(f"Assistant '{assistant.name}' completed task. " f"Outcome: {text_message.text.value}") + print( + f"Assistant '{assistant.name}' completed task. 
" f"Outcome: {text_message.text.value}" + ) if self._current_task_span is not None: self._add_task_completion_event(self._current_task_span, result=text_message.text.value) diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py index 0f8ee1d66f0b..98fff01843bb 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/assistant_trace_configurator.py @@ -18,11 +18,11 @@ def __init__(self, assistants_client: AssistantsClient): self.assistants_client = assistants_client def enable_azure_monitor_tracing(self): - application_insights_connection_string = os.environ.get('AI_APPINSIGHTS_CONNECTION_STRING') + application_insights_connection_string = os.environ.get("AI_APPINSIGHTS_CONNECTION_STRING") if not application_insights_connection_string: print("AI_APPINSIGHTS_CONNECTION_STRING environment variable was not set.") print("Please create AI_APPINSIGHTS_CONNECTION_STRING with the Application Insights,") - print("connection string. It should be enabled for this project.") + print("connection string. 
It should be enabled for this project.") print("Enable it via the 'Tracing' tab in your AI Foundry project page.") exit() configure_azure_monitor(connection_string=application_insights_connection_string) diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py index 66981207c671..708f79ffe851 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py @@ -25,9 +25,9 @@ from assistant_team import AssistantTeam from assistant_trace_configurator import AssistantTraceConfigurator -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py index 4fcda33d7269..fe545c391563 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py @@ -28,9 +28,9 @@ from assistant_trace_configurator import AssistantTraceConfigurator from azure.ai.assistants.models import FunctionTool, ToolSet -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") diff --git 
a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py index bf6142d2468c..1798e994ac2b 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py @@ -28,9 +28,9 @@ from assistant_team import AssistantTeam from assistant_trace_configurator import AssistantTraceConfigurator -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) user_function_set_1: Set = {fetch_current_datetime, fetch_weather} diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py index e5fc84cba49e..54f4485098f3 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_ai_search.py @@ -39,9 +39,9 @@ from azure.identity import DefaultAzureCredential from azure.ai.assistants.models import AzureAISearchQueryType, AzureAISearchTool, ListSortOrder, MessageRole -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START create_assistant_with_azure_ai_search_tool] @@ -51,11 +51,7 @@ # Initialize assistant AI search tool and add the search index connection id ai_search = AzureAISearchTool( - index_connection_id=conn_id, - index_name="sample_index", - query_type=AzureAISearchQueryType.SIMPLE, - top_k=3, - filter="" + index_connection_id=conn_id, index_name="sample_index", 
query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter="" ) # Create assistant with AI search tool and process assistant run diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py index ea0ca27d79d0..5c0354743ee6 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py @@ -31,9 +31,9 @@ from azure.ai.assistants.models import AzureFunctionStorageQueue, AzureFunctionTool, MessageRole from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(exclude_managed_identity_credential=True, exclude_environment_credential=True), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py index 46f0fc863398..886ffc5a7445 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py @@ -25,15 +25,12 @@ import os, time from azure.ai.assistants import AssistantsClient from azure.identity import DefaultAzureCredential -from azure.ai.assistants.models import ( - ListSortOrder, - MessageTextContent -) +from azure.ai.assistants.models import ListSortOrder, MessageTextContent # [START create_project_client] -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [END create_project_client] diff --git 
a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py index 2272df5ef505..aa9c4a56f260 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py @@ -30,9 +30,9 @@ from azure.ai.assistants.telemetry import enable_telemetry from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START enable_tracing] @@ -40,7 +40,7 @@ from azure.monitor.opentelemetry import configure_azure_monitor # Enable Azure Monitor tracing -application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] +application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] configure_azure_monitor(connection_string=application_insights_connection_string) # enable additional instrumentations diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py index ebf991cda895..a5abe60f8053 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py @@ -36,9 +36,9 @@ from azure.identity import DefaultAzureCredential from opentelemetry import trace -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Enable console tracing @@ -59,9 +59,7 @@ 
thread = assistants_client.create_thread() print(f"Created thread, thread ID: {thread.id}") - message = assistants_client.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID: {message.id}") run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py index dd974c97278f..a801c0637b03 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py @@ -60,9 +60,9 @@ def on_end(self, span: ReadableSpan): pass -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Enable console tracing @@ -87,9 +87,7 @@ def on_end(self, span: ReadableSpan): thread = assistants_client.create_thread() print(f"Created thread, thread ID: {thread.id}") - message = assistants_client.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID: {message.id}") run = assistants_client.create_run(thread_id=thread.id, assistant_id=assistant.id) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py index 3443cd4da0a6..d5c6697ef78d 100644 --- 
a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py @@ -30,9 +30,9 @@ from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START create_assistant_with_bing_grounding_tool] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py index 5d4e5b4c2cb5..d31d601c361a 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py @@ -30,8 +30,9 @@ from azure.identity import DefaultAzureCredential from pathlib import Path -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py index 0ca5b3b4637c..52a601be3828 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py @@ -32,8 +32,9 @@ ) from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + 
endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: @@ -56,7 +57,7 @@ # [START upload_file_and_create_message_with_code_interpreter] # We will upload the local file to Azure and will use it for vector store creation. - _, asset_uri = assistants_client.upload_file_to_azure_blob("./product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) # Create a message with the attachment diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py index b61aec56d587..58c0f5b1d472 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py @@ -25,15 +25,16 @@ from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: # [START upload_file_and_create_assistant_with_file_search] # We will upload the local file to Azure and will use it for vector store creation. 
- _, asset_uri = assistants_client.upload_file_to_azure_blob("./product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] # Create a vector store with no file and wait for it to be processed ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py index 29c5909cb7b1..6c9b89114ee0 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py @@ -26,13 +26,13 @@ from azure.identity import DefaultAzureCredential from azure.ai.assistants.models import FabricTool -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START create_assistant_with_fabric_tool] -conn_id = os.environ['FABRIC_CONNECTION_ID'] +conn_id = os.environ["FABRIC_CONNECTION_ID"] print(conn_id) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py index a12f4e0ee2d6..05ec5c652915 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py @@ -29,8 +29,9 @@ ) from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py index 
fcd0d98408b6..a8c5345f5588 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py @@ -27,8 +27,9 @@ from azure.ai.assistants.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput from user_functions import user_functions -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) # Initialize function tool with user functions diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py index f8ac75ffe5e6..8f85e904056a 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py @@ -34,12 +34,13 @@ from opentelemetry import trace from azure.monitor.opentelemetry import configure_azure_monitor -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) # Enable Azure Monitor tracing -application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] +application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] configure_azure_monitor(connection_string=application_insights_connection_string) # enable additional instrumentations if needed diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py index 90232ce20c8a..2bae3865b4c1 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py @@ -38,8 +38,9 @@ from azure.ai.assistants.telemetry import trace_function, enable_telemetry from opentelemetry import trace -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) # Enable console tracing diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py index e1a89c18e5da..d8fbebf4bbf1 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py @@ -36,9 +36,9 @@ ) # [START create_assistants_client] -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [END create_assistants_client] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py index 62528ebacf5f..b5c80eabc7dc 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py @@ -37,7 +37,6 @@ import os -import requests from typing import Set from azure.ai.assistants import AssistantsClient @@ -53,14 +52,14 @@ # [START register_logic_app] # Create the project client -assistants_client = AssistantsClient.from_connection_string( 
+assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Extract subscription and resource group from the project scope -subscription_id = assistants_client.scope["subscription_id"] -resource_group = assistants_client.scope["resource_group_name"] +subscription_id = os.environ["SUBSCRIPTION_ID"] +resource_group = os.environ["RESOURCE_GROUP_NAME"] # Logic App details logic_app_name = "" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py index 4a955be24415..082e7a132f35 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py @@ -31,9 +31,9 @@ from azure.ai.assistants.models import OpenApiTool, OpenApiAnonymousAuthDetails -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # [START create_assistant_with_openapi] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py index 14d8622707ea..75d4c86727b2 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License.
@@ -36,12 +37,12 @@ import jsonref from azure.ai.assistants import AssistantsClient from azure.identity import DefaultAzureCredential -from azure.ai.assistants.models import OpenApiTool, OpenApiConnectionAuthDetails, OpenApiConnectionSecurityScheme +from azure.ai.assistants.models import OpenApiTool, OpenApiConnectionAuthDetails, OpenApiConnectionSecurityScheme -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) connection_name = os.environ["PROJECT_OPENAPI_CONNECTION_NAME"] @@ -50,22 +51,21 @@ print(connection_id) -with open('./tripadvisor_openapi.json', 'r') as f: +with open("./tripadvisor_openapi.json", "r") as f: openapi_spec = jsonref.loads(f.read()) # Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) auth = OpenApiConnectionAuthDetails(security_scheme=OpenApiConnectionSecurityScheme(connection_id=connection_id)) # Initialize an Assistant OpenApi tool using the read in OpenAPI spec -openapi = OpenApiTool(name="get_weather", spec=openapi_spec, description="Retrieve weather information for a location", auth=auth) +openapi = OpenApiTool( + name="get_weather", spec=openapi_spec, description="Retrieve weather information for a location", auth=auth +) # Create an Assistant with OpenApi tool and process Assistant run with assistants_client: assistant = assistants_client.create_assistant( - model=model_name, - name="my-assistant", - instructions="You are a helpful assistant", - tools=openapi.definitions + model=model_name, name="my-assistant", instructions="You are a helpful assistant", tools=openapi.definitions ) print(f"Created assistant, ID: {assistant.id}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py 
b/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py index 3edf52a10b9e..b324841552f1 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py @@ -28,9 +28,9 @@ from azure.ai.assistants.models import FunctionTool, ToolSet, CodeInterpreterTool from user_functions import user_functions -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Create assistant with toolset and process assistant run diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py index 495c15a770f1..9cba3ad5417c 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py @@ -33,9 +33,9 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Initialize Sharepoint tool with connection id diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py index b69e2baba867..2af7dd61353f 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py @@ -36,9 +36,9 @@ from typing import Any, Optional -assistants_client = AssistantsClient.from_connection_string( +assistants_client = 
AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py index 22a76c64f3d0..f162c9ed662e 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py @@ -41,9 +41,9 @@ from opentelemetry import trace from azure.monitor.opentelemetry import configure_azure_monitor -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) @@ -77,7 +77,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: # Enable Azure Monitor tracing -application_insights_connection_string = os.environ['AI_APPINSIGHTS_CONNECTION_STRING'] +application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] configure_azure_monitor(connection_string=application_insights_connection_string) scenario = os.path.basename(__file__) @@ -97,9 +97,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: thread = assistants_client.create_thread() print(f"Created thread, thread ID {thread.id}") - message = assistants_client.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") with assistants_client.create_stream( diff --git 
a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py index c1fc673681ba..8d65ee6bddf3 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -78,8 +79,9 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py index 8b5db1b0b91d..057d81e0e71b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py @@ -44,9 +44,9 @@ from typing import Any from opentelemetry import trace -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) @@ -98,9 +98,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: thread = assistants_client.create_thread() print(f"Created thread, thread 
ID {thread.id}") - message = assistants_client.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") with assistants_client.create_stream( diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py index c76f9d316cf0..3c526b92689d 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py @@ -40,8 +40,9 @@ from azure.identity import DefaultAzureCredential from user_functions import user_functions -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py index 756ba2bd2e17..b90de1826877 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py @@ -38,8 +38,9 @@ from typing import Any from user_functions import user_functions -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) diff --git 
a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py index 39e213b65649..d3f1102cc4d3 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py @@ -33,9 +33,9 @@ RunStep, ) -assistants_client = AssistantsClient.from_connection_string( +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py index 1474ee93aeff..10da7ab431da 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -39,12 +40,13 @@ ) from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: - bing_connection_id = os.environ['AZURE_BING_CONECTION_ID'] + bing_connection_id = os.environ["AZURE_BING_CONECTION_ID"] bing = BingGroundingTool(connection_id=bing_connection_id) print(f"Bing Connection ID: {bing_connection_id}") diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py index 948b6496b489..16a4e6fcc2be 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py @@ -29,8 +29,9 @@ from azure.ai.assistants.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py index d689e165a3b2..4e0735d1c0af 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py @@ -35,8 +35,9 @@ from azure.identity import DefaultAzureCredential from user_functions import 
user_functions -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) functions = FunctionTool(user_functions) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py index 4b4325ccdcb1..a34c7def89e8 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py @@ -73,8 +73,9 @@ def get_stream_chunks(self) -> Generator[str, None, None]: yield chunk -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py index 2d9ac0fdee12..88da2bb1a91e 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py @@ -25,14 +25,15 @@ from azure.ai.assistants.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + 
endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: # We will upload the local file to Azure and will use it for vector store creation. - _, asset_uri = assistants_client.upload_file_to_azure_blob("./product_info_1.md") + asset_uri = os.environ["AZURE_BLOB_URI"] # [START attach_files_to_store] # Create a vector store with no file and wait for it to be processed diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py index 4ca83aaa8384..b3f55cab1277 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py @@ -27,8 +27,9 @@ from azure.ai.assistants.models import FileSearchTool, FilePurpose from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py index b96797c97d5f..a31924190c7a 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py @@ -25,8 +25,9 @@ from azure.ai.assistants.models import FileSearchTool, FilePurpose from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + 
endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py index 93e85c3abe3e..9eb050971bb9 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py @@ -31,8 +31,9 @@ from azure.identity import DefaultAzureCredential from pathlib import Path -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py index 765d696e9fd2..0751c365255b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py @@ -27,8 +27,9 @@ from azure.ai.assistants.models import FilePurpose, FileSearchTool, MessageAttachment from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py 
index 765d696e9fd2..0751c365255b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py @@ -27,8 +27,9 @@ from azure.ai.assistants.models import FilePurpose, FileSearchTool, MessageAttachment from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py index ae67c9fba346..86900f428c99 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py @@ -27,8 +27,9 @@ from azure.ai.assistants.models import FileSearchTool from azure.identity import DefaultAzureCredential -assistants_client = AssistantsClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), ) with assistants_client: diff --git a/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py index 6b632064c12d..6a927c646779 100644 --- a/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py +++ b/sdk/ai/azure-ai-assistants/tests/overload_assert_utils.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression import io import json import unittest @@ -101,9 +102,6 @@ def _get_mock_client() -> AssistantsClient: """Return the fake project client""" client = 
AssistantsClient( endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", - subscription_id="00000000-0000-0000-0000-000000000000", - resource_group_name="non-existing-rg", - project_name="non-existing-project", credential=MagicMock(), ) client.submit_tool_outputs_to_run = MagicMock() diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py index a6b41004ba18..09cf9eef56f4 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_mock_overloads.py @@ -55,7 +55,9 @@ async def test_create_vector_store_and_poll( ), ), patch( "azure.ai.assistants._operations.AssistantsClientOperationsMixin.get_vector_store", - wraps=get_mock_fn(assistant.get_vector_store, return_val=VectorStore({"id": "store_1", "status": "completed"})), + wraps=get_mock_fn( + assistant.get_vector_store, return_val=VectorStore({"id": "store_1", "status": "completed"}) + ), ): assistant.create_vector_store_and_poll(file_ids=file_ids, sleep_interval=0) diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py index b9e5e1b6b81d..eee0b4629d64 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_models.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression from typing import Iterator, List from unittest.mock import Mock, patch import pytest diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py index 0a680bcabe33..432af973680c 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistant_models_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression from typing import AsyncIterator, List from unittest.mock 
import AsyncMock, patch import pytest diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py index 55b8a02b7f2c..2f7b55d6b1a6 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py @@ -33,7 +33,7 @@ AssistantEventHandler, AssistantStreamEvent, AssistantThread, - AzureAISearchTool, + AzureAISearchTool, AzureFunctionStorageQueue, AzureFunctionTool, CodeInterpreterTool, @@ -85,12 +85,12 @@ assistantClientPreparer = functools.partial( EnvironmentVariableLoader, - "azure_ai.assistants", - azure_ai_assistants_assistants_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", + "azure_ai_assistants", + azure_ai_assistants_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name", azure_ai_assistants_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", azure_ai_assistants_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", azure_ai_assistants_assistants_tests_search_index_name="sample_index", - azure_ai_assistants_assistants_tests_search_connection_name="search_connection_name" + azure_ai_assistants_assistants_tests_search_connection_name="search_connection_name", ) @@ -130,13 +130,13 @@ class TestAssistantClient(AzureRecordedTestCase): # helper function: create client using environment variables def create_client(self, **kwargs): # fetch environment variables - connection_string = kwargs.pop("azure_ai.assistants_assistants_tests_project_connection_string") + endpoint = kwargs.pop("azure_ai_assistants_assistants_tests_project_endpoint") credential = 
self.get_credential(AssistantsClient, is_async=False) # create and return client - client = AssistantsClient.from_connection_string( + client = AssistantsClient( + endpoint=endpoint, credential=credential, - conn_str=connection_string, ) return client @@ -244,7 +244,9 @@ def _do_test_create_assistant(self, client, body, functions): assert assistant.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) else: - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) assert assistant.name == "my-assistant" @@ -290,7 +292,9 @@ def _do_test_update_assistant(self, client, use_body, use_io): """helper function for updating assistant with different body inputs""" # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id # update assistant @@ -319,15 +323,20 @@ def test_assistant_list(self, **kwargs): list_length = client.assistants.list_assistants().data.__len__() # create assistant and check that it appears in the list - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert client.assistants.list_assistants().data.__len__() == list_length + 1 assert client.assistants.list_assistants().data[0].id == assistant.id # create second assistant and check that it appears in the list - 
assistant2 = client.assistants.create_assistant(model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant") + assistant2 = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant" + ) assert client.assistants.list_assistants().data.__len__() == list_length + 2 assert ( - client.assistants.list_assistants().data[0].id == assistant.id or client.assistants.list_assistants().data[1].id == assistant.id + client.assistants.list_assistants().data[0].id == assistant.id + or client.assistants.list_assistants().data[1].id == assistant.id ) # delete assistants and check list @@ -355,7 +364,9 @@ def test_create_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -430,7 +441,9 @@ def test_get_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -458,7 +471,9 @@ def test_update_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, 
assistant ID", assistant.id) @@ -550,7 +565,9 @@ def test_delete_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -622,7 +639,9 @@ def _do_test_create_message(self, client, body): if body: message = client.assistants.create_message(thread_id=thread.id, body=body) else: - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -635,7 +654,9 @@ def test_create_multiple_messages(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -645,7 +666,9 @@ def test_create_multiple_messages(self, **kwargs): print("Created thread, thread ID", thread.id) # create messages - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) message2 = client.assistants.create_message( @@ -672,7 +695,9 @@ def test_list_messages(self, **kwargs): assert isinstance(client, AssistantsClient) # 
create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -687,7 +712,9 @@ def test_list_messages(self, **kwargs): assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message1 = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message1.id print("Created message, message ID", message1.id) messages1 = client.assistants.list_messages(thread_id=thread.id) @@ -729,7 +756,9 @@ def test_get_message(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -739,7 +768,9 @@ def test_get_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -823,7 +854,9 @@ def test_create_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + 
assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -872,7 +905,9 @@ def _do_test_create_run(self, client, use_body, use_io=False): """helper function for creating run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -909,7 +944,9 @@ def test_get_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -942,7 +979,9 @@ def test_run_status(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -952,7 +991,9 @@ def test_run_status(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.assistants.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id 
print("Created message, message ID", message.id) @@ -994,7 +1035,9 @@ def test_update_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1059,7 +1102,9 @@ def test_update_run_with_iobytes(self, **kwargs): def _do_test_update_run(self, client, body): """helper function for updating run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1378,7 +1423,9 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): """helper function for creating thread and run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1442,7 +1489,9 @@ def test_list_run_step(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created 
assistant, assistant ID", assistant.id) @@ -1501,7 +1550,9 @@ def test_get_run_step(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1572,7 +1623,9 @@ def test_create_stream(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1611,7 +1664,9 @@ def test_create_stream_with_body(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1655,7 +1710,9 @@ def test_create_stream_with_iobytes(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.assistants.create_assistant( + model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" + ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1676,7 +1733,9 @@ def test_create_stream_with_iobytes(self, **kwargs): binary_body = 
json.dumps(body).encode("utf-8") # create stream - with client.assistants.create_stream(thread_id=thread.id, body=io.BytesIO(binary_body), stream=True) as stream: + with client.assistants.create_stream( + thread_id=thread.id, body=io.BytesIO(binary_body), stream=True + ) as stream: for event_type, event_data, _ in stream: assert ( isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) @@ -2305,7 +2364,7 @@ def _do_test_create_vector_store(self, streaming, **kwargs): else: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2326,7 +2385,7 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2400,7 +2459,7 @@ def _do_test_create_vector_store_add_file(self, streaming, **kwargs): ds = None else: ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type="uri_asset", ) vector_store = ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") @@ -2452,7 +2511,7 @@ def _do_test_create_vector_store_batch(self, streaming, **kwargs): file_ids = None ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2522,7 +2581,7 @@ def _test_file_search( def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" ds = 
VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_message_attachment(data_source=ds, **kwargs) @@ -2585,7 +2644,7 @@ def _do_test_message_attachment(self, **kwargs): def test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @@ -2606,7 +2665,9 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS) + file = ai_client.assistants.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) assert file.id, "The file was not uploaded." 
file_id = file.id @@ -2647,7 +2708,7 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -2668,7 +2729,9 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS) + file = ai_client.assistants.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) assert file.id, "The file was not uploaded." file_id = file.id @@ -2714,7 +2777,7 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2757,7 +2820,7 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_attachment_in_thread_azure(data_source=ds, **kwargs) @@ -2817,50 +2880,50 @@ def test_azure_ai_search_tool(self, **kwargs): with self.create_client(**kwargs) as client: assert isinstance(client, AssistantsClient) - # Create 
AzureAISearchTool - connection_name = kwargs.pop("azure_ai.assistants_assistants_tests_search_connection_name", "my-search-connection-name") + # Create AzureAISearchTool + connection_name = kwargs.pop( + "azure_ai_assistants_assistants_tests_search_connection_name", "my-search-connection-name" + ) connection = client.connections.get(connection_name=connection_name) conn_id = connection.id - index_name = kwargs.pop("azure_ai.assistants_assistants_tests_search_index_name", "my-search-index") - + index_name = kwargs.pop("azure_ai_assistants_assistants_tests_search_index_name", "my-search-index") + azure_search_tool = AzureAISearchTool( index_connection_id=conn_id, - index_name=index_name, + index_name=index_name, ) - + # Create assistant with the search tool assistant = client.assistants.create_assistant( model="gpt-4o", name="search-assistant", instructions="You are a helpful assistant that can search for information using Azure AI Search.", tools=azure_search_tool.definitions, - tool_resources=azure_search_tool.resources + tool_resources=azure_search_tool.resources, ) assert assistant.id print(f"Created assistant with ID: {assistant.id}") - + # Create thread thread = client.assistants.create_thread() assert thread.id print(f"Created thread with ID: {thread.id}") - + # Create message message = client.assistants.create_message( - thread_id=thread.id, - role="user", - content="Search for information about iPhone prices." + thread_id=thread.id, role="user", content="Search for information about iPhone prices." 
) assert message.id print(f"Created message with ID: {message.id}") - + # Create and process run run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, run.last_error.message - + # List messages to verify tool was used messages = client.assistants.list_messages(thread_id=thread.id) assert len(messages.data) > 0 - + # Clean up client.assistants.delete_assistant(assistant.id) print("Deleted assistant") @@ -2886,7 +2949,7 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw with self.create_client(**kwargs) as ai_client: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2919,7 +2982,9 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw if use_stream: run = None - with ai_client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id, include=include) as stream: + with ai_client.assistants.create_stream( + thread_id=thread.id, assistant_id=assistant.id, include=include + ) as stream: for event_type, event_data, _ in stream: if isinstance(event_data, ThreadRun): run = event_data @@ -2927,7 +2992,9 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw print("Stream completed.") break else: - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id, include=include) + run = ai_client.assistants.create_and_process_run( + thread_id=thread.id, assistant_id=assistant.id, include=include + ) assert run.status == RunStatus.COMPLETED assert run is not None steps = ai_client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) @@ -3035,7 +3102,9 @@ def test_assistants_with_json_schema(self, **kwargs): def _get_file_id_maybe(self, 
ai_client: AssistantsClient, **kwargs) -> str: """Return file id if kwargs has file path.""" if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS) + file = ai_client.assistants.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS + ) assert file.id, "The file was not uploaded." return file.id return None @@ -3120,7 +3189,7 @@ def test_azure_function_call(self, **kwargs): # Note: This test was recorded in westus region as for now # 2025-02-05 it is not supported in test region (East US 2) # create client - storage_queue = kwargs["azure_ai.assistants_assistants_tests_storage_queue"] + storage_queue = kwargs["azure_ai_assistants_assistants_tests_storage_queue"] with self.create_client(**kwargs) as client: azure_function_tool = AzureFunctionTool( name="foo", diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py index 3c125f7a078c..af72d85e4b7f 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py @@ -81,7 +81,7 @@ assistantClientPreparer = functools.partial( EnvironmentVariableLoader, - "azure_ai.assistants", + "azure_ai_assistants", - azure_ai_assistants_assistants_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", + azure_ai_assistants_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name", azure_ai_assistants_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", azure_ai_assistants_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", 
) @@ -123,13 +123,13 @@ class TestAssistantClientAsync(AzureRecordedTestCase): # helper function: create client using environment variables def create_client(self, **kwargs): # fetch environment variables - connection_string = kwargs.pop("azure_ai.assistants_assistants_tests_project_connection_string") + endpoint = kwargs.pop("azure_ai_assistants_assistants_tests_project_endpoint") credential = self.get_credential(AssistantsClient, is_async=True) # create and return client - client = AssistantsClient.from_connection_string( + client = AssistantsClient( + endpoint=endpoint, credential=credential, - conn_str=connection_string, ) return client @@ -216,7 +216,10 @@ async def test_create_assistant_with_tools(self, **kwargs): # create assistant with tools assistant = await client.assistants.create_assistant( - model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", tools=functions.definitions + model="gpt-4o", + name="my-assistant", + instructions="You are helpful assistant", + tools=functions.definitions, ) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -1550,7 +1553,9 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): print("Tool outputs:", tool_outputs) if tool_outputs: body = {"tool_outputs": tool_outputs} - await client.assistants.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, body=body) + await client.assistants.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, body=body + ) print("Current run status:", run.status) @@ -2236,7 +2241,7 @@ async def _do_test_create_vector_store(self, streaming, **kwargs): else: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2284,7 +2289,7 @@ async def _do_test_create_vector_store_add_file(self, streaming, **kwargs): ds = None else: ds = VectorStoreDataSource( - 
asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) vector_store = await ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") @@ -2336,7 +2341,7 @@ async def _do_test_create_vector_store_batch(self, streaming, **kwargs): file_ids = None ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2371,7 +2376,9 @@ async def _test_file_search( if streaming: thread_run = None - async with await ai_client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + async with await ai_client.assistants.create_stream( + thread_id=thread.id, assistant_id=assistant.id + ) as stream: async for _, event_data, _ in stream: if isinstance(event_data, ThreadRun): thread_run = event_data @@ -2404,7 +2411,7 @@ async def _test_file_search( async def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_message_attachment(data_sources=[ds], **kwargs) @@ -2472,7 +2479,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2515,7 +2522,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): async def 
test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @@ -2580,7 +2587,7 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): async def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -2649,7 +2656,7 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2692,7 +2699,7 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): async def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs) @@ -2751,7 +2758,7 @@ async def test_azure_function_call(self, **kwargs): # Note: This test was recorded in 
westus region as for now # 2025-02-05 it is not supported in test region (East US 2) # create client - storage_queue = kwargs["azure_ai.assistants_assistants_tests_storage_queue"] + storage_queue = kwargs["azure_ai_assistants_assistants_tests_storage_queue"] async with self.create_client(**kwargs) as client: azure_function_tool = AzureFunctionTool( name="foo", @@ -2877,7 +2884,7 @@ async def _do_test_include_file_search_results(self, use_stream, include_content async with self.create_client(**kwargs) as ai_client: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai.assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py index 67359b64dae4..5bae3fbe46f1 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock.py @@ -60,9 +60,6 @@ def get_mock_client(self) -> AssistantsClient: """Return the fake project client""" client = AssistantsClient( endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", - subscription_id="00000000-0000-0000-0000-000000000000", - resource_group_name="non-existing-rg", - project_name="non-existing-project", credential=MagicMock(), ) client.submit_tool_outputs_to_run = MagicMock() diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py index de9bfa0b8763..278b3ad963f1 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_mock_async.py @@ -60,9 +60,6 @@ def get_mock_client(self) -> AssistantsClient: """Return the fake project client""" client = AssistantsClient( endpoint="www.bcac95dd-a1eb-11ef-978f-8c1645fec84b.com", - 
subscription_id="00000000-0000-0000-0000-000000000000", - resource_group_name="non-existing-rg", - project_name="non-existing-project", credential=AsyncMock(), ) client.submit_tool_outputs_to_run = AsyncMock() @@ -214,7 +211,9 @@ def _assert_tool_call(self, submit_tool_mock: AsyncMock, run_id: str, tool_set: else: submit_tool_mock.assert_not_called() - def _assert_toolset_dict(self, assistants_client: AssistantsClient, assistant_id: str, toolset: Optional[AsyncToolSet]): + def _assert_toolset_dict( + self, assistants_client: AssistantsClient, assistant_id: str, toolset: Optional[AsyncToolSet] + ): """Check that the tool set dictionary state is as expected.""" if toolset is None: assert assistant_id not in assistants_client._toolset diff --git a/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py b/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py index 3f7c8b343e18..38240159be7f 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py +++ b/sdk/ai/azure-ai-assistants/tests/test_overload_assert.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression import unittest import pytest from azure.ai.assistants import AssistantsClient @@ -9,7 +10,9 @@ class TestDeclarator(unittest.TestCase): @pytest.mark.asyncio @assert_same_http_requests - async def test_assert_errors(self, assistant: AssistantsClient, _: AsyncAssistantsOperations, assertion: OverloadAssertion): + async def test_assert_errors( + self, assistant: AssistantsClient, _: AsyncAssistantsOperations, assertion: OverloadAssertion + ): # This is a special test case tested verified the decorator assert name field presents in one call but not another model = "gpt-4-1106-preview" name = "first" diff --git a/sdk/ai/azure-ai-assistants/tsp-location.yaml b/sdk/ai/azure-ai-assistants/tsp-location.yaml index 2df370e20752..157aa848374c 100644 --- a/sdk/ai/azure-ai-assistants/tsp-location.yaml +++ b/sdk/ai/azure-ai-assistants/tsp-location.yaml @@ -1,4 +1,4 @@ directory: 
specification/ai/Azure.AI.Assistants -commit: 02a554c61c069231f265000c3e94c09d42579ae0 +commit: 690df2e0bc38d885e92b9eab292d21839f5174a8 repo: Azure/azure-rest-api-specs additionalDirectories: From 29fb20f0917ddb13078f2bbd94a1273449c28c8b Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Thu, 10 Apr 2025 22:12:48 -0700 Subject: [PATCH 02/11] Add more config --- sdk/ai/azure-ai-assistants/pyrightconfig.json | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 sdk/ai/azure-ai-assistants/pyrightconfig.json diff --git a/sdk/ai/azure-ai-assistants/pyrightconfig.json b/sdk/ai/azure-ai-assistants/pyrightconfig.json new file mode 100644 index 000000000000..3af0746bdada --- /dev/null +++ b/sdk/ai/azure-ai-assistants/pyrightconfig.json @@ -0,0 +1,21 @@ +{ + "reportTypeCommentUsage": true, + "reportMissingImports": false, + "pythonVersion": "3.11", + "exclude": [ + "**/downloaded", + "**/sample_assistants_vector_store_batch_enterprise_file_search_async.py", + "**/sample_assistants_with_file_search_attachment.py", + "**/sample_assistants_with_code_interpreter_file_attachment.py", + "**/sample_assistants_code_interpreter_attachment_enterprise_search.py", + "**/sample_assistants_with_file_search_attachment_async.py", + "**/sample_assistants_code_interpreter_attachment_enterprise_search_async.py", + "**/sample_assistants_code_interpreter_attachment_enterprise_search_async.py", + "**/sample_assistants_code_interpreter_attachment_async.py" + ], + "extraPaths": [ + "./../../core/azure-core", + "./../../identity/azure-identity", + "./../../monitor/azure-monitor-opentelemetry" + ] +} \ No newline at end of file From fbfec6a997e96405fbeb8627d40583a3af72a745 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Fri, 11 Apr 2025 13:13:02 -0700 Subject: [PATCH 03/11] Fix linters --- .../azure/ai/assistants/_patch.py | 320 ++++++++---------- .../azure/ai/assistants/aio/_patch.py | 320 
++++++++---------- .../azure/ai/assistants/models/_patch.py | 10 +- 3 files changed, 299 insertions(+), 351 deletions(-) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py index b5f3733040e0..d6b8e19f0f8c 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -87,12 +87,12 @@ def create_assistant( # pylint: disable=arguments-differ :keyword instructions: The system instructions for the new assistant to use. Default value is None. :paramtype instructions: str :keyword tools: The collection of tools to enable for the new assistant. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -107,15 +107,15 @@ def create_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -151,7 +151,7 @@ def create_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet + :paramtype toolset: ~azure.ai.assistants.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -166,15 +166,15 @@ def create_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -190,7 +190,7 @@ def create_assistant( Default value is "application/json". :paramtype content_type: str :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -206,7 +206,7 @@ def create_assistant( Default value is "application/json". :paramtype content_type: str :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -330,12 +330,12 @@ def update_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. 
For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -350,15 +350,15 @@ def update_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -398,7 +398,7 @@ def update_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. 
- :paramtype toolset: ~azure.ai.projects.models.ToolSet + :paramtype toolset: ~azure.ai.assistants.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -413,15 +413,15 @@ def update_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -439,7 +439,7 @@ def update_assistant( Default value is "application/json". :paramtype content_type: str :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -457,7 +457,7 @@ def update_assistant( Default value is "application/json". :paramtype content_type: str :return: Assistant. 
The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -498,15 +498,15 @@ def update_assistant( :paramtype instructions: str :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet + :paramtype toolset: ~azure.ai.assistants.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -521,8 +521,8 @@ def update_assistant( :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -532,7 +532,7 @@ def update_assistant( None. :paramtype metadata: dict[str, str] :return: Assistant. The Assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Assistant + :rtype: ~azure.ai.assistants.models.Assistant :raises ~azure.core.exceptions.HttpResponseError: """ self._validate_tools_and_tool_resources(tools, tool_resources) @@ -618,7 +618,7 @@ def create_run( # pylint: disable=arguments-differ Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -634,10 +634,10 @@ def create_run( # pylint: disable=arguments-differ :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. 
- :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -666,17 +666,17 @@ def create_run( # pylint: disable=arguments-differ :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. 
:paramtype parallel_tool_calls: bool @@ -686,7 +686,7 @@ def create_run( # pylint: disable=arguments-differ None. :paramtype metadata: dict[str, str] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -710,12 +710,12 @@ def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -739,12 +739,12 @@ def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -782,7 +782,7 @@ def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword assistant_id: The ID of the assistant that should run the thread. Required. :paramtype assistant_id: str :keyword model: The overridden model name that the assistant should use to run the thread. Default @@ -797,10 +797,10 @@ def create_run( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -829,17 +829,17 @@ def create_run( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. 
- :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -849,7 +849,7 @@ def create_run( None. :paramtype metadata: dict[str, str] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -924,7 +924,7 @@ def create_and_process_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword model: The overridden model name that the assistant should use to run the thread. Default value is None. :paramtype model: str @@ -937,10 +937,10 @@ def create_and_process_run( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources`). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.ToolSet + :paramtype toolset: ~azure.ai.assistants.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -969,19 +969,19 @@ def create_and_process_run( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. 
:paramtype tool_choice: str or str or - ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. :paramtype response_format: str or str or - ~azure.ai.projects.models.AssistantsApiResponseFormatMode or - ~azure.ai.projects.models.AssistantsApiResponseFormat + ~azure.ai.assistants.models.AssistantsApiResponseFormatMode or + ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -994,7 +994,7 @@ def create_and_process_run( Default value is 1. :paramtype sleep_interval: int :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ # Create and initiate the run with additional parameters @@ -1086,7 +1086,7 @@ def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword assistant_id: The ID of the assistant that should run the thread. Required. :paramtype assistant_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
@@ -1104,10 +1104,10 @@ def create_stream( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1136,17 +1136,17 @@ def create_stream( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1158,7 +1158,7 @@ def create_stream( :keyword event_handler: None :paramtype event_handler: None. _models.AssistantEventHandler will be applied as default. :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1197,7 +1197,7 @@ def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1213,10 +1213,10 @@ def create_stream( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. 
- :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1245,17 +1245,17 @@ def create_stream( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. 
:paramtype parallel_tool_calls: bool @@ -1266,9 +1266,9 @@ def create_stream( :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1295,14 +1295,14 @@ def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword event_handler: None :paramtype event_handler: None. _models.AssistantEventHandler will be applied as default. :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1329,15 +1329,15 @@ def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1378,7 +1378,7 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword assistant_id: The ID of the assistant that should run the thread. Required. :paramtype assistant_id: str :keyword model: The overridden model name that the assistant should use to run the thread. Default @@ -1393,10 +1393,10 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. 
- :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1425,17 +1425,17 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], AssistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.AssistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.AssistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.AssistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.AssistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.AssistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.AssistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1446,9 +1446,9 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler :return: AssistantRunStream. AssistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AssistantRunStream + :rtype: ~azure.ai.assistants.models.AssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1518,15 +1518,15 @@ def submit_tool_outputs_to_run( # pylint: disable=arguments-differ :param run_id: Required. :type run_id: str :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AssistantEventHandler :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1548,7 +1548,7 @@ def submit_tool_outputs_to_run( Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1570,7 +1570,7 @@ def submit_tool_outputs_to_run( Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1595,9 +1595,9 @@ def submit_tool_outputs_to_run( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1646,7 +1646,7 @@ def submit_tool_outputs_to_stream( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.BaseAssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -1673,12 +1673,12 @@ def submit_tool_outputs_to_stream( :param run_id: Required. :type run_id: str :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.BaseAssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1704,9 +1704,9 @@ def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOverload :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.BaseAssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.BaseAssistantEventHandler :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1759,39 +1759,14 @@ def _handle_submit_tool_outputs( event_handler=event_handler, ) - # pylint: disable=arguments-differ @overload - def upload_file( # pylint: disable=arguments-differ - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.OpenAIFile: + def upload_file(self, body: _models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :keyword file_path: Required. 
- :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :param body: Multipart body. Required. + :type body: ~azure.ai.assistants.models.UploadFileRequest :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ - @overload - def upload_file( # pylint: disable=arguments-differ - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1799,10 +1774,10 @@ def upload_file( # pylint: disable=arguments-differ def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :param body: Required. + :param body: Multipart body. Required. :type body: JSON :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1810,11 +1785,6 @@ def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: def upload_file( self, body: Union[_models.UploadFileRequest, JSON] = _Unset, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, **kwargs: Any, ) -> _models.OpenAIFile: """ @@ -1840,6 +1810,10 @@ def upload_file( if body is not _Unset: return super().upload_file(body=body, **kwargs) + purpose = kwargs.get('purpose') + file = kwargs.get('file') + file_path = kwargs.get('file_path') + filename = kwargs.get('filename') if isinstance(purpose, FilePurpose): purpose = purpose.value @@ -1876,7 +1850,7 @@ def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwarg is 1. :paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1893,17 +1867,17 @@ def upload_file_and_poll( """Uploads a file for use by other operations. :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType + :paramtype file: ~azure.ai.assistants._vendor.FileType :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose :keyword filename: Default value is None. :paramtype filename: str :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. 
:paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1917,12 +1891,12 @@ def upload_file_and_poll( :type file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. :paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1994,7 +1968,7 @@ def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2023,12 +1997,12 @@ def create_vector_store_and_poll( :keyword name: The name of the vector store. Default value is None. :paramtype name: str :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. 
- :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -2038,7 +2012,7 @@ def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2057,7 +2031,7 @@ def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2089,12 +2063,12 @@ def create_vector_store_and_poll( :keyword name: The name of the vector store. Default value is None. :paramtype name: str :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. 
- :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -2104,7 +2078,7 @@ def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2161,7 +2135,7 @@ def create_vector_store_file_batch_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2184,18 +2158,18 @@ def create_vector_store_file_batch_and_poll( :keyword file_ids: List of file identifiers. Required. :paramtype file_ids: list[str] :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
Default value is "application/json". :paramtype content_type: str :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2222,7 +2196,7 @@ def create_vector_store_file_batch_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2251,14 +2225,14 @@ def create_vector_store_file_batch_and_poll( :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword content_type: Body parameter content-type. Defaults to "application/json". :paramtype content_type: str :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2384,7 +2358,7 @@ def create_vector_store_file_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2410,15 +2384,15 @@ def create_vector_store_file_and_poll( :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2445,7 +2419,7 @@ def create_vector_store_file_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2473,15 +2447,15 @@ def create_vector_store_file_and_poll( :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2527,7 +2501,7 @@ def delete_assistant( # pylint: disable=delete-operation-wrong-return-type :param assistant_id: Identifier of the assistant. Required. :type assistant_id: str :return: AssistantDeletionStatus. 
The AssistantDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AssistantDeletionStatus + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ if assistant_id in self._toolset: diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py index d87a3f0629fd..e9c73c2c8106 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py @@ -28,7 +28,7 @@ Optional, Union, cast, - overload, + overload, Coroutine, ) from azure.core.tracing.decorator_async import distributed_trace_async @@ -89,12 +89,12 @@ async def create_assistant( # pylint: disable=arguments-differ :keyword instructions: The system instructions for the new assistant to use. Default value is None. :paramtype instructions: str :keyword tools: The collection of tools to enable for the new assistant. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
Default value is @@ -109,15 +109,15 @@ async def create_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], AssistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.AssistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.AssistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -153,7 +153,7 @@ async def create_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -168,15 +168,15 @@ async def create_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. 
Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -192,7 +192,7 @@ async def create_assistant( Default value is "application/json". :paramtype content_type: str :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -208,7 +208,7 @@ async def create_assistant( Default value is "application/json". :paramtype content_type: str :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -329,12 +329,12 @@ async def update_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. 
- :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -349,15 +349,15 @@ async def update_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: assistant. 
The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -397,7 +397,7 @@ async def update_assistant( # pylint: disable=arguments-differ :paramtype instructions: str :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -412,15 +412,15 @@ async def update_assistant( # pylint: disable=arguments-differ :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] :return: assistant. 
The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -438,7 +438,7 @@ async def update_assistant( Default value is "application/json". :paramtype content_type: str :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -456,7 +456,7 @@ async def update_assistant( Default value is "application/json". :paramtype content_type: str :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ @@ -497,15 +497,15 @@ async def update_assistant( :paramtype instructions: str :keyword tools: The modified collection of tools to enable for the assistant. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :paramtype tool_resources: ~azure.ai.assistants.models.ToolResources :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default value is @@ -520,8 +520,8 @@ async def update_assistant( :keyword response_format: The response format of the tool calls used by this assistant. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -531,7 +531,7 @@ async def update_assistant( None. :paramtype metadata: dict[str, str] :return: assistant. The assistant is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.assistant + :rtype: ~azure.ai.assistants.models.assistant :raises ~azure.core.exceptions.HttpResponseError: """ self._validate_tools_and_tool_resources(tools, tool_resources) @@ -617,7 +617,7 @@ async def create_run( # pylint: disable=arguments-differ Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -633,10 +633,10 @@ async def create_run( # pylint: disable=arguments-differ :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessage] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -665,17 +665,17 @@ async def create_run( # pylint: disable=arguments-differ :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. 
Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -685,7 +685,7 @@ async def create_run( # pylint: disable=arguments-differ None. :paramtype metadata: dict[str, str] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -709,12 +709,12 @@ async def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -738,12 +738,12 @@ async def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -783,7 +783,7 @@ async def create_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword model: The overridden model name that the assistant should use to run the thread. Default value is None. :paramtype model: str @@ -796,10 +796,10 @@ async def create_run( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
Default @@ -828,17 +828,17 @@ async def create_run( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -848,7 +848,7 @@ async def create_run( None. :paramtype metadata: dict[str, str] :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -923,7 +923,7 @@ async def create_and_process_run( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword model: The overridden model name that the assistant should use to run the thread. Default value is None. :paramtype model: str @@ -936,10 +936,10 @@ async def create_and_process_run( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources`). Default value is None. - :paramtype toolset: ~azure.ai.projects.models.AsyncToolSet + :paramtype toolset: ~azure.ai.assistants.models.AsyncToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -968,19 +968,19 @@ async def create_and_process_run( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. 
- :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. :paramtype tool_choice: str or str or - ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. :paramtype response_format: str or str or - ~azure.ai.projects.models.assistantsApiResponseFormatMode or - ~azure.ai.projects.models.assistantsApiResponseFormat + ~azure.ai.assistants.models.assistantsApiResponseFormatMode or + ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -993,7 +993,7 @@ async def create_and_process_run( Default value is 1. :paramtype sleep_interval: int :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ # Create and initiate the run with additional parameters @@ -1087,7 +1087,7 @@ async def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -1103,10 +1103,10 @@ async def create_stream( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1135,17 +1135,17 @@ async def create_stream( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. 
- :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1157,7 +1157,7 @@ async def create_stream( :keyword event_handler: None :paramtype event_handler: None. _models.AsyncassistantEventHandler will be applied as default. :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1196,7 +1196,7 @@ async def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -1212,10 +1212,10 @@ async def create_stream( :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1244,17 +1244,17 @@ async def create_stream( :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. 
Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1264,9 +1264,9 @@ async def create_stream( None. :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1293,14 +1293,14 @@ async def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword event_handler: None :paramtype event_handler: None. _models.AsyncassistantEventHandler will be applied as default. :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.projects.models.AsyncAssistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncAssistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1327,15 +1327,15 @@ async def create_stream( Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1376,7 +1376,7 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] Currently the only supported value is ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :paramtype include: list[str or ~azure.ai.assistants.models.RunAdditionalFieldList] :keyword assistant_id: The ID of the assistant that should run the thread. Required. :paramtype assistant_id: str :keyword model: The overridden model name that the assistant should use to run the thread. 
Default @@ -1391,10 +1391,10 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype additional_instructions: str :keyword additional_messages: Adds additional messages to the thread before creating the run. Default value is None. - :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :paramtype additional_messages: list[~azure.ai.assistants.models.ThreadMessageOptions] :keyword tools: The overridden list of enabled tools that the assistant should use to run the thread. Default value is None. - :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :paramtype tools: list[~azure.ai.assistants.models.ToolDefinition] :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -1423,17 +1423,17 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype max_completion_tokens: int :keyword truncation_strategy: The strategy to use for dropping messages as the context windows moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :paramtype truncation_strategy: ~azure.ai.assistants.models.TruncationObject :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AssistantsApiToolChoiceOptionMode"], assistantsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.projects.models.assistantsApiToolChoiceOptionMode or - ~azure.ai.projects.models.assistantsNamedToolChoice + :paramtype tool_choice: str or str or ~azure.ai.assistants.models.assistantsApiToolChoiceOptionMode or + ~azure.ai.assistants.models.assistantsNamedToolChoice :keyword response_format: Specifies the format that the model must output. 
Is one of the following types: str, Union[str, "_models.AssistantsApiResponseFormatMode"], assistantsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.projects.models.assistantsApiResponseFormatMode - or ~azure.ai.projects.models.assistantsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.assistants.models.assistantsApiResponseFormatMode + or ~azure.ai.assistants.models.assistantsApiResponseFormat :keyword parallel_tool_calls: If ``true`` functions will run in parallel during tool use. Default value is None. :paramtype parallel_tool_calls: bool @@ -1444,9 +1444,9 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :return: assistantRunStream. assistantRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncassistantRunStream + :rtype: ~azure.ai.assistants.models.AsyncassistantRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1516,12 +1516,12 @@ async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ :param run_id: Required. :type run_id: str :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1543,7 +1543,7 @@ async def submit_tool_outputs_to_run( Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1565,7 +1565,7 @@ async def submit_tool_outputs_to_run( Default value is "application/json". :paramtype content_type: str :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1590,9 +1590,9 @@ async def submit_tool_outputs_to_run( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun + :rtype: ~azure.ai.assistants.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1637,7 +1637,7 @@ async def submit_tool_outputs_to_stream( :type body: JSON or IO[bytes] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -1664,12 +1664,12 @@ async def submit_tool_outputs_to_stream( :param run_id: Required. :type run_id: str :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.AsyncassistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncassistantEventHandler :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1695,9 +1695,9 @@ async def submit_tool_outputs_to_stream( # pyright: ignore[reportInconsistentOv :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :paramtype tool_outputs: list[~azure.ai.assistants.models.ToolOutput] :keyword event_handler: The event handler to use for processing events during the run. - :paramtype event_handler: ~azure.ai.projects.models.AsyncAssistantEventHandler + :paramtype event_handler: ~azure.ai.assistants.models.AsyncAssistantEventHandler :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1747,39 +1747,14 @@ async def _handle_submit_tool_outputs( thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler ) - # pylint: disable=arguments-differ - @overload - async def upload_file( # pylint: disable=arguments-differ - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file_path: Required. 
- :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - # pylint: disable=arguments-differ @overload - async def upload_file( # pylint: disable=arguments-differ - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: + async def upload_file(self, body: _models.UploadFileRequest, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str + :param body: Multipart body. Required. + :type body: ~azure.ai.assistants.models.UploadFileRequest :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1787,10 +1762,10 @@ async def upload_file( # pylint: disable=arguments-differ async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :param body: Required. + :param body: Multipart body. Required. :type body: JSON :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1798,11 +1773,6 @@ async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: async def upload_file( self, body: Union[_models.UploadFileRequest, JSON] = _Unset, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, **kwargs: Any, ) -> _models.OpenAIFile: """ @@ -1828,6 +1798,10 @@ async def upload_file( if body is not _Unset: return await super().upload_file(body=body, **kwargs) + purpose = kwargs.get('purpose') + file = kwargs.get('file') + file_path = kwargs.get('file_path') + filename = kwargs.get('filename') if isinstance(purpose, FilePurpose): purpose = purpose.value @@ -1864,7 +1838,7 @@ async def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, * is 1. :paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1881,17 +1855,17 @@ async def upload_file_and_poll( """Uploads a file for use by other operations. :keyword file: Required. - :paramtype file: ~azure.ai.projects._vendor.FileType + :paramtype file: ~azure.ai.assistants._vendor.FileType :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose :keyword filename: Default value is None. :paramtype filename: str :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. 
:paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1905,12 +1879,12 @@ async def upload_file_and_poll( :type file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :paramtype purpose: str or ~azure.ai.assistants.models.FilePurpose :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. :paramtype sleep_interval: float :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile + :rtype: ~azure.ai.assistants.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1982,7 +1956,7 @@ async def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2011,12 +1985,12 @@ async def create_vector_store_and_poll( :keyword name: The name of the vector store. Default value is None. :paramtype name: str :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. 
- :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -2026,7 +2000,7 @@ async def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2045,7 +2019,7 @@ async def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2077,12 +2051,12 @@ async def create_vector_store_and_poll( :keyword name: The name of the vector store. Default value is None. :paramtype name: str :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. 
- :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :paramtype expires_after: ~azure.ai.assistants.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -2092,7 +2066,7 @@ async def create_vector_store_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore + :rtype: ~azure.ai.assistants.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2149,7 +2123,7 @@ async def create_vector_store_file_batch_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2172,18 +2146,18 @@ async def create_vector_store_file_batch_and_poll( :keyword file_ids: List of file identifiers. Required. :paramtype file_ids: list[str] :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.assistants.models.VectorStoreDataSource] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
Default value is "application/json". :paramtype content_type: str :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2210,7 +2184,7 @@ async def create_vector_store_file_batch_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2239,14 +2213,14 @@ async def create_vector_store_file_batch_and_poll( :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword content_type: Body parameter content-type. Defaults to "application/json". :paramtype content_type: str :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :rtype: ~azure.ai.assistants.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2307,7 +2281,7 @@ async def create_vector_store_file_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2333,15 +2307,15 @@ async def create_vector_store_file_and_poll( :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2368,7 +2342,7 @@ async def create_vector_store_file_and_poll( is 1. :paramtype sleep_interval: float :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2396,15 +2370,15 @@ async def create_vector_store_file_and_poll( :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str :keyword data_source: Azure asset ID. Default value is None. - :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource + :paramtype data_source: ~azure.ai.assistants.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :paramtype chunking_strategy: ~azure.ai.assistants.models.VectorStoreChunkingStrategyRequest :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. :paramtype sleep_interval: float :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile + :rtype: ~azure.ai.assistants.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ @@ -2524,7 +2498,7 @@ async def delete_assistant(self, assistant_id: str, **kwargs: Any) -> _models.As :param assistant_id: Identifier of the assistant. Required. :type assistant_id: str :return: AssistantDeletionStatus. 
The AssistantDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AssistantDeletionStatus + :rtype: ~azure.ai.assistants.models.AssistantDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ if assistant_id in self._toolset: diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py index e7e0ed0fd702..ce4f475b6899 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py @@ -93,7 +93,7 @@ def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[st **Note:** Classes inherited from the model check that the parameters are present in the list of attributes and if they are not, the error is being raised. This check may not - be relevant for classes, not inherited from azure.ai.projects._model_base.Model. + be relevant for classes, not inherited from azure.ai.assistants._model_base.Model. :param Type model_class: The class of model to be used. :param parameters: The parsed dictionary with parameters. :type parameters: Union[str, Dict[str, Any]] @@ -1045,7 +1045,7 @@ def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolRes Safely converts a dictionary into a ToolResources instance. :param resources: A dictionary of tool resources. Should be a mapping - accepted by ~azure.ai.projects.models.AzureAISearchResource + accepted by ~azure.ai.assistants.models.AzureAISearchResource :type resources: Dict[str, Any] :return: A ToolResources instance. :rtype: ToolResources @@ -1098,7 +1098,7 @@ def validate_tool_type(self, tool: Tool) -> None: if isinstance(tool, AsyncFunctionTool): raise ValueError( "AsyncFunctionTool is not supported in ToolSet. " - + "To use async functions, use AsyncToolSet and assistants operations in azure.ai.projects.aio." 
+ + "To use async functions, use AsyncToolSet and assistants operations in azure.ai.assistants.aio." ) def execute_tool_calls(self, tool_calls: List[Any]) -> Any: @@ -1626,7 +1626,7 @@ def get_last_message_by_role(self, role: MessageRole) -> Optional[ThreadMessage] :type role: MessageRole :return: The last message from a sender in the specified role. - :rtype: ~azure.ai.projects.models.ThreadMessage + :rtype: ~azure.ai.assistants.models.ThreadMessage """ for msg in self.data: if msg.role == role: @@ -1640,7 +1640,7 @@ def get_last_text_message_by_role(self, role: MessageRole) -> Optional[MessageTe :type role: MessageRole :return: The last text message from a sender in the specified role. - :rtype: ~azure.ai.projects.models.MessageTextContent + :rtype: ~azure.ai.assistants.models.MessageTextContent """ for msg in self.data: if msg.role == role: From 6d30ce041ff8f29c646472c465d47b4ad5b61d10 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Fri, 11 Apr 2025 20:57:10 -0700 Subject: [PATCH 04/11] Fix linters --- .../azure/ai/assistants/_client.py | 5 +- .../azure/ai/assistants/_configuration.py | 7 +- .../ai/assistants/_operations/_operations.py | 104 +++++++++--------- .../azure/ai/assistants/_patch.py | 34 +++--- .../azure/ai/assistants/aio/_client.py | 5 +- .../azure/ai/assistants/aio/_configuration.py | 7 +- .../assistants/aio/_operations/_operations.py | 24 ++-- .../azure/ai/assistants/aio/_patch.py | 34 +++--- .../azure/ai/assistants/models/_models.py | 24 ++-- ...sample_assistants_azure_functions_async.py | 1 + sdk/ai/azure-ai-assistants/tsp-location.yaml | 2 +- 11 files changed, 130 insertions(+), 117 deletions(-) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py index 402e3499a801..ef1341bc0ac4 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_client.py @@ -33,8 +33,9 
@@ class AssistantsClient(AssistantsClientOperationsMixin): credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "latest". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. :paramtype api_version: str """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py index 167da10d4f73..b3aa33c5f408 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_configuration.py @@ -30,13 +30,14 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "latest". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "latest") + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py index 6032feb2a16c..fe62868c3a1e 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_operations/_operations.py @@ -52,7 +52,7 @@ def build_assistants_create_assistant_request(**kwargs: Any) -> HttpRequest: # _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -80,7 +80,7 @@ def build_assistants_list_assistants_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -107,7 +107,7 @@ def build_assistants_get_assistant_request(assistant_id: str, **kwargs: Any) -> _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str 
= kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -134,7 +134,7 @@ def build_assistants_update_assistant_request( # pylint: disable=name-too-long _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -162,7 +162,7 @@ def build_assistants_delete_assistant_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -187,7 +187,7 @@ def build_assistants_create_thread_request(**kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -208,7 +208,7 @@ def build_assistants_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRe _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -233,7 +233,7 @@ def build_assistants_update_thread_request(thread_id: str, **kwargs: Any) -> Htt _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -259,7 +259,7 @@ def build_assistants_delete_thread_request(thread_id: str, **kwargs: Any) -> Htt _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -284,7 +284,7 @@ def build_assistants_create_message_request(thread_id: str, **kwargs: Any) -> Ht _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -319,7 +319,7 @@ def build_assistants_list_messages_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = 
_headers.pop("Accept", "application/json") # Construct URL @@ -353,7 +353,7 @@ def build_assistants_get_message_request(thread_id: str, message_id: str, **kwar _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -379,7 +379,7 @@ def build_assistants_update_message_request(thread_id: str, message_id: str, **k _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -409,7 +409,7 @@ def build_assistants_create_run_request( _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -445,7 +445,7 @@ def build_assistants_list_runs_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -477,7 +477,7 @@ def 
build_assistants_get_run_request(thread_id: str, run_id: str, **kwargs: Any) _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -503,7 +503,7 @@ def build_assistants_update_run_request(thread_id: str, run_id: str, **kwargs: A _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -533,7 +533,7 @@ def build_assistants_submit_tool_outputs_to_run_request( # pylint: disable=name _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -560,7 +560,7 @@ def build_assistants_cancel_run_request(thread_id: str, run_id: str, **kwargs: A _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -586,7 +586,7 @@ def 
build_assistants_create_thread_and_run_request(**kwargs: Any) -> HttpRequest _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -614,7 +614,7 @@ def build_assistants_get_run_step_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -652,7 +652,7 @@ def build_assistants_list_run_steps_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -689,7 +689,7 @@ def build_assistants_list_files_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -710,7 +710,7 @@ def build_assistants_upload_file_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -729,7 +729,7 @@ def build_assistants_delete_file_request(file_id: str, **kwargs: Any) -> HttpReq _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -753,7 +753,7 @@ def build_assistants_get_file_request(file_id: str, **kwargs: Any) -> HttpReques _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -779,7 +779,7 @@ def build_assistants_get_file_content_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/octet-stream") # Construct URL @@ -810,7 +810,7 @@ def build_assistants_list_vector_stores_request( # pylint: disable=name-too-lon _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: 
str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -838,7 +838,7 @@ def build_assistants_create_vector_store_request(**kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -861,7 +861,7 @@ def build_assistants_get_vector_store_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -888,7 +888,7 @@ def build_assistants_modify_vector_store_request( # pylint: disable=name-too-lo _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -916,7 +916,7 @@ def build_assistants_delete_vector_store_request( # pylint: disable=name-too-lo _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -949,7 +949,7 @@ def build_assistants_list_vector_store_files_request( # pylint: disable=name-to _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -986,7 +986,7 @@ def build_assistants_create_vector_store_file_request( # pylint: disable=name-t _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1014,7 +1014,7 @@ def build_assistants_get_vector_store_file_request( # pylint: disable=name-too- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1041,7 +1041,7 @@ def build_assistants_delete_vector_store_file_request( # pylint: disable=name-t _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1069,7 +1069,7 @@ def build_assistants_create_vector_store_file_batch_request( # pylint: disable= _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1097,7 +1097,7 @@ def build_assistants_get_vector_store_file_batch_request( # pylint: disable=nam _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1124,7 +1124,7 @@ def build_assistants_cancel_vector_store_file_batch_request( # pylint: disable= _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1159,7 +1159,7 @@ def build_assistants_list_vector_store_file_batch_files_request( # pylint: disa _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "latest")) + 
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -2342,13 +2342,13 @@ def create_message( :param thread_id: Identifier of the thread. Required. :type thread_id: str - :keyword role: The role of the entity that is creating the message. Allowed values include: + :keyword role: Allowed values include: - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Known values are: "user" and "assistant". Required. + * `user`: Indicates user-generated messages. + * `assistant`: Indicates the message is generated by the assistant. + + The role of the entity that is creating the message. Known values are: "user" and "assistant". + Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via @@ -2424,13 +2424,13 @@ def create_message( :type thread_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword role: The role of the entity that is creating the message. Allowed values include: + :keyword role: Allowed values include: - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Known values are: "user" and "assistant". Required. + * `user`: Indicates user-generated messages. + * `assistant`: Indicates the message is generated by the assistant. + + The role of the entity that is creating the message. 
Known values are: "user" and "assistant". + Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py index d6b8e19f0f8c..d752142abbf1 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -1790,17 +1790,19 @@ def upload_file( """ Uploads a file for use by other operations, delegating to the generated operations. + kwargs can include next parameters: + param file: File content. Required if `body` and `purpose` are not provided. + type file: Optional[FileType] + param file_path: Path to the file. Required if `body` and `purpose` are not provided. + type file_path: Optional[str] + param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + type purpose: Union[str, _models.FilePurpose, None] + param filename: The name of the file. + type filename: Optional[str] + :param body: JSON. Required if `file` and `purpose` are not provided. :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - :paramtype purpose: Union[str, _models.FilePurpose, None] - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :keyword filename: The name of the file. - :paramtype filename: Optional[str] :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping :rtype: _models.OpenAIFile :raises FileNotFoundError: If the file_path is invalid. @@ -1810,10 +1812,10 @@ def upload_file( if body is not _Unset: return super().upload_file(body=body, **kwargs) - purpose = kwargs.get('purpose') - file = kwargs.get('file') - file_path = kwargs.get('file_path') - filename = kwargs.get('filename') + purpose = kwargs.get("purpose") + file = kwargs.get("file") + file_path = kwargs.get("file_path") + filename = kwargs.get("filename") if isinstance(purpose, FilePurpose): purpose = purpose.value @@ -2287,7 +2289,8 @@ def get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: @distributed_trace def save_file( # pylint: disable=client-method-missing-kwargs - self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: + self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None + ) -> None: """ Synchronously saves file content retrieved using a file identifier to the specified local directory. @@ -2495,7 +2498,8 @@ def create_vector_store_file_and_poll( @distributed_trace def delete_assistant( # pylint: disable=delete-operation-wrong-return-type - self, assistant_id: str, **kwargs: Any) -> _models.AssistantDeletionStatus: + self, assistant_id: str, **kwargs: Any + ) -> _models.AssistantDeletionStatus: """Deletes an assistant. :param assistant_id: Identifier of the assistant. Required. diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py index 67dfd5d3edcc..ff7afa16a25f 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_client.py @@ -33,8 +33,9 @@ class AssistantsClient(AssistantsClientOperationsMixin): credential type or a token credential type. Required. 
:type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "latest". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. :paramtype api_version: str """ diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py index 2fd4bfd489bd..637f56b4a09d 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_configuration.py @@ -30,15 +30,16 @@ class AssistantsClientConfiguration: # pylint: disable=too-many-instance-attrib credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "latest". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. 
:paramtype api_version: str """ def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "latest") + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py index 573a8fed7a5e..cb6696b9608a 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_operations/_operations.py @@ -1238,13 +1238,13 @@ async def create_message( :param thread_id: Identifier of the thread. Required. :type thread_id: str - :keyword role: The role of the entity that is creating the message. Allowed values include: + :keyword role: Allowed values include: - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Known values are: "user" and "assistant". Required. + * `user`: Indicates user-generated messages. + * `assistant`: Indicates the message is generated by the assistant. + + The role of the entity that is creating the message. Known values are: "user" and "assistant". + Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via @@ -1320,13 +1320,13 @@ async def create_message( :type thread_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. 
:type body: JSON or IO[bytes] - :keyword role: The role of the entity that is creating the message. Allowed values include: + :keyword role: Allowed values include: - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Known values are: "user" and "assistant". Required. + * `user`: Indicates user-generated messages. + * `assistant`: Indicates the message is generated by the assistant. + + The role of the entity that is creating the message. Known values are: "user" and "assistant". + Required. :paramtype role: str or ~azure.ai.assistants.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py index e9c73c2c8106..0704e4548a7b 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py @@ -28,7 +28,7 @@ Optional, Union, cast, - overload, Coroutine, + overload, ) from azure.core.tracing.decorator_async import distributed_trace_async @@ -39,6 +39,7 @@ if TYPE_CHECKING: from .. import _types + # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import AccessToken, AzureKeyCredential from azure.core.credentials_async import AsyncTokenCredential @@ -1778,17 +1779,19 @@ async def upload_file( """ Uploads a file for use by other operations, delegating to the generated operations. + kwargs can include next parameters: + param file: File content. Required if `body` and `purpose` are not provided. + type file: Optional[FileType] + param file_path: Path to the file. 
Required if `body` and `purpose` are not provided. + type file_path: Optional[str] + param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + type purpose: Union[str, _models.FilePurpose, None] + param filename: The name of the file. + type filename: Optional[str] + :param body: JSON. Required if `file` and `purpose` are not provided. :type body: Optional[JSON] - :keyword file: File content. Required if `body` and `purpose` are not provided. - :paramtype file: Optional[FileType] - :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. - :paramtype file_path: Optional[str] - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :paramtype purpose: Union[str, _models.FilePurpose, None] - :keyword filename: The name of the file. - :paramtype filename: Optional[str] :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping :rtype: _models.OpenAIFile :raises FileNotFoundError: If the file_path is invalid. 
@@ -1798,10 +1801,10 @@ async def upload_file( if body is not _Unset: return await super().upload_file(body=body, **kwargs) - purpose = kwargs.get('purpose') - file = kwargs.get('file') - file_path = kwargs.get('file_path') - filename = kwargs.get('filename') + purpose = kwargs.get("purpose") + file = kwargs.get("file") + file_path = kwargs.get("file_path") + filename = kwargs.get("filename") if isinstance(purpose, FilePurpose): purpose = purpose.value @@ -2433,7 +2436,8 @@ async def get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[b @distributed_trace_async async def save_file( # pylint: disable=client-method-missing-kwargs - self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: + self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None + ) -> None: """ Asynchronously saves file content retrieved using a file identifier to the specified local directory. diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py index 3d4ce4433874..03da9f45fe7a 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_models.py @@ -5235,13 +5235,13 @@ class ThreadMessageOptions(_model_base.Model): """A single message within an assistant thread, as provided during that thread's creation for its initial state. - :ivar role: The role of the entity that is creating the message. Allowed values include: + :ivar role: Allowed values include: - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Required. Known values are: "user" and "assistant". + * `user`: Indicates user-generated messages. 
+ * `assistant`: Indicates the message is generated by the assistant. + + The role of the entity that is creating the message. Required. Known values are: "user" and + "assistant". :vartype role: str or ~azure.ai.assistants.models.MessageRole :ivar content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via @@ -5257,13 +5257,13 @@ class ThreadMessageOptions(_model_base.Model): """ role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The role of the entity that is creating the message. Allowed values include: + """Allowed values include: - * `user`: Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * `assistant`: Indicates the message is generated by the assistant. Use this value to insert - messages from the assistant into the - conversation. Required. Known values are: \"user\" and \"assistant\".""" + * `user`: Indicates user-generated messages. + * `assistant`: Indicates the message is generated by the assistant. + + The role of the entity that is creating the message. Required. Known values are: \"user\" and + \"assistant\".""" content: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The textual content of the initial message. 
Currently, robust input including images and annotated text may only be provided via diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py index 847ddd9e4d59..bbbe50c2865c 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-assistants/tsp-location.yaml b/sdk/ai/azure-ai-assistants/tsp-location.yaml index 157aa848374c..5307517cbb65 100644 --- a/sdk/ai/azure-ai-assistants/tsp-location.yaml +++ b/sdk/ai/azure-ai-assistants/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Assistants -commit: 690df2e0bc38d885e92b9eab292d21839f5174a8 +commit: c0ad22895973fb5459104a96239b186c0b38f9ae repo: Azure/azure-rest-api-specs additionalDirectories: From a26904e452b7d86cf65e0ba1c7bbe59c8ad81a3b Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Fri, 11 Apr 2025 21:21:44 -0700 Subject: [PATCH 05/11] Fix spelling --- .vscode/cspell.json | 4 ++++ .../azure-ai-assistants/samples/fix_sample.sh | 20 ------------------- 2 files changed, 4 insertions(+), 20 deletions(-) delete mode 100644 sdk/ai/azure-ai-assistants/samples/fix_sample.sh diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 404795164d2e..3fc739f1654f 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -39,6 +39,10 @@ "sdk/ai/azure-ai-projects/samples/agents/tripadvisor_openapi.json", "/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/data/**", "/sdk/ai/azure-ai-projects/samples/evaluations/data/**", + 
"sdk/ai/azure-ai-assistants/samples/agents/nifty_500_quarterly_results.csv", + "sdk/ai/azure-ai-assistants/samples/agents/tripadvisor_openapi.json", + "/sdk/ai/azure-ai-assistants/samples/evaluations/async_samples/data/**", + "/sdk/ai/azure-ai-assistants/samples/evaluations/data/**", "sdk/ai/azure-ai-resources/azure/ai/resources/_index/_langchain/vendor/**", "sdk/ai/azure-ai-resources/azure/ai/resources/_restclient/**", "sdk/cognitiveservices/azure-cognitiveservices-search-autosuggest/**", diff --git a/sdk/ai/azure-ai-assistants/samples/fix_sample.sh b/sdk/ai/azure-ai-assistants/samples/fix_sample.sh deleted file mode 100644 index 067c4b55317b..000000000000 --- a/sdk/ai/azure-ai-assistants/samples/fix_sample.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/bash -fix_samples(){ - for fle in `ls $1/*.py | grep agent`; - do - new_name="`echo "$fle" | sed "s/agent/assistant/g"`" - echo "$fle - > $new_name" - sed "s/gent/ssistant/g" "$fle" \ - | sed "s/azure-ai-projects/azure-ai-assistants/g" \ - | sed "s/ai.projects/ai.assistants/g" \ - | sed "s/AIProjectClient/AssistantsClient/g" \ - | sed "s/project_client.assistants/project_client/g" \ - | sed "s/project_client/assistants_client/g" > $new_name - rm -f "$fle" - done -} - -#fix_samples async_samples -#fix_samples . 
-#fix_samples multiagent -fix_samples ../tests \ No newline at end of file From 4bca3986088ec5fd31923acad96670277b51eceb Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Sat, 12 Apr 2025 13:32:03 -0700 Subject: [PATCH 06/11] Add package to the build, so we can use private feed --- sdk/ai/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/ai/ci.yml b/sdk/ai/ci.yml index 117b9c6c785d..633ae26cc11e 100644 --- a/sdk/ai/ci.yml +++ b/sdk/ai/ci.yml @@ -50,6 +50,8 @@ extends: # Selection: sparse # GenerateVMJobs: true Artifacts: + - name: azure-ai-assistants + safeName: azureaiassistants - name: azure-ai-projects safeName: azureaiprojects - name: azure-ai-inference From be0c45fc016c23111aa809845ea26429e485f4d0 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Tue, 15 Apr 2025 12:01:40 -0700 Subject: [PATCH 07/11] Fix samples and readme --- .vscode/cspell.json | 6 +- sdk/ai/azure-ai-assistants/README.md | 1184 ++++++++++++++++- ...sample_assistants_azure_functions_async.py | 2 +- .../sample_assistants_basics_async.py | 2 +- ...basics_async_with_azure_monitor_tracing.py | 2 +- ...tants_basics_async_with_console_tracing.py | 2 +- ...ample_assistants_code_interpreter_async.py | 2 +- ...tants_code_interpreter_attachment_async.py | 2 +- ...eter_attachment_enterprise_search_async.py | 2 +- .../sample_assistants_functions_async.py | 2 +- .../sample_assistants_json_schema_async.py | 2 +- ...ample_assistants_run_with_toolset_async.py | 2 +- ...le_assistants_stream_eventhandler_async.py | 2 +- ...tream_eventhandler_with_functions_async.py | 2 +- ..._stream_eventhandler_with_toolset_async.py | 2 +- ...ample_assistants_stream_iteration_async.py | 2 +- ...m_with_base_override_eventhandler_async.py | 2 +- ...tore_batch_enterprise_file_search_async.py | 2 +- ...ts_vector_store_batch_file_search_async.py | 2 +- ...ctor_store_enterprise_file_search_async.py | 2 +- 
...sistants_vector_store_file_search_async.py | 2 +- ...tants_with_file_search_attachment_async.py | 2 +- .../sample_assistants_assistant_team.py | 2 +- ...tants_assistant_team_custom_team_leader.py | 2 +- .../sample_assistants_multi_assistant_team.py | 2 +- .../sample_assistants_azure_functions.py | 3 +- .../samples/sample_assistants_basics.py | 3 +- ...tants_basics_with_azure_monitor_tracing.py | 3 +- ..._assistants_basics_with_console_tracing.py | 3 +- ..._with_console_tracing_custom_attributes.py | 3 +- .../sample_assistants_bing_grounding.py | 3 +- .../sample_assistants_code_interpreter.py | 3 +- ...nterpreter_attachment_enterprise_search.py | 3 +- ...ample_assistants_enterprise_file_search.py | 3 +- .../samples/sample_assistants_fabric.py | 2 +- .../samples/sample_assistants_file_search.py | 3 +- .../samples/sample_assistants_functions.py | 3 +- ...ts_functions_with_azure_monitor_tracing.py | 3 +- ...sistants_functions_with_console_tracing.py | 3 +- .../samples/sample_assistants_json_schema.py | 3 +- .../samples/sample_assistants_logic_apps.py | 3 +- .../samples/sample_assistants_openapi.py | 3 +- ...mple_assistants_openapi_connection_auth.py | 5 +- .../sample_assistants_run_with_toolset.py | 3 +- .../samples/sample_assistants_sharepoint.py | 2 +- .../sample_assistants_stream_eventhandler.py | 3 +- ...eventhandler_with_azure_monitor_tracing.py | 3 +- ...stream_eventhandler_with_bing_grounding.py | 3 +- ...tream_eventhandler_with_console_tracing.py | 3 +- ...ants_stream_eventhandler_with_functions.py | 3 +- ...stants_stream_eventhandler_with_toolset.py | 3 +- .../sample_assistants_stream_iteration.py | 3 +- ...ts_stream_iteration_with_bing_grounding.py | 3 +- ...tants_stream_iteration_with_file_search.py | 3 +- ...ssistants_stream_iteration_with_toolset.py | 3 +- ..._stream_with_base_override_eventhandler.py | 3 +- ...ctor_store_batch_enterprise_file_search.py | 3 +- ...sistants_vector_store_batch_file_search.py | 3 +- 
...ple_assistants_vector_store_file_search.py | 3 +- ...s_with_code_interpreter_file_attachment.py | 3 +- ...tants_with_enterprise_search_attachment.py | 3 +- ..._assistants_with_file_search_attachment.py | 3 +- ...ple_assistants_with_resources_in_thread.py | 3 +- 63 files changed, 1240 insertions(+), 110 deletions(-) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 3fc739f1654f..a8bee859a72d 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -39,10 +39,8 @@ "sdk/ai/azure-ai-projects/samples/agents/tripadvisor_openapi.json", "/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/data/**", "/sdk/ai/azure-ai-projects/samples/evaluations/data/**", - "sdk/ai/azure-ai-assistants/samples/agents/nifty_500_quarterly_results.csv", - "sdk/ai/azure-ai-assistants/samples/agents/tripadvisor_openapi.json", - "/sdk/ai/azure-ai-assistants/samples/evaluations/async_samples/data/**", - "/sdk/ai/azure-ai-assistants/samples/evaluations/data/**", + "sdk/ai/azure-ai-assistants/samples/nifty_500_quarterly_results.csv", + "sdk/ai/azure-ai-assistants/samples/tripadvisor_openapi.json", "sdk/ai/azure-ai-resources/azure/ai/resources/_index/_langchain/vendor/**", "sdk/ai/azure-ai-resources/azure/ai/resources/_restclient/**", "sdk/cognitiveservices/azure-cognitiveservices-search-autosuggest/**", diff --git a/sdk/ai/azure-ai-assistants/README.md b/sdk/ai/azure-ai-assistants/README.md index 30d4deef34eb..82a70cfba73f 100644 --- a/sdk/ai/azure-ai-assistants/README.md +++ b/sdk/ai/azure-ai-assistants/README.md @@ -1,20 +1,1184 @@ -# Azure Ai Assistants client library for Python - + +# Azure AI Assistants client library for Python +Use the AI Assistants client library (in preview) to: + +* **Enumerate connections** in your Azure AI Foundry project and get connection properties. +For example, get the inference endpoint URL and credentials associated with your Azure OpenAI connection. 
+* **Develop Assistants using the Azure AI Assistants Service**, leveraging an extensive ecosystem of models, tools, and capabilities from OpenAI, Microsoft, and other LLM providers. The Azure AI Assistants Service enables the building of Assistants for a wide range of generative AI use cases. The package is currently in preview. +* **Enable OpenTelemetry tracing**. + +[Product documentation](https://aka.ms/azsdk/azure-ai-projects/product-doc) +| [Samples][samples] +| [API reference documentation](https://aka.ms/azsdk/azure-ai-projects/python/reference) +| [Package (PyPI)](https://aka.ms/azsdk/azure-ai-projects/python/package) +| [SDK source code](https://aka.ms/azsdk/azure-ai-projects/python/code) +| [AI Starter Template](https://aka.ms/azsdk/azure-ai-projects/python/ai-starter-template) + +## Reporting issues + +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-assistants" in the title or content.
+ +## Table of contents + +- [Getting started](#getting-started) + - [Prerequisite](#prerequisite) + - [Install the package](#install-the-package) +- [Key concepts](#key-concepts) + - [Create and authenticate the client](#create-and-authenticate-the-client) +- [Examples](#examples) + - [Create an Assistant](#create-assistant) with: + - [File Search](#create-assistant-with-file-search) + - [Enterprise File Search](#create-assistant-with-enterprise-file-search) + - [Code interpreter](#create-assistant-with-code-interpreter) + - [Bing grounding](#create-assistant-with-bing-grounding) + - [Azure AI Search](#create-assistant-with-azure-ai-search) + - [Function call](#create-assistant-with-function-call) + - [Azure Function Call](#create-assistant-with-azure-function-call) + - [OpenAPI](#create-assistant-with-openapi) + - [Fabric data](#create-an-assistant-with-fabric) + - [Create thread](#create-thread) with + - [Tool resource](#create-thread-with-tool-resource) + - [Create message](#create-message) with: + - [File search attachment](#create-message-with-file-search-attachment) + - [Code interpreter attachment](#create-message-with-code-interpreter-attachment) + - [Execute Run, Run_and_Process, or Stream](#create-run-run_and_process-or-stream) + - [Retrieve message](#retrieve-message) + - [Retrieve file](#retrieve-file) + - [Tear down by deleting resource](#teardown) + - [Tracing](#tracing) + - [Installation](#installation) + - [How to enable tracing](#how-to-enable-tracing) + - [How to trace your own functions](#how-to-trace-your-own-functions) +- [Troubleshooting](#troubleshooting) + - [Logging](#logging) + - [Reporting issues](#reporting-issues) +- [Next steps](#next-steps) +- [Contributing](#contributing) + ## Getting started +### Prerequisite + +- Python 3.8 or later. +- An [Azure subscription][azure_sub]. +- A [project in Azure AI Foundry](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects). +- The project connection string. 
It can be found in your Azure AI Foundry project overview page, under "Project details". Below we will assume the environment variable `PROJECT_CONNECTION_STRING` was defined to hold this value. +- Entra ID is needed to authenticate the client. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need: + * An appropriate role assignment. See [Role-based access control in Azure AI Foundry portal](https://learn.microsoft.com/azure/ai-foundry/concepts/rbac-ai-foundry). Role assignment can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal. + * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed. + * You are logged into your Azure account by running `az login`. + * Note that if you have multiple Azure subscriptions, the subscription that contains your Azure AI Project resource must be your default subscription. Run `az account list --output table` to list all your subscriptions and see which one is the default. Run `az account set --subscription "Your Subscription ID or Name"` to change your default subscription.
+ ### Install the package ```bash -python -m pip install azure-ai-assistants +pip install azure-ai-assistants +``` + +## Key concepts + +### Create and authenticate the client + +To construct a synchronous client: + +```python +import os +from azure.ai.assistants import AssistantsClient +from azure.identity import DefaultAzureCredential + +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential = AzureKeyCredential(os.environ["API_KEY"]) +) +``` + +To construct an asynchronous client, Install the additional package [aiohttp](https://pypi.org/project/aiohttp/): + +```bash +pip install aiohttp +``` + +and update the code above to import `asyncio`, and import `AssistantsClient` from the `azure.ai.assistants.aio` namespace: + +```python +import os +import asyncio +from azure.ai.assistants.aio import AssistantsClient +from azure.core.credentials import AzureKeyCredential + +assistant_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=AzureKeyCredential(os.environ["API_KEY"]), +) +``` + +## Examples + +#### Create Assistant + +Before creating an Assistant, you need to set up Azure resources to deploy your model. [Create a New Assistant Quickstart](https://learn.microsoft.com/azure/ai-services/assistants/quickstart?pivots=programming-language-python-azure) details selecting and deploying your Assistant Setup. + +Here is an example of how to create an Assistant: + + +```python +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", +) +``` + + + +To allow Assistants to access your resources or custom functions, you need tools. You can pass tools to `create_assistant` by either `toolset` or combination of `tools` and `tool_resources`. 
+ +Here is an example of `toolset`: + + +```python +functions = FunctionTool(user_functions) +code_interpreter = CodeInterpreterTool() + +toolset = ToolSet() +toolset.add(functions) +toolset.add(code_interpreter) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, +) +``` + + + +Also notices that if you use asynchronous client, you use `AsyncToolSet` instead. Additional information related to `AsyncFunctionTool` be discussed in the later sections. + +Here is an example to use `tools` and `tool_resources`: + + +```python +file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + +# Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, +) +``` + + + +In the following sections, we show you sample code in either `toolset` or combination of `tools` and `tool_resources`. + +#### Create Assistant with File Search + +To perform file search by an Assistant, we first need to upload a file, create a vector store, and associate the file to the vector store. 
Here is an example: + + + +```python +file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") +print(f"Uploaded file, file ID: {file.id}") + +vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# Create file search tool with resources followed by creating assistant +file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, +) +``` + + + +#### Create Assistant with Enterprise File Search + +We can upload file to Azure as it is shown in the example, or use the existing Azure blob storage. In the code below we demonstrate how this can be achieved. First we upload file to azure and create `VectorStoreDataSource`, which then is used to create vector store. This vector store is then given to the `FileSearchTool` constructor. + + + +```python +# We will upload the local file to Azure and will use it for vector store creation. 
+asset_uri = os.environ["AZURE_BLOB_URI"] + +# Create a vector store with no file and wait for it to be processed +ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) +vector_store = assistants_client.create_vector_store_and_poll(data_sources=[ds], name="sample_vector_store") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# Create a file search tool +file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + +# Notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, +) +``` + + + +We also can attach files to the existing vector store. In the code snippet below, we first create an empty vector store and add file to it. 
+ + + +```python +# Create a vector store with no file and wait for it to be processed +vector_store = assistants_client.create_vector_store_and_poll(data_sources=[], name="sample_vector_store") +print(f"Created vector store, vector store ID: {vector_store.id}") + +ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) +# Add the file to the vector store or you can supply data sources in the vector store creation +vector_store_file_batch = assistants_client.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] +) +print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + +# Create a file search tool +file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) +``` + + + +#### Create Assistant with Code Interpreter + +Here is an example to upload a file and use it for code interpreter by an Assistant: + + + +```python +file = assistants_client.upload_file_and_poll( + file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.ASSISTANTS +) +print(f"Uploaded file, file ID: {file.id}") + +code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + +# Create assistant with code interpreter tool and tools_resources +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, +) +``` + + + +#### Create Assistant with Bing Grounding + +To enable your Assistant to perform search through Bing search API, you use `BingGroundingTool` along with a connection. 
+ +Here is an example: + + + +```python +conn_id = os.environ["AZURE_BING_CONECTION_ID"] + +print(conn_id) + +# Initialize assistant bing tool and add the connection id +bing = BingGroundingTool(connection_id=conn_id) + +# Create assistant with the bing tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=bing.definitions, + headers={"x-ms-enable-preview": "true"}, + ) +``` + + + +#### Create Assistant with Azure AI Search + +Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. Ideal for knowledge base insights, information discovery, and automation. Creating an Assistant with Azure AI Search requires an existing Azure AI Search Index. For more information and setup guides, see [Azure AI Search Tool Guide](https://learn.microsoft.com/azure/ai-services/assistants/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search). 
+ +Here is an example to integrate Azure AI Search: + + + +```python +conn_id = os.environ["AI_AZURE_AI_CONNECTION_ID"] + +print(conn_id) + +# Initialize assistant AI search tool and add the search index connection id +ai_search = AzureAISearchTool( + index_connection_id=conn_id, index_name="sample_index", query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter="" +) + +# Create assistant with AI search tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=ai_search.definitions, + tool_resources=ai_search.resources, + ) +``` + + + +If the assistant has found the relevant information in the index, the reference +and annotation will be provided in the message response. In the example above, we replace +the reference placeholder by the actual reference and url. Please note, that to +get sensible result, the index needs to have "embedding", "token", "category" and "title" fields. + + + +```python +# Fetch and log all messages +messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) +for message in messages.data: + if message.role == MessageRole.ASSISTANT and message.url_citation_annotations: + placeholder_annotations = { + annotation.text: f" [see {annotation.url_citation.title}] ({annotation.url_citation.url})" + for annotation in message.url_citation_annotations + } + for message_text in message.text_messages: + message_str = message_text.text.value + for k, v in placeholder_annotations.items(): + message_str = message_str.replace(k, v) + print(f"{message.role}: {message_str}") + else: + for message_text in message.text_messages: + print(f"{message.role}: {message_text.text.value}") +``` + + + +#### Create Assistant with Function Call + +You can enhance your Assistants by defining callback functions as function tools. 
These can be provided to `create_assistant` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions: + +- `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within `create_and_run_process` or `streaming` . These functions will be invoked based on their definitions. +- `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_assistant`, without the implementations. The `Run` or `event handler of stream` will raise a `requires_action` status based on the function definitions. Your code must handle this status and call the appropriate functions. + +For more details about calling functions by code, refer to [`sample_assistants_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/sample_assistants_stream_eventhandler_with_functions.py) and [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/sample_assistants_functions.py). 
+ +For more details about requirements and specification of functions, refer to [Function Tool Specifications](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/FunctionTool.md) + +Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/user_functions.py) in `toolset`: + + +```python +functions = FunctionTool(user_functions) +toolset = ToolSet() +toolset.add(functions) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, +) +``` + + + +For asynchronous functions, you must import `AIProjectClient` from `azure.ai.projects.aio` and use `AsyncFunctionTool`. Here is an example using [asynchronous user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/async_samples/user_async_functions.py): + +```python +from azure.ai.projects.aio import AIProjectClient +``` + + + +```python +functions = AsyncFunctionTool(user_async_functions) + +toolset = AsyncToolSet() +toolset.add(functions) + +assistant = await assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + toolset=toolset, +) +``` + + + +#### Create Assistant With Azure Function Call + +The AI assistant leverages Azure Functions triggered asynchronously via Azure Storage Queues. To enable the assistant to perform Azure Function calls, you must set up the corresponding `AzureFunctionTool`, specifying input and output queues as well as parameter definitions. 
+ +Example Python snippet illustrating how you create an assistant utilizing the Azure Function Tool: + + + +```python +azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_service_endpoint, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_service_endpoint, + ), +) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="azure-function-assistant-foo", + instructions=f"You are a helpful support assistant. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. Always responds with \"Foo says\" and then the response from the tool.", + tools=azure_function_tool.definitions, +) +print(f"Created assistant, assistant ID: {assistant.id}") +``` + + + +--- + +**Limitations** + +Currently, the Azure Function integration for the AI Assistant has the following limitations: + +- Azure Functions integration is available **only for non-streaming scenarios**. +- Supported trigger for Azure Function is currently limited to **Queue triggers** only. + HTTP or other trigger types and streaming responses are not supported at this time. + +--- + +**Create and Deploy Azure Function** + +Before you can use the assistant with AzureFunctionTool, you need to create and deploy Azure Function. 
+ +Below is an example Python Azure Function responding to queue-triggered messages and placing responses on the output queue: + +```python +import azure.functions as func +import logging +import json + +app = func.FunctionApp() + +@app.get_weather(arg_name="inputQueue", + queue_name="input", + connection="AzureWebJobsStorage") +@app.queue_output(arg_name="outputQueue", + queue_name="output", + connection="AzureWebJobsStorage") +def get_weather(inputQueue: func.QueueMessage, outputQueue: func.Out[str]): + try: + messagepayload = json.loads(inputQueue.get_body().decode("utf-8")) + location = messagepayload["location"] + weather_result = f"Weather is 82 degrees and sunny in {location}." + + response_message = { + "Value": weather_result, + "CorrelationId": messagepayload["CorrelationId"] + } + + outputQueue.set(json.dumps(response_message)) + + logging.info(f"Sent message to output queue with message {response_message}") + except Exception as e: + logging.error(f"Error processing message: {e}") + return +``` + +> **Important:** Both input and output payloads must contain the `CorrelationId`, which must match in request and response. + +--- + +**Azure Function Project Creation and Deployment** + +To deploy your function to Azure properly, follow Microsoft's official documentation step by step: + +[Azure Functions Python Developer Guide](https://learn.microsoft.com/azure/azure-functions/create-first-function-cli-python?tabs=windows%2Cbash%2Cazure-cli%2Cbrowser) + +**Summary of required steps:** + +- Use the Azure CLI or Azure Portal to create an Azure Function App. +- Enable System Managed Identity for your Azure Function App. +- Assign appropriate permissions to your Azure Function App identity as outlined in the Role Assignments section below +- Create input and output queues in Azure Storage. +- Deploy your Function code. + +--- + +**Verification and Testing Azure Function** + +To ensure that your Azure Function deployment functions correctly: + +1. 
Place the following style message manually into the input queue (`input`): + +{ + "location": "Seattle", + "CorrelationId": "42" +} + +Check the output queue (`output`) and validate the structured message response: + +{ + "Value": "The weather in Seattle is sunny and warm.", + "CorrelationId": "42" +} + +--- + +**Required Role Assignments (IAM Configuration)** + +Clearly assign the following Azure IAM roles to ensure correct permissions: + +1. **Azure Function App's identity:** + - Enable system managed identity through Azure Function App > Settings > Identity. + - Add permission to storage account: + - Go to **Storage Account > Access control (IAM)** and add role assignment: + - `Storage Queue Data Contributor` assigned to Azure Function managed identity + +2. **Azure AI Project Identity:** + +Ensure your Azure AI Project identity has the following storage account permissions: +- `Storage Account Contributor` +- `Storage Blob Data Contributor` +- `Storage File Data Privileged Contributor` +- `Storage Queue Data Contributor` +- `Storage Table Data Contributor` + +--- + +**Additional Important Configuration Notes** + +- The Azure Function configured above uses the `AzureWebJobsStorage` connection string for queue connectivity. You may alternatively use managed identity-based connections as described in the official Azure Functions Managed Identity documentation. +- Storage queues you specify (`input` & `output`) should already exist in the storage account before the Function deployment or invocation, created manually via Azure portal or CLI. +- When using Azure storage account connection strings, make sure the account has enabled storage account key access (`Storage Account > Settings > Configuration`). + +--- + +With the above steps complete, your Azure Function integration with your AI Assistant is ready for use. + + +#### Create Assistant With Logic Apps + +Logic Apps allow HTTP requests to trigger actions. 
For more information, refer to the guide [Logic App Workflows for Function Calling](https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling). + +Your Logic App must be in the same resource group as your Azure AI Project, shown in the Azure Portal. Assistants SDK accesses Logic Apps through Workflow URLs, which are fetched and called as requests in functions. + +Below is an example of how to create an Azure Logic App utility tool and register a function with it. + + + +```python + +# Create the project client +assistants_client = AssistantsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Extract subscription and resource group from the project scope +subscription_id = os.environ["SUBSCRIPTION_ID"] +resource_group = os.environ["resource_group_name"] + +# Logic App details +logic_app_name = "" +trigger_name = "" + +# Create and initialize AzureLogicAppTool utility +logic_app_tool = AzureLogicAppTool(subscription_id, resource_group) +logic_app_tool.register_logic_app(logic_app_name, trigger_name) +print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.") + +# Create the specialized "send_email_via_logic_app" function for your assistant tools +send_email_func = create_send_email_function(logic_app_tool, logic_app_name) + +# Prepare the function tools for the assistant +functions_to_use: Set = { + fetch_current_datetime, + send_email_func, # This references the AzureLogicAppTool instance via closure +} +``` + + + +After this the functions can be incorporated normally into code using `FunctionTool`. + + +#### Create Assistant With OpenAPI + +OpenAPI specifications describe REST operations against a specific endpoint. Assistants SDK can read an OpenAPI spec, create a function from it, and call that function against the REST endpoint without additional client-side execution. 
+ +Here is an example creating an OpenAPI tool (using anonymous authentication): + + + +```python + +with open("./weather_openapi.json", "r") as f: + openapi_weather = jsonref.loads(f.read()) + +with open("./countries.json", "r") as f: + openapi_countries = jsonref.loads(f.read()) + +# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) +auth = OpenApiAnonymousAuthDetails() + +# Initialize assistant OpenApi tool using the read in OpenAPI spec +openapi_tool = OpenApiTool( + name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth +) +openapi_tool.add_definition( + name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth +) + +# Create assistant with OpenApi tool and process assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=openapi_tool.definitions, + ) +``` + + + +#### Create an Assistant with Fabric + +To enable your Assistant to answer queries using Fabric data, use `FabricTool` along with a connection to the Fabric resource. + +Here is an example: + + + +```python +conn_id = os.environ["FABRIC_CONNECTION_ID"] + +print(conn_id) + +# Initialize an Assistant Fabric tool and add the connection id +fabric = FabricTool(connection_id=conn_id) + +# Create an Assistant with the Fabric tool and process an Assistant run +with assistants_client: + assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=fabric.definitions, + headers={"x-ms-enable-preview": "true"}, + ) +``` + + + + +#### Create Thread + +For each session or conversation, a thread is required. 
Here is an example: + + + +```python +thread = assistants_client.create_thread() +``` + + + +#### Create Thread with Tool Resource + +In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector store and upload a file, enable an Assistant for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument. + + + +```python +file = assistants_client.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") +print(f"Uploaded file, file ID: {file.id}") + +vector_store = assistants_client.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# Create file search tool with resources followed by creating assistant +file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + +assistant = assistants_client.create_assistant( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, +) + +print(f"Created assistant, ID: {assistant.id}") + +# Create thread with file resources. +# If the assistant has multiple threads, only this thread can search this file. 
+thread = assistants_client.create_thread(tool_resources=file_search.resources) +``` + + +#### Create Message + +To create a message for assistant to process, you pass `user` as `role` and a question as `content`: + + + +```python +message = assistants_client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") +``` + + + +#### Create Message with File Search Attachment + +To attach a file to a message for content searching, you use `MessageAttachment` and `FileSearchTool`: + + + +```python +attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) +message = assistants_client.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] +) +``` + + + +#### Create Message with Code Interpreter Attachment + +To attach a file to a message for data analysis, use `MessageAttachment` and `CodeInterpreterTool` classes. You must pass `CodeInterpreterTool` as `tools` or `toolset` in `create_assistant` call or the file attachment cannot be opened for code interpreter. 
+Here is an example of passing `CodeInterpreterTool` as a tool:
If you are using `function tools`, regardless of whether they are provided as a `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`.
ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AssistantStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AssistantStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") +``` + + + +In the code above, because an `event_handler` object is not passed to the `create_stream` function, the SDK will instantiate `AssistantEventHandler` or `AsyncAssistantEventHandler` as the default event handler and produce an iterable object with `event_type` and `event_data`. `AssistantEventHandler` and `AsyncAssistantEventHandler` are overridable. Here is an example: + + + +```python +# With AssistantEventHandler[str], the return type for each event functions is optional string. +class MyEventHandler(AssistantEventHandler[str]): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: + return f"Text delta received: {delta.text}" + + def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: + return f"ThreadMessage created. ID: {message.id}, Status: {message.status}" + + def on_thread_run(self, run: "ThreadRun") -> Optional[str]: + return f"ThreadRun status: {run.status}" + + def on_run_step(self, step: "RunStep") -> Optional[str]: + return f"RunStep type: {step.type}, Status: {step.status}" + + def on_error(self, data: str) -> Optional[str]: + return f"An error occurred. Data: {data}" + + def on_done(self) -> Optional[str]: + return "Stream completed." 
+ + def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: + return f"Unhandled Event Type: {event_type}, Data: {event_data}" +``` + + + + + + +```python +with assistants_client.create_stream( + thread_id=thread.id, assistant_id=assistant.id, event_handler=MyEventHandler() +) as stream: + for event_type, event_data, func_return in stream: + print(f"Received data.") + print(f"Streaming receive Event Type: {event_type}") + print(f"Event Data: {str(event_data)[:100]}...") + print(f"Event Function return: {func_return}\n") +``` + + + +As you can see, this SDK parses the events and produces various event types similar to OpenAI assistants. In your use case, you might not be interested in handling all these types and may decide to parse the events on your own. To achieve this, please refer to [override base event handler](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/sample_assistants_stream_with_base_override_eventhandler.py). + +``` +Note: Multiple streaming processes may be chained behind the scenes. + +When the SDK receives a `ThreadRun` event with the status `requires_action`, the next event will be `Done`, followed by termination. The SDK will submit the tool calls using the same event handler. The event handler will then chain the main stream with the tool stream. + +Consequently, when you iterate over the streaming using a for loop similar to the example above, the for loop will receive events from the main stream followed by events from the tool stream. +``` + + +#### Retrieve Message + +To retrieve messages from assistants, use the following example: + + + +```python +messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) + +# The messages are following in the reverse order, +# we will iterate them and output only text contents. 
+for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") +``` + + + +In addition, `messages` and `messages.data[]` offer helper properties such as `text_messages`, `image_contents`, `file_citation_annotations`, and `file_path_annotations` to quickly retrieve content from one message or all messages. + +### Retrieve File + +Files uploaded by Assistants cannot be retrieved back. If your use case need to access the file content uploaded by the Assistants, you are advised to keep an additional copy accessible by your application. However, files generated by Assistants are retrievable by `save_file` or `get_file_content`. + +Here is an example retrieving file ids from messages and save to the local drive: + + + +```python +messages = assistants_client.list_messages(thread_id=thread.id) +print(f"Messages: {messages}") + +for image_content in messages.image_contents: + file_id = image_content.image_file.file_id + print(f"Image File ID: {file_id}") + file_name = f"{file_id}_image_file.png" + assistants_client.save_file(file_id=file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + +for file_path_annotation in messages.file_path_annotations: + print(f"File Paths:") + print(f"Type: {file_path_annotation.type}") + print(f"Text: {file_path_annotation.text}") + print(f"File ID: {file_path_annotation.file_path.file_id}") + print(f"Start Index: {file_path_annotation.start_index}") + print(f"End Index: {file_path_annotation.end_index}") +``` + + + +Here is an example to use `get_file_content`: + +```python +from pathlib import Path + +async def save_file_content(client, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None): + # Determine the target directory + path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() + path.mkdir(parents=True, 
exist_ok=True) + + # Retrieve the file content + file_content_stream = await client.get_file_content(file_id) + if not file_content_stream: + raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") + + # Collect all chunks asynchronously + chunks = [] + async for chunk in file_content_stream: + if isinstance(chunk, (bytes, bytearray)): + chunks.append(chunk) + else: + raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") + + target_file_path = path / file_name + + # Write the collected content to the file synchronously + with open(target_file_path, "wb") as file: + for chunk in chunks: + file.write(chunk) +``` + +#### Teardown + +To remove resources after completing tasks, use the following functions: + + + +```python +# Delete the file when done +assistants_client.delete_vector_store(vector_store.id) +print("Deleted vector store") + +assistants_client.delete_file(file_id=file.id) +print("Deleted file") + +# Delete the assistant when done +assistants_client.delete_assistant(assistant.id) +print("Deleted assistant") +``` + + + +### Tracing + +You can add an Application Insights Azure resource to your Azure AI Foundry project. See the Tracing tab in your AI Foundry project. If one was enabled, you can get the Application Insights connection string, configure your Assistants, and observe the full execution path through Azure Monitor. Typically, you might want to start tracing before you create an Assistant. + +#### Installation + +Make sure to install OpenTelemetry and the Azure SDK tracing plugin via + +```bash +pip install opentelemetry +pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry +``` + +You will also need an exporter to send telemetry to your observability backend. You can print traces to the console or use a local viewer such as [Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash). 
+ +To connect to Aspire Dashboard or another OpenTelemetry compatible backend, install OTLP exporter: + +```bash +pip install opentelemetry-exporter-otlp +``` + +#### How to enable tracing + +Here is a code sample that shows how to enable Azure Monitor tracing: + + + +```python +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor + +# Enable Azure Monitor tracing +application_insights_connection_string = os.environ["AI_APPINSIGHTS_CONNECTION_STRING"] +configure_azure_monitor(connection_string=application_insights_connection_string) + +# enable additional instrumentations +enable_telemetry() + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with assistants_client: +``` + + + +In addition, you might find helpful to see the tracing logs in console. You can achieve by the following code: + +```python +from azure.ai.assistants.telemetry import enable_telemetry + +enable_telemetry(destination=sys.stdout) +``` +#### How to trace your own functions + +The decorator `trace_function` is provided for tracing your own function calls using OpenTelemetry. By default the function name is used as the name for the span. Alternatively you can provide the name for the span as a parameter to the decorator. + +This decorator handles various data types for function parameters and return values, and records them as attributes in the trace span. The supported data types include: +* Basic data types: str, int, float, bool +* Collections: list, dict, tuple, set + * Special handling for collections: + - If a collection (list, dict, tuple, set) contains nested collections, the entire collection is converted to a string before being recorded as an attribute. + - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes. + +Object types are omitted, and the corresponding parameter is not traced. 
+The parameters are recorded in attributes `code.function.parameter.` and the return value is recorded in the attribute `code.function.return.value`
+ +For more information, see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging) + +### Reporting issues + +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-assistants" in the title or content. + + +## Next steps -- Python 3.8 or later is required to use this package. -- You need an [Azure subscription][azure_sub] to use this package. -- An existing Azure Ai Assistants instance. +Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-projects/samples) folder, containing fully runnable Python code for synchronous and asynchronous clients. +Explore the [AI Starter Template](https://aka.ms/azsdk/azure-ai-projects/python/ai-starter-template). This template creates an Azure AI Foundry hub, project and connected resources including Azure OpenAI Service, AI Search and more. It also deploys a simple chat application to Azure Container Apps. ## Contributing @@ -34,10 +1198,14 @@ see the Code of Conduct FAQ or contact opencode@microsoft.com with any additional questions or comments. 
+[samples]: https://aka.ms/azsdk/azure-ai-projects/python/samples/ [code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ -[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[entra_id]: https://learn.microsoft.com/azure/ai-services/authentication?tabs=powershell#authenticate-with-microsoft-entra-id [azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials [azure_identity_pip]: https://pypi.org/project/azure-identity/ [default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential [pip]: https://pypi.org/project/pip/ [azure_sub]: https://azure.microsoft.com/free/ +[evaluators]: https://learn.microsoft.com/azure/ai-studio/how-to/develop/evaluate-sdk +[azure_ai_evaluation]: https://learn.microsoft.com/python/api/overview/azure/ai-evaluation-readme +[evaluator_library]: https://learn.microsoft.com/azure/ai-studio/how-to/evaluate-generative-ai-app#view-and-manage-the-evaluators-in-the-evaluator-library \ No newline at end of file diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py index bbbe50c2865c..44045e154c0f 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_azure_functions_async.py @@ -18,7 +18,7 @@ pip install azure-ai-projects azure-identity Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. 
Please see Getting Started with Azure Functions page for more information on Azure Functions: https://learn.microsoft.com/azure/azure-functions/functions-get-started diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py index eb4ac5bfca82..7c4210f635e0 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio import time diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py index b6400151054e..2ce29e6db236 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_azure_monitor_tracing.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry aiohttp Set these environment variables with your own values: - * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Foundry project. + * PROJECT_ENDPOINT - the Azure AI Assistants endpoint. * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py index c1cfddcd8414..605391bc6ad7 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async_with_console_tracing.py @@ -22,7 +22,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Foundry project. + * PROJECT_ENDPOINT - the Azure AI Assistants endpoint. * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. """ diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py index aa68d45a132f..4bab1ba8aa61 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py index 571ff5d79104..0fabad0a180a 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio import os diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py index 442b4d588433..78256571f64b 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_code_interpreter_attachment_enterprise_search_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio import os diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py index e671d6e4b348..399e5610e771 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_functions_async.py @@ -18,7 +18,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio import time diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py index e599b8478365..51b1528405c8 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_json_schema_async.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity pydantic Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py index 1afe57f10980..49ea7ef96fed 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_run_with_toolset_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import os, asyncio diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py index e4f7ea016922..5a1d47d1f167 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio from typing import Any, Optional diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py index 836690e58e4b..e62fe546a05a 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_functions_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio from typing import Any diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py index 65c281f399cd..d1b6276fe284 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_eventhandler_with_toolset_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio from typing import Any diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py index 592841c1764e..c6a3c7f8933d 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_iteration_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py index d8869c8896df..edc8c7bab614 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_stream_with_base_override_eventhandler_async.py @@ -20,7 +20,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set these environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio import json diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py index d3447da92084..e4ebf2ddd616 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_enterprise_file_search_async.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity azure-ai-ml aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio import os diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py index 87478cfa4933..252a62cee11b 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_batch_file_search_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py index 884634138616..103825f2492a 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_enterprise_file_search_async.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity azure-ai-ml aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio import os diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py index 97d1a3441f39..b9c0050348b4 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_vector_store_file_search_async.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import asyncio import os diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py index cdc7b1ecd666..54ea32b05291 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_with_file_search_attachment_async.py @@ -17,7 +17,7 @@ pip install azure-ai-assistants azure-identity aiohttp Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. """ import asyncio diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py index 708f79ffe851..d7f7ee874227 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py index fe545c391563..8c819f483d73 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_assistant_team_custom_team_leader.py @@ -16,7 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. """ diff --git a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py index 1798e994ac2b..0e442868652d 100644 --- a/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py +++ b/sdk/ai/azure-ai-assistants/samples/multiassistant/sample_assistants_multi_assistant_team.py @@ -15,7 +15,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py index 5c0354743ee6..089cd4e5679b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_azure_functions.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py index 886ffc5a7445..ae18f5c338e2 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py index aa9c4a56f260..19ead3c9bd9b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_azure_monitor_tracing.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity azure-monitor-opentelemetry Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py index a5abe60f8053..daa5f8cf0f02 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing.py @@ -22,8 +22,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py index a801c0637b03..3f0f8be8055b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics_with_console_tracing_custom_attributes.py @@ -23,8 +23,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py index d5c6697ef78d..4f5cbd81f563 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
3) BING_CONNECTION_NAME - The connection name of the Bing connection, as found in the diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py index d31d601c361a..fc5fc10c7441 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py index 52a601be3828..846541c92dfd 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_code_interpreter_attachment_enterprise_search.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py index 58c0f5b1d472..262de38b8d1c 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_enterprise_file_search.py @@ -14,8 +14,7 @@ pip install azure-ai-assistants azure-identity azure-ai-ml Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py index 6c9b89114ee0..7f00ceda4050 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_fabric.py @@ -18,7 +18,7 @@ pip install azure-ai-assistants azure-identity Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import os diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py index 05ec5c652915..206b2893f9a2 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_file_search.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py index a8c5345f5588..39f548dc7af6 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py index 8f85e904056a..161636bd3a12 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_azure_monitor_tracing.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py index 2bae3865b4c1..04398187aa35 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_functions_with_console_tracing.py @@ -22,8 +22,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py index d8fbebf4bbf1..f44455a7b45e 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_json_schema.py @@ -15,8 +15,7 @@ pip install azure-ai-assistants azure-identity pydantic Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py index b5c80eabc7dc..7ce16ce374f4 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_logic_apps.py @@ -23,8 +23,7 @@ pip install azure-ai-assistants azure-identity Set this environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py index 082e7a132f35..5e4e9dcb5ad1 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi.py @@ -18,8 +18,7 @@ pip install azure-ai-assistants azure-identity jsonref Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py index 75d4c86727b2..b460379733d0 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_openapi_connection_auth.py @@ -28,8 +28,8 @@ pip install azure-ai-assistants azure-identity jsonref Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your Foundry Project. - PROJECT_OPENAPI_CONNECTION_NAME - the connection name for the OpenAPI connection authentication + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. + OPENAPI_CONNECTION_ID - the connection ID for the OpenAPI connection, taken from Azure AI Foundry. 
MODEL_DEPLOYMENT_NAME - name of the model deployment in the project to use Assistants against """ @@ -45,7 +45,6 @@ credential=DefaultAzureCredential(), ) -connection_name = os.environ["PROJECT_OPENAPI_CONNECTION_NAME"] model_name = os.environ["MODEL_DEPLOYMENT_NAME"] connection_id = os.environ["OPENAPI_CONNECTION_ID"] diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py index b324841552f1..bd958ae0f360 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_run_with_toolset.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py index 9cba3ad5417c..c8cadc69cd73 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_sharepoint.py @@ -20,7 +20,7 @@ pip install azure-ai-assistants azure-identity Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
""" import os diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py index 2af7dd61353f..a8aac8f644ea 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py index f162c9ed662e..24cf30480aef 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_azure_monitor_tracing.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity opentelemetry-sdk azure-monitor-opentelemetry Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py index 8d65ee6bddf3..aa8c12eca80c 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) BING_CONNECTION_NAME - The connection name of the Bing connection, as found in the "Connected resources" tab diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py index 057d81e0e71b..16a77a2e7ccc 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_console_tracing.py @@ -22,8 +22,7 @@ pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py index 3c526b92689d..8803da00427d 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_functions.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py index b90de1826877..4f9bb8fc9e59 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_toolset.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py index d3f1102cc4d3..7f55066d9c97 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py index 10da7ab431da..9bcf287d50cf 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
3) BING_CONNECTION_NAME - The connection name of the Bing connection, as found in the "Connected resources" tab diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py index 16a4e6fcc2be..5c2d76b09573 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_file_search.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py index 4e0735d1c0af..fcd5300a27f0 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_toolset.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py index a34c7def89e8..cda531f4cf2a 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_with_base_override_eventhandler.py @@ -19,8 +19,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py index 88da2bb1a91e..6eaf3a02d496 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_enterprise_file_search.py @@ -14,8 +14,7 @@ pip install azure-ai-assistants azure-identity azure-ai-ml Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py index b3f55cab1277..97e206e1f5bf 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_batch_file_search.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py index a31924190c7a..bdbee38bc942 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_vector_store_file_search.py @@ -14,8 +14,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py index 9eb050971bb9..3262175d194a 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_code_interpreter_file_attachment.py @@ -18,8 +18,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py index 0751c365255b..08207bdb0975 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_enterprise_search_attachment.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py index 0751c365255b..08207bdb0975 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_file_search_attachment.py @@ -17,8 +17,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. """ diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py index 86900f428c99..e1afcedff945 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_with_resources_in_thread.py @@ -16,8 +16,7 @@ pip install azure-ai-assistants azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your - Azure AI Foundry project. + 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
""" From 84c9a7dc39c37755bb3097a43a465a4acb4b6953 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Tue, 15 Apr 2025 14:15:06 -0700 Subject: [PATCH 08/11] Fix readme-s and spelling --- .vscode/cspell.json | 8 +++ sdk/ai/azure-ai-assistants/CHANGELOG.md | 4 +- sdk/ai/azure-ai-assistants/README.md | 64 +++++++++---------- .../sample_assistants_bing_grounding.py | 4 +- ...ts_stream_iteration_with_bing_grounding.py | 4 +- sdk/ai/azure-ai-assistants/setup.py | 3 +- sdk/ai/azure-ai-assistants/tests/README.md | 12 +--- 7 files changed, 51 insertions(+), 48 deletions(-) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index a8bee859a72d..9241a88702ea 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -1362,6 +1362,14 @@ "azureopenai" ] }, + { + "filename": "sdk/ai/azure-ai-assistants/**", + "words": [ + "GENAI", + "fspath", + "wttr" + ] + } { "filename": "sdk/ai/azure-ai-inference/**", "words": [ diff --git a/sdk/ai/azure-ai-assistants/CHANGELOG.md b/sdk/ai/azure-ai-assistants/CHANGELOG.md index 628743d283a9..40bc6bf7aa1c 100644 --- a/sdk/ai/azure-ai-assistants/CHANGELOG.md +++ b/sdk/ai/azure-ai-assistants/CHANGELOG.md @@ -1,5 +1,7 @@ # Release History -## 1.0.0b1 (1970-01-01) +## 1.0.0b1 (Unreleased) + +### Features Added - Initial version diff --git a/sdk/ai/azure-ai-assistants/README.md b/sdk/ai/azure-ai-assistants/README.md index 82a70cfba73f..6027cdbc0ed4 100644 --- a/sdk/ai/azure-ai-assistants/README.md +++ b/sdk/ai/azure-ai-assistants/README.md @@ -60,7 +60,7 @@ To report an issue with the client library, or request additional features, plea ### Prerequisite -- Python 3.8 or later. +- Python 3.9 or later. - An [Azure subscription][azure_sub]. - A [project in Azure AI Foundry](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects). - The project connection string. It can be found in your Azure AI Foundry project overview page, under "Project details". 
Below we will assume the environment variable `PROJECT_CONNECTION_STRING` was defined to hold this value. @@ -115,9 +115,9 @@ assistant_client = AssistantsClient( ## Examples -#### Create Assistant +### Create Assistant -Before creating an Assistant, you need to set up Azure resources to deploy your model. [Create a New Assistant Quickstart](https://learn.microsoft.com/azure/ai-services/assistants/quickstart?pivots=programming-language-python-azure) details selecting and deploying your Assistant Setup. +Before creating an Assistant, you need to set up Azure resources to deploy your model. [Create a New Assistant Quickstart](https://learn.microsoft.com/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure) details selecting and deploying your Assistant Setup. Here is an example of how to create an Assistant: @@ -177,7 +177,7 @@ assistant = assistants_client.create_assistant( In the following sections, we show you sample code in either `toolset` or combination of `tools` and `tool_resources`. -#### Create Assistant with File Search +### Create Assistant with File Search To perform file search by an Assistant, we first need to upload a file, create a vector store, and associate the file to the vector store. Here is an example: @@ -204,7 +204,7 @@ assistant = assistants_client.create_assistant( -#### Create Assistant with Enterprise File Search +### Create Assistant with Enterprise File Search We can upload file to Azure as it is shown in the example, or use the existing Azure blob storage. In the code below we demonstrate how this can be achieved. First we upload file to azure and create `VectorStoreDataSource`, which then is used to create vector store. This vector store is then given to the `FileSearchTool` constructor. 
@@ -256,7 +256,7 @@ file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) -#### Create Assistant with Code Interpreter +### Create Assistant with Code Interpreter Here is an example to upload a file and use it for code interpreter by an Assistant: @@ -282,7 +282,7 @@ assistant = assistants_client.create_assistant( -#### Create Assistant with Bing Grounding +### Create Assistant with Bing Grounding To enable your Assistant to perform search through Bing search API, you use `BingGroundingTool` along with a connection. @@ -291,7 +291,7 @@ Here is an example: ```python -conn_id = os.environ["AZURE_BING_CONECTION_ID"] +conn_id = os.environ["AZURE_BING_CONNECTION_ID"] print(conn_id) @@ -311,9 +311,9 @@ with assistants_client: -#### Create Assistant with Azure AI Search +### Create Assistant with Azure AI Search -Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. Ideal for knowledge base insights, information discovery, and automation. Creating an Assistant with Azure AI Search requires an existing Azure AI Search Index. For more information and setup guides, see [Azure AI Search Tool Guide](https://learn.microsoft.com/azure/ai-services/assistants/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search). +Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. Ideal for knowledge base insights, information discovery, and automation. Creating an Assistant with Azure AI Search requires an existing Azure AI Search Index. 
For more information and setup guides, see [Azure AI Search Tool Guide](https://learn.microsoft.com/azure/ai-services/agents/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search). Here is an example to integrate Azure AI Search: @@ -370,18 +370,18 @@ for message in messages.data: -#### Create Assistant with Function Call +### Create Assistant with Function Call You can enhance your Assistants by defining callback functions as function tools. These can be provided to `create_assistant` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions: - `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within `create_and_run_process` or `streaming` . These functions will be invoked based on their definitions. - `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_assistant`, without the implementations. The `Run` or `event handler of stream` will raise a `requires_action` status based on the function definitions. Your code must handle this status and call the appropriate functions. -For more details about calling functions by code, refer to [`sample_assistants_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/sample_assistants_stream_eventhandler_with_functions.py) and [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/sample_assistants_functions.py). 
+For more details about calling functions by code, refer to [`sample_assistants_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/sample_assistants_functions.py). For more details about requirements and specification of functions, refer to [Function Tool Specifications](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/FunctionTool.md) -Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/user_functions.py) in `toolset`: +Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/user_functions.py) in `toolset`: ```python @@ -399,7 +399,7 @@ assistant = assistants_client.create_assistant( -For asynchronous functions, you must import `AIProjectClient` from `azure.ai.projects.aio` and use `AsyncFunctionTool`. Here is an example using [asynchronous user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/async_samples/user_async_functions.py): +For asynchronous functions, you must import `AIProjectClient` from `azure.ai.projects.aio` and use `AsyncFunctionTool`. 
Here is an example using [asynchronous user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py): ```python from azure.ai.projects.aio import AIProjectClient @@ -423,7 +423,7 @@ assistant = await assistants_client.create_assistant( -#### Create Assistant With Azure Function Call +### Create Assistant With Azure Function Call The AI assistant leverages Azure Functions triggered asynchronously via Azure Storage Queues. To enable the assistant to perform Azure Function calls, you must set up the corresponding `AzureFunctionTool`, specifying input and output queues as well as parameter definitions. @@ -585,7 +585,7 @@ Ensure your Azure AI Project identity has the following storage account permissi With the above steps complete, your Azure Function integration with your AI Assistant is ready for use. -#### Create Assistant With Logic Apps +### Create Assistant With Logic Apps Logic Apps allow HTTP requests to trigger actions. For more information, refer to the guide [Logic App Workflows for Function Calling](https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling). @@ -631,7 +631,7 @@ functions_to_use: Set = { After this the functions can be incorporated normally into code using `FunctionTool`. -#### Create Assistant With OpenAPI +### Create Assistant With OpenAPI OpenAPI specifications describe REST operations against a specific endpoint. Assistants SDK can read an OpenAPI spec, create a function from it, and call that function against the REST endpoint without additional client-side execution. @@ -670,7 +670,7 @@ with assistants_client: -#### Create an Assistant with Fabric +### Create an Assistant with Fabric To enable your Assistant to answer queries using Fabric data, use `FabricTool` along with a connection to the Fabric resource. 
@@ -700,7 +700,7 @@ with assistants_client: -#### Create Thread +### Create Thread For each session or conversation, a thread is required. Here is an example: @@ -712,7 +712,7 @@ thread = assistants_client.create_thread() -#### Create Thread with Tool Resource +### Create Thread with Tool Resource In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector store and upload a file, enable an Assistant for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument. @@ -743,7 +743,7 @@ thread = assistants_client.create_thread(tool_resources=file_search.resources) ``` -#### Create Message +### Create Message To create a message for assistant to process, you pass `user` as `role` and a question as `content`: @@ -755,7 +755,7 @@ message = assistants_client.create_message(thread_id=thread.id, role="user", con -#### Create Message with File Search Attachment +### Create Message with File Search Attachment To attach a file to a message for content searching, you use `MessageAttachment` and `FileSearchTool`: @@ -770,7 +770,7 @@ message = assistants_client.create_message( -#### Create Message with Code Interpreter Attachment +### Create Message with Code Interpreter Attachment To attach a file to a message for data analysis, use `MessageAttachment` and `CodeInterpreterTool` classes. You must pass `CodeInterpreterTool` as `tools` or `toolset` in `create_assistant` call or the file attachment cannot be opened for code interpreter. @@ -824,11 +824,11 @@ message = assistants_client.create_message( -#### Create Run, Run_and_Process, or Stream +### Create Run, Run_and_Process, or Stream To process your message, you can use `create_run`, `create_and_process_run`, or `create_stream`. -`create_run` requests the Assistant to process the message without polling for the result. 
If you are using `function tools` regardless as `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/sample_assistants_functions.py). +`create_run` requests the Assistant to process the message without polling for the result. If you are using `function tools` regardless as `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py). Here is an example of `create_run` and poll until the run is completed: @@ -942,7 +942,7 @@ with assistants_client.create_stream( -As you can see, this SDK parses the events and produces various event types similar to OpenAI assistants. In your use case, you might not be interested in handling all these types and may decide to parse the events on your own. To achieve this, please refer to [override base event handler](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/sample_assistants_stream_with_base_override_eventhandler.py). +As you can see, this SDK parses the events and produces various event types similar to OpenAI assistants. In your use case, you might not be interested in handling all these types and may decide to parse the events on your own. 
To achieve this, please refer to [override base event handler](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_with_base_override_eventhandler.py). ``` Note: Multiple streaming processes may be chained behind the scenes. @@ -953,7 +953,7 @@ Consequently, when you iterate over the streaming using a for loop similar to th ``` -#### Retrieve Message +### Retrieve Message To retrieve messages from assistants, use the following example: @@ -1035,7 +1035,7 @@ async def save_file_content(client, file_id: str, file_name: str, target_dir: Op file.write(chunk) ``` -#### Teardown +### Teardown To remove resources after completing tasks, use the following functions: @@ -1056,11 +1056,11 @@ print("Deleted assistant") -### Tracing +## Tracing You can add an Application Insights Azure resource to your Azure AI Foundry project. See the Tracing tab in your AI Foundry project. If one was enabled, you can get the Application Insights connection string, configure your Assistants, and observe the full execution path through Azure Monitor. Typically, you might want to start tracing before you create an Assistant. -#### Installation +### Installation Make sure to install OpenTelemetry and the Azure SDK tracing plugin via @@ -1077,7 +1077,7 @@ To connect to Aspire Dashboard or another OpenTelemetry compatible backend, inst pip install opentelemetry-exporter-otlp ``` -#### How to enable tracing +### How to enable tracing Here is a code sample that shows how to enable Azure Monitor tracing: @@ -1110,7 +1110,7 @@ from azure.ai.assistants.telemetry import enable_telemetry enable_telemetry(destination=sys.stdout) ``` -#### How to trace your own functions +### How to trace your own functions The decorator `trace_function` is provided for tracing your own function calls using OpenTelemetry. By default the function name is used as the name for the span. 
Alternatively you can provide the name for the span as a parameter to the decorator. diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py index 4f5cbd81f563..b70e5db51aaa 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_bing_grounding.py @@ -19,7 +19,7 @@ 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) BING_CONNECTION_NAME - The connection name of the Bing connection, as found in the + 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, as found in the "Connected resources" tab in your Azure AI Foundry project. """ @@ -35,7 +35,7 @@ ) # [START create_assistant_with_bing_grounding_tool] -conn_id = os.environ["AZURE_BING_CONECTION_ID"] +conn_id = os.environ["AZURE_BING_CONNECTION_ID"] print(conn_id) diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py index 9bcf287d50cf..b90c016a58fe 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_iteration_with_bing_grounding.py @@ -20,7 +20,7 @@ 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. 
- 3) BING_CONNECTION_NAME - The connection name of the Bing connection, as found in the "Connected resources" tab + 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, as found in the "Connected resources" tab in your Azure AI Foundry project. """ @@ -45,7 +45,7 @@ ) with assistants_client: - bing_connection_id = os.environ["AZURE_BING_CONECTION_ID"] + bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] bing = BingGroundingTool(connection_id=bing_connection_id) print(f"Bing Connection ID: {bing_connection_id}") diff --git a/sdk/ai/azure-ai-assistants/setup.py b/sdk/ai/azure-ai-assistants/setup.py index 2752c3ba025c..a3df58cff52e 100644 --- a/sdk/ai/azure-ai-assistants/setup.py +++ b/sdk/ai/azure-ai-assistants/setup.py @@ -42,7 +42,6 @@ "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -67,5 +66,5 @@ "azure-core>=1.30.0", "typing-extensions>=4.6.0", ], - python_requires=">=3.8", + python_requires=">=3.9", ) diff --git a/sdk/ai/azure-ai-assistants/tests/README.md b/sdk/ai/azure-ai-assistants/tests/README.md index a69b9c40bdeb..461a9fdd36b6 100644 --- a/sdk/ai/azure-ai-assistants/tests/README.md +++ b/sdk/ai/azure-ai-assistants/tests/README.md @@ -1,4 +1,4 @@ -# Azure AI Project client library tests for Python +# Azure AI Assistants client library tests for Python The instructions below are for running tests locally, on a Windows machine, against the live service using a local build of the client library. 
@@ -17,7 +17,7 @@ The instructions below are for running tests locally, on a Windows machine, agai ``` - Install the resulting wheel (update version `1.0.0b5` to the current one): ```bash - pip install dist\azure_ai_projects-1.0.0b5-py3-none-any.whl --user --force-reinstall + pip install dist\azure_ai_assistants-1.0.0b5-py3-none-any.whl --user --force-reinstall ``` ## Log in to Azure @@ -28,7 +28,7 @@ az login ## Setup up environment variables -Edit the file `azure_ai_projects_tests.env` located in the folder above. Follow the instructions there on how to set up Azure AI Foundry projects to be used for testing, and enter appropriate values for the environment variables used for the tests you want to run. +Edit the file `azure_ai_assistants_tests.env` located in the folder above. Follow the instructions there on how to set up Azure AI Foundry projects to be used for testing, and enter appropriate values for the environment variables used for the tests you want to run. ## Configure test proxy @@ -49,12 +49,6 @@ To run all tests, type: pytest ``` -To run tests in a particular folder (`tests\connections` for example): - -```bash -pytest tests\connections -``` - ## Additional information See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. 
From 7eabd7dc14377ef3eacb515bc7da0f599e076a6e Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Thu, 17 Apr 2025 13:06:17 -0700 Subject: [PATCH 09/11] Make back compatible --- .../azure/ai/assistants/_patch.py | 18 ++++++++++++++++++ .../azure/ai/assistants/aio/_patch.py | 18 ++++++++++++++++++ .../azure_ai_assistants_tests.env | 10 +++++----- .../sample_assistants_basics_async.py | 10 ++++++++-- .../samples/sample_assistants_basics.py | 2 +- 5 files changed, 50 insertions(+), 8 deletions(-) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py index fc628a0a73db..d7221fbba4bc 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -52,6 +52,24 @@ class AssistantsClient(AssistantsClientGenerated): # pylint: disable=client-accepts-api-version-keyword def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any) -> None: + # TODO: Remove this custom code when 1DP service will be available + if not endpoint: + raise ValueError("Connection string or 1DP endpoint is required") + parts = endpoint.split(";") + # Detect legacy endpoint and build it in old way. + if len(parts) == 4: + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + endpoint = ( + f"{endpoint}/agents/v1.0/subscriptions" + f"/{subscription_id}/resourceGroups/{resource_group_name}/providers" + f"/Microsoft.MachineLearningServices/workspaces/{project_name}" + ) + # Override the credential scope with the legacy one. + kwargs['credential_scopes'] = ["https://management.azure.com/.default"] + # End of legacy endpoints handling. 
super().__init__(endpoint, credential, **kwargs) self._toolset: Dict[str, _models.ToolSet] = {} diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py index b9113a2c6f2f..bd0c32e6fbab 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py @@ -55,6 +55,24 @@ class AssistantsClient(AssistantsClientGenerated): # pylint: disable=client-acc def __init__( self, endpoint: str, credential: Union["AzureKeyCredential", "AsyncTokenCredential"], **kwargs: Any ) -> None: + # TODO: Remove this custom code when 1DP service will be available + if not endpoint: + raise ValueError("Connection string or 1DP endpoint is required") + parts = endpoint.split(";") + # Detect legacy endpoint and build it in old way. + if len(parts) == 4: + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + endpoint = ( + f"{endpoint}/agents/v1.0/subscriptions" + f"/{subscription_id}/resourceGroups/{resource_group_name}/providers" + f"/Microsoft.MachineLearningServices/workspaces/{project_name}" + ) + # Override the credential scope with the legacy one. + kwargs['credential_scopes'] = ["https://management.azure.com/.default"] + # End of legacy endpoints handling. 
super().__init__(endpoint, credential, **kwargs) self._toolset: Dict[str, _models.AsyncToolSet] = {} diff --git a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env index faaf292ebf44..422b6bd2c674 100644 --- a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env +++ b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env @@ -16,10 +16,10 @@ AZURE_AI_ASSISTANTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_ASSISTA ######################################################################################################################## # Agents tests # -AZURE_AI_ASSISTANTS_AGENTS_TESTS_PROJECT_CONNECTION_STRING= -AZURE_AI_ASSISTANTS_AGENTS_TESTS_DATA_PATH= -AZURE_AI_ASSISTANTS_AGENTS_TESTS_STORAGE_QUEUE= -AZURE_AI_ASSISTANTS_AGENTS_TESTS_SEARCH_INDEX_NAME= -AZURE_AI_ASSISTANTS_AGENTS_TESTS_SEARCH_CONNECTION_NAME= +AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_PROJECT_ENDPOINT= +AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_DATA_PATH= +AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_STORAGE_QUEUE= +AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_SEARCH_INDEX_NAME= +AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_SEARCH_CONNECTION_NAME= diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py index 7c4210f635e0..77a0803cb416 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py @@ -22,7 +22,10 @@ import time from azure.ai.assistants.aio import AssistantsClient -from azure.ai.assistants.models import ListSortOrder +from azure.ai.assistants.models import ( + MessageTextContent, + ListSortOrder +) from azure.identity.aio import DefaultAzureCredential import os @@ -66,7 +69,10 @@ async def main() -> None: print("Deleted assistant") messages = await 
assistant_client.list_messages(thread_id=thread.id, order=ListSortOrder.ASCENDING) - print(f"Messages: {messages}") + for data_point in messages.data: + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py index 1e28017bc920..b9d19c473807 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_basics.py @@ -73,7 +73,7 @@ # The messages are following in the reverse order, # we will iterate them and output only text contents. - for data_point in reversed(messages.data): + for data_point in messages.data: last_message_content = data_point.content[-1] if isinstance(last_message_content, MessageTextContent): print(f"{data_point.role}: {last_message_content.text.value}") From 45294e84b4a566754eec5d0abaf7c1c43f4b918c Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Thu, 17 Apr 2025 13:29:06 -0700 Subject: [PATCH 10/11] Fixes --- .vscode/cspell.json | 2 +- sdk/ai/azure-ai-assistants/README.md | 2 +- sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env | 2 +- .../samples/async_samples/sample_assistants_basics_async.py | 2 +- sdk/ai/azure-ai-assistants/tests/test_assistants_client.py | 6 ++---- .../tests/test_assistants_client_async.py | 2 +- 6 files changed, 7 insertions(+), 9 deletions(-) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 9241a88702ea..dc0c2667ff7e 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -1369,7 +1369,7 @@ "fspath", "wttr" ] - } + }, { "filename": "sdk/ai/azure-ai-inference/**", "words": [ diff --git a/sdk/ai/azure-ai-assistants/README.md b/sdk/ai/azure-ai-assistants/README.md index 6027cdbc0ed4..def8835f01fb 100644 --- 
a/sdk/ai/azure-ai-assistants/README.md +++ b/sdk/ai/azure-ai-assistants/README.md @@ -377,7 +377,7 @@ You can enhance your Assistants by defining callback functions as function tools - `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within `create_and_run_process` or `streaming` . These functions will be invoked based on their definitions. - `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_assistant`, without the implementations. The `Run` or `event handler of stream` will raise a `requires_action` status based on the function definitions. Your code must handle this status and call the appropriate functions. -For more details about calling functions by code, refer to [`sample_assistants_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/assistants/sample_assistants_functions.py). +For more details about calling functions by code, refer to [`sample_assistants_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_assistants_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py). 
For more details about requirements and specification of functions, refer to [Function Tool Specifications](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/FunctionTool.md) diff --git a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env index 422b6bd2c674..93eb88f8c620 100644 --- a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env +++ b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env @@ -20,6 +20,6 @@ AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_PROJECT_ENDPOINT= AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_DATA_PATH= AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_STORAGE_QUEUE= AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_SEARCH_INDEX_NAME= -AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_SEARCH_CONNECTION_NAME= +AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_SEARCH_CONNECTION_ID= diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py index 77a0803cb416..f621baeadf04 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py @@ -36,7 +36,7 @@ async def main() -> None: async with DefaultAzureCredential() as creds: assistant_client = AssistantsClient( endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, + credential=creds), ) async with assistant_client: diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py index 8450395b3329..de5ee2c110b5 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py @@ -2881,11 +2881,9 @@ def test_azure_ai_search_tool(self, **kwargs): assert isinstance(client, AssistantsClient) # Create AzureAISearchTool - connection_name = kwargs.pop( - 
"azure_ai_assistants_assistants_tests_search_connection_name", "my-search-connection-name" + conn_id = kwargs.pop( + "azure_ai_assistants_assistants_tests_search_connection_id", "my-search-connection-ID" ) - connection = client.connections.get(connection_name=connection_name) - conn_id = connection.id index_name = kwargs.pop("azure_ai_assistants_assistants_tests_search_index_name", "my-search-index") azure_search_tool = AzureAISearchTool( diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py index af72d85e4b7f..02a9f9901a9a 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py @@ -127,7 +127,7 @@ def create_client(self, **kwargs): credential = self.get_credential(AssistantsClient, is_async=True) # create and return client - client = AssistantsClient.from_connection_string( + client = AssistantsClient( endpoint=endpoint, credential=credential, ) From ce61e889933d3714a4b3322b90633c68ab7c3cb6 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Thu, 17 Apr 2025 22:27:28 -0700 Subject: [PATCH 11/11] Fix linters and record the tests --- eng/.docsettings.yml | 1 + sdk/ai/azure-ai-assistants/README.md | 2 +- sdk/ai/azure-ai-assistants/assets.json | 6 + .../azure/ai/assistants/_patch.py | 2 +- .../azure/ai/assistants/aio/_patch.py | 6 +- .../azure/ai/assistants/models/_patch.py | 2 +- .../assistants/telemetry/_trace_function.py | 2 +- .../azure_ai_assistants_tests.env | 19 +- .../sample_assistants_basics_async.py | 2 +- ...stream_eventhandler_with_bing_grounding.py | 4 +- sdk/ai/azure-ai-assistants/tests/README.md | 2 +- sdk/ai/azure-ai-assistants/tests/conftest.py | 24 +- .../tests/test_assistants_client.py | 627 ++++++++-------- .../tests/test_assistants_client_async.py | 671 +++++++++--------- 14 files changed, 682 insertions(+), 688 deletions(-) 
create mode 100644 sdk/ai/azure-ai-assistants/assets.json diff --git a/eng/.docsettings.yml b/eng/.docsettings.yml index 679c12c2d4a8..b783fb5e37ed 100644 --- a/eng/.docsettings.yml +++ b/eng/.docsettings.yml @@ -16,6 +16,7 @@ omitted_paths: - sdk/vision/azure-ai-vision-imageanalysis/tests/* - sdk/ai/azure-ai-inference/tests/* - sdk/ai/azure-ai-projects/tests/* + - sdk/ai/azure-ai-assistants/tests/* - sdk/storage/azure-storage-extensions/* language: python diff --git a/sdk/ai/azure-ai-assistants/README.md b/sdk/ai/azure-ai-assistants/README.md index def8835f01fb..69c1c39f58c4 100644 --- a/sdk/ai/azure-ai-assistants/README.md +++ b/sdk/ai/azure-ai-assistants/README.md @@ -964,7 +964,7 @@ messages = assistants_client.list_messages(thread_id=thread.id, order=ListSortOr # The messages are following in the reverse order, # we will iterate them and output only text contents. -for data_point in reversed(messages.data): +for data_point in messages.data: last_message_content = data_point.content[-1] if isinstance(last_message_content, MessageTextContent): print(f"{data_point.role}: {last_message_content.text.value}") diff --git a/sdk/ai/azure-ai-assistants/assets.json b/sdk/ai/azure-ai-assistants/assets.json new file mode 100644 index 000000000000..823831a56f44 --- /dev/null +++ b/sdk/ai/azure-ai-assistants/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "python", + "TagPrefix": "python/ai/azure-ai-assistants", + "Tag": "python/ai/azure-ai-assistants_a471817af2" +} diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py index d7221fbba4bc..d75f6dd1d754 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/_patch.py @@ -67,7 +67,7 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCre f"/{subscription_id}/resourceGroups/{resource_group_name}/providers" 
f"/Microsoft.MachineLearningServices/workspaces/{project_name}" ) - # Override the credential scope with the legacy one. + # Override the credential scope with the legacy one. kwargs['credential_scopes'] = ["https://management.azure.com/.default"] # End of legacy endpoints handling. super().__init__(endpoint, credential, **kwargs) diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py index bd0c32e6fbab..a51ea8cfefaf 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/aio/_patch.py @@ -8,7 +8,7 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio +import asyncio # pylint: disable = do-not-import-asyncio import io import logging import os @@ -70,7 +70,7 @@ def __init__( f"/{subscription_id}/resourceGroups/{resource_group_name}/providers" f"/Microsoft.MachineLearningServices/workspaces/{project_name}" ) - # Override the credential scope with the legacy one. + # Override the credential scope with the legacy one. kwargs['credential_scopes'] = ["https://management.azure.com/.default"] # End of legacy endpoints handling. super().__init__(endpoint, credential, **kwargs) @@ -1787,7 +1787,7 @@ async def upload_file( :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. :type file_path: Optional[str] :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. :type purpose: Union[str, _models.FilePurpose, None] :keyword filename: The name of the file. 
:type filename: Optional[str] diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py index ce4f475b6899..84a1440612ed 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/models/_patch.py @@ -7,7 +7,7 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import asyncio +import asyncio # pylint: disable = do-not-import-asyncio import inspect import itertools import json diff --git a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py index 1890a6f1e88d..0ac5ea43c13f 100644 --- a/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py +++ b/sdk/ai/azure-ai-assistants/azure/ai/assistants/telemetry/_trace_function.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. # ------------------------------------ import functools -import asyncio +import asyncio # pylint: disable = do-not-import-asyncio from typing import Any, Callable, Optional, Dict try: diff --git a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env index 93eb88f8c620..3c74e991b06b 100644 --- a/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env +++ b/sdk/ai/azure-ai-assistants/azure_ai_assistants_tests.env @@ -5,21 +5,10 @@ # but do not commit these changes to the repository. # - -######################################################################################################################## -# Telemetry tests -# -# To run telemetry tests you need an AI Foundry project with a connected Application Insights resource. 
-# -AZURE_AI_ASSISTANTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_ASSISTANTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} - ######################################################################################################################## # Agents tests # -AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_PROJECT_ENDPOINT= -AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_DATA_PATH= -AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_STORAGE_QUEUE= -AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_SEARCH_INDEX_NAME= -AZURE_AI_ASSISTANTS_ASSISTANTS_TESTS_SEARCH_CONNECTION_ID= - - +AZURE_AI_ASSISTANTS_TESTS_PROJECT_ENDPOINT= +AZURE_AI_ASSISTANTS_TESTS_DATA_PATH= +AZURE_AI_ASSISTANTS_TESTS_STORAGE_QUEUE= +AZURE_AI_ASSISTANTS_TESTS_SEARCH_INDEX_NAME= diff --git a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py index f621baeadf04..77a0803cb416 100644 --- a/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py +++ b/sdk/ai/azure-ai-assistants/samples/async_samples/sample_assistants_basics_async.py @@ -36,7 +36,7 @@ async def main() -> None: async with DefaultAzureCredential() as creds: assistant_client = AssistantsClient( endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds), + credential=creds, ) async with assistant_client: diff --git a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py index aa8c12eca80c..a495b46dd07b 100644 --- a/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py +++ b/sdk/ai/azure-ai-assistants/samples/sample_assistants_stream_eventhandler_with_bing_grounding.py @@ -20,7 +20,7 @@ 1) PROJECT_ENDPOINT - the Azure AI Assistants endpoint. 
2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) BING_CONNECTION_NAME - The connection name of the Bing connection, as found in the "Connected resources" tab + 3) AZURE_BING_CONNECTION_ID - The connection id of the Bing connection, as found in the "Connected resources" tab in your Azure AI Foundry project. """ @@ -85,7 +85,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: with assistants_client: - bing_connection_id = os.environ["AZURE_BING_CONECTION_ID"] + bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] print(f"Bing Connection ID: {bing_connection_id}") # Initialize assistant bing tool and add the connection id diff --git a/sdk/ai/azure-ai-assistants/tests/README.md b/sdk/ai/azure-ai-assistants/tests/README.md index 461a9fdd36b6..c49ab7e61a82 100644 --- a/sdk/ai/azure-ai-assistants/tests/README.md +++ b/sdk/ai/azure-ai-assistants/tests/README.md @@ -5,7 +5,7 @@ The instructions below are for running tests locally, on a Windows machine, agai ## Build and install the client library - Clone or download this sample repository. -- Open a command prompt window in the folder `sdk\ai\azure-ai-projects` +- Open a command prompt window in the folder `sdk\ai\azure-ai-assistants` - Install development dependencies: ```bash pip install -r dev_requirements.txt diff --git a/sdk/ai/azure-ai-assistants/tests/conftest.py b/sdk/ai/azure-ai-assistants/tests/conftest.py index 07dbb1f70ef3..e1f9eaa3a08b 100644 --- a/sdk/ai/azure-ai-assistants/tests/conftest.py +++ b/sdk/ai/azure-ai-assistants/tests/conftest.py @@ -3,34 +3,21 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
# ------------------------------------ -import os - import pytest from devtools_testutils import ( + add_general_regex_sanitizer, + add_body_key_sanitizer, remove_batch_sanitizers, get_credential, test_proxy, - add_general_regex_sanitizer, - add_body_key_sanitizer, ) +from azure.ai.assistants import AssistantsClient from dotenv import load_dotenv, find_dotenv if not load_dotenv(find_dotenv(filename="azure_ai_assistants_tests.env"), override=True): print("Failed to apply environment variables for azure-ai-projects tests.") -def pytest_collection_modifyitems(items): - if os.environ.get("AZURE_TEST_RUN_LIVE") == "true": - return - for item in items: - if "tests\\evaluation" in item.fspath.strpath or "tests/evaluation" in item.fspath.strpath: - item.add_marker( - pytest.mark.skip( - reason="Skip running Evaluations tests in PR pipeline until we can sort out the failures related to AI Foundry project settings" - ) - ) - - class SanitizedValues: SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000" RESOURCE_GROUP_NAME = "00000" @@ -148,6 +135,11 @@ def azure_workspace_triad_sanitizer(): json_path="data_source.uri", value="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/00000000000/product_info_1.md", ) + + add_body_key_sanitizer( + json_path="tool_resources.azure_ai_search.indexes[*].index_connection_id", + value="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/someindex" + ) # Sanitize API key from service response (/tests/connections) add_body_key_sanitizer(json_path="properties.credentials.key", value="Sanitized") diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py index de5ee2c110b5..4b7d0bd340a5 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py +++ 
b/sdk/ai/azure-ai-assistants/tests/test_assistants_client.py @@ -19,10 +19,6 @@ import user_functions from azure.ai.assistants import AssistantsClient -from azure.ai.assistants.models import ( - ThreadMessage, - RunStep, -) from azure.core.exceptions import HttpResponseError from devtools_testutils import ( AzureRecordedTestCase, @@ -57,6 +53,8 @@ RunStepFileSearchToolCallResult, RunStepFileSearchToolCallResults, RunStatus, + RunStep, + ThreadMessage, ThreadMessageOptions, ThreadRun, ToolResources, @@ -86,11 +84,14 @@ assistantClientPreparer = functools.partial( EnvironmentVariableLoader, "azure_ai_assistants", - azure_ai_assistants_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name", - azure_ai_assistants_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", - azure_ai_assistants_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", - azure_ai_assistants_assistants_tests_search_index_name="sample_index", - azure_ai_assistants_assistants_tests_search_connection_name="search_connection_name", + # TODO: uncomment this endpoint when re running with 1DP + #azure_ai_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name", + # TODO: remove this endpoint when re running with 1DP + azure_ai_assistants_tests_project_endpoint="https://Sanitized.api.azureml.ms/agents/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/", + 
azure_ai_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", + azure_ai_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", + azure_ai_assistants_tests_search_index_name="sample_index", + azure_ai_assistants_tests_search_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/someindex", ) @@ -130,7 +131,7 @@ class TestAssistantClient(AzureRecordedTestCase): # helper function: create client using environment variables def create_client(self, **kwargs): # fetch environment variables - endpoint = kwargs.pop("azure_ai_assistants_assistants_tests_project_endpoint") + endpoint = kwargs.pop("azure_ai_assistants_tests_project_endpoint") credential = self.get_credential(AssistantsClient, is_async=False) # create and return client @@ -232,9 +233,9 @@ def _do_test_create_assistant(self, client, body, functions): # create assistant if body: - assistant = client.assistants.create_assistant(body=body) + assistant = client.create_assistant(body=body) elif functions: - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -244,7 +245,7 @@ def _do_test_create_assistant(self, client, body, functions): assert assistant.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) else: - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -253,7 +254,7 @@ def _do_test_create_assistant(self, client, body, functions): 
assert assistant.model == "gpt-4o" # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -292,7 +293,7 @@ def _do_test_update_assistant(self, client, use_body, use_io): """helper function for updating assistant with different body inputs""" # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -303,14 +304,14 @@ def _do_test_update_assistant(self, client, use_body, use_io): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - assistant = client.assistants.update_assistant(assistant_id=assistant.id, body=body) + assistant = client.update_assistant(assistant_id=assistant.id, body=body) else: - assistant = client.assistants.update_assistant(assistant_id=assistant.id, name="my-assistant2") + assistant = client.update_assistant(assistant_id=assistant.id, name="my-assistant2") assert assistant.name assert assistant.name == "my-assistant2" # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -320,32 +321,32 @@ def test_assistant_list(self, **kwargs): """test list assistants""" # create client and ensure there are no previous assistants with self.create_client(**kwargs) as client: - list_length = client.assistants.list_assistants().data.__len__() + list_length = client.list_assistants().data.__len__() # create assistant and check that it appears in the list - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) - assert client.assistants.list_assistants().data.__len__() == list_length + 1 - assert 
client.assistants.list_assistants().data[0].id == assistant.id + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant.id # create second assistant and check that it appears in the list - assistant2 = client.assistants.create_assistant( + assistant2 = client.create_assistant( model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant" ) - assert client.assistants.list_assistants().data.__len__() == list_length + 2 + assert client.list_assistants().data.__len__() == list_length + 2 assert ( - client.assistants.list_assistants().data[0].id == assistant.id - or client.assistants.list_assistants().data[1].id == assistant.id + client.list_assistants().data[0].id == assistant.id + or client.list_assistants().data[1].id == assistant.id ) # delete assistants and check list - client.assistants.delete_assistant(assistant.id) - assert client.assistants.list_assistants().data.__len__() == list_length + 1 - assert client.assistants.list_assistants().data[0].id == assistant2.id + client.delete_assistant(assistant.id) + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant2.id - client.assistants.delete_assistant(assistant2.id) - assert client.assistants.list_assistants().data.__len__() == list_length + client.delete_assistant(assistant2.id) + assert client.list_assistants().data.__len__() == list_length print("Deleted assistants") # ********************************************************************************** @@ -364,20 +365,20 @@ def test_create_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = 
client.assistants.create_thread() + thread = client.create_thread() assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -424,9 +425,9 @@ def _do_test_create_thread(self, client, body): """helper function for creating thread with different body inputs""" # create thread if body: - thread = client.assistants.create_thread(body=body) + thread = client.create_thread(body=body) else: - thread = client.assistants.create_thread(metadata={"key1": "value1", "key2": "value2"}) + thread = client.create_thread(metadata={"key1": "value1", "key2": "value2"}) assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) @@ -441,25 +442,25 @@ def test_get_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # get thread - thread2 = client.assistants.get_thread(thread.id) + thread2 = client.get_thread(thread.id) assert thread2.id assert thread.id == thread2.id print("Got thread, thread ID", thread2.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -471,23 +472,23 @@ def test_update_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = 
client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # update thread - thread = client.assistants.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) + thread = client.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) assert thread.metadata == {"key1": "value1", "key2": "value2"} # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -503,7 +504,7 @@ def test_update_thread_with_metadata(self, **kwargs): metadata = {"key1": "value1", "key2": "value2"} # create thread - thread = client.assistants.create_thread(metadata=metadata) + thread = client.create_thread(metadata=metadata) assert thread.id print("Created thread, thread ID", thread.id) @@ -511,7 +512,7 @@ def test_update_thread_with_metadata(self, **kwargs): metadata2 = {"key1": "value1", "key2": "newvalue2"} # update thread - thread = client.assistants.update_thread(thread.id, metadata=metadata2) + thread = client.update_thread(thread.id, metadata=metadata2) assert thread.metadata == {"key1": "value1", "key2": "newvalue2"} @assistantClientPreparer() @@ -544,16 +545,16 @@ def test_update_thread_with_iobytes(self, **kwargs): def _do_test_update_thread(self, client, body): """helper function for updating thread with different body inputs""" # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # update thread if body: - thread = client.assistants.update_thread(thread.id, body=body) + thread = client.update_thread(thread.id, body=body) else: metadata = {"key1": 
"value1", "key2": "value2"} - thread = client.assistants.update_thread(thread.id, metadata=metadata) + thread = client.update_thread(thread.id, metadata=metadata) assert thread.metadata == {"key1": "value1", "key2": "value2"} @assistantClientPreparer() @@ -565,26 +566,26 @@ def test_delete_thread(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) # delete thread - deletion_status = client.assistants.delete_thread(thread.id) + deletion_status = client.delete_thread(thread.id) assert deletion_status.id == thread.id assert deletion_status.deleted == True print("Deleted thread, thread ID", deletion_status.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # # ********************************************************************************** @@ -631,15 +632,15 @@ def _do_test_create_message(self, client, body): """helper function for creating message with different body inputs""" # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message if body: - message = client.assistants.create_message(thread_id=thread.id, body=body) + message = client.create_message(thread_id=thread.id, body=body) else: - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id @@ -654,36 +655,36 @@ def 
test_create_multiple_messages(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create messages - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) - message2 = client.assistants.create_message( + message2 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me another joke" ) assert message2.id print("Created message, message ID", message2.id) - message3 = client.assistants.create_message( + message3 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third joke" ) assert message3.id print("Created message, message ID", message3.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -695,47 +696,47 @@ def test_list_messages(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # check that initial message list is empty - messages0 = 
client.assistants.list_messages(thread_id=thread.id) + messages0 = client.list_messages(thread_id=thread.id) print(messages0.data) assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = client.assistants.create_message( + message1 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message1.id print("Created message, message ID", message1.id) - messages1 = client.assistants.list_messages(thread_id=thread.id) + messages1 = client.list_messages(thread_id=thread.id) assert messages1.data.__len__() == 1 assert messages1.data[0].id == message1.id - message2 = client.assistants.create_message( + message2 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me another joke" ) assert message2.id print("Created message, message ID", message2.id) - messages2 = client.assistants.list_messages(thread_id=thread.id) + messages2 = client.list_messages(thread_id=thread.id) assert messages2.data.__len__() == 2 assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id - message3 = client.assistants.create_message( + message3 = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third joke" ) assert message3.id print("Created message, message ID", message3.id) - messages3 = client.assistants.list_messages(thread_id=thread.id) + messages3 = client.list_messages(thread_id=thread.id) assert messages3.data.__len__() == 3 assert ( messages3.data[0].id == message3.id @@ -744,7 +745,7 @@ def test_list_messages(self, **kwargs): ) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -756,32 +757,32 @@ def test_get_message(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( 
model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # get message - message2 = client.assistants.get_message(thread_id=thread.id, message_id=message.id) + message2 = client.get_message(thread_id=thread.id, message_id=message.id) assert message2.id assert message.id == message2.id print("Got message, message ID", message.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -821,20 +822,20 @@ def test_update_message_with_iobytes(self, **kwargs): def _do_test_update_message(self, client, body): """helper function for updating message with different body inputs""" # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) # update message if body: - message = client.assistants.update_message(thread_id=thread.id, message_id=message.id, body=body) + message = client.update_message(thread_id=thread.id, message_id=message.id, body=body) else: - message = client.assistants.update_message( + message = client.update_message( thread_id=thread.id, message_id=message.id, 
metadata={"key1": "value1", "key2": "value2"} ) assert message.metadata == {"key1": "value1", "key2": "value2"} @@ -854,24 +855,24 @@ def test_create_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -905,14 +906,14 @@ def _do_test_create_run(self, client, use_body, use_io=False): """helper function for creating run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -922,9 +923,9 @@ def _do_test_create_run(self, client, use_body, use_io=False): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - run = client.assistants.create_run(thread_id=thread.id, body=body) + run = client.create_run(thread_id=thread.id, body=body) else: - run = client.assistants.create_run( + run = client.create_run( 
thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -932,7 +933,7 @@ def _do_test_create_run(self, client, use_body, use_io=False): print("Created run, run ID", run.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -944,30 +945,30 @@ def test_get_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # get run - run2 = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run2 = client.get_run(thread_id=thread.id, run_id=run.id) assert run2.id assert run.id == run2.id print("Got run, run ID", run2.id) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -979,26 +980,26 @@ def test_run_status(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = 
client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1016,14 +1017,14 @@ def test_run_status(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) print("Run status:", run.status) assert run.status in ["cancelled", "failed", "completed", "expired"] print("Run completed with status:", run.status) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1035,19 +1036,19 @@ def test_update_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1055,14 +1056,14 @@ def test_update_run(self, **kwargs): while run.status in 
["queued", "in_progress"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) - run = client.assistants.update_run( + run = client.get_run(thread_id=thread.id, run_id=run.id) + run = client.update_run( thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.metadata == {"key1": "value1", "key2": "value2"} # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1102,19 +1103,19 @@ def test_update_run_with_iobytes(self, **kwargs): def _do_test_update_run(self, client, body): """helper function for updating run with different body inputs""" # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = client.assistants.create_run( + run = client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1124,17 +1125,17 @@ def _do_test_update_run(self, client, body): # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) if body: - run = client.assistants.update_run(thread_id=thread.id, run_id=run.id, body=body) + run = client.update_run(thread_id=thread.id, run_id=run.id, body=body) else: - run = client.assistants.update_run( + run = client.update_run( thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "newvalue2"} ) assert run.metadata == 
{"key1": "value1", "key2": "newvalue2"} # delete assistant - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1176,24 +1177,24 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): # toolset.add(code_interpreter) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1215,7 +1216,7 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1223,7 +1224,7 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - client.assistants.cancel_run(thread_id=thread.id, 
run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run @@ -1235,9 +1236,9 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - client.assistants.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, body=body) + client.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, body=body) else: - client.assistants.submit_tool_outputs_to_run( + client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs ) @@ -1247,7 +1248,7 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): # check that messages used the tool print("Messages: ") - messages = client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] # if user_functions_live is used, the time will be the current time # since user_functions_recording is used, the time will be 12:30 @@ -1255,7 +1256,7 @@ def _do_test_submit_tool_outputs_to_run(self, client, use_body, use_io): print("Used tool_outputs") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1290,7 +1291,7 @@ def _wait_for_run(self, client, run, timeout=1): """Wait while run will get to terminal state.""" while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS, RunStatus.REQUIRES_ACTION]: time.sleep(timeout) - run = client.assistants.get_run(thread_id=run.thread_id, run_id=run.id) + run = client.get_run(thread_id=run.thread_id, run_id=run.id) return run def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_run, **kwargs): @@ -1309,7 +1310,7 @@ def _do_test_create_parallel_thread_runs(self, use_parallel_runs, 
create_thread_ toolset = ToolSet() toolset.add(functions) toolset.add(code_interpreter) - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4", name="my-assistant", instructions="You are helpful assistant", @@ -1323,16 +1324,16 @@ def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_ ) if create_thread_run: - run = client.assistants.create_thread_and_run( + run = client.create_thread_and_run( assistant_id=assistant.id, parallel_tool_calls=use_parallel_runs, ) run = self._wait_for_run(client, run) else: - thread = client.assistants.create_thread(messages=[message]) + thread = client.create_thread(messages=[message]) assert thread.id - run = client.assistants.create_and_process_run( + run = client.create_and_process_run( thread_id=thread.id, assistant_id=assistant.id, parallel_tool_calls=use_parallel_runs, @@ -1341,8 +1342,8 @@ def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_ assert run.status == RunStatus.COMPLETED, run.last_error.message assert run.parallel_tool_calls == use_parallel_runs - assert client.assistants.delete_assistant(assistant.id).deleted, "The assistant was not deleted" - messages = client.assistants.list_messages(thread_id=run.thread_id) + assert client.delete_assistant(assistant.id).deleted, "The assistant was not deleted" + messages = client.list_messages(thread_id=run.thread_id) assert len(messages.data), "The data from the assistant was not received." 
""" @@ -1356,38 +1357,38 @@ def test_cancel_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # check status and cancel assert run.status in ["queued", "in_progress", "requires_action"] - client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) while run.status in ["queued", "cancelling"]: time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) print("Current run status:", run.status) assert run.status == "cancelled" print("Run cancelled") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") client.close() """ @@ -1423,7 +1424,7 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): """helper function for creating thread and run with different body inputs""" # create assistant - 
assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -1438,10 +1439,10 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - run = client.assistants.create_thread_and_run(body=body) + run = client.create_thread_and_run(body=body) assert run.metadata == {"key1": "value1", "key2": "value2"} else: - run = client.assistants.create_thread_and_run(assistant_id=assistant.id) + run = client.create_thread_and_run(assistant_id=assistant.id) # create thread and run assert run.id @@ -1449,7 +1450,7 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): print("Created run, run ID", run.id) # get thread - thread = client.assistants.get_thread(run.thread_id) + thread = client.get_thread(run.thread_id) assert thread.id print("Created thread, thread ID", thread.id) @@ -1467,7 +1468,7 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) # assert run.status in ["queued", "in_progress", "requires_action", "completed"] print("Run status:", run.status) @@ -1475,7 +1476,7 @@ def _do_test_create_thread_and_run(self, client, use_body, use_io): print("Run completed") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1489,28 +1490,28 @@ def test_list_run_step(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", 
instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) - steps = client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = client.list_run_steps(thread_id=thread.id, run_id=run.id) # commenting assertion out below, do we know exactly when run starts? 
# assert steps['data'].__len__() == 0 @@ -1519,7 +1520,7 @@ def test_list_run_step(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) assert run.status in [ "queued", "in_progress", @@ -1528,7 +1529,7 @@ def test_list_run_step(self, **kwargs): ] print("Run status:", run.status) if run.status != "queued": - steps = client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = client.list_run_steps(thread_id=thread.id, run_id=run.id) print("Steps:", steps) assert steps["data"].__len__() > 0 @@ -1536,7 +1537,7 @@ def test_list_run_step(self, **kwargs): print("Run completed") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") client.close() @@ -1550,26 +1551,26 @@ def test_get_run_step(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
) assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1583,7 +1584,7 @@ def test_get_run_step(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) if run.status == "failed": assert run.last_error print(run.last_error) @@ -1597,14 +1598,14 @@ def test_get_run_step(self, **kwargs): print("Run status:", run.status) # list steps, check that get_run_step works with first step_id - steps = client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = client.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 step = steps["data"][0] - get_step = client.assistants.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + get_step = client.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) assert step == get_step # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # # ********************************************************************************** @@ -1623,26 +1624,26 @@ def test_create_stream(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # 
create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" ) assert message.id print("Created message, message ID", message.id) # create stream - with client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + with client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: for event_type, event_data, _ in stream: assert ( isinstance(event_data, (MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep)) @@ -1650,7 +1651,7 @@ def test_create_stream(self, **kwargs): ) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # TODO create_stream doesn't work with body -- fails on for event_type, event_data : TypeError: 'ThreadRun' object is not an iterator @@ -1664,19 +1665,19 @@ def test_create_stream_with_body(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
) assert message.id @@ -1686,7 +1687,7 @@ def test_create_stream_with_body(self, **kwargs): body = {"assistant_id": assistant.id, "stream": True} # create stream - with client.assistants.create_stream(thread_id=thread.id, body=body, stream=True) as stream: + with client.create_stream(thread_id=thread.id, body=body, stream=True) as stream: for event_type, event_data, _ in stream: print("event type: event data") @@ -1697,7 +1698,7 @@ def test_create_stream_with_body(self, **kwargs): ) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1710,19 +1711,19 @@ def test_create_stream_with_iobytes(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
) assert message.id @@ -1733,7 +1734,7 @@ def test_create_stream_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create stream - with client.assistants.create_stream( + with client.create_stream( thread_id=thread.id, body=io.BytesIO(binary_body), stream=True ) as stream: for event_type, event_data, _ in stream: @@ -1743,7 +1744,7 @@ def test_create_stream_with_iobytes(self, **kwargs): ) # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -1787,7 +1788,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): # toolset.add(code_interpreter) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -1798,17 +1799,17 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create stream - with client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + with client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: for event_type, event_data, _ in stream: # Check if tools are needed @@ -1821,7 +1822,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): if not tool_calls: print("No tool calls provided - cancelling run") - 
client.assistants.cancel_run(thread_id=thread.id, run_id=event_data.id) + client.cancel_run(thread_id=thread.id, run_id=event_data.id) break # submit tool outputs to stream @@ -1834,7 +1835,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): if use_io: binary_body = json.dumps(body).encode("utf-8") body = io.BytesIO(binary_body) - client.assistants.submit_tool_outputs_to_stream( + client.submit_tool_outputs_to_stream( thread_id=thread.id, run_id=event_data.id, body=body, @@ -1842,7 +1843,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): stream=True, ) else: - client.assistants.submit_tool_outputs_to_stream( + client.submit_tool_outputs_to_stream( thread_id=thread.id, run_id=event_data.id, tool_outputs=tool_outputs, @@ -1859,7 +1860,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): print("Stream processing completed") # check that messages used the tool - messages = client.assistants.list_messages(thread_id=thread.id) + messages = client.list_messages(thread_id=thread.id) print("Messages: ", messages) tool_message = messages["data"][0]["content"][0]["text"]["value"] # TODO if testing live, uncomment these @@ -1874,7 +1875,7 @@ def _do_test_submit_tool_outputs_to_stream(self, client, use_body, use_io): print("Used tool_outputs") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # client.close() @@ -2037,7 +2038,7 @@ def _test_tools_with_different_functions( toolset.add(functions) # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -2047,17 +2048,17 @@ def _test_tools_with_different_functions( print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() 
assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content=content) + message = client.create_message(thread_id=thread.id, role="user", content=content) assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -2079,7 +2080,7 @@ def _test_tools_with_different_functions( ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -2087,14 +2088,14 @@ def _test_tools_with_different_functions( tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run tool_outputs = toolset.execute_tool_calls(tool_calls) print("Tool outputs:", tool_outputs) if tool_outputs: - client.assistants.submit_tool_outputs_to_run( + client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs ) @@ -2103,7 +2104,7 @@ def _test_tools_with_different_functions( print("Run completed with status:", run.status) # check that messages used the tool - messages = client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = client.list_messages(thread_id=thread.id, run_id=run.id) print("Messages: ", messages) tool_message = messages["data"][0]["content"][0]["text"]["value"] if expected_values: @@ -2119,7 +2120,7 @@ def 
_test_tools_with_different_functions( print("Used tool_outputs") # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") # # ********************************************************************************** @@ -2142,7 +2143,7 @@ def test_create_assistant_with_invalid_code_interpreter_tool_resource(self, **kw exception_message = "" try: - client.assistants.create_assistant( + client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -2175,7 +2176,7 @@ def test_create_assistant_with_invalid_file_search_tool_resource(self, **kwargs) exception_message = "" try: - client.assistants.create_assistant( + client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", tools=[], tool_resources=tool_resources ) except: @@ -2202,7 +2203,7 @@ def test_create_assistant_with_invalid_file_search_tool_resource(self, **kwargs) exception_message = "" try: - client.assistants.create_assistant( + client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -2233,10 +2234,10 @@ def test_file_search_add_vector_store(self, **kwargs): # Adjust the file path to be relative to the test file location file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") - openai_file = client.assistants.upload_file_and_poll(file_path=file_path, purpose="assistants") + openai_file = client.upload_file_and_poll(file_path=file_path, purpose="assistants") print(f"Uploaded file, file ID: {openai_file.id}") - openai_vectorstore = client.assistants.create_vector_store_and_poll( + openai_vectorstore = client.create_vector_store_and_poll( file_ids=[openai_file.id], name="my_vectorstore" ) print(f"Created vector store, vector store ID: {openai_vectorstore.id}") @@ -2248,7 +2249,7 @@ def test_file_search_add_vector_store(self, **kwargs): 
print("Created toolset and added file search") # create assistant - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id @@ -2261,7 +2262,7 @@ def test_file_search_add_vector_store(self, **kwargs): assert assistant.tool_resources["file_search"]["vector_store_ids"][0] == openai_vectorstore.id # delete assistant and close client - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") client.close() @@ -2277,7 +2278,7 @@ def test_create_vector_store_and_poll(self, **kwargs): # Create vector store body = {"name": "test_vector_store", "metadata": {"key1": "value1", "key2": "value2"}} try: - vector_store = client.assistants.create_vector_store_and_poll(body=body, sleep_interval=2) + vector_store = client.create_vector_store_and_poll(body=body, sleep_interval=2) # check correct creation assert isinstance(vector_store, VectorStore) assert vector_store.name == "test_vector_store" @@ -2306,7 +2307,7 @@ def test_create_vector_store(self, **kwargs): # Create vector store body = {"name": "test_vector_store", "metadata": {"key1": "value1", "key2": "value2"}} try: - vector_store = client.assistants.create_vector_store(body=body) + vector_store = client.create_vector_store(body=body) print("here") print(vector_store) # check correct creation @@ -2364,11 +2365,11 @@ def _do_test_create_vector_store(self, streaming, **kwargs): else: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = ai_client.assistants.create_vector_store_and_poll( + vector_store = ai_client.create_vector_store_and_poll( file_ids=file_ids, data_sources=ds, name="my_vectorstore" ) assert vector_store.id @@ -2385,7 
+2386,7 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2398,7 +2399,7 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ] ) file_search = FileSearchTool() - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2407,19 +2408,19 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert assistant.id - thread = ai_client.assistants.create_thread(tool_resources=ToolResources(file_search=fs)) + thread = ai_client.create_thread(tool_resources=ToolResources(file_search=fs)) assert thread.id # create message - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." 
- run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = ai_client.assistants.list_messages(thread.id) + messages = ai_client.list_messages(thread.id) assert len(messages) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) ai_client.close() @assistantClientPreparer() @@ -2459,12 +2460,12 @@ def _do_test_create_vector_store_add_file(self, streaming, **kwargs): ds = None else: ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type="uri_asset", ) - vector_store = ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file = ai_client.assistants.create_vector_store_file( + vector_store_file = ai_client.create_vector_store_file( vector_store_id=vector_store.id, data_source=ds, file_id=file_id ) assert vector_store_file.id @@ -2511,13 +2512,13 @@ def _do_test_create_vector_store_batch(self, streaming, **kwargs): file_ids = None ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file_batch = ai_client.assistants.create_vector_store_file_batch_and_poll( + vector_store_file_batch = 
ai_client.create_vector_store_file_batch_and_poll( vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids ) assert vector_store_file_batch.id @@ -2529,7 +2530,7 @@ def _test_file_search( ) -> None: """Test the file search""" file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2538,18 +2539,18 @@ def _test_file_search( ) assert assistant.id - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id # create message - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." if streaming: thread_run = None - with ai_client.assistants.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: + with ai_client.create_stream(thread_id=thread.id, assistant_id=assistant.id) as stream: for _, event_data, _ in stream: if isinstance(event_data, ThreadRun): thread_run = event_data @@ -2562,16 +2563,16 @@ def _test_file_search( event_data.delta.step_details.tool_calls[0].file_search, RunStepFileSearchToolCallResults ) assert thread_run is not None - run = ai_client.assistants.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) + run = ai_client.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) assert run is not None else: - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) - ai_client.assistants.delete_vector_store(vector_store.id) + ai_client.delete_vector_store(vector_store.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = 
ai_client.assistants.list_messages(thread.id) + messages = ai_client.list_messages(thread.id) assert len(messages) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) self._remove_file_maybe(file_id, ai_client) ai_client.close() @@ -2581,7 +2582,7 @@ def _test_file_search( def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_message_attachment(data_source=ds, **kwargs) @@ -2601,14 +2602,14 @@ def _do_test_message_attachment(self, **kwargs): file_id = self._get_file_id_maybe(ai_client, **kwargs) # Create assistant with file search tool - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", ) assert assistant.id, "Assistant was not created" - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id, "The thread was not created." # Create a message with the file search attachment @@ -2621,7 +2622,7 @@ def _do_test_message_attachment(self, **kwargs): CodeInterpreterTool().definitions[0], ], ) - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?", @@ -2629,12 +2630,12 @@ def _do_test_message_attachment(self, **kwargs): ) assert message.id, "The message was not created." - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." 
self._remove_file_maybe(file_id, ai_client) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) - messages = ai_client.assistants.list_messages(thread_id=thread.id) + messages = ai_client.list_messages(thread_id=thread.id) assert len(messages), "No messages were created" ai_client.close() @@ -2644,7 +2645,7 @@ def _do_test_message_attachment(self, **kwargs): def test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @@ -2665,7 +2666,7 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll( + file = ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." @@ -2677,7 +2678,7 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", @@ -2686,20 +2687,20 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): ) assert assistant.id, "Assistant was not created" - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id, "The thread was not created." 
- message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" - ai_client.assistants.delete_assistant(assistant.id) - assert len(ai_client.assistants.list_messages(thread_id=thread.id)), "No messages were created" + ai_client.delete_assistant(assistant.id) + assert len(ai_client.list_messages(thread_id=thread.id)), "No messages were created" ai_client.close() @assistantClientPreparer() @@ -2708,7 +2709,7 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -2729,7 +2730,7 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll( + file = ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -2741,7 +2742,7 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", @@ -2749,20 +2750,20 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): ) assert assistant.id, "Assistant was not created" - thread = ai_client.assistants.create_thread(tool_resources=tr) + thread = ai_client.create_thread(tool_resources=tr) assert thread.id, "The thread was not created." - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." 
self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" - ai_client.assistants.delete_assistant(assistant.id) - messages = ai_client.assistants.list_messages(thread.id) + ai_client.delete_assistant(assistant.id) + messages = ai_client.list_messages(thread.id) assert len(messages) ai_client.close() @@ -2777,7 +2778,7 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2790,7 +2791,7 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ] ) file_search = FileSearchTool() - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2799,19 +2800,19 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ) assert assistant.id - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id # create message - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." 
- run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = ai_client.assistants.list_messages(thread.id) + messages = ai_client.list_messages(thread.id) assert len(messages) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) ai_client.close() @assistantClientPreparer() @@ -2820,7 +2821,7 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_attachment_in_thread_azure(data_source=ds, **kwargs) @@ -2840,7 +2841,7 @@ def _do_test_create_attachment_in_thread_azure(self, **kwargs): file_id = self._get_file_id_maybe(ai_client, **kwargs) file_search = FileSearchTool() - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2862,14 +2863,14 @@ def _do_test_create_attachment_in_thread_azure(self, **kwargs): content="What does the attachment say?", attachments=[attachment], ) - thread = ai_client.assistants.create_thread(messages=[message]) + thread = ai_client.create_thread(messages=[message]) assert thread.id - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: 
{run.last_error}" - messages = ai_client.assistants.list_messages(thread.id) + messages = ai_client.list_messages(thread.id) assert len(messages) - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) ai_client.close() @assistantClientPreparer() @@ -2882,9 +2883,9 @@ def test_azure_ai_search_tool(self, **kwargs): # Create AzureAISearchTool conn_id = kwargs.pop( - "azure_ai_assistants_assistants_tests_search_connection_id", "my-search-connection-ID" + "azure_ai_assistants_tests_search_connection_id", "my-search-connection-ID" ) - index_name = kwargs.pop("azure_ai_assistants_assistants_tests_search_index_name", "my-search-index") + index_name = kwargs.pop("azure_ai_assistants_tests_search_index_name", "my-search-index") azure_search_tool = AzureAISearchTool( index_connection_id=conn_id, @@ -2892,7 +2893,7 @@ def test_azure_ai_search_tool(self, **kwargs): ) # Create assistant with the search tool - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4o", name="search-assistant", instructions="You are a helpful assistant that can search for information using Azure AI Search.", @@ -2903,27 +2904,27 @@ def test_azure_ai_search_tool(self, **kwargs): print(f"Created assistant with ID: {assistant.id}") # Create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print(f"Created thread with ID: {thread.id}") # Create message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Search for information about iPhone prices." 
) assert message.id print(f"Created message with ID: {message.id}") # Create and process run - run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, run.last_error.message # List messages to verify tool was used - messages = client.assistants.list_messages(thread_id=thread.id) + messages = client.list_messages(thread_id=thread.id) assert len(messages.data) > 0 # Clean up - client.assistants.delete_assistant(assistant.id) + client.delete_assistant(assistant.id) print("Deleted assistant") @assistantClientPreparer() @@ -2947,18 +2948,18 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw with self.create_client(**kwargs) as ai_client: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = ai_client.assistants.create_vector_store_and_poll( + vector_store = ai_client.create_vector_store_and_poll( file_ids=[], data_sources=ds, name="my_vectorstore" ) - # vector_store = await ai_client.assistants.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') + # vector_store = await ai_client.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') assert vector_store.id file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2966,10 +2967,10 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw tool_resources=file_search.resources, ) assert assistant.id - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert 
thread.id # create message - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", # content="What does the attachment say?" @@ -2980,7 +2981,7 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw if use_stream: run = None - with ai_client.assistants.create_stream( + with ai_client.create_stream( thread_id=thread.id, assistant_id=assistant.id, include=include ) as stream: for event_type, event_data, _ in stream: @@ -2990,26 +2991,26 @@ def _do_test_include_file_search_results(self, use_stream, include_content, **kw print("Stream completed.") break else: - run = ai_client.assistants.create_and_process_run( + run = ai_client.create_and_process_run( thread_id=thread.id, assistant_id=assistant.id, include=include ) assert run.status == RunStatus.COMPLETED assert run is not None - steps = ai_client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) + steps = ai_client.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) # The 1st (not 0th) step is a tool call. 
step_id = steps.data[1].id - one_step = ai_client.assistants.get_run_step( + one_step = ai_client.get_run_step( thread_id=thread.id, run_id=run.id, step_id=step_id, include=include ) self._assert_file_search_valid(one_step.step_details.tool_calls[0], include_content) self._assert_file_search_valid(steps.data[1].step_details.tool_calls[0], include_content) - messages = ai_client.assistants.list_messages(thread_id=thread.id) + messages = ai_client.list_messages(thread_id=thread.id) assert len(messages) - ai_client.assistants.delete_vector_store(vector_store.id) + ai_client.delete_vector_store(vector_store.id) # delete assistant and close client - ai_client.assistants.delete_assistant(assistant.id) + ai_client.delete_assistant(assistant.id) print("Deleted assistant") ai_client.close() @@ -3037,7 +3038,7 @@ def _assert_file_search_valid(self, tool_call: Any, include_content: bool) -> No def test_assistants_with_json_schema(self, **kwargs): """Test structured output from the assistant.""" with self.create_client(**kwargs) as ai_client: - assistant = ai_client.assistants.create_assistant( + assistant = ai_client.create_assistant( # Note only gpt-4o-mini-2024-07-18 and # gpt-4o-2024-08-06 and later support structured output. 
model="gpt-4o-mini", @@ -3065,24 +3066,24 @@ def test_assistants_with_json_schema(self, **kwargs): ) assert assistant.id - thread = ai_client.assistants.create_thread() + thread = ai_client.create_thread() assert thread.id - message = ai_client.assistants.create_message( + message = ai_client.create_message( thread_id=thread.id, role="user", content=("The mass of the Mars is 6.4171E23 kg"), ) assert message.id - run = ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, run.last_error.message - del_assistant = ai_client.assistants.delete_assistant(assistant.id) + del_assistant = ai_client.delete_assistant(assistant.id) assert del_assistant.deleted - messages = ai_client.assistants.list_messages(thread_id=thread.id) + messages = ai_client.list_messages(thread_id=thread.id) planet_info = [] # The messages are following in the reverse order, @@ -3100,7 +3101,7 @@ def test_assistants_with_json_schema(self, **kwargs): def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str: """Return file id if kwargs has file path.""" if "file_path" in kwargs: - file = ai_client.assistants.upload_file_and_poll( + file = ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -3110,7 +3111,7 @@ def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str: def _remove_file_maybe(self, file_id: str, ai_client: AssistantsClient) -> None: """Remove file if we have file ID.""" if file_id: - ai_client.assistants.delete_file(file_id) + ai_client.delete_file(file_id) @assistantClientPreparer() @pytest.mark.skip("File ID issues with sanitization.") @@ -3129,13 +3130,13 @@ def test_code_interpreter_and_save_file(self, **kwargs): with open(test_file_path, "w") as f: f.write("This is a test file") - file: OpenAIFile = client.assistants.upload_file_and_poll( + file: OpenAIFile = client.upload_file_and_poll( file_path=test_file_path, purpose=FilePurpose.ASSISTANTS ) # create assistant code_interpreter = CodeInterpreterTool(file_ids=[file.id]) - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", @@ -3144,11 +3145,11 @@ def test_code_interpreter_and_save_file(self, **kwargs): ) print(f"Created assistant, assistant ID: {assistant.id}") - thread = client.assistants.create_thread() + thread = client.create_thread() print(f"Created thread, thread ID: {thread.id}") # create a message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="Create an image file same as the text file and give me file id?", @@ -3156,15 +3157,15 @@ def test_code_interpreter_and_save_file(self, **kwargs): print(f"Created message, message ID: {message.id}") # create run - run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) print(f"Run finished with status: {run.status}") # delete file - client.assistants.delete_file(file.id) + client.delete_file(file.id) print("Deleted file") # get messages - messages = 
client.assistants.list_messages(thread_id=thread.id) + messages = client.list_messages(thread_id=thread.id) print(f"Messages: {messages}") last_msg = messages.get_last_text_message_by_role(MessageRole.ASSISTANT) @@ -3175,7 +3176,7 @@ def test_code_interpreter_and_save_file(self, **kwargs): file_id = file_path_annotation.file_path.file_id print(f"Image File ID: {file_path_annotation.file_path.file_id}") temp_file_path = os.path.join(temp_dir, "output.png") - client.assistants.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) + client.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) output_file_exist = os.path.exists(temp_file_path) assert output_file_exist @@ -3187,7 +3188,7 @@ def test_azure_function_call(self, **kwargs): # Note: This test was recorded in westus region as for now # 2025-02-05 it is not supported in test region (East US 2) # create client - storage_queue = kwargs["azure_ai_assistants_assistants_tests_storage_queue"] + storage_queue = kwargs["azure_ai_assistants_tests_storage_queue"] with self.create_client(**kwargs) as client: azure_function_tool = AzureFunctionTool( name="foo", @@ -3208,7 +3209,7 @@ def test_azure_function_call(self, **kwargs): storage_service_endpoint=storage_queue, ), ) - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4", name="azure-function-assistant-foo", instructions=( @@ -3224,29 +3225,29 @@ def test_azure_function_call(self, **kwargs): assert assistant.id, "The assistant was not created" # Create a thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id, "The thread was not created." # Create a message - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="What is the most prevalent element in the universe? What would foo say?", ) assert message.id, "The message was not created." 
- run = client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state." # Get messages from the thread - messages = client.assistants.list_messages(thread_id=thread.id) + messages = client.list_messages(thread_id=thread.id) assert len(messages.text_messages) > 1, "No messages were received from assistant." # Check that we have function response in at least one message. assert any("bar" in msg.text.value.lower() for msg in messages.text_messages) # Delete the assistant once done - result = client.assistants.delete_assistant(assistant.id) + result = client.delete_assistant(assistant.id) assert result.deleted, "The assistant was not deleted." @assistantClientPreparer() @@ -3257,16 +3258,16 @@ def test_client_with_thread_messages(self, **kwargs): with self.create_client(**kwargs) as client: # [START create_assistant] - assistant = client.assistants.create_assistant( + assistant = client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are a personal electronics tutor. Write and run code to answer questions.", ) assert assistant.id, "The assistant was not created." - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id, "Thread was not created" - message = client.assistants.create_message( + message = client.create_message( thread_id=thread.id, role="user", content="What is the equation of light energy?" ) assert message.id, "The message was not created." 
@@ -3275,7 +3276,7 @@ def test_client_with_thread_messages(self, **kwargs): ThreadMessageOptions(role=MessageRole.ASSISTANT, content="E=mc^2"), ThreadMessageOptions(role=MessageRole.USER, content="What is the impedance formula?"), ] - run = client.assistants.create_run( + run = client.create_run( thread_id=thread.id, assistant_id=assistant.id, additional_messages=additional_messages ) @@ -3283,12 +3284,12 @@ def test_client_with_thread_messages(self, **kwargs): while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS]: # wait for a second time.sleep(1) - run = client.assistants.get_run( + run = client.get_run( thread_id=thread.id, run_id=run.id, ) assert run.status in RunStatus.COMPLETED - assert client.assistants.delete_assistant(assistant.id).deleted, "The assistant was not deleted" - messages = client.assistants.list_messages(thread_id=thread.id) + assert client.delete_assistant(assistant.id).deleted, "The assistant was not deleted" + messages = client.list_messages(thread_id=thread.id) assert len(messages.data), "The data from the assistant was not received." 
diff --git a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py index 02a9f9901a9a..9bb7f478b14c 100644 --- a/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py +++ b/sdk/ai/azure-ai-assistants/tests/test_assistants_client_async.py @@ -80,10 +80,15 @@ assistantClientPreparer = functools.partial( EnvironmentVariableLoader, - "azure_ai.assistants", - azure_ai_assistants_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name", - azure_ai_assistants_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", - azure_ai_assistants_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", + "azure_ai_assistants", + # TODO: uncomment this endpoint when re running with 1DP + #azure_ai_assistants_tests_project_endpoint="https://aiservices-id.services.ai.azure.com/api/projects/project-name", + # TODO: remove this endpoint when re running with 1DP + azure_ai_assistants_tests_project_endpoint="https://Sanitized.api.azureml.ms/agents/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/", + azure_ai_assistants_tests_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md", + azure_ai_assistants_tests_storage_queue="https://foobar.queue.core.windows.net", + azure_ai_assistants_tests_search_index_name="sample_index", + 
azure_ai_assistants_tests_search_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/someindex", ) @@ -123,7 +128,7 @@ class TestAssistantClientAsync(AzureRecordedTestCase): # helper function: create client using environment variables def create_client(self, **kwargs): # fetch environment variables - endpoint = kwargs.pop("azure_ai_assistants_assistants_tests_project_endpoint") + endpoint = kwargs.pop("azure_ai_assistants_tests_project_endpoint") credential = self.get_credential(AssistantsClient, is_async=True) # create and return client @@ -149,10 +154,10 @@ async def test_clear_client(self, **kwargs): print("Created client") # clear assistant list - assistants = await client.assistants.list_assistants().data + assistants = await client.list_assistants().data for assistant in assistants: - await client.assistants.delete_assistant(assistant.id) - assert client.assistants.list_assistants().data.__len__() == 0 + await client.delete_assistant(assistant.id) + assert client.list_assistants().data.__len__() == 0 # close client await client.close() @@ -192,14 +197,14 @@ async def test_create_delete_assistant(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test assistant creation with tools @@ -215,7 +220,7 @@ async def test_create_assistant_with_tools(self, **kwargs): functions = FunctionTool(functions=user_functions_recording) # create assistant with tools - assistant = await client.assistants.create_assistant( + assistant = await 
client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", @@ -228,7 +233,7 @@ async def test_create_assistant_with_tools(self, **kwargs): print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test update assistant without body: JSON @@ -244,17 +249,17 @@ async def test_update_assistant(self, **kwargs): body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} # create assistant - assistant = await client.assistants.create_assistant(body=body) + assistant = await client.create_assistant(body=body) assert assistant.id print("Created assistant, assistant ID", assistant.id) # update assistant and confirm changes went through - assistant = await client.assistants.update_assistant(assistant.id, name="my-assistant2") + assistant = await client.update_assistant(assistant.id, name="my-assistant2") assert assistant.name assert assistant.name == "my-assistant2" # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -271,7 +276,7 @@ async def test_update_assistant_with_body(self, **kwargs): body = {"name": "my-assistant", "model": "gpt-4o", "instructions": "You are helpful assistant"} # create assistant - assistant = await client.assistants.create_assistant(body=body) + assistant = await client.create_assistant(body=body) assert assistant.id print("Created assistant, assistant ID", assistant.id) @@ -279,12 +284,12 @@ async def test_update_assistant_with_body(self, **kwargs): body2 = {"name": "my-assistant2", "instructions": "You are helpful assistant"} # update assistant and confirm changes went through - assistant = await 
client.assistants.update_assistant(assistant.id, body=body2) + assistant = await client.update_assistant(assistant.id, body=body2) assert assistant.name assert assistant.name == "my-assistant2" # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -299,7 +304,7 @@ async def test_update_assistant_with_iobytes(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -309,12 +314,12 @@ async def test_update_assistant_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # update assistant and confirm changes went through - assistant = await client.assistants.update_assistant(assistant.id, body=io.BytesIO(binary_body)) + assistant = await client.update_assistant(assistant.id, body=io.BytesIO(binary_body)) assert assistant.name assert assistant.name == "my-assistant2" # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -325,27 +330,27 @@ async def test_update_assistant_with_iobytes(self, **kwargs): async def test_assistant_list(self, **kwargs): # create client and ensure there are no previous assistants client = self.create_client(**kwargs) - list_length = await client.assistants.list_assistants().data.__len__() + list_length = await client.list_assistants().data.__len__() # create assistant and check that it appears in the list - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) - assert client.assistants.list_assistants().data.__len__() == 
list_length + 1 - assert client.assistants.list_assistants().data[0].id == assistant.id + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant.id # create second assistant and check that it appears in the list - assistant2 = await client.assistants.create_assistant(model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant") - assert client.assistants.list_assistants().data.__len__() == list_length + 2 - assert client.assistants.list_assistants().data[0].id == assistant.id or client.assistants.list_assistants().data[1].id == assistant.id + assistant2 = await client.create_assistant(model="gpt-4o", name="my-assistant2", instructions="You are helpful assistant") + assert client.list_assistants().data.__len__() == list_length + 2 + assert client.list_assistants().data[0].id == assistant.id or client.list_assistants().data[1].id == assistant.id # delete assistants and check list - await client.assistants.delete_assistant(assistant.id) - assert client.assistants.list_assistants().data.__len__() == list_length + 1 - assert client.assistants.list_assistants().data[0].id == assistant2.id + await client.delete_assistant(assistant.id) + assert client.list_assistants().data.__len__() == list_length + 1 + assert client.list_assistants().data[0].id == assistant2.id - client.assistants.delete_assistant(assistant2.id) - assert client.assistants.list_assistants().data.__len__() == list_length + client.delete_assistant(assistant2.id) + assert client.list_assistants().data.__len__() == list_length print("Deleted assistants") # close client @@ -367,20 +372,20 @@ async def test_create_thread(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", 
assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test creating thread with no body @@ -395,7 +400,7 @@ async def test_create_thread_with_metadata(self, **kwargs): metadata = {"key1": "value1", "key2": "value2"} # create thread - thread = await client.assistants.create_thread(metadata=metadata) + thread = await client.create_thread(metadata=metadata) assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) @@ -419,7 +424,7 @@ async def test_create_thread_with_body(self, **kwargs): } # create thread - thread = await client.assistants.create_thread(body=body) + thread = await client.create_thread(body=body) assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) @@ -444,7 +449,7 @@ async def test_create_thread_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create thread - thread = await client.assistants.create_thread(body=io.BytesIO(binary_body)) + thread = await client.create_thread(body=io.BytesIO(binary_body)) assert isinstance(thread, AssistantThread) assert thread.id print("Created thread, thread ID", thread.id) @@ -463,25 +468,25 @@ async def test_get_thread(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id 
print("Created thread, thread ID", thread.id) # get thread - thread2 = await client.assistants.get_thread(thread.id) + thread2 = await client.get_thread(thread.id) assert thread2.id assert thread.id == thread2.id print("Got thread, thread ID", thread2.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test updating thread @@ -493,23 +498,23 @@ async def test_update_thread(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # update thread - thread = await client.assistants.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) + thread = await client.update_thread(thread.id, metadata={"key1": "value1", "key2": "value2"}) assert thread.metadata == {"key1": "value1", "key2": "value2"} # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -525,7 +530,7 @@ async def test_update_thread_with_metadata(self, **kwargs): metadata = {"key1": "value1", "key2": "value2"} # create thread - thread = await client.assistants.create_thread(metadata=metadata) + thread = await client.create_thread(metadata=metadata) assert thread.id print("Created thread, thread ID", thread.id) @@ -533,7 +538,7 @@ async def test_update_thread_with_metadata(self, **kwargs): metadata2 = {"key1": "value1", "key2": "newvalue2"} # update thread - thread = await client.assistants.update_thread(thread.id, 
metadata=metadata2) + thread = await client.update_thread(thread.id, metadata=metadata2) assert thread.metadata == {"key1": "value1", "key2": "newvalue2"} # close client @@ -548,7 +553,7 @@ async def test_update_thread_with_body(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -556,7 +561,7 @@ async def test_update_thread_with_body(self, **kwargs): body = {"metadata": {"key1": "value1", "key2": "value2"}} # update thread - thread = await client.assistants.update_thread(thread.id, body=body) + thread = await client.update_thread(thread.id, body=body) assert thread.metadata == {"key1": "value1", "key2": "value2"} # close client @@ -571,7 +576,7 @@ async def test_update_thread_with_iobytes(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -580,7 +585,7 @@ async def test_update_thread_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # update thread - thread = await client.assistants.update_thread(thread.id, body=io.BytesIO(binary_body)) + thread = await client.update_thread(thread.id, body=io.BytesIO(binary_body)) assert thread.metadata == {"key1": "value1", "key2": "value2"} # close client @@ -595,26 +600,26 @@ async def test_delete_thread(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() # assert isinstance(thread, AssistantThread) assert thread.id 
print("Created thread, thread ID", thread.id) # delete thread - deletion_status = await client.assistants.delete_thread(thread.id) + deletion_status = await client.delete_thread(thread.id) assert deletion_status.id == thread.id assert deletion_status.deleted == True print("Deleted thread, thread ID", deletion_status.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -633,26 +638,26 @@ async def test_create_message(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -665,7 +670,7 @@ async def test_create_message_with_body(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -673,7 +678,7 @@ async def test_create_message_with_body(self, **kwargs): body = {"role": "user", "content": "Hello, tell me a joke"} # create message - message = await client.assistants.create_message(thread_id=thread.id, body=body) + message = 
await client.create_message(thread_id=thread.id, body=body) assert message.id print("Created message, message ID", message.id) @@ -689,7 +694,7 @@ async def test_create_message_with_iobytes(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -698,7 +703,7 @@ async def test_create_message_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create message - message = await client.assistants.create_message(thread_id=thread.id, body=io.BytesIO(binary_body)) + message = await client.create_message(thread_id=thread.id, body=io.BytesIO(binary_body)) assert message.id print("Created message, message ID", message.id) @@ -714,36 +719,36 @@ async def test_create_multiple_messages(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create messages - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) - message2 = await client.assistants.create_message( + message2 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me another joke" ) assert message2.id print("Created message, message ID", message2.id) - message3 = await client.assistants.create_message( + message3 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third 
joke" ) assert message3.id print("Created message, message ID", message3.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -756,47 +761,47 @@ async def test_list_messages(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # check that initial message list is empty - messages0 = await client.assistants.list_messages(thread_id=thread.id) + messages0 = await client.list_messages(thread_id=thread.id) print(messages0.data) assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = await client.assistants.create_message( + message1 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message1.id print("Created message, message ID", message1.id) - messages1 = await client.assistants.list_messages(thread_id=thread.id) + messages1 = await client.list_messages(thread_id=thread.id) assert messages1.data.__len__() == 1 assert messages1.data[0].id == message1.id - message2 = await client.assistants.create_message( + message2 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me another joke" ) assert message2.id print("Created message, message ID", message2.id) - messages2 = await client.assistants.list_messages(thread_id=thread.id) + messages2 = await client.list_messages(thread_id=thread.id) assert messages2.data.__len__() == 2 assert messages2.data[0].id == message2.id or 
messages2.data[1].id == message2.id - message3 = await client.assistants.create_message( + message3 = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third joke" ) assert message3.id print("Created message, message ID", message3.id) - messages3 = await client.assistants.list_messages(thread_id=thread.id) + messages3 = await client.list_messages(thread_id=thread.id) assert messages3.data.__len__() == 3 assert ( messages3.data[0].id == message3.id @@ -805,7 +810,7 @@ async def test_list_messages(self, **kwargs): ) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -818,32 +823,32 @@ async def test_get_message(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # get message - message2 = await client.assistants.get_message(thread_id=thread.id, message_id=message.id) + message2 = await client.get_message(thread_id=thread.id, message_id=message.id) assert message2.id assert message.id == message2.id print("Got message, message ID", message.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test updating 
message in a thread without body @@ -855,19 +860,19 @@ async def test_update_message(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # update message - message = await client.assistants.update_message( + message = await client.update_message( thread_id=thread.id, message_id=message.id, metadata={"key1": "value1", "key2": "value2"} ) assert message.metadata == {"key1": "value1", "key2": "value2"} @@ -884,12 +889,12 @@ async def test_update_message_with_body(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id @@ -899,7 +904,7 @@ async def test_update_message_with_body(self, **kwargs): body = {"metadata": {"key1": "value1", "key2": "value2"}} # update message - message = await client.assistants.update_message(thread_id=thread.id, message_id=message.id, body=body) + message = await client.update_message(thread_id=thread.id, message_id=message.id, body=body) assert message.metadata == {"key1": "value1", "key2": "value2"} # close client @@ -914,12 +919,12 @@ async def test_update_message_with_iobytes(self, **kwargs): print("Created client") # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create 
message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id @@ -930,7 +935,7 @@ async def test_update_message_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # update message - message = await client.assistants.update_message( + message = await client.update_message( thread_id=thread.id, message_id=message.id, body=io.BytesIO(binary_body) ) assert message.metadata == {"key1": "value1", "key2": "value2"} @@ -953,24 +958,24 @@ async def test_create_run(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -983,19 +988,19 @@ async def test_create_run_with_metadata(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await 
client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1003,7 +1008,7 @@ async def test_create_run_with_metadata(self, **kwargs): print("Created run, run ID", run.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1016,14 +1021,14 @@ async def test_create_run_with_body(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -1031,13 +1036,13 @@ async def test_create_run_with_body(self, **kwargs): body = {"assistant_id": assistant.id, "metadata": {"key1": "value1", "key2": "value2"}} # create run - run = await client.assistants.create_run(thread_id=thread.id, body=body) + run = await client.create_run(thread_id=thread.id, body=body) assert run.id assert run.metadata == {"key1": "value1", "key2": "value2"} print("Created run, run ID", run.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1050,14 +1055,14 @@ async def test_create_run_with_iobytes(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", 
name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) @@ -1066,13 +1071,13 @@ async def test_create_run_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create run - run = await client.assistants.create_run(thread_id=thread.id, body=io.BytesIO(binary_body)) + run = await client.create_run(thread_id=thread.id, body=io.BytesIO(binary_body)) assert run.id assert run.metadata == {"key1": "value1", "key2": "value2"} print("Created run, run ID", run.id) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1085,30 +1090,30 @@ async def test_get_run(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # get run - run2 = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run2 = await client.get_run(thread_id=thread.id, run_id=run.id) assert run2.id assert run.id == run2.id print("Got run, run ID", run2.id) # delete assistant and close client - await 
client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1121,26 +1126,26 @@ async def test_run_status(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, tell me a joke" ) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1158,14 +1163,14 @@ async def test_run_status(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) print("Run status:", run.status) assert run.status in ["cancelled", "failed", "completed", "expired"] print("Run completed with status:", run.status) # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1181,39 +1186,39 @@ async def test_list_runs(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await 
client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # check list for current runs - runs0 = await client.assistants.list_runs(thread_id=thread.id) + runs0 = await client.list_runs(thread_id=thread.id) assert runs0.data.__len__() == 0 # create run and check list - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) - runs1 = await client.assistants.list_runs(thread_id=thread.id) + runs1 = await client.list_runs(thread_id=thread.id) assert runs1.data.__len__() == 1 assert runs1.data[0].id == run.id # create second run - run2 = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run2 = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run2.id print("Created run, run ID", run2.id) - runs2 = await client.assistants.list_runs(thread_id=thread.id) + runs2 = await client.list_runs(thread_id=thread.id) assert runs2.data.__len__() == 2 assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() """ @@ -1227,33 +1232,33 @@ async def test_update_run(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create 
thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) - run = await client.assistants.update_run( + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run( thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.metadata == {"key1": "value1", "key2": "value2"} # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1266,19 +1271,19 @@ async def test_update_run_with_metadata(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1288,14 +1293,14 @@ async def test_update_run_with_metadata(self, **kwargs): # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) 
- run = await client.assistants.update_run( + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run( thread_id=thread.id, run_id=run.id, metadata={"key1": "value1", "key2": "newvalue2"} ) assert run.metadata == {"key1": "value1", "key2": "newvalue2"} # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1308,19 +1313,19 @@ async def test_update_run_with_body(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1333,12 +1338,12 @@ async def test_update_run_with_body(self, **kwargs): # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) - run = await client.assistants.update_run(thread_id=thread.id, run_id=run.id, body=body) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run(thread_id=thread.id, run_id=run.id, body=body) assert run.metadata == {"key1": "value1", "key2": "newvalue2"} # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1351,19 +1356,19 @@ async def 
test_update_run_with_iobytes(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create run - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, metadata={"key1": "value1", "key2": "value2"} ) assert run.id @@ -1377,12 +1382,12 @@ async def test_update_run_with_iobytes(self, **kwargs): # update run while run.status in ["queued", "in_progress"]: time.sleep(5) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) - run = await client.assistants.update_run(thread_id=thread.id, run_id=run.id, body=io.BytesIO(binary_body)) + run = await client.get_run(thread_id=thread.id, run_id=run.id) + run = await client.update_run(thread_id=thread.id, run_id=run.id, body=io.BytesIO(binary_body)) assert run.metadata == {"key1": "value1", "key2": "newvalue2"} # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -1404,26 +1409,26 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): # toolset.add(code_interpreter) # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id 
print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, what time is it?" ) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1445,7 +1450,7 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1453,14 +1458,14 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - await client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + await client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run tool_outputs = toolset.execute_tool_calls(tool_calls) print("Tool outputs:", tool_outputs) if tool_outputs: - await client.assistants.submit_tool_outputs_to_run( + await client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs ) @@ -1469,7 +1474,7 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): print("Run completed with status:", run.status) # check that messages used the tool - messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] hour12 = 
time.strftime("%H") hour24 = time.strftime("%I") @@ -1478,7 +1483,7 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): print("Used tool_outputs") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test submitting tool outputs to run with body: JSON @@ -1496,26 +1501,26 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): toolset.add(functions) # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, what time is it?" 
) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1537,7 +1542,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1545,7 +1550,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - await client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + await client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run @@ -1553,7 +1558,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): print("Tool outputs:", tool_outputs) if tool_outputs: body = {"tool_outputs": tool_outputs} - await client.assistants.submit_tool_outputs_to_run( + await client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, body=body ) @@ -1562,7 +1567,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, **kwargs): print("Run completed with status:", run.status) # check that messages used the tool - messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] # hour12 = time.strftime("%H") # hour24 = time.strftime("%I") @@ -1573,7 +1578,7 @@ async def test_submit_tool_outputs_to_run_with_body(self, 
**kwargs): print("Used tool_outputs") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test submitting tool outputs to run with body: IO[bytes] @@ -1591,26 +1596,26 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): toolset.add(functions) # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, what time is it?" 
) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1632,7 +1637,7 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1640,7 +1645,7 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") - client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run @@ -1649,7 +1654,7 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): if tool_outputs: body = {"tool_outputs": tool_outputs} binary_body = json.dumps(body).encode("utf-8") - await client.assistants.submit_tool_outputs_to_run( + await client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, body=io.BytesIO(binary_body) ) @@ -1658,7 +1663,7 @@ async def test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): print("Run completed with status:", run.status) # check that messages used the tool - messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] # hour12 = time.strftime("%H") # hour24 = time.strftime("%I") @@ -1669,7 +1674,7 @@ async def 
test_submit_tool_outputs_to_run_with_iobytes(self, **kwargs): print("Used tool_outputs") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") """ @@ -1683,24 +1688,24 @@ async def test_cancel_run(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = await client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -1722,7 +1727,7 @@ async def test_cancel_run(self, **kwargs): ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed if run.status == "requires_action" and run.required_action.submit_tool_outputs: @@ -1732,14 +1737,14 @@ async def test_cancel_run(self, **kwargs): print( "No tool calls provided - cancelling run" ) # TODO how can i make sure that it wants tools? should i have some kind of error message? 
- await client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + await client.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here print("Tool outputs:", tool_outputs) if tool_outputs: - await client.assistants.submit_tool_outputs_to_run( + await client.submit_tool_outputs_to_run( thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs ) @@ -1748,7 +1753,7 @@ async def test_cancel_run(self, **kwargs): print("Run completed with status:", run.status) # check that messages used the tool - messages = await client.assistants.list_messages(thread_id=thread.id, run_id=run.id) + messages = await client.list_messages(thread_id=thread.id, run_id=run.id) tool_message = messages["data"][0]["content"][0]["text"]["value"] hour12 = time.strftime("%H") hour24 = time.strftime("%I") @@ -1757,7 +1762,7 @@ async def test_cancel_run(self, **kwargs): print("Used tool_outputs") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() """ @@ -1794,7 +1799,7 @@ async def _wait_for_run(self, client, run, timeout=1): """Wait while run will get to terminal state.""" while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS, RunStatus.REQUIRES_ACTION]: time.sleep(timeout) - run = await client.assistants.get_run(thread_id=run.thread_id, run_id=run.id) + run = await client.get_run(thread_id=run.thread_id, run_id=run.id) return run async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_thread_run, **kwargs): @@ -1813,7 +1818,7 @@ async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_t toolset = ToolSet() toolset.add(functions) toolset.add(code_interpreter) - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4", name="my-assistant", 
instructions="You are helpful assistant", @@ -1827,16 +1832,16 @@ async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_t ) if create_thread_run: - run = await client.assistants.create_thread_and_run( + run = await client.create_thread_and_run( assistant_id=assistant.id, parallel_tool_calls=use_parallel_runs, ) run = await self._wait_for_run(client, run) else: - thread = await client.assistants.create_thread(messages=[message]) + thread = await client.create_thread(messages=[message]) assert thread.id - run = await client.assistants.create_and_process_run( + run = await client.create_and_process_run( thread_id=thread.id, assistant_id=assistant.id, parallel_tool_calls=use_parallel_runs, @@ -1845,8 +1850,8 @@ async def _do_test_create_parallel_thread_runs(self, use_parallel_runs, create_t assert run.status == RunStatus.COMPLETED, run.last_error.message assert run.parallel_tool_calls == use_parallel_runs - assert (await client.assistants.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" - messages = await client.assistants.list_messages(thread_id=run.thread_id) + assert (await client.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" + messages = await client.list_messages(thread_id=run.thread_id) assert len(messages.data), "The data from the assistant was not received." 
""" @@ -1860,38 +1865,38 @@ async def test_cancel_run(self, **kwargs): assert isinstance(client, AssistantsClient) # create assistant - assistant = client.assistants.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") + assistant = client.create_assistant(model="gpt-4o", name="my-assistant", instructions="You are helpful assistant") assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = client.assistants.create_thread() + thread = client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = client.assistants.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) # create run - run = client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) # check status and cancel assert run.status in ["queued", "in_progress", "requires_action"] - client.assistants.cancel_run(thread_id=thread.id, run_id=run.id) + client.cancel_run(thread_id=thread.id, run_id=run.id) while run.status in ["queued", "cancelling"]: time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) print("Current run status:", run.status) assert run.status == "cancelled" print("Run cancelled") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() """ @@ -1906,20 +1911,20 @@ async def test_create_thread_and_run(self, **kwargs): print("Created client") # create assistant - assistant = await 
client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread and run - run = await client.assistants.create_thread_and_run(assistant_id=assistant.id) + run = await client.create_thread_and_run(assistant_id=assistant.id) assert run.id assert run.thread_id print("Created run, run ID", run.id) # get thread - thread = await client.assistants.get_thread(run.thread_id) + thread = await client.get_thread(run.thread_id) assert thread.id print("Created thread, thread ID", thread.id) @@ -1937,7 +1942,7 @@ async def test_create_thread_and_run(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # assert run.status in ["queued", "in_progress", "requires_action", "completed"] print("Run status:", run.status) @@ -1945,7 +1950,7 @@ async def test_create_thread_and_run(self, **kwargs): print("Run completed") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") # test create thread and run with body: JSON @@ -1958,7 +1963,7 @@ async def test_create_thread_and_run_with_body(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -1971,14 +1976,14 @@ async def test_create_thread_and_run_with_body(self, **kwargs): } # create thread and run - run = await client.assistants.create_thread_and_run(body=body) + run = await client.create_thread_and_run(body=body) assert run.id assert 
run.thread_id assert run.metadata == {"key1": "value1", "key2": "value2"} print("Created run, run ID", run.id) # get thread - thread = await client.assistants.get_thread(run.thread_id) + thread = await client.get_thread(run.thread_id) assert thread.id print("Created thread, thread ID", thread.id) @@ -1996,7 +2001,7 @@ async def test_create_thread_and_run_with_body(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # assert run.status in ["queued", "in_progress", "requires_action", "completed"] print("Run status:", run.status) @@ -2004,7 +2009,7 @@ async def test_create_thread_and_run_with_body(self, **kwargs): print("Run completed") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -2018,7 +2023,7 @@ async def test_create_thread_and_run_with_iobytes(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id @@ -2032,14 +2037,14 @@ async def test_create_thread_and_run_with_iobytes(self, **kwargs): binary_body = json.dumps(body).encode("utf-8") # create thread and run - run = await client.assistants.create_thread_and_run(body=io.BytesIO(binary_body)) + run = await client.create_thread_and_run(body=io.BytesIO(binary_body)) assert run.id assert run.thread_id assert run.metadata == {"key1": "value1", "key2": "value2"} print("Created run, run ID", run.id) # get thread - thread = await client.assistants.get_thread(run.thread_id) + thread = await client.get_thread(run.thread_id) assert thread.id print("Created thread, thread ID", 
thread.id) @@ -2057,7 +2062,7 @@ async def test_create_thread_and_run_with_iobytes(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) # assert run.status in ["queued", "in_progress", "requires_action", "completed"] print("Run status:", run.status) @@ -2065,7 +2070,7 @@ async def test_create_thread_and_run_with_iobytes(self, **kwargs): print("Run completed") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -2081,30 +2086,30 @@ async def test_list_run_step(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, what time is it?" 
) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) - steps = await client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = await client.list_run_steps(thread_id=thread.id, run_id=run.id) # commenting assertion out below, do we know exactly when run starts? # assert steps['data'].__len__() == 0 @@ -2113,7 +2118,7 @@ async def test_list_run_step(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) assert run.status in [ "queued", "in_progress", @@ -2121,7 +2126,7 @@ async def test_list_run_step(self, **kwargs): "completed", ] print("Run status:", run.status) - steps = await client.assistants.list_run_steps( + steps = await client.list_run_steps( thread_id=thread.id, run_id=run.id ) assert steps["data"].__len__() > 0 @@ -2130,7 +2135,7 @@ async def test_list_run_step(self, **kwargs): print("Run completed") # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() """ @@ -2144,26 +2149,26 @@ async def test_get_run_step(self, **kwargs): print("Created client") # create assistant - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) assert assistant.id print("Created assistant, assistant ID", assistant.id) # create thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id print("Created 
thread, thread ID", thread.id) # create message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" ) assert message.id print("Created message, message ID", message.id) # create run - run = await client.assistants.create_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id print("Created run, run ID", run.id) @@ -2177,7 +2182,7 @@ async def test_get_run_step(self, **kwargs): while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) - run = await client.assistants.get_run(thread_id=thread.id, run_id=run.id) + run = await client.get_run(thread_id=thread.id, run_id=run.id) if run.status == "failed": assert run.last_error print(run.last_error) @@ -2191,14 +2196,14 @@ async def test_get_run_step(self, **kwargs): print("Run status:", run.status) # list steps, check that get_run_step works with first step_id - steps = await client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = await client.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 step = steps["data"][0] - get_step = await client.assistants.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + get_step = await client.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) assert step == get_step # delete assistant and close client - await client.assistants.delete_assistant(assistant.id) + await client.delete_assistant(assistant.id) print("Deleted assistant") await client.close() @@ -2241,11 +2246,11 @@ async def _do_test_create_vector_store(self, streaming, **kwargs): else: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], 
asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.assistants.create_vector_store_and_poll( + vector_store = await ai_client.create_vector_store_and_poll( file_ids=file_ids, data_sources=ds, name="my_vectorstore" ) assert vector_store.id @@ -2289,12 +2294,12 @@ async def _do_test_create_vector_store_add_file(self, streaming, **kwargs): ds = None else: ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) - vector_store = await ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = await ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file = await ai_client.assistants.create_vector_store_file( + vector_store_file = await ai_client.create_vector_store_file( vector_store_id=vector_store.id, data_source=ds, file_id=file_id ) assert vector_store_file.id @@ -2341,13 +2346,13 @@ async def _do_test_create_vector_store_batch(self, streaming, **kwargs): file_ids = None ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.assistants.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = await ai_client.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file_batch = await ai_client.assistants.create_vector_store_file_batch_and_poll( + vector_store_file_batch = await ai_client.create_vector_store_file_batch_and_poll( vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids ) assert vector_store_file_batch.id @@ -2358,7 +2363,7 @@ async def 
_test_file_search( ) -> None: """Test the file search""" file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2366,17 +2371,17 @@ async def _test_file_search( tool_resources=file_search.resources, ) assert assistant.id - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id # create message - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." if streaming: thread_run = None - async with await ai_client.assistants.create_stream( + async with await ai_client.create_stream( thread_id=thread.id, assistant_id=assistant.id ) as stream: async for _, event_data, _ in stream: @@ -2391,17 +2396,17 @@ async def _test_file_search( event_data.delta.step_details.tool_calls[0].file_search, RunStepFileSearchToolCallResults ) assert thread_run is not None - run = await ai_client.assistants.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) + run = await ai_client.get_run(thread_id=thread_run.thread_id, run_id=thread_run.id) assert run is not None else: - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) - await ai_client.assistants.delete_vector_store(vector_store.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + await ai_client.delete_vector_store(vector_store.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + messages = await ai_client.list_messages(thread_id=thread.id) assert len(messages) 
await self._remove_file_maybe(file_id, ai_client) # delete assistant and close client - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) print("Deleted assistant") await ai_client.close() @@ -2411,7 +2416,7 @@ async def _test_file_search( async def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_message_attachment(data_sources=[ds], **kwargs) @@ -2431,14 +2436,14 @@ async def _do_test_message_attachment(self, **kwargs): file_id = await self._get_file_id_maybe(ai_client, **kwargs) # Create assistant with file search tool - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", ) assert assistant.id, "Assistant was not created" - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id, "The thread was not created." # Create a message with the file search attachment @@ -2451,7 +2456,7 @@ async def _do_test_message_attachment(self, **kwargs): CodeInterpreterTool().definitions[0], ], ) - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?", @@ -2459,12 +2464,12 @@ async def _do_test_message_attachment(self, **kwargs): ) assert message.id, "The message was not created." 
- run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." await self._remove_file_maybe(file_id, ai_client) - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + messages = await ai_client.list_messages(thread_id=thread.id) assert len(messages), "No messages were created" await ai_client.close() @@ -2479,7 +2484,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2492,7 +2497,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ] ) file_search = FileSearchTool() - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2501,19 +2506,19 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert assistant.id - thread = await ai_client.assistants.create_thread(tool_resources=ToolResources(file_search=fs)) + thread = await ai_client.create_thread(tool_resources=ToolResources(file_search=fs)) assert thread.id # create message - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." 
- run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = await ai_client.assistants.list_messages(thread.id) + messages = await ai_client.list_messages(thread.id) assert len(messages) - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) await ai_client.close() @assistantClientPreparer() @@ -2522,7 +2527,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): async def test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @@ -2543,7 +2548,7 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = await ai_client.assistants.upload_file_and_poll( + file = await ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -2555,7 +2560,7 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2564,20 +2569,20 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): ) assert assistant.id, "Assistant was not created" - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id, "The thread was not created." - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." 
await self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" - await ai_client.assistants.delete_assistant(assistant.id) - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + await ai_client.delete_assistant(assistant.id) + messages = await ai_client.list_messages(thread_id=thread.id) assert len(messages), "No messages were created" await ai_client.close() @@ -2587,7 +2592,7 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): async def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -2608,7 +2613,7 @@ async def _do_test_create_thread_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = await ai_client.assistants.upload_file_and_poll( + file = await ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -2620,7 +2625,7 @@ async def _do_test_create_thread_with_interpreter(self, **kwargs): ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the assistant creation, otherwise the assistant will not be able to see the file attachment - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", @@ -2628,20 +2633,20 @@ async def _do_test_create_thread_with_interpreter(self, **kwargs): ) assert assistant.id, "Assistant was not created" - thread = await ai_client.assistants.create_thread(tool_resources=tr) + thread = await ai_client.create_thread(tool_resources=tr) assert thread.id, "The thread was not created." - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.id, "The run was not created." 
await self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" - await ai_client.assistants.delete_assistant(assistant.id) - messages = await ai_client.assistants.list_messages(thread.id) + await ai_client.delete_assistant(assistant.id) + messages = await ai_client.list_messages(thread.id) assert len(messages) await ai_client.close() @@ -2656,7 +2661,7 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -2669,7 +2674,7 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ] ) file_search = FileSearchTool() - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2678,19 +2683,19 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ) assert assistant.id - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id # create message - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content="What does the attachment say?" ) assert message.id, "The message was not created." 
- run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = await ai_client.assistants.list_messages(thread.id) + messages = await ai_client.list_messages(thread.id) assert len(messages) - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) await ai_client.close() @assistantClientPreparer() @@ -2699,7 +2704,7 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): async def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" ds = VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs) @@ -2719,7 +2724,7 @@ async def _do_test_create_attachment_in_thread_azure(self, **kwargs): file_id = await self._get_file_id_maybe(ai_client, **kwargs) file_search = FileSearchTool() - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2741,14 +2746,14 @@ async def _do_test_create_attachment_in_thread_azure(self, **kwargs): content="What does the attachment say?", attachments=[attachment], ) - thread = await ai_client.assistants.create_thread(messages=[message]) + thread = await ai_client.create_thread(messages=[message]) assert thread.id - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await 
ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == "completed", f"Error in run: {run.last_error}" - messages = await ai_client.assistants.list_messages(thread.id) + messages = await ai_client.list_messages(thread.id) assert len(messages) - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) await ai_client.close() @assistantClientPreparer() @@ -2758,7 +2763,7 @@ async def test_azure_function_call(self, **kwargs): # Note: This test was recorded in westus region as for now # 2025-02-05 it is not supported in test region (East US 2) # create client - storage_queue = kwargs["azure_ai_assistants_assistants_tests_storage_queue"] + storage_queue = kwargs["azure_ai_assistants_tests_storage_queue"] async with self.create_client(**kwargs) as client: azure_function_tool = AzureFunctionTool( name="foo", @@ -2779,7 +2784,7 @@ async def test_azure_function_call(self, **kwargs): storage_service_endpoint=storage_queue, ), ) - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4", name="azure-function-assistant-foo", instructions=( @@ -2795,29 +2800,29 @@ async def test_azure_function_call(self, **kwargs): assert assistant.id, "The assistant was not created" # Create a thread - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id, "The thread was not created." # Create a message - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="What is the most prevalent element in the universe? What would foo say?", ) assert message.id, "The message was not created." 
- run = await client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state." # Get messages from the thread - messages = await client.assistants.list_messages(thread_id=thread.id) + messages = await client.list_messages(thread_id=thread.id) assert len(messages.text_messages) > 1, "No messages were received from assistant." # Chech that we have function response in at least one message. assert any("bar" in msg.text.value.lower() for msg in messages.text_messages) # Delete the assistant once done - result = await client.assistants.delete_assistant(assistant.id) + result = await client.delete_assistant(assistant.id) assert result.deleted, "The assistant was not deleted." @assistantClientPreparer() @@ -2827,16 +2832,16 @@ async def test_client_with_thread_messages(self, **kwargs): async with self.create_client(**kwargs) as client: # [START create_assistant] - assistant = await client.assistants.create_assistant( - model="gpt-4-1106-preview", + assistant = await client.create_assistant( + model="gpt-4", name="my-assistant", instructions="You are helpful assistant", ) assert assistant.id, "The assistant was not created." - thread = await client.assistants.create_thread() + thread = await client.create_thread() assert thread.id, "Thread was not created" - message = await client.assistants.create_message( + message = await client.create_message( thread_id=thread.id, role="user", content="What is the equation of light energy?" ) assert message.id, "The message was not created." 
@@ -2845,7 +2850,7 @@ async def test_client_with_thread_messages(self, **kwargs): ThreadMessageOptions(role=MessageRole.ASSISTANT, content="E=mc^2"), ThreadMessageOptions(role=MessageRole.USER, content="What is the impedance formula?"), ] - run = await client.assistants.create_run( + run = await client.create_run( thread_id=thread.id, assistant_id=assistant.id, additional_messages=additional_messages ) @@ -2853,14 +2858,14 @@ async def test_client_with_thread_messages(self, **kwargs): while run.status in [RunStatus.QUEUED, RunStatus.IN_PROGRESS]: # wait for a second time.sleep(1) - run = await client.assistants.get_run( + run = await client.get_run( thread_id=thread.id, run_id=run.id, ) - assert run.status in RunStatus.COMPLETED + assert run.status in RunStatus.COMPLETED, run.last_error - assert (await client.assistants.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" - messages = await client.assistants.list_messages(thread_id=thread.id) + assert (await client.delete_assistant(assistant.id)).deleted, "The assistant was not deleted" + messages = await client.list_messages(thread_id=thread.id) assert len(messages.data), "The data from the assistant was not received." 
@assistantClientPreparer() @@ -2884,18 +2889,18 @@ async def _do_test_include_file_search_results(self, use_stream, include_content async with self.create_client(**kwargs) as ai_client: ds = [ VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_assistants_assistants_tests_data_path"], + asset_identifier=kwargs["azure_ai_assistants_tests_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.assistants.create_vector_store_and_poll( + vector_store = await ai_client.create_vector_store_and_poll( file_ids=[], data_sources=ds, name="my_vectorstore" ) - # vector_store = await ai_client.assistants.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') + # vector_store = await ai_client.get_vector_store('vs_M9oxKG7JngORHcYNBGVZ6Iz3') assert vector_store.id file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( model="gpt-4o", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", @@ -2903,10 +2908,10 @@ async def _do_test_include_file_search_results(self, use_stream, include_content tool_resources=file_search.resources, ) assert assistant.id - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id # create message - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", # content="What does the attachment say?" 
@@ -2917,7 +2922,7 @@ async def _do_test_include_file_search_results(self, use_stream, include_content if use_stream: run = None - async with await ai_client.assistants.create_stream( + async with await ai_client.create_stream( thread_id=thread.id, assistant_id=assistant.id, include=include ) as stream: async for event_type, event_data, _ in stream: @@ -2927,26 +2932,26 @@ async def _do_test_include_file_search_results(self, use_stream, include_content print("Stream completed.") break else: - run = await ai_client.assistants.create_and_process_run( + run = await ai_client.create_and_process_run( thread_id=thread.id, assistant_id=assistant.id, include=include ) assert run.status == RunStatus.COMPLETED assert run is not None - steps = await ai_client.assistants.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) + steps = await ai_client.list_run_steps(thread_id=thread.id, run_id=run.id, include=include) # The 1st (not 0th) step is a tool call. step_id = steps.data[1].id - one_step = await ai_client.assistants.get_run_step( + one_step = await ai_client.get_run_step( thread_id=thread.id, run_id=run.id, step_id=step_id, include=include ) self._assert_file_search_valid(one_step.step_details.tool_calls[0], include_content) self._assert_file_search_valid(steps.data[1].step_details.tool_calls[0], include_content) - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + messages = await ai_client.list_messages(thread_id=thread.id) assert len(messages) - await ai_client.assistants.delete_vector_store(vector_store.id) + await ai_client.delete_vector_store(vector_store.id) # delete assistant and close client - await ai_client.assistants.delete_assistant(assistant.id) + await ai_client.delete_assistant(assistant.id) print("Deleted assistant") await ai_client.close() @@ -2974,7 +2979,7 @@ def _assert_file_search_valid(self, tool_call: Any, include_content: bool) -> No async def test_assistants_with_json_schema(self, **kwargs): """Test 
structured output from the assistant.""" async with self.create_client(**kwargs) as ai_client: - assistant = await ai_client.assistants.create_assistant( + assistant = await ai_client.create_assistant( # Note only gpt-4o-mini-2024-07-18 and # gpt-4o-2024-08-06 and later support structured output. model="gpt-4o-mini", @@ -3002,24 +3007,24 @@ async def test_assistants_with_json_schema(self, **kwargs): ) assert assistant.id - thread = await ai_client.assistants.create_thread() + thread = await ai_client.create_thread() assert thread.id - message = await ai_client.assistants.create_message( + message = await ai_client.create_message( thread_id=thread.id, role="user", content=("The mass of the Mars is 6.4171E23 kg"), ) assert message.id - run = await ai_client.assistants.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) + run = await ai_client.create_and_process_run(thread_id=thread.id, assistant_id=assistant.id) assert run.status == RunStatus.COMPLETED, run.last_error.message - del_assistant = await ai_client.assistants.delete_assistant(assistant.id) + del_assistant = await ai_client.delete_assistant(assistant.id) assert del_assistant.deleted - messages = await ai_client.assistants.list_messages(thread_id=thread.id) + messages = await ai_client.list_messages(thread_id=thread.id) planet_info = [] # The messages are following in the reverse order, @@ -3037,7 +3042,7 @@ async def test_assistants_with_json_schema(self, **kwargs): async def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str: """Return file id if kwargs has file path.""" if "file_path" in kwargs: - file = await ai_client.assistants.upload_file_and_poll( + file = await ai_client.upload_file_and_poll( file_path=kwargs["file_path"], purpose=FilePurpose.ASSISTANTS ) assert file.id, "The file was not uploaded." 
@@ -3047,7 +3052,7 @@ async def _get_file_id_maybe(self, ai_client: AssistantsClient, **kwargs) -> str async def _remove_file_maybe(self, file_id: str, ai_client: AssistantsClient) -> None: """Remove file if we have file ID.""" if file_id: - await ai_client.assistants.delete_file(file_id) + await ai_client.delete_file(file_id) # # ********************************************************************************** # # @@ -3081,7 +3086,7 @@ async def test_negative_create_delete_assistant(self, **kwargs): # attempt to create assistant with bad client exception_caught = False try: - assistant = await client.assistants.create_assistant( + assistant = await client.create_assistant( model="gpt-4o", name="my-assistant", instructions="You are helpful assistant" ) # check for error (will not have a status code since it failed on request -- no response was recieved)

`YBf zQ!5coGNvpyC#MKaPNW;sS+`qB#F^oy z``D$s(W+P2LCHJO<^cqc2?!;j;X!!ZRMB$Vz~4ZrQAp%izOPyfH`og4mCsw<+w5g1 zhYI}+pUjONfdwZYXwQwg*c{Q)-1CjQ9&z{H5{`#%H5+?W?T%?xX!;^$6e%WZnABVB zT}$2k)9>G}gVMI7yz|3CBspHlm5hsz-&YEj!y?~d^ytz1(7$`cUcxF8@Q%Mt*`5KL zU9A?n>}KOA0@}aqZ&Vb&T;SW)#1tBy#1pS}+DKb{qD|R* zdE%$SSrx{YEcNJMJTYe6xEgdDo!!OzPb!DwD@?p-Mx$AWCHi37-3Hp)+UHP05bukM zD>F_M=|VTd=Xq#>?0_bGQdt@RR`rW%?VbxicA3!2Ee9rRc&GbD~T%?SHI{VJ@h}Te+oJk`pK0!145|*`CS5J64}q zB#hm^|LoOxY4MmTK-K^?2@46W)6mdx+gzq!@UgjzcfQMRl}q7UzkKvAIeq^8#l3sS z-%CGZTo~qEg8Ht?Y_-yG-uS*+G1~nym=$x+`PQq`_ZEv=o?$b zjSQI?H6jKD=&g==#CBS5QhafB`fiU6icv?zFHbMN>#a4~Y||h9BP7s8acsK8g>~M+ z=N;>Ww7t|DtT7Ad+&S%fmrI{82wfLv@@LPY4k`RXDY)YMK9AG0IXMeq@=_j|85zoi zKRG`~eh5VlfG&PllUG841#I?T{rhlH$_5Pwj^MR^-8CLl2q&uTL|}`V-%>VMZ8M(r zXR?NE63-bFkTQDh1I?0{U3=Y@Gs;f-`_E<(0+j8^pr%OpZNX?v0*mR#y=`pC@;L5f zk42$ffy-s>IT=NpYI>-8Ys!wsrE(Ev!t0T&7Io$_d$48Y#moavbntLb6Qs< zmIhoQ7HQPH;0~F#MX0j;b04puRu?8cHQ)r+zO9e^A?{VA} z?^@HJ(=f&&QO*R3Vz%y8;!YRVEp-u?xcFWHTll=w{`*y5#tv6pe$OrS6z^+>`_Nyv zHf}A3;n?t_pKc}Vf{}I|V&rzWT`Mn=K4hd#PEM2$kOSCc356DZyb__DnVnq#SZJS{ z3e>UQ81lZ^O@%E65R0_VSFc`W2T@42PP`gonIhgSe1AmAGaZ3paZlSk7fG`vM?^W5E%iz#Z1va40 zGp3tCfbVOxI~@cDbfNLvtV&ZynJsVwS?z-N!u`RZp8Mok=2W#AG{t%M=hE`JiJ7BEKN_(P$)9QIekIOb2ByuF$4>CfY|Y3(oirO;fczk+_(mF z7lvDwd@#KRDu|LE*g;?kiljr!8)I9>gps*HVk<}~Ar|uMqO5yspqV%mrJka0qa3wQ z@M!_5?R*R8UrFW+|FW=}=&~@iQnOAPJBsXad8+oZAG!+^y(`|W=D%+H41ejTicV~}_0fG@v5&kYI43yh_i)bsH{wn!PoeBWzDKJLLd%&# z*ea=*Lg|tpR>LXGPM#c9wpLLwdV3jcZevHS>kQ1!&GpKwiL`!>VHp)4w72@HSDtXqGd@8R6#v?E$ZA7@gaXu|F` zGqv+2COFXZF)EN&M4pteSB%Nwih5&t;(Hg z_bJPj>mn@u=BSmVXTX~39 z9HnPlq@Ixb8Ij1q7!{3-jMzbozPuPgu7V;U#}#LxfH6a~9X!7$nDk3vT7pWnwU?)7 z?eY1!`G}I+n}E2i$U3NytRBF^Xp1cBdF^Cu!EV3C09N{oq7did&D=Tttemr>XGu1y z*jj9|B}g7C52(c44XuXwy?gtR8VUvkuk^}?gJ$>!z|2QBOq|>>!Z0f z@HCBkT}pc^4U%O&y4H9j67`M>#u%y~s+Q(Q%`~_nBSC90e^F<%hg8k+qema%dPQMQ zix!z0!03?&jYb0l+l9NP2oB?Y!*swRQ8@LE$8N}@|MB5EiKk&lczvtATYH^MC>;S$ zFlsp!9`o{wN;6ZhhzE^r*^O-_pPW&=zr}o7%vK9oTr&ytUd&62`;L$axOj0Y&Ob5; 
zKTW2j)N@&{V>FUJ6!4QlzrE`J0V7i(pRo z+x@AQhL?#$gH%;r9T66`3YI7p^nkXR)6Tp`HzOCN$&cA425?*)AlVyi>&tL);cVE0 zfHIP-z+Y{E_$4(k2qW4S_2^y^K{xTazeXn74U)mFtXZR^qycgkbKq)UU-v{kRtInV zh#Ty@K^UfmSlCK2%QA`d>V7{iID=94$~aza`OKA8B=MCRIVDJUb3Z`x>c4SUdm!mjR(QJww zrJQT=`zBmB?SUS1z_B$$wfn94HTAM0kYHI;uYdOhi_#Xx@E$KCXf);2LVK+t*MJk^ z3N$C4nYp>e*kZ0=b{t$dBp)CwN=d4gi8S+P4K2Mcwb-bbue}$QTogNsLnFF)JIFp7 zFdB2UdLpI!?jPIhiW{3Q>6~~2Xb|e`S_)7{Ph(yh(|V@7v4o_oXxTDr-W^C$>-AEK zt5*4owqg$ZdQIK!2TDIRgq1F9uHN{bRyHK5oc0hI$=f7bRZj}QQ2!~ z&z~1?gun*Gao83pE;Qi)NC5h1h~-10GkPKLyQ@!1!!|z zK*V~yvga^{dsePH@o{X}qX5j^boq6gy75V(#i5tW03W0vlhRPz%U!a1Jf&pQS(chk zZx=wuRZzwJ*>PefkC;#c%r8$_g8HG!quKAtNY)hE=WBDm&;dnUdq;V z-^5^MIQAAw0ygXjJ+&FLl-yK$1Jofi4Rcg*HlbmbsyeM&hzoQ_>I01U#X_OJQ%e(( zgAlPIVkBdtfsT$lx-<@g11v2o^UnwlEEP-^u*@%`cn!Ae-jiWii2=C9_}qV=C&=&) zhJJv_!U|bw^97QLhzC6ga@@y50(k#$h@G}51-9AFc_kFxexkk1oAPJrCS!>ShkxPq z#ye`>$om;(sxbhWI8hOaUfd^x#~qIx0h6~HfQ{-Qxx{v#`}$<73mBBETq)CY^)T|4 zPvVjT4vvAW#thmo8ahmkxu0#ZHiV+5yAxabr(+>)gvod!mf}B_*_F z%AP|R?%5futY`uK^%u70!M{b_8f)&H%ctFm*qxx&+xvMR&)0j=+y= zf?8ID9W}Uj?_O43o+cmy&%9~1)Ei>ViF{yh>P6u1xRNGyz&iVA2O#w_bsCg8iyKrZ zVF_(@3d9){kEwiS5W{U#VVEdO#nJI7?PfDYB4v&V2|-0L>DRUBlxtfkR8j zVykIr{2ig%YYUWD&d|TJvo1W8*O?If!1V)8-bLD(-kieWC z3Ha!_bHDGA@ZhWiuxWJ-x>axwG;=TjOPVI;k+K~Ny3Q{>PL!g(-L!>!z8}}=g18|V z_W+dOR>x7CHMGXzehhpri34O_cOAE6UmDxQsWSacSNR$J9ZBw18%x1vb3V#O#%&P_ zzy73l)s0^JiK~GKelpaJ!-~Rtz9J*Q8+!sZL2Ca}HJFO}w3XlL{}YV`zs%kAGppF3 z8NZ?QNIQ=oKl;asJ?xhA=E}Zod8*L5F#?F24sD~q1G467A^;tIrl(9$JwyD@seq8p8% z0Ak?i*2C~x9G|99^B6Y;zldzSIrK~RkngniO<9B##<=yAY~&s`KF)&Vh3527c5qg9 z_9=_f2>Vildv-XwDZ~N)C-TyzOLMarjLPE^J%hn+b50E4h*?=!%-_XnyH?!f(VfH* z5`E(*9t!5DQ1U+JF5}=a($;8tcl4kT3zc!AMLqhX0+Kt~%Ph;%MgI3rNgKFXwsjN*vuN6%U7DnRq z5(Fnx6d_YcK;TYoua=C#pV7P}9M4-uoG4LBU=3~Ep@oY?`45O!Ny)vSw>U=r1c89B z_CuNt!BYX2N6d`#_7|TwlhE$@?#~t!k|-@*nyIp3&Nn5;Xs3x2%8WQVj=U$FhkxYr z+ib*lF-zW>0AU2%n| z;9BpEGbogfX%Tn{2B@BQo3b5sCB?r>bqFrN2QrNGaAVXvIc0Tq3| 
zUHNVe&D}|}9(D`+eD635cu&NWN-xE|_9y+@N3)OKdubBQi`xfuklc+?6$s^azw;@7%~qK85H;=0`Z-kbZw?9O9m6bTLf#gZYg3=+$f1MD2WwH(g7qPiC>HAxq1hlI;?% zb}i)`Ov6d+M0!mmPVKB3PEDHmkfL`WEu=JvI-p(qsvl%zr%*(NpgTp_@9(&dx4j zMK}yIz)cn4EP0x`D{0=md3)FH*NHPcG7EOCRHWS_( zkOw*4lf-D3&UgM;q=lHnroJupf0y!puG6dkPI@S+zK;U`sW&3N8eq3(`&)po#B`$L z41M`>6XYt-oZjWokUvxBqGyn218xUj{0U%k4DOHyzP=?%hN_Or$zA!VfoZ7?fDt?s zi`}ffyu36VRu@uuF%snD9t*0^tpREkyQr%@?^Yg}h_Efj7VG%izZ?V+EsOnoDtsw) z#V44kluZzgs7}2XFgZtfFi3sO!cZ4KjIGzM_wD(XU4`&yFyU-Gqi1)HaOcLy*13`^ zLw$Fww(26VZ+jWqxfno9t2m~t3AqjsPe(wE&&k&-k*YzIk2peQDmt;^)(5vo z#Ql@*mhHmuhJB@Epv3hti3cVv#2l!~vzS;S zn(Pb-ir|6;iN$31BRxt#Vw4Idrjr>;%DyRpieGKS*@VUC!iUm7sV2uqv+^wISimrg z^Qo5mQrJBjmO7ph0lIc}zH#xDoZ^6p$53V;t8v|h|Ic=isn0;YZqe4T`V0bJ+`S#( z+rCnOr4c)GX*xH?ld>t#G5NWqs;cTFv8@N7$sk)S<~rzl&h^X#*eB|du3F??)P7BN}j3%1}G7k7_!ORrpqcGOI!b5)Iujjp0~uj+sL1?@U+K$jX0 z6APfbiF5H! zf-~JpH2PR(XJ)<_8|OOZTchxk^K@~1;V2Tt?~8q|fa#4Gs2kyie0y$73_8$(DWx@c z5`yMnbnf5QcaOYhBSzRjs{dD6?3cS?DQbK>UxP+|ijgQQtIZE%umvfuZ9DsTvwLf? zRysBGsSy3mz6l-G&4+>*xra>5VQ=AheFk1HW`QLx-K_>pvKx>$8Horm37J~2Qb8Y( zaCw+td`QY}#c0oTULDP)sOc1Gg;}qu>M9u`kW14u%lt>Dt-KWSwD`2Yzd>+k$fb~_Ah~U> zMAhcYEI1%8e$Rijndv99i2T#Vg=1WlEMvo4oHr+zn~4>BE6L1VWN)sjt=eDJMd=IQ zEpOBH1s7bDEC!oNjR8Zx@{TviXmv~^w1^3GP*iNLx$Av^-FWq)8O4PXW)Twi2gzvH&4GHi4*b&aw*~-LQ!rv3q&8;Im$~EEZIbj} z*tJdI-i5A8X`8eQ?~27k)e?%cHrK0MT^to6+3SQk^;KxS{mNt1-Ybrezku7vVk=3! 
z=tnka^vkwxmDKangl=}0fp!w|*$4Q+S}yk#iRa8(VwfFj{KkEm1+W7tt~;z;zH`>k z|BIY;1H>@DlJ#V~D z+5C&M{-u4PP8vvEize^nQKK$^dyhmJN3Lfq(?D3;wWoaYNdfrk#wRQFSP+_%Ac&?4 zpF)3y-F>L#{H~9t*S+S)(FD*ZFNwH`G}}* zOnxzdx;gjUU18YYD5r~N(gn4&iGh??1qy4+Hx8cYvgh$zJy4OL&{wUQi<1ha$I!Q$ zy9Zt(kII1Z20<3QYTNQB3VTS~GxtvUAM_K27yM)ATe;9F?pp}L(051S$lZC{aBDc(p-^2@;>{#a~maiLI0)}7t# z9^UpC0ITMca-^H4#y-f-$%(dhKbi-9AFprx6ydz}1-|Hg!2895XFPa)@JQEJ?;J=7 znKdbf>+GIx0`F&~IoN8PuU!(lKV9bP>-+{FXH@wFt`IQsYfWk!;$kPtz!*g=Y4nHNEetW*)*}y z7?pq&X6a0_aROR_q{#p*0M;Yu59=+2k$+`mRa)6x_|ZEJRQmv*VC=u@+jw`UrDdrG zVCCra=|Q=y5#u*5k=Y|8t4VGn;@Hd z6xtdqRvg5@HYrapuR4h7)S*S9BDEu}tF4O$BRxYn#MfRMRm6}5BPA~c1uY{^3P>P- z_?On5pjA^q#J#%Xy%k8_tp=*UpEo03VvywsJ{g?6A|A0h^eC00kCnpIqB`IC($dn8 zD*&LE$KraGtQT>O=>dnkcPt|&91&(>lUI)D+;b5`2hSR3?z`=f2SKwI|M;Am1ov0cLAbQ=c zmHynW)PmS563mjivi&XOnkt|-n6*G5J{Q^YAr*nfpV9OzmI^{!$v_1cQp4fuNkHc? zu2GbQC+iHTuX8%-7=Nb?IRTqVgP1T_Mclm}a9J*x$%;WD}`1j8BTm4)EKCqX+kx9B^pT$8h(nitA&5HU71 zD4c@#Azj5{3WBOF91>X4f!koBT6SF2fIQ@^xH#jw(_umbxQS-OcqI&P(j@QpqAn*C z0V>#?WTJGqR=um4aaFGm!*gLrpE-Sh)84>J(V*%HvsRDww=HVg&Bgx$Mc2;xRO=eJ!kA-Jk+-k7F-mb* z+i}iH*dL06c~tV@vtJw@V30if@)$%l!@FD2_Ndd~g^TPdir zW=JowL_U*LShR?VM(Jwz?>cFut&{4QKoi91GNP!f!;oRLj_SI)QMhQPm`}iwD~7*( zq1n~g)v<`ALRr^A8F5?sgQ~xvGn~P55}X3EZ&C}>t5&-L$k{q9E%gDujHuykHf@-6b)oX=1!?&j*d6xGcjb zIH*OlB^9+aZOMR%u0Rljlu%$-s~~$;4 zPwRQb&=T)pDyC@s9rlfML~z(tfpw;&5J z3SfTasJzATV_XA^AQff@ozbtW+%ZkwK08Y|ZE=C3SlFl6ed6vrrCu2m)pe|P;oKr; zJjRF~QugbWTqA!LTNw?kgdP~SXNsgCxs6(AFCPG7`Tn|P2*Jj~4wU0!Z^BVNLmQs7 z3li&45D^6m>&xrQ>3l=(32poi`}Z<%&KBdiefedq$`6Me+_i%9o8DH{?=w0M_2g`DuX|{`7v&hG z6KI$m#7Ra0)j&N*nDJ%D8TTIfWAzw+5~{#e;;-iSTsjOZ9mp2!CyrsP`l(##XHQ9X z^+P4AJ^#YkM!ff06Oj4UR3&lf^9nI>{8f6Tk>~&Uw3B_7drEA{?AVev&rd zCNsO|A7F^>56n?Ps#&;bHk*la8h54mnm3Ah;wK%b`YMdGa|jJdMmM` zHrHGa!qUkOLWz~4JBYz^_4x6A95?wCgo}iH^LGF2eMA37OQN{(%WfCHa!oizrlSXx zw61=Qb^yv33V~BK>LZ`g?!-qwF|!hKqVGzFpFI9V4u4%j5g)Evl#+x7J;2Vp)&|f9 zr-g|1`dv3hJk-Clwe-VAXZ^|9aWrt3Br9LUjksJfv>Ot8h`BiA)jw@>ZO2_D|AmPU 
zgpmY|4fMT8?!cWOEeFBMsn%LvG~5a8VmJc-NS}>S^=0F$@N0qjPe$E``S$4j`v0cz z1N~@<_OytjHe}Xe)j&K(pK^H)YkqW}w*p8_9 zTQRw~7CiPacWn@}XlR2GDZ(ODWH<00fI%d+(A0MbrlGo6DhDGBd)+O?RX7%&BjyKO z5-JMWFHLt+D)tT;Giu$t+Vz-0K%|W~EH-qsVV5NPC_I8TVOhQIZ2nz3QNdt(l2Ifk zt01UuJ_>c&rkj0nT+v|%bZJuLvfeMPg_j2N0*^9kKH7!3;N` z`}a87CDtFFXgbi*yyK#!Jyi`BUbqsT;Dq}qwjsL+W@a|FE!02B$j|tq{toqeTbTWS3Ws+C0^Apy04j0>YC8(jQozk71uG7S}#+x|j~rQejAKY8>? zT!>~L;zCZho3~b~HmKqQq;JZVm!CX&0>JHrkv8V+8s8P>3fS)U9|~UR3P7y2N_fOJ zATmwwMk`4y2B?JFh%hKOWMp2%XoP-g9;h@|A*w2{eM;Yo`{Q@=b!ca4X9a`D@ftU2LO4LN8xPH7F+L(LF7EWX za}&PMO7A}@ky>P2%mFzhrv<0mbSc+`tkF<84&zG$FP{T&89840z){t8bcB%_5?d0f zFgsNZ`8u*u?-U-PBlpAMBy}g9!owa2>jhpSy&_{Ay~28}oV{y2;aU%_;vD zF|%Z`hioBjbv!(@fmcAfaXeR8kHL^4u-Fh$Qe+_wGE)Jp9at<$`JmvdVbZoNd?$ga zDMDnx+~=k{mS*V@7m3X_<@Lp5?;mgW2#2C#n71`d`QCFj?uF|h(&;>m`d|*70X==f zfcJ0$uR!zdG@se?d6M4@$m=u{=v8QOBe|(zTLLen{|L_9eTphxMg(=Gz>f=m#)%cN z+p!kiiUlEn5Ycn24Q_QeL*2fwdKV10$Y*+0u^W1f9%)jyc6#mxVh-m^K5k7+Mn;Ou zE4s?C?}y@{RbZourqC>C-t?h*Ae9JUHspnpA-WdAX=F;7D5v&(!>&GPdDQ(0`=Sjh z(fr9p0MWQ~VW6lX>(=oH*&d zQX&-ODil^VNN!XwMfRL?Mw&9W2eE^IyM;$ZtpS>>F6RP`B(eG|6^uLHl6U@DKs)6z zK&iEwP4n~swB??Z%9C!CLCLj*O$mmnO}B@LyYGT`-WHIWg0M?m5grUv3%OkoGPc5V zu$t{I`$3DqoTF}T(F-{FXK*o1a%wH7WGOrZ9hd6bdW2gXvK5_ps7LIFt936Sl@L^* zDWD2$khD;HaFQkWf+uGwM*%{xHSyrKHUz3+!UC){@EoJ0(m)a6%M6EHu6TMZ>}$Ac z>u+^nXHUJqLWso)$=dNMp<;{dT>*YFj)9GruZ6Ah{G6S7E@@JyvIYc`#xLIZdvY z#&P!pY6n{06^z&5xSy-DXyZZyiYt$_f(X?G+U1a*SxJkFCp>RPw>sEztD#}ol`B`C zl8b9#QmTF|j@f$Oc;u(vnz0tt7lhD(&!rfv?i{RwEtrSnPJ!BO#|}uJf7h@OAPX*+ zSQ&^0!jw#W@j#eCDwXx%K_NC9O611wz(Je9WTg&UJ(dib6#&sw>v@T%@1HaRk$b?g z$R+zx-Fn6H;E=JSG0ww9_=aoGl97YmOTT?Xi08c-n^->(~E756ovb7b??-g z4RO$2H$tMKiMEySJfsN7sD;X!_Upq%QYNY`BgAI$MqmtH)^5T*#atlci13Y!aX^Rq zI>UF^cBGd>C>ES+(DVJfj$ZP?kRzvYY0oxz5Bptx@%n8=h&T}2aMM-U@3}gq%25>F z{9K8pGyH7W;0>ZOGFCxMyA-f+0phc1SUpSF<#qw7Ajv{Ss+a{O(2GTW^_w5esvPcD znV;wD9X|!jLZh*3INa>(8{)gY<`K1iQLv07aqXm9=dxJ1Jxw=*f`@gK8UE#2jJUYd zG$s?k^(rzxkHKI9{8>Wc83eHq_-EDHqUEwJ?)m3G<2y3PRxCDLsMA+V 
z)jleGiK>q3|3loHz*D)d@8gR!sWfUpX;2|4MTUf(q=jV6R-{OSp%ltYwVM=b8Ip($ znJOD4WynsYWGhoD63JXqkuiSPy=WiWo%Z>j-}m?b`@QH=-Sga+{d3#N}Sq`PR3@8w2lG zlhA(sY++`h<7l+L@5nXzy`Znov*nDrcK!N!Pfxx835HiRNZXi%s#HXnICiWzVg|!2 z{<>ciCKq~xxdZ25<`HEP3G+NAL6D1NB@g8O+Or87wDVBf}OBL)y?zKP*I({18)2XbN65+IF<92NQJ@UT z`iLi|<{jkP$8~yuAU|Q&-$J|G^@Z{KBajOoZ;Ow;T@$eRu-68f=|v!o_mXWLtv}8+ zT(@SQ)T)}ePdhdLd@+HzaSN?Vz6{B4Y*r%`k9>I{T|I-+uR7LA$7#GSu72Vqqqyu>wR5? zW~RZKHSaU5NQGDYPS1WOd#(iMYm@iN56Ic|Z1PZQM2?*0Fgw*9+b#y86ksJ1&VuIb zGqCQj!zh~GhcNJXfzK%C8=>bqdc`5k{IA;?yl-uG?-7O;IDkJ=Quz^mBfz*IcFJ6k z4GM7UuCR$jqM;Q@V(=`EeJO$GnL-*+M7e;}3Z+I7&txEa(D}7jG?YYcu#L_Uvt~8Q zYO5R7G@#@tf3dO`qCJu}m06W;Ai@>L8!W*+(M0e1C6ZS}mM!m%0My?Bzvh@zlM9&9 zFvIk#%*1dv8bNX7ykT@p2dccbEosOd*3;9NJFn@3PdB2g9gWJslbH>!(eN^k%~H&* zql_u6e3FMgEx0-Q&&st1_3N5{_c{K#UxxDN7q*y|q;{3a;Y3VvP=$#GTJj*ou8>I&-6v*f%fdz(%?3~k!)fbY2nX;UoA%+hI;csvk-AIb zF>VSKyMZ}NG-M4h??LLneLYY5BrUEI=e5ZyAda{tCFmShW?Gja9b-MI(*>o4+T-9n zAv03KiGyiHe`wc`WKIZp5A3`teQm-etVwnQ0MbCkU|(Yr>oQOe)Poz)elOmQ&fjy} z+PbD=zX4#OBP&Gk=#;SUj!%2aw_o&UT$f%litjvzNk80HJ4(n5! 
ze&f+2kte1)lMK1a*Rmy9CAr$YynLJ=JbiO6Ex`R) zU>U6UW@46u$ie`aN>1npK%b^r;5_D_h-OZ^8El5Chrv<}V3y5tsN_(svjR(mL}p(( zRC{^nkNGVPq)b-Fsl8d&kR6x=A={q~-AtSqH<=~_*s{|kZF8K^9Kj_Rse*gv@kK)h zK_>8^<3Vxhagx|qi16yu*nD?KYN`yA#N!374XNXlWB`QgVb)&gQT0cGf{y{9dRdhn zxH4ptn^8p=3mB-ZNEVAVPtFa;6RWrTH<7{Njv*sJ!(u3tsr1Jdo%JO##hRq;(H_H8 zlm;8w!L}v&SH0f4NsJvo1E+BSQ@M=+_q70siV*^ev8+Z2u7tV<+%I%7hmBKWVmz0} zgPPAXU&OEDkZD?HGG=TRk|egU+7A)O7nU8={b$x}I7v&I&m|aR%&+@;4^mVn$bm0H ze#k|wH^K*@(ilZ-Zi+P*@bEF+&^N>m*J zaJP&{O!z{Ojlar2Bm>(Ms(d4fe?bWoj-zX+Z1q6RXTLjLdX7BcL->fNp)(!l;+7|o zQxA&RIP+VK%G4i<*8T|dUw0wuwp~LwX)U9%o`J%PP00dtfTh>#xr#~rm&4Ua{h;U) zS>Fd)^kU>WS^-1Sqp;DVaU(@ZMwtYg30Z)K#>15No7~B;XYvA#r=*T(<}JIR_`Am4 z>ebK_;KouSp>ca~ON23m2uTTqMLN%cn!wapHDrIT2)V|igYaSP$srFUXu3gA)Zv9b zr)hMuw8=hs$Z}?E%POHBoAdyu1AyW`VEf2;|4|=vXa{PcL8ur=yTYMQqJd3`fc6TC zAH_5j@Z=JX@qZe`-xDx|$rECi}F5hn#qxMy`1->ryCqWCsg zP6uMthvY|3f1~CD&Hfr6d^Nu;V0!RK zhWEz*wCj8tphE)cK^BRM%`EuHo1>5EdesQ*c!;;9Qh33G*3*HkSuYt z_hSn@2fqPBU_H?bXow)`3MDcWnJQq#K#X{)S!QJ^Lc0MD2g&R+&8YibOr2kPkKN_1 zLtbIM8WoUzD-=uGUz|NXq!ZwA`_}^}cQ+1A18}BwOXsnYp z{c7Fc2q7wyhggU+q*YSc+U0M5BJljf)yZx)j^huQ#{}jrb8rmDT_a6R$EiS*%NKJo z9J$8+GL>WbkLa*}lX$Mo^^JlZ06?n+;F}DHk)Vm8i<}*t>dLjuAvY)}6+44w7)XnZ z#v-xPH|1a%tp_$)2rVqK7}oM1Qgw!%v~1lGi+qqOoTnHuT*!o~Om2ICxqhb=A}gvw z-wMaNnKao(b{GJTpFa>Ea;KCgPb&!Ybq8`U*ZnS;I5YZ|tT?UGIx1QqFBSzd(eDzE zxjfE@{6tIaM>j+)EoD!X3EA-6t~$mKo5DXTiy|Ph8H;>99-RgNYuY$!N>JaDAThXP zIvY0z(2yaCk)oy_Hw((8%ZYs^hXHH;2))#-rnX$nLRNo#SoissF%MW7WxR}h%i#X` z;EO}Bk684-k!ILI`i~gc*ymKVLKw;JKCeLxz`?dzCLl`^N z?Bi*5}FH8s;!dX&s z*q?6LXwuhpPAC>DmCEy(=Rb#^5+{`dAtpHM2LJ*M{;InFafb+``X48~ZF7R^N6kZG z1;b$m&j%Qr{SW?mEUuQ#eU13MKuBmE3sA+(kRb|EN)9LNobG~HK^-dGWP7^`a88&k zvkmc_4C(DVbBN5!1oZw5`ePf-{D7`e<&dlgm>NCz7!pUWyBe5)7eaNyxYT-o8k2p% zQLXfshSCp0$k-f{?1ShxA~(jFsR1L2v$A8jxnb5onmH>!$fyog+nTUNU~hmN(DM0A z*shdfPVU{qVDc9P;cy94uKM(Hf%B6YOTx^D`pPa<{0yv`MLK0#{$7O(*v z4qXTbllG=C1YofOAQMdDOK29g5>N%yh30022!*0G+X@6E4r#^T-s35VJS;oZn8a5u 
z4ejgMNi#sAd)*+6PTq!)I1J|ViQjdGba|f884mC$Gf{0whB;vb9|-B9d+jK1BWU$BrE>r6hVy0_g)?T-PI=yB9Z(fbs_M^y9mXGt@8}li{GxFOK!HP-t>>u1}nN}QZ3ul)3Yh372?b|nbU7$ zwxDv;qR)dm%&L+jgZs5Q`aM@F1wQZ>6eB618?EF3mHOk)Z&(_Hc zSn9lvgds%Q-=_YlbEE#ptb$nuM)W~esKM8F7l!}&Buv+|bmN3*0QOXhO)t!JqJ}?Q zQK1b1pD;**#!0Gu?R?_?jIwe(l#zNa>r66v=`Pxl#G5G z?v7dFCj?G8dB5d5!b+f^@(a09;b-~~+Rrt))1EbKJXbq4c?l1at0Q$JENdp4vw+Fc zt7=G%zSc_LDL{UU361$on)$&N(b_pwXLIzf0J$}I9WdYOpSxhIVX*ovdsd%H?W<^SM>q0iaSRbI$%;hV1Nt{<5B4Ey(+tny zZUo0PU@Mja#AmU1`|#`^Y)OId8y1r4Qc0MW7=BaLi*NqpB#W6Bz{Ddkn5`nRp)%#k&ZR?sK3gK}2O5M5Cd^`met7{PycaL{4s7KoLcK$%Lq{FQDb zl%F$}bcHhY_Yz!{27?UM`LRdeyZbW zRsX}iC1D%qzJ!a5%nAG`fro`01*6IUk@4QkCA52-DAApx6Crm<@NLr?R;K0F4xz8 zQj8YgQXci_O6=iS#f2n$f8%Py)9we~gaIV4>_(kOecc<*4+f3(ql?0d1g!#Z`xhC( zU30yr=ALCF9^j^VC3fF4y?v)A&lR{Zx zw}wC9PZ>V>wqjjECWU~i>jRXy)*eW{i&PE^HTdaj7GRuOrNA;FrTSR` z-w*RUj5C>s0mAWnnsHqIl0yxhXR;PA_tvN5agW(!kgk}N;yC_l)w;SN(y$tr3?w&n+Y*vbkYa*&7d~(H}CxpGS7sq8&NHV}q3>5wGH8-I%KK5Czl;rhi zK|?F3Z!efOcW!RqWN7e#tyk)t2WsJwGNe6M_G#2SAcgpu&w|M`WdHt82^VeA;w1k| z%O8e4(0&?A6GG8e5Mm;tpDk^=`Z2X;FwvYLT&(OKl0{ku%i)l=Vu87;GfhTSWEUBUngVL;nwK16KO<9ljNktuAaKm{; zL8X0*H}8Px*VyQ6hS3sem(G|mL-RPiw!jSBk4{(}W+h@sA&|`QAo#U^V#_!zc*BUb zUacU{zUc0OaqUy|<~Leh017}e-DFnXegmmMlBKqPDu9(utC{ z6`84Hs1FFj=z zn1${yt7seJ8i|ib3gMG-bb`ld#FdvN3r#}{_9tCBvj2N7^PV>_G$gH_FGUJ#ngBNX z>uv|M8el%L;p9G~(wXpxP)qMcu1SrBO$bS_tRc~=CQ#=~K&^-ay&NT+8XW&B-sgDn zN5j1)WZ8pRtDNd-fsxA$vyjBh&_!QoeA`ZY-MSN0qu_2k-fZsg;vUldkc_QB?0=HU zhvDpQG&gBr7J84D5Lp`mI+S7`ngbvfnLBqi*wia)6oxDfU5Ju-f{3{uP_)SBuSh9q zu6zC#MaLb{b7>=kS$dWHvO?W^Zwc)Nfgc6C<;nLXSaGMPW=W0b=e_RT7r+g@1Y-h9 zli@6M?}|PPOhE>5X}Nh6zUEMuvI&iso3oti=b$qp0!J>OQ{suQ=vGiYQe3-qaDtY# zMpC_#39Xm52mE_?VZEUkF9G$%kHred3XBKo3xAy>n+T*)=bS?W%o|A2fmFG`F{Aqc zkNS;!_FTK0olOlqNH@gqd~%W36HTQ|8oY<}l@STZ1NSThB8q{~d9-(-6T}vN!>b## z_6_OXWJyS>`0eJj+zmia${J=@DiAW~Adi79Ezm}bs^A?cJCmc*S&uAOvkuDa zT@zLn%w~8n4s!388+`j1jFyoY+sg6@5104!{3J7TM5Pmo-&x~tjSBQ|f!e1yPTOE8 
z)RFWezIUYmdzqMxWprpqYl<`$i`;Km$|zL_NUX;FAN}J;K4AbW1DVKQNTpO9?$rI?+ek)FpBXyf-P~Kne8}_b6nkN_M4|pe%vx-@!ODVY?hBxyC(Gi znFvy;TR@+mXUSJ_SXXfofv9o*z>+^`ca04INPa|hi7#(#gC zk`7p>7R`Z~)GSSLOw|2 zVJgR&)gD8`X&%is6(^LkW#_R!-iLe3T!ws3_Qz_8sLGb!)zLDJ)=&k}wYJf=`lqcn z_=WzP)$pB*tIhDAMUF!&ixWZ@Gbs$oOgb2L_mbF)7e{YR=Tj5s)SSm&Lc-(o`4S#r^U4|*_E z55h>ic^Zt+iA$l9C4D#q9Mtm24WU9*nH-blA5{sR7(Aq@Q){heGjJ;={JWK$_kDy7 zWP7^`AH|t*XysVJEgj7l_R8@e-~2oUmoe~FDfXjJ{_Usqx6SZ?s$t)sUB!?!W&c42 zM}Olp@qA(*Bpqz`aJvaMp+Gntt*!3Or>!QPCPRiXkO$-h{ynH|1@Q-;47 z_pos;`|bYSwu%v^BaC*{MO5-R%Ho_0=%%s%;4Jw52C@%YoLwp*a#8F<59yT}bIvr5 z$+Yv?e{j3Mj{^S-%V@$oJIH=47(IHM=5e^-{`zYm;&aCm${OiUBks_586?{~TW(b9 z2C(EIEQ5TJtO4z0V4kh1%IKKI%aipYY{lFO+WZ{B<1BE9*$*7`rZa|)^IpqZ=Hj*# z*^JlBl_{&uC?z5z2%|FS9QX{RO%&WFO$x{O)dnI1hW&D+7#JCMPP%LN0a8;UrzI+w zkd)FR6BSh>X=r4T+EuQcXK+5Ugj_$bA9H*jNBLrr*rn4pU&peI#z*_N_cTvT|L9EF z{cDH_OZrZf#v&C5g))G2OfbZi0nbz#=mwE)@Kr-r~ z20AkOLR9*T8XkIN@yEXlgD-5i~DgS0&j##eVX^3`-U5UOy$bmFeGa;Y8qPjr8 z-IcQQ_8X_fZ?B`y0H7mMSs~pvB(swjY7zk&f^4=gL%T7?yBia&6?IirRg`A0UcH*= z1vn4YS=45L*(*v7XKtQa#s1S`U@Hash)+{&0xJ|0pBKp$Pr$Lx@Y1<-=~9rqV-j$! 
zO4q)QkJapHVRYcsSu`97W@H=?XVhS*b;OqWCGb;h_nn4rb*u^x@odzhdB_ zpxXhhDR6UFCt{L}Miu&tsyld4z(H45n+-{xdXDah6@|3xM!-pOLyveA-KKIyy@*Zwu zVOe_2^8HI2cI7EAZ&X!<(8T$ZO-N;&mIMwcqRzHEf*GQ@vKEr}#XgF?!c1Biy&$am zKd&WrZ;QgIE8y;9c#?bcXxaBNS?}MxnHx;W&52Qz+)yq}f9!vhSNny|RB&MzQD~t` z(|?lzEs7IBpHXOCboeE&VUol)H#fHsZ+?p%$UYap?A!vKf{Gk~&P(hskYCAdS3}L7 zTL5IBdR>fWtINxMgYO%a8lRHsu9N*pY%|%Bkevt#5$q_^!^^irQ`2h-AD`39P{%hc zCu}nk;pJOlRqy&9EjJ9>o@sXk+^?DT1UNw)^v3Atk#~123X1Y5I(72oIU#^5531HN z4xaK^l6QZFH4d)b>|IBv!rxB>2!D)tf$jc?YHZ zo|CS@&&BNDC@e~PjeqNWAqsIKSE_b9WU^4oDb%ch|FLX@>#zlJCW>VZ6BsczhMO7v zCjp%ZKRgXS`69ZIJuQ)?LB*a%?m!i5FF_g25_`HLswK0tmLEGx4P)%yfV@55*MaI& zkyCSCKkoW_&GO?ypafmq-~H)b`&(B(15A2g(#<8;Iuk?DlM;ybud~e;BjGx+iM;kj zPV%!5H~gM25ECO0=QA)so$vp$g?HAh5610MnA9nLQv|maR5wf(jC!;hB@jN=A`ax{ z=YKiS9dtk=`*`2p3&>xEB_!V6NjT@G@wxT}tWRUu$yBFPlJsja`O8Qu09HIQLn_x+ zv6!E?1{K?UACWn8G(e_t(8zFJ2-?A0iL1u^Fb2G9=6UkulD0QVB00q=?|+%8V3gf0 zKfxRLv+bLMTseBrBzUE|r`{JFm1DQe`;l~xUWr??{{7cAXhn(Hm&LhgEO6IUiOp^v z<=ybsOmxz^oGB+leT#DJ-Z+}+cV-ww#`TCVi94mOebGbq^4!aAnmyi?fw^**eV_0T zs|wV7AhAzFzbX(x)eahOle*(_>Gvn+y!HO}T9b38bl}ytFF3ScLxr=!_D!H@U7+WK z4y6BXHkiVt(VaEbzCtQ%*NQG%MoxEyh?P{mGYcU%)&|VC9yPKdqiU=dbBHNa&hxcnNm-@Eu4fxO!9IiPXN=hbo4T5 zid%$9Jbu`_o+|nv4ZH$q%_jwu>$}dWjfJ- zw&}eSMcyKl`g2GuXPP#RE)Ov8=*dPRlr_`y!|BDhAM^U3;y#`<#r}48(y*C_j=!uI zEMD6ez9L+#c&*>a8;PH^r+qzj`-JYL@6yCT!`#^jrDRNwjV;V1K{U9%pOPAbef79I zH9c82B>mON?lX}z!TUT@{!g8SAK2?xOKZZPT7z#!%b1|3u|?<&>nCh62AIZ*M3k9g zi9L2Fv;8Ac%x^rwl}&SYd!wM{>;}%Ospa@{^QELpx>M!LhMfrz+g(Kkdvj18BUqIx zboyib_iFvAn0X)pH^8fCqEiSU5U;c1vSo$c9SA!$LBlODY+I4DZQC}B=f7`DZ0DD< zh;(XeY=FQuTRCyc=9C0U1Zs*#?VSxNBek$a@(>12EHKFu`bqp*d!~pZEE2euqhM(I z_AOAnJbaj2aRnw7&{r(_uoswEMGp7@;`SFNUkGu-isOEt3~5A+=hp1b8GNr8n&F1I zOF`h4+yC}NyQ71HLt?wKvNEAd&7T2}bzbi_y0t&5eIG_Z)L+ptB46!#0J1N0=FEwn zIZO$XZ%2-ee_cr>G4E@_ zfty5+^$5cAaOTVeWH3y$W zVk_!S4hCLQlV47A=?{;MrAv$~make>T(z$J5QWIa&inu>!mSV=!&Y=nyD`!H(u#2T z7%zXH+w~^JD#6@;2QmeIN%nJ0t@lz|b99)^iWsj^Yl_!0b{=_oLP^<}-4M$PIULNc 
zx81&4HHX|~1Y>Q!&*0{WLgZzMQ$OPP1c4GU3q;_I|&Fk7F#AH>tEYEwP#5kv}CYpEu_urT9 z9L9l@U@RN6{lRIlbf}QQ{{TQ|2BqGk>Lx2$nSbV-?eYvtmfy`)xTo%b}py3exAxnmo;1C;yOnW;>Z;gw`tlV6|V5OARpQV zaSI5X9Q0MmEtWl!;U?+QF@gZp6kC}|S+7u^QlA;1JhyG4@yh)ZZ6?v8>pW4kEkL?& zHd1WE3LDX2@K1ngz25qx;5g_lesHh2Ru0I~nJ0=x&$BsRl|7~pXxYwcn=xevcvRo;=qKJidy1BV0sD5ePWx1F zC(cxXHTZiY_ZQrGh*Pk$-b7zQ@R15#R=jBK+@6Ii)0PS;4ynKd7lDyDN`DO}5NG(B-I9ZEM=;+iqr{jB@iJ z1-OCxPVY3R#aMd+lGiTsoug#DjJRBm42i8-Gm?LF1NEl? z;&UhH-#7AmsLC}9m!PVjr@DXQp%~m$1_0!VCt|z~o%&JBs#0|j{lVv74Y5Ier>)+9 zwF4@h5QwGwVe6uWOq`{%)T{j$qCU8%W(zz)MC7CkSMXYE+`I2rDq+w|i#}H8v_b!* z0emQZ7ngJ%=oTSwj770Slp6AT4f|zSboLNRP_EYw5x?Kfa(GCD9C{AoUz~t+KvM-& z_Gr>rCxJl0-jM3G@31MS@zrcipv^dx)+_w(#a-wsRiJTA{`y|~80B))G4gk2+MGXZeF!Z$k zAbX3ePoH2q;a~0KdEG!dweo?i}SYxDlqK^oMG3CzZrR`nb~HcbB- z=L0JeaFyu7g{nGLdD@_QDb{=e@`TI%q@b8sz^(KGnF+Q2so63M&C3B!z~BazKz}=B z@N43QoCV48hnt5-;e8;i3G%ufaAW-^4H*%tTqLOuY8}SzT+6|9-0!u!F!E%(#L(e@ zrLP*I#~)qC?-*ZvBM=E?i+gq=eb&xkF?faXK@6P>w&A zs=fDKQ@~_mY;0^}`K`|)H#?z_rJfEghn38u>o=jYiF)wt@_K$I9vN7ylw^Pj#!rIKkdK z)&N2--MjaDU*?uB2>_&P5xjv@ZkCXU5`-Go)LFVnuhHLlD-jia96}>NW@o+6p6h2<5t<_0`R>+pjR7m3{Vx6k9X9Q* znwkQ-XO-j@_$o#h7UnnD-kt2!m zxIkl6uo)|_H#jb6vhBw;hk9i@y92!iM6PVha2lS^zSy`pt1@G{o7WFme{>o9n^_!p zsILD0zONs?2KP8>T)K2rQCLXG#yd97Ra5_ulkV}i+ic_BnTd9-lv=O=-^TUn>g&C9 zGz)Xb4K8|A=XJJLStx?ku zb8*|Bz4ol*=g3w#dValo=cd}{+6zbfpiA4vZ(|P*ps#o-`?PZ>ES# zdD_h>GYt1%xM3Lu=r>0~;wY}xQ&n zh}pIwPt-J_He!NnHnk)8q2X zXxC7W)T#Z0>srO{GM*0p$1RtB!|+K1j`OF~p(BA3L}fV)S0E9I2S#Q>HXStkgSNQL zOi`_xd(^8%+pyY3$Rq9SUsRwW3GuhJghMkbPCMclz)kRr-`fc*DZy&-z(3!fiEqIM z82nW^{(p||3>5BAz958s4UhsH=4>|Hpv?LkAC+!ROGJNh|E>NbHrC+W{@dE7B4E&0 zKN(LE3-<_mQfzekpYO|{-)HeU3K0L5O z5B{yR`e(l3-#*YkRuKP8Kl?@XxV)PemYJ(1*1?K+ zpYS9GNB|W%0p=7_dDad~HL$X>N~~L0yMTd~T0c+m9RlL>&u9tDO&RTh0OKsUIUouF zS9bci$}K3w1dMcti#|xQM%g}!VS`>s5ER#&KVyJvCH}=8l}_j?i9H4>^*BJNZ#1GA znmsH1YsY384nS(`7eQgf$ePf4jgz4zaD#|D3zDoa+|lP4cIG&{%_I9zCCueloR?#> z1jRjOAaD|j*?WpecLT(`KY*3Y0L{AT+Yb?=IQ27F0Q(NQQGoZ-lcVTb&tOD 
z(tEcgQoX>V!pUHFlw?ycbL)*Anqf1H<~#%(IQT0|@NZn)e|+Yve=p)-DCtk+=WjX} zIvJtyT<9=8bF>GpwuWvq48X3{Yfu~+lZ!FYhI66RUcA#I5GESY{}>p;@TA@2-@6=U zJ`}O>PZ*2a%R#+MKUQ(rD6Zazs!aH0AB^f7%YLzM z^snWT=!X@$k8qFXtkJ#r^4FK8dQo1ojw>H0K~h5OF`j-j0nvX*>1T~4o*b`ML2qIB zUp~BwhuBC18e6PLpg|Zqj>Z3{Bo(@zm*EvY(3D{jhYJ)a z`q%z^iW7u$SNeaZG~Ftkocfsck^ct=vjabPpJ$I9ixQrs1o+bB2+tzZXKN)3Q6vL zZtQ{{gc-0`+MX(p-woCf4eg(__QA)cS0VWXpa-x>7?}ai8I5AjJRvYj9@7Iw#Qf=K z+17^a?1}v6(=_o^`-O%yFft#D86UVtXf$WEUzCGDan6ZDS&){GC8r`x#Oh&`qK&NV zi0)HcjZ=abLywpw-E3%RNFpcH^P(2`p@)aElnIPSz8HuR@SCy`kYJ$sm1J2n2)C2P z7@}qCy~!KuFFJqNC+9MIE9`ny-MhFqz18)8l- zZsXM;!MP(i4bWhLa{4@QOrhq_CU9+vyO&~3eLC>GOnM`U zK!cl5gnu|Dw%H-zEbKOY~A}W_5$(P!GL@Xu4l$KF4E_ z%YVAagbuV?D8h88qQA~in*A$2GMQW8^g&ftA#8v-W2s$Fm5A;GUd?qhok=3lUjV-f zA@-|yBQnDv$)a1C9xU$Y*bte0;mC>?AHzw&ibijk6FEOnyA}2xUNHkr=A18^H3+qSjD#x3<&X`)MhF#4PA{FlRy(Sp5*2Ek%bPu z@m0@H80&01{h%5BrVeg25J-@Md!$686#w2 zACt8ld_xU(y#us(1I>Nh*_M_THO~n)#}Q9Sr2ww!bX6MXR( z53H>|^?>*N$gJ}aNj;A@1Wf?&mewVH5GS+QEfs&OXRzk4ph)B&et(R_uh#Uv2HJgUwE zd6k6KX80oX5^hBZPzGxqJmLqq%`-?{1Wywsdp!Z3PoG1$Q3XWIbLNK7U{RT`GAdOS z*ynuS3JoWWr8)0xRt&j*U6KG`0B!fb46Do=y0Cy8r~8;VR1IH`m;#}QXaQfuJOdL~ zeQ;#O>=haLc@Tb}{m+EJ>6M=(OCudoV)1CQwABq1irW|DQvw}aR)`7)HOuA`L4ve1MM(|NG1u#AwukP!|j z&abN?7@XN8wt%DA@v}CIIAZkcm{Y58(a_XKS5&-wj^IH`x%cQIG~fnLABa{`kYGf{{2>&4m8jbgfL_Sza}8 zI?2;}-yhEvv^TSJEp*q4KwL{t@ZTu7p+}AwXXJWY0?WTU_eOEaVPWxHi{q4BPSw0gv5Bc-*Z`GBb{M?mZ&eU6vuj>+&AuKzQK z21G$B(?)1P(zFOYAu3;SGgZuyR#Dav_1SFmVO8iVHM)%kObmj@=L9{icdo8tEh^DQF#!nL>@Jp z3##P7H4Wn$dmdxLC@74~Co|)X*b0l}-N5x@o5zaC$-O*xiR0RGtSY7sJSNF4Z=*vG z@yJLA2Sp_0Wiolj+jy~04>SbqpsK6q(1Y_AI`m>>*Ac%&d6?1h{$h<0mWugZrfB-C zO?nF#nF3!z><{R#Z{9YwU>tKLExE~r3r!e>K34=}C>x=(BNy?JWIO{U%vzC2chbR& zx-=RvU;hcT7d%0_xo}o+5tvaaPGcF^Hf>lM!37wzyHP{qw9mX@swEqoleCtCROQ(hnlG^zU-2 zlSXVa*JChxkKZO8p~)W)iZU4YQBq)BNvV^~;K>^8sVOA(c)!df2HM8`2{%K_$jvQN zNJJ*kkzh|P49#+QaDe77%8WxG%|6Rmr>^cxX+s&2$R9(JTj=O-zk%|=7zp{wnA#PJ zMM&pS<=l!hiiK39XdvD66GTC~)q|}a4e`__Lh&dLVrChV*i>~DAtOk^PzIBz!UEo0 
zHqrwxn}a7S&csZ`wd6w#@tatx6bn(4VPV+;qbd38Eo^-`m4-v7C@HU9e%2F_*NWQs zM3~W`Di6yKq1qU<{7ml2Hb+HtFZ*>?Am!Vi+rB~1&xZzSC>|RIfG7Sge(50(13T%x zyK(tW?0;~I2$7_5jEqBF{MJ2#ZJgHzZrq`TU?Q5hw)wmDd{o7cksZsyh_7`VE_g|7KzVo>m&KFTQbnwN@Mc6Rm+IBf5D z%E&!AkP{{oP?xV{=z4kQOLq&y007V`5mRhNxP!`0&<=Agg)HAbZ)p_eOLdtPJH+^so2~5KQP>6E$z3+4m>5t~tXf0kQ4`(ky#fxi2 z!Z`8y9Z{sn5Ihv(mP?Q^+t)v>I^Fz|Q67hrfBq+wHmmUJ z=(LKL8lPNPXNuz!Li~2_u{w6C!#WV>}jNZ}(BtY%IE{ixHR>oq z_E4hDVc;Pm5Y^Ucc)W*GI5d1EZxdQp*F+*X0@+10cpzpt_gFN^ACDO#9@Sx$RxZdR z*dk)P^8zLC9X(^bGeK)T6zi8NL1~hlz_3pUzZw>%PF z-IT%M7m$;&e?bMX3B^4dNe^cVYbc_G6^T)#{4|Z5+TX!-A05bj*f5~RPM85=AdPnC zvx`#H^O+MyjNMkYqRz`B`&;BNp;~zGmxtp2KS=c75LPxvNpC(0EBSq`t*zad>vl+K zDps3F_n#25^7{&)@h(Bv~`OWb%1Bs8vXH<9|+;OKTJBv?c@RnZRtqV>{ zKgqQJ@TslbtCls^U+Tg*56^E2s~7=woCHM@kZOsfgFT`N!R^0BG^x%&?+&DrEcf;2 zm))na0$jy!C}Tg1UHGzk@FQi2Px#;H?)-B%;cw6I|HV>bH<{@2lxogHhEOOLxXkU^ zAWKnsH%MrpN9QX<@KAK(z`FLh^KCI4YD9_*A8k_U@k1qu;3_wBlE-AL@>FeT_7tEk zNSYePuyX;&WWSu)?AbjH8{7-e%uyzfla({ME1eKT$jD+Kb|Jlj6jKB#0wV!KR_Z@t z4nCPy3Z_ys(7U{Ut;z@ynAC9flXt9-&K?$*et!AR#+0%>xC-F^nwSKGRvm`O-vBf`6AtNUKz3Fq9$ZG9e~>D`^VT#PkBnG-w-A z#V+!ZMpbH`geKsS)OWg~G@Eb+fN;eLCW>T-0~x-kqB$Jb`r(W@OfoIOA(f_cb2B=~ zD5DcFLf>o&dy6hWrk5aC(lvi$SezNAhg&Y87B|Ev+6b1HP=npq7{DuqAX{N1W(T}d z3}`R}iY`??T8BJtID9lE7{5YxcvZv_Uk<B19T-Sl&SwhN@P*VcYQ1Bta z-^6acNoMK0^>*&0`C+Un$P7lUP{%`EPva+=nwp6(Q^9%za%$Ywey8s$LEg))!qndI zb2D1bQt4U$^pc!gPbw@!kzK-RmPb(G3W!3>atP7D`<4=b1Zb}9!>ckM?O})N;GGa4 z1dMcjsqsPMlkHF*PM>yt@naIVCBq#w185Wss=aisQ&0bfMM-W#;;=(+X)*;t3EhB2 zvA@4C#r6X>ejZA(VN^jOnVtK@L$ngGCF|a#g&yPCq&CREL+982p^vs`2iXBlxn$XF?jdxjJcML}&NwzH3;r zdo_!n|U~7>^m|arKd8@ zSg+Bl8=+4d+zfn!SO*x;E$H5Y;zSDDfmw+9axK>TRE4hkd1M(wW)W~~$&hxhgLV@P za!HJnPkPT4$k*v&i5BAwY_yHZA))>F+!7=?G^I!S^3+;D&!-R( z`38n_NKlZlG>F_b*3}=1I~u=~Q8^56rVYtMd@{o6@KI9A&G@mpfLdumBDxD;&msx6 zteJ#u8E!nEToUZC^MOh4r*o#$s>u=qX%kK3Z9?cZKhDi1kDM4xEgJYC30>e!aBqyq z-g3No5+eIhEV}6Oi8*fiQY9>19n{|;WB6j;pdf$jvKRcoXqUbVz{x+GURXT@m5okT8t`euJjUCO|}i`0N?( 
z32M~BTI)0GDt@xjqQ_%18$w1%QFEm=RFTuy-1qg4X*2bpFsAzyQ4`qQvD`6}Y);bM zAmb{sHgpAVG-&xwZ_I^mMnAvbWzhPi~T>m&KlMe<>QlP~|dIHzWZ;e!^J%lu?p0GmN=}crR}51p-GoAqG`fy*f~4&6d!{3ml&} zk~15Mu=9EI#q@*1U)(uz_^=&pe5N?#L*(m1T?0gtYU4x0lwPpMy*U2>dXIAj#3;|@ zn}(&v#8zWfvw;x^88f4n+&Tl7YoK{>s{-7}B#$@QiXBipJ(8%ENc_ZQi!i_|`ObipwwY!6DOKQUN^Id3VNo^<-FcBdG@}v4$k{KDrf*jZs-|z{mwp8{tD`+C#u#)P#A} z7bSI&5fccU8md>y;MvZv*qpq>3z0t^=u9vrXUJlpS-Tc-g&rYxy+7D5Who>Bjag0- zUM4$HDG2D{y-vKi?4f%C-PwhyC^^y_N$K25qJPfc~O8{bj{T$j?e0ca6sLBg`Mc=fdS(3ICHB)e4 zJ|izd(C%=&9pF(Nr?(16ICRP{BR4R5L&8Ov+FTc4?s)F?O2dNRc%r1{4iok!e%2v? zf~1g#F=jO4Vqzn%m}fKmUJsvU6vpn(JiDsv=H{08A|kV^aS=|~4{GvYvI)haZujRG z_Amv+&&$Ubhl`ogeK)Q?94*DPR%jnK0+}tKCMNc}s7zqdjLPl0mo-G#cT*@9IvKP{ zdyt&>5kQ9JUu(RA&to~@EMWw3bS<$YqgR`uDvtofV~KO93m3gRfHPkQ9eaOB?jg%- zK<07(+I<&vAu4jrjk7<3wcjh9J;fsz)*;r^?Ln$Z>$?q|`YCBM{ zs*f;VVb2hb1;S6_75Erx={Ip&;X*&f?Ye&&=e)kAhZ18OWvGYHszAd6>~aGK_%@O} z2M#W5f0NM#93Kk7C}HY=!C2wcw79v6PA}NLf?~QF0VlEeBj_db1vwNSJ$_t-6ZjH6 zD|{;mL_%a&O(FRii1vIkHeuwv)@-TjEl_}o-bfs}Mb6VCJajE_m*CwLhx4Aj!jLin zYeO+)D>o&p1o--VCQ%xm>~evYhEQzrc^iga9y&pbnP7}RgB%O=grSX2XfieIVeF37 z>^6i1)Mxg=`Y9LqA-cP6p3`k^LNPSs%S%g0*b=OJ1OAr)8!~o)L3YaY33?F`=m;B= z>U+_pVfqgs9}U^csj*my4kk;`9mdTSdX$##ynE8*$@8giwPn64x1MXc=QxJK(h|s%7S=;E8#D(_U1plG#%_theIeikoC^{36mk5WlT+zN z=`ett;F-xySXIdbj%Cm)=M)7DVK?ifs4QQHxB-N7*TXY&2(!vaR$Xk8X+(YyP-vh6 z1ty#5uvh}h@t!$SOCNt{J4ZJ^N*3pA3B`NvM$h48+Q9_U&;jaqzu^qS0N&hwkvix0 zEb$@zfwJ))%co7A908Dx7UHH^+L|0NK@I=6Blibb^~i~Ej1t*NZd7s_s_HB>SAiRfhKQ&+!y3k zH2yT7i2&SU@KDMBKKtZC+3HWsWVCV`>7S5$2u=!ZI^}HhVD;1rdmvJo#u@_KrI@;> zoe~zaZC0TtgB<7L7S}d?)Jl#f#D5@iTchzbq;ZJ2=ky%z79b10bAJq164i@XM6@;! 
zGLMFjc}@8YL`o@n#sL)&e=>qReWBgz)=@PwK!GDyL`LRUB*Et5zl_OJBpMjTXEe}G zFNQvoq+Puhs>;ozD}vf064kF0|KSbM*a7|mizlR#jXaKUbx@z2;0_EQf!6(X6$5;M z=9ym^`4uQ@qlxQ-AQVSi5EXwM38#W{F$?&rsdT{t9dH{N(QuqX}PV~TS2b6`WEk$W}lZZJBF4ba_$wp&a2P> z1U!&RS(Z)?vZ*KC9axZY#nTTdZGE8va}%45NY*J2X|{Vul2rNz#!&#DNPQL+9_=z1 z@Q}g|xks_y8b9tP1{mQy@?5hk-`3SxVmXjx14P2Z@lgD3y+@oUijq2!>#w4QC@|T^ z$os&~ow z3MAUpjaZnTQro92UZSTB3;}t6=GbvI{%B+BgS`S0|6!f3AXvMEg;3a1wb+>egceRzFWkEGFSv;DUaxt<5Ju@dL?3)^k!1HqDskK^9 z&nJxPg_d#y!b{a^el1NZk&)jMc!A~ixWR@|Dwnw8V!5RL#A3zDcO>ykK8J}$>5xPs zF)KWa7<8V0fC3FZ?mZF5#tU9U z810l$1FDvtVlB11a19mD(3ROBo3>e{=T2XHpB;oBDw2s#VJdB=r) zf|GFT=^ZT2N@r^Ad@+#W`#7fZx+82MWuoI-Dc>?j`E zR!+FI5FQY@=Usuf3MC5=!qrT(MrWq=;XI!hq&{vZ#@ZZ*)_e^7j%m@7${qkxqb3M{ z>!N#1iw#{bxtY+zpg|l4W0Wwga>>qvJKAP1)_iOk`IZxCgQ3#)8(?dEtrqOlLh1H( z(jqj8lYpa95{P-5+g*+EZGh8^kq~MgAI|WYc;MahOJom%PB!WHU{p^ZIU}-vEfj($ zL85cps6K&1*cincMR#B&w5-^h51m(p;171w8NE#;MviKQA`&36i1KQ}Q6Q@sgYy>U zpw|4gqqs(}N5VO3u)v>T0pKzHAI{!9EXVeH`@Tv@p+U*4E@dc`IWxJ^U@T=!kuhTt zk|~5HQc8wQDJnx{%v@2K-NKDbB_U%-LU=wa_wRY$_uZbqUVnVIFL%Rr9_MkawXc2O z*E$%=JZ8pG!JBe1O!12suP>edgc;(sTz7rp&#$>(OYY?EnpjOS+-u+9tx5;EbY1RN zthz9pM_GWi>MeM96t52WJ@)W281a1_2UJ&N=pjGGHPrIX5C{*HWB8Y~lq8(X7*o0g zXW%vMJXEdj4}e?e<&}Ow14_G8n@Nkf{2I(HNDwhtnqJLY*&Fw(V))gsQu=>f=JxM+ zBe_V9QisdBP33q9!$JHweD-Xw-j$^nDcDaD>E4HLR8u6nz>t3a_pFk)_TP-|HRW)t zirvW;Kk^c(AlUN_Jv6J;6`$-e!iO{kkvl|_7aCa{st$R?%0c5bnv_=!-g~zA)egf( zJy|rdd__talNoSTx`aj)zSKdH!d3nM;-oS*F zcmKncJ+XCbc_(m8EPD4I&orN*>(tYqf)`C4*p@S>3D@Lwo%!!z{vy=~d&;6kFXP?v z2BIfGMGE6qg_I@ucB{JTPh)ghre+ zTv54j?=3Z7MokeP#Gx$L62oNNOzb|1(-dvcw4g3bfeZvs7>yfQ$dX?$9Af_Yw47&L zz=iBG2}{OIR>S)vEgjO=B$N7lTL-)qnjQ<`ePGZbL=frgm3+&3y!D+Pa3Gc$OBg(a z{XrM;K8FX7&Bu((*I_wW5VE-%MdteB8d2AwW-k~Z?mTK{zmXnqgnHdU*mrc^y68Do z`mM?!;c0YGHJ80kS@u~thX9<=a=W6G={sja?N`E}&S3H}xa3yX~0j^q1q zN{p<4R-HQS?w_YpvgqVuyj%2mu`?|i*jRD1JDwqFpnF$k+hmMkxKdr+NS>{Q5H+A& z(;eSqGTY8pk3EN=T?g!w*;-$(?c>wPxc;a6EELmPa+xoSFj64)acD5< z5oXVi^0wNqRN#HZ*nQ~a(`2rI)PJ>Jc#6Mv{g=PCpP0TqJJd+v$qxvj>02nHMXjc` 
zlQYNcMzMg)nd^cx&+|Uss*wbAF645XHMR6kT3h-93)t@Ep2n&IL@efte zeQ_ROStEi{ecn#zy?Csxu3>t9k3!LWG#t!dvjVsr`(8rd%0ivf_^iI$G$@skW{B{2 zmekA%Mg+sQU_zxie)-4AK&;KCa;t)Kk%sycxe$fHjBFO@o!7Wf<$l_3xl97yOXtCo z7}t&QGY_8{C@Ujm2@2oGh0^ZF$?CkMe3nqAk@151v9bR^2WT4HRMUWLT=x4=4|1yf z^U!q~>0`aR?dWLHp}<4k#T(krM;r2)chiOI>h*X?EieG8b zeXMb+7>c-#Dvw|q4S>XpIWPPP!oq(BZ6Qm-^fhZ&8PgBaWDrT`^?0e?r9Qc+r}kk1 z7Do$&0ZMb}vKa!f8!$RK_6O%?FbD;+T@CS0&GhY+b3_Zus%r(vaZ| zJ<2AwNI4rhuuaCqZcltS5rd2!L_L-PY6q78`oW&v3^ai&fCT&IynoLPCJwXPTJjID zzJ2ZLYHSuxIQgF^!xZMvc&S~cTs2ts`AVz(Zu7dDnyOAd^*=x*)&E278cX9}x7-fG zdcAqv9NgBlbiC$ji#$AMAD+Nn%tu>}s<3$g9f|wTFkgqHrBEN@w8_?K$7F}W8Gq)S zDz483%!8Pvb{jpes?9|@H%LX-nrzy+^RO%@c#PTzx%{x)7~OYZ9hV_hJK`?MfJ5kl z(oz-enRPJ~gd=0)=sFZnt>KbqN80wLPEOeHieO#aI;v2$yj4=LCjnX1ADcw zhk@eHS6&m1iZT%IvX`a*v}x03-i+-obuHLFzVV#&%wbOIE9DAuav@m^GL!%PK|Oq& z?x5y&HPVN9E!b!osHT{6j{&Ei>n|x{@fF9ds4QyE5X5v-uPc#q`}hGyAMb{7d`pfQ zMQV)(6Tqo@@tAUpDJ)dUlU#zs_zwx4)Vhi3b!h>nsk^ZKr>56w;K@po4!6M5xV-yH zDD2I|>(kFeEF7D5qsQW7(SgIKlbt7NsE_B=9tWS!ew=>0WVph9>}lU5xvB#)=wnPK zz)A*vvKjUfBddD2xm{cMUgDsG+zrwNhn5%+*wrzc)+t;TR}IP(h=02x&d2trn)lSv zJ#-WS&%vR^m`sNLgGMhOM(9v82QI7^VhL(y{F`#y`->qm<%Y%Wb538Go2Kr%b;|r} zwFyP=8KdMzR!UqZf$ztYwts-STMCH%+0rlT`Oyim`iXz*_Z3^Oy@iqRBK&a3htqr_ zN9;$p3C0y9K28Kx~IHVqV2_X&Q5^C(%{c+PNd?<-3Z8TKfB zHbT`rEnWl9Uq*MyylG1B`T}PVt;Lrm@5H>l+q`L2|t;(9&9X&T8x{li-xc4oFvJhxW-9>HlVA27M!1c8!EjhU^AnSV9tsPl}w zt+DPsHVsxVs_s2DOPoAg&s(NN$^@Sg7H^V?!a9hf$fF61*@)dWJQj#`TsLu3q34zLB;(d zV|M@tFMZSdMcYVSewn(w%ilXH->j_J>>tV+znp6nQM#?g`zyT=kyY%^$7dcgmiA`% zS54Mo@12AhL2j-w#H=qaWg|E5{-<)~4pZp@plB66R{ZzTMy+)VM2%h7%pV~l3B$Ii zFuc~GJ=@c&PnW1%jW=bC_~IA98viiyq;xJK+uuefH2pQV%^qb!mq8AB)pUAQX-F_x zoQ-H)$Gt9GqIs9DKzWjW$Tg-{h-vV9vf((5GWo~vg6@g72i$&QO+I{mxSFCgl;<*9 z;_hqbZ7ZOoM(vEPB`|_uo4YXiADbKbSV}`PZ1)4eIh6R~@8f!4{F&ke zJPo7GRqi7_f3hJ<*rWpNqIT|V?;v`Tj6#n(9oSN#aJ~IM%^16|)ZTmZ^xPSte+7kK zpDGLD#hpULQX$)LBvm%-TA+n;`=x#G)|i;wMMjE%fU0-ZPspETYHno*j>maueOY5w zumtr+-okN)#DBc946ic}H)>W-#4Rc{2gcxxM4;NCX{6b}_ITQ{mPsM8tD+W6Y|`&L 
z6O7-$&xvq}gqW=rzM`UV9YDG*_UG0nCVTX08k>aER%}H4toMf^&>lzN0un9aztF@a z?0;dLFk4#>R=$7uM1aKD4y9<$xy-_pg@#zhZ%#ZJG@6mv8$vbZOGNhqGy8xQFaOk)8Noz^H6!uDdbhjqz7q?~&7|CrbIs z6xvRWJUX}U-1%Bve&u>{?-)ayn#D{a=+=UW|ym~+J zHsuQ!N>z0l;r))I0DMDYI{3nVWyW#teyf_WNX;qJ)Ct_Kg=v)K>SKE*K$8PhK9iOfTqtQpiciP$C zupjhq-4X$YvmX%*^XRFNe8S-p7`}hjW+8SMcN|UBX8t=}oKXWHq|Sv8o0HM^)48Ml zC-wFA2TpPHkI)=&V@J#x(v?{F z*OZ_o!@uZy0ElmLsaP|Dno+9X?uK1;z3D}hc%D|*kPwNGfJ?=ef?dlGz z5k31+KF4uNU`<89GqhAE!Oee9WV02OKI74kwn6htKa+GDO_|b$Dj8KEkC%-g)|cch z*PXM^=ILzlINFBR_k#404TtpA3?^G} zSO`s0ylA-qq-MCfW^Ias7*lphX;@Axx#d;3kg7$fs_kCBpbRnZ)rTSRkz2BBBnFJ= zG60dQqGqS6clsG1=^&F|<`2A99ddhdq-l6=(H#!Dw+K)?jGIW zWQ4B|q|`$61?MU2G!+3JONrM{dzPcsPoY=uchn;`^H|y2QnAWAmxC-g13de))db@J z;SA)0zY9(lN)`$>nO0gHLWe%CuI6THVv^bOs3+*gyw`codSpB?wxUO8W8?P^kG=|f zTCHH5n53gg4gDo=O}!Z_V*x}f{lP`&l!*nt)B9zZkL_9rrs+-V?1&4^doH|AOjQ&l z@=?sWn*+}B%g|TLPydl61;jX$u_3=o%5c^e)H^ULWwR+4z9dpg7dN{P11-EOYHIVP zr7IWF(&q2(+?E#(PB|~3?Qwg|T+%Qo-J0`ncF&c5wsw$KGSgt*z@ko_I+^`S@zTn8NJCH_ zo;Yc5p26`H5{r8DT9<@lQ^pJ~s#{?XLm=IFQhLjJOD`{IjxMzOXNC}C7hExmNyO(a zE+?6tGvQLdd!43Sjq<#e8`f~d%O%r_F8xwZ+yeK=ts?rJ9CuVypZk|&cK3WI&W_-E zt)w*G)@|-inRNZIP+hGZRu&-#ASnbl3;#!_-ghq%wuT8yLtq28R~d+I*w5US6)2VZ)9HJ1vhW)Ibnp<=mS?l1#qto7!;l!=9D^5dw9EBWL z_5dhg;nZuFN|x9(4g8UNCyqF=`Jd&$wj>isl^6~B6Q0JK!x zOlEJ*etP!Q-7{foE|2BL&sc7_54eOrJd&0ecJ3Tm}-n3u%hT9(<$fH69m+SR4kW|3Cck&>7EnEQ@&fW95jda@ZKeg9qjF&=H6{EWOW{@=Xhofu$%hEz|4 zB3I9-eg!hR0=SCe4##v@<`7UfY@(Io1nIT7i&`yC@(X`{pIL~4BmQ@w(ef}xo3J+iKyBX_m-Tx8D!U{71mecO zvw;ysA*49VYex*4H5`nD|Je%QD*aKQmpU?(-(66;~)?dvlJy$UUa4T0ZeSP3|O_ z_pyj;(`gFsZtEYu0^S&2EE%kHxAM*RM8tn0w^X z!Go7~L~9Qjai?=$lc0d~?C5I4HTqv%Ic^^`OU)O?k1zDC^f_;w7}n`U?ENpFNl49S%hCvu>?dsa%p$ zM~4L2*CO8L1`2N|?)roDT(NT+$}K;B{{GLY?Y$Q) z$fm^BFfuZ_Ts$w^XV}hO{pZc=$Y6*xGJhr7TgbBcuWq`wJ=9NCjDfA8;#N$jt&Ay2 z%GWe0W%rm^SZGr(TmqTg2A?YTRtCSm2XDI$*T0(ISLgAOrW46gsmG38qRs7|VR%rG zeG%QDVZ6lWh{+qTOt$LMrMluQpwQdfyPvObXDahjcJ>5RsgQ;jZ|&$=Q%6U~KJwDT zpkS9vH*ePEBwwThxb*ey7TjZR+@eLbmMvR?c50BylS+bAV`t(RV6aJzIZw|9b7NuI 
z+xt4evTSydVea5qYdt_n`n+PT7xPYedT&_0x|;NOC%sm=*k|_Kn04);zSEV!ON$=_ z1({#JeOr&MGyVSIrdU}2p_dOIWETGEYzsJp8eZ%>iN#DNBBTX~^`A3&HxqAKj z^=t3c)6+|m41D{{qPdzTsdJw6$X0Y!So@c?mK2%pare$0zOc^DKFg9v*BBgWx@F6j z0s>k>v$l3@pN8@=X;!hFKD~87j7~4AZPyQ%0T(e<}czTdOIM4?jU7eEp!0#J`OA6VA^%&O%4P?udG% zsbQN)eVf)N3@eYaDPVWUw6={m4R7AJhJqck|8jY<>z}#v9%b#Vr`^fQsvoG**g>BQ z+4dX>Q|0I6=Gt{wmCB8E)_$eNWXK@7$*n=pg>9+s|4amySbG;J(1Gg|EcS^D4-aoS zsa^@buc;HDM>RDyTaWrlq?W&&bvH}i%?6wV^}!&s*%1xbs%2cc;?QA9l&QnzYu5%i zI%-kC8$M#*ZYu0XeMddLh<^d>H3)XW==ZzfO|p0Hs=quZpQ}Fn7B8Miz1cQOnU@OT z0Pi>f@lquHrSTs6m?vxQaSEUNzpmwwt*u*@3^I3QudS!K~Zl3f)@$) za7cT;?rJwbmZD?ki*N3~EU6=lbg2pEjsvkb{;Sn<&9(B5P>!#?(8|Di4PRA#k=1pPJ zRG_ZZ4+a*Tm(dJP+nIb2@-yV3d&UOSHAM>+Ell=l<`%|Jpk`nmg~-G}z%KB8p|Z%^~68H5Hd9cf7a8 zG_NB9^J-mnPKF&85ckCg_wH>05?)ErG6u*FsYQq|Ux%wRjzznC>z3Wvv5m^g$|g*i z@-?rRogH+{iTKq)bpKrAys_2S3|SL%qV3s~vrU>dE&TAoJz~15$I5JHUhF?)7?|0b zKr@lNI$+&#KKbP2+~yRP)l4)v(+9JnRHr8bLlrx3O+5-u^z3wE>y|AUD1`NH2l+Zu z^hIph62!qXeE5_JOWCi8lAdhP?EIFkTD9oat4YR}I18ouj#b5$9$oj|4aLR~EDwUH zza}ZjevFo-#Sqn-J^T09Y}~jppEvogSp7aU(l+4y$+J?CzX_~-2Vs$4o? 
z(^;jfjs#ve=7ee9ZUAQy2RMECuO4{c4Z6K19Snqv-xw7Y$eG(cY*!z$jq9;%*RKbz zTUV3neYE{xmw|*8U7INXF=IxI7-8E}>Hh^LU#;T8v-Wy=PE&M^jK1aDJd1QUh`IYR zyGUzl(FBtYPT2>?ovhnpU|2ucfX+?V*zY&Ee$ZpfjvYpW zmu?hp5*IQq-CHt1?L~_gy~{tY&3@muK^1?;Z+Q!6ottfKRq>_(flNlnd4GJ`+deXo z`lkpRFqWqlEePvK2-+;R>#;WJ1z!K`R$Kn(*P`_=TIO|y%NsawV2x>4jPoRd?jdyB z4;{KTCB>4xZ1@v7E09lS?*1d^(j_??X%`AAt?BUbrSaqaLpdjyQ{E>0RJLg0aq2=U z(daS3r`52XDq>}zZm(|ouK%D$`2aQ82sP>HYet~Gs8lHpdjO1QhIgO$LX)?nIOSHi zXV0GL>FK*6sZx(0ckMM|$l$>Q4Gr~b)#&=yW^y?IC5~ciMn!#9z9s2y+RfNZtR1CT zJoZotvXyKp4EF5LJ=HDfZub5AMj2neHBrTnwej>)>P6)BT3VsotXVT_pXcN8J3Gk)Akey^cm0um1H(RVG_AFE z?OMr0@7}++h>frxb*|#4fz59Anj|V5%i+nc-Oq2a-+|n^Qhu(D=JnX_^$4=dia%Oe zTIyh(%f>5i;-HTYLbd@DwDbCW`0zoqR;~WUqTCAyl+qhoNx$2i#{s(P)LZ`V zKiO!u#W&E{_^q>|)^{4N7kv|iGU##3 zz(;HI*s^{5Uf``+GoxnBHsBI6k&}A} zVsT5~6@C26-z9B6sETeHnm{VEw;(3hy-@iX=YAGKj};w95Vu;$BkOS1?kfU!wAqnt zS3d7lpSX}VZ=JUi_Gn+uqIN4NDq79HY;(fstZvsXU9x#0q`wW6^fOwUMJ%LMO=r&I z^p&_&3VAV>^MBsz^RxEKl`EO$*hX}YRe>ts-<69sG&E$2jbGX82+t*%i}Nxv6nM2) zZr|RR)@JQx%a#qXHOte2VrbCYzjL52ZB^zBTR-EO>vm>g9bDJ_hYqdl-u7$a*yH9> zL*ZLUvgqiPkS*vznP(2=W$}51L*&&i>K>X-lV5O&{q5~HFJ2r_yyD`uYlDhcthOJ= zR5yKv6`tn7TdewD^a@HeWpw+46SEuRIS(b8wLW3=cm!^^+5qh}2M$=YZP%`g;U@c; zlwY+wZmrER9DUZ$XX~RC*!ShzuHU%f^Yx9Zp{Z${It<}OD6@i(AH!iU3P=SZ%c{S^ z9yLg1XE)Pa88LG7vFUnLHUI)vWdkwSpyx|Lf5bL2gsB7e%|>icgQE!;NvaX`R?<>- zx~QJ3dE3oOrGGV*t!*LaP!Yfz_Ohq*X zj2T)pXU>$D%)zzgw03T~Nl=Zi+rouid0=gld@QB?K4}44I)k3)HwKgvAI)r zShL=Zc!ieZ_Yctm_ zO#^}wJGR-9wvVkm`oi+w($thtb_Pn$Z^@DY!-j=}wH!0XddGDeM82S%{gtQXHpl0$ zUkC9GQYp=_My-go7a8uBadg;=sCo#ZXq?C*_Eu`Y&HTu_)I33-;zgiAQ zuLEn|cKm4ragq7|QMoKfpWC*9i~5MEwGm4i<3|{@U}w)Id)Ht5&Y;A2B9O`|xb0Abx_~pU7mNif4a*uOlqQ&2a6ru$6fo!m%79sG_R> z0_~EKpa0|ddrYNPjM<}&V1T9?U(_YuOsPm;@V36Xy1K5eZUE1w7~Zz}K$T(4wryI9 zL{>WZCplV8;qUM7?c>wtND6oYLB1NxcB|Mc*m32e*(%i(m(mIEAQ>tk=sQfz(qL9X zW7lMt+uj|O%D8UV2&{$0#jcaLQ-Y%nD#nc)_aAzp=4Oj#toZW_7=PDqEmjh%n`6Wqe!bZO*qvGV$7dzBerE1?$-$J|;{QP(!S7;wxj}P+mvmQKX0G7u`nn98&efO}F 
z;?|1oyzWeTirjnrtf3d}F>+*mhV5ytSg}H*X3Z7=oqo5~lPK9PW@lGFeE6{C_>N&g zR{@_Igjq(hWtY=9OKsECMl<{K63dRl6TwnNzckIOA;HkRCa^^)AairAj4zHKXcL%( zNq~5>f9ge2**P^ja>DqunkEw07z0s${HRs_qen!Erd3Y@&XJG7B~Llvt$R`1Si9ql zX7~jDc+Oc>xSuoV8PmzcS%=brFgJ-cO!xZ~N9y{R!h4n^#ZdB4jMYXn|6m9yKuhaC ze7G*=VNHc*?b?&m44m4)lIp^swqgSZL-gB3n!b4XQrWU)4QNu^4ege?A3Jd(Cof@8 z;k$PSKU`LgWeR2URzqX^_3NjA&)c|TN1ffT=Q1$1Hh?XVgv5}89m+v>^_{YrlvVHK z+-Er;`;hlNeVzwC|8gxPVP~(!>II8$8|s~FBx(O@=D+{eUs3s~COp?_g4qDgmALa| zq|}x^%X%~q(mR()AgAi4&fC{uN`KW=qb5yiE3CS9l@B%OoZF$DJDUMS!>GWG(aCS? zP(^m#(tUnQfJ=>n??<#?ItIA8HGyxdWuhT4<@)u(V7&qm*5w;FU_c<~8d!T9dkoZT z<1~770|d|wEWOc^h6h2JQ0y}GciqvW^OFuw0YBcRyC$~RVtsI&`Hi9Rv#GDVpI@pa ze8huO^IH+Z-N#zFA%sBe)SzN%@AKV6X?{lAsC~w>XU(aeZK>e{;;K8tLE2LN6})*9 z!rr(@n}z@u76YwfKjJ_a6>Cy>9IA()pt>C{a`pFpMdRr3p zC#4zaIdw02`*sq1%In9cI>`(K+QUrPJItPdK+_P5#?#b#=hQH5r_FxU6>6-20B5^z zR~JYuqyX5_Zs}*v?Crn0Mx()w@#8-{yP#vD0m7}JtsUC&_;L6>VvG-rIYpI+tXjE&n;2m5#i&Q#X1w%rIMe4Z&Ri1|8@F z!OH9m5}p&oKdr5;cax{vHf!m81e6zTeZWhosh2O;!uNd{Uo&Ex;9O*;8VZ1w8aBN; zFbt`zMYi#F7|*>FEZO-}r%fy3wCLK%&zdoGsLOXDt`k?exw-uV<1v_7LZ1SDB09uyXKiP%&C6lJ1Ogyrs_C?SjH zooWKYHXrp9b+Ppi6QE&@^XJcp+~RGm+p*(`MK}dRF4JDLh#+B&x@h6Ju_V?}Q`c~{ zGs~L+M4umFltp@b{p3tHmZK(V5&k~<%*T*pW^sS+K>2mXCwiv?v`yyXG~Ox|sh~x{ zD|PlO^Ln$v?Fbh>g;~{!Q4=_Pw&mI;R-j$|N6p~)_=uzg%tUp(g3Y@KWF zT`J?&bIh>-TH&>n5mFaRf_#VZHfZkJAS`;$NBOE{WIq9+@?VH8$=y3dlr#OJ zk0R>lH{7lyYb?=u9f^isdEdXhQtP!sP5RrNPcFf@m&RBI_ydaYLVE3zL+s-_nL5~T znsHCmj_tjqK7ny9$8t^R4(V>L`0>LNz8UX~N)_)va^%Pg{1$28D57~0oVVv5gNPcm zK-7YlaXx#db;W*8H%+=~AV{+l7E9~=AVMZuvB4xk{4kQ|Pz|}xdv}qqR ziOp)g;&(6V@0^Dng6{T^EFn#& zd3vf-ae6Ry=xDWOt#m?}@me-kycew*ZtJ#S~NHl{7*KR-lbo}X!svZ+y zQ3R^0$_7r{V{m)B-LPRgERpxZh3)Ts;u~V0L?wp#mA6njuS6q}aZUJ>8&UYwt%8>{ z+O5LuGVyMrQ=_5w@#iWrU+wZ3+1fRd4X9Aa;0N2jWP0(Co)q7qG>8k5dh;BJ$p9B8Jocw zO7kwv(vKNs92auR|EsxTH~A}}eU!NsML{JpYWkv&JL8%w&6`7bC^C>|Qwc;&h^`GM zXvbDJ9OT)kabx*;n9S3?3)R8QIu4N&rcV#TYfwnMOQ-YK;!j=ejlnzz4U6p$dtBJS 
zel&deo1bsdhkUaeqmgWp8*-Be^k8j&cwY}}am*-D_@Mq&a4S0{I9<5b+0cs#_;SgC-e(Y0w^BRTq5iqi2-OpxP9s}?I( zW>pWh{BS_CZbvi!E+(I5Yv?&8{eRTDT`@81u*)f_(!|Z|+AGPe7{y^`=Fbbp{7L^`$5exn+5NUF)+c%8KQy6MeP% z?$@(1?%MUlrDcYnJTItRnoN_8z1Q3|IpfZqdH}a9z~RP-2sMYujf`w5nXVc;i7;ag z15F1H(>UwYthvtlfCfmn*o;Y!$f?hTeHtycNtNe)@CWv_7qNPcSdslMexzH!0#7TdEOhqu}k^P%3|Q1%NOdMnuUr`SIh&j9bGvuFX=m9U8c` z(?eaSlCv}VG`_iDKzy^q9?%3zJ7M0 zZ`gXc*=YOyPttv=HSRh~Xk|gIy?gh@sG#5{iEJWv7)1Bzorrq9diRDLJ$0%(-5xs3 z;3zu&9*b!QhU@XCpXN$p_5^ZVvW{^5ssOQOj%72dYP!{{N~ZVa z`?ZV)sr93L=4YnCsw!glZMGi+8l@L<1M8Iq$YZRJ5Qoz2{O$o9MaB2;6VVz`PA_5R zP6ga-*|u$lad+bS417?cPL+Q7auK*1!BDFQqlQ~gfd-bUQ689~GtT=~FptZBMEdE| z%C>E5+h|te^z)uK&wRlnj<_cu9o$MFp^P3i(V3UWA7{hAT+Y&^y%5W;*A|!h*b6_u zeY7J_Dnw~^d>dnLWa!?4q8@u0xq*cZ>3DJ%wIiprI-eTueh}f|*mdZa(8>#Vt1O?4 z(p&S>OxDMLNAkA1AOuzXtd!*HmR`+g<_5cbwz~Yiz3&j|+zj1+y8-n59cOIh&VkeYk2NP~VxbeMnl9 zvOf;gT*+?G|K@rr^q0Oo^0~Ty+3y1Y*cm<4^$h2M$t!7iL7s27Xrhw;diMY&O~so7 zDZtx8_fnRu~37Swfk_eYXFW~ZPuc5pmI-5hdtqL(n zC@I{22{SALGYBNEu^S;3x{Cn8mQR2}7r^vq7srJjs5(&9zZ7l__l5;0mVv<1=$he) z3(=7wzSilkvP=tdOe#H|sH+a4YWCqkkkL=GxFbF!!3h(L54)cWf#+gxHcObZ)_tCS~RUE3=4lzk8qk+WDl z31+iq&yJ1?vF}+ccvvst-h%9}NbKh|S!lIQc1c=Ash{=qX)|{B0D5VgpOgy0HQQ9F z5XKwZf-v5E4gfsL$JGl|n%p7M01YTUX^Sd)6;*v)#&FG*R5qSJ&Ew*i2PeK08xqsm z>wptW%gYCncsw4pgvt?+2O}vZolIdeX~X&T7lEu#pFW0-T3>wY5*IQeXW1mRdiClF zvCS#Sz&3`Y6Bo5S_*nI-m(3HD2#zMbz-ijq+4205d$$HXwSw7_D8R7?M%LXuePt9{ zI1~dBel50!sHDp%cBIcXePUeNCe&V9%htt59+KHHft2C99iz>q)Fbd<;`exA=FHn? 
ztrWChR{Z=abP}m>)&BjhShl##xorLTUc8`e=p;kiKj5}Cp`qFc_qD*ZVECBRmN$P4 zbLnvP^xU??hxpeF0ruW$I_6@iM7h?$gUmlrtjhDTWg|9jRPcN*{a5bg(*m}kGv}kPBu9}DIH;woKNx*;4Ls6( zcfFFxEK47*9|}j*%^qZGqJii!an`Ib>J->|i<_Hp8zI$j(EQ&&E10*H+_Y7SEK1I` zy6WxQw;z4lw@vLjbpW0At>oIAPu-CSiFQ7{XUv|xp6+R>%1Q5dHEakn0k7`ws!^kc zR68K_wLs${J}}vHYh2%I)v7_1R)=mI^)W{2KR^r#lCVHlX+UW%564(UW}wSs9i*-I zr9Mv8g^j16CNbndsdR3-&VE0AdMyYX!pzPq4uP4?Rl|tGtIKAy496fk8?eKXHTdb0sYH00MymAB{_Fr2UyLv-1rg^Q0lm*Fd==hMtp( zSuIQSM5l;Zvt5yqtMG^Fqq#YS)jx6kcs9DjzMBtJW2Z4k&wAmz1~+&14yDs$ z^tf>|d+0~pqz%m+4qXi{sqEvk<|6Up)7S)J5mt=k=+#3jVkmyiJH8sqAAc}eL_43g z43s`WeSN)>eTaX?JQ;VeS3McNvRVo`5d?=Z`%bX6Ra9TN7&d?g%tI0aULDbSKUNIIz$eaGjDoi*_SvhCTa?T`PJqd?L&p> zVa1w1-KuxQLS35z3==lg zQ9cD3Y9zWMGydIbL%Po2$28?zg|7+lSc9%B2=(ns3%e? zM-c-M&n@z6nNWNuKDcF85oDgjexx$)$A^(toGNPwXIjBJ;n$0~R&4vD&Go!IeFX}~ z(Ruq?;vmsjydh!9Z=|@W5`bf{IM7WC!zkeKf{?dkhOP8aH z_F&H!6M4VF4oJeFy z#}T&l-KY@38{kD5!Z45Gz^RF@A4*kMm*l?i4}| z*q#`5XsjWl461|c>-O$FG~FRyivn2EiLlm+1oMslPE|E3K+Y@Bt$i*I)WTP<)?sxw zYS_-rd&9BR)C>kf6+-euOHDU7r}l5m@CQ9My9!m}eC6jspobB6B7&a!aXo2D@0K96 zr%#{aHG%5uhbvHQaO|+CAB*d1HwZTWKYDxy(s7&iE$6ekV4rvZ@x?U z^U34K?EUWCVAzDh*=eC~oa4GGBOuKD8qA?QHA2%Ko-|=j<9hX0Q%KurR{4Eobc5C= z{Svv&bny1Bb%1sujh8q?mQs9XVk(jzZ*L5>$7pg?>J(j_|JI~R#*2Ic@h?UwP-p}! 
zA~Fjx_`-=Dy6QMNT?W`Fp8%)py?b<0=lAd5)=VIY_%b=Oi`<&V@@S13HA*;ZG!XM0 zxCh$wx#0KPoQm4d-8AnYM(Ntk`!oR(J~%o}N5(*w*sJDt?%DH9$2t=>nFd#T8q`@N z#0qe;;h>xT_+1fvQ)Zt8u?|M-1~KW{Oki3n1kI>-T>0LgpiG^$hT#tr@J%OIHc~}b zMd7Fkgj=jD1F6;Y^!1%zmWwrk`bqRini8sP)pP+`iCzXWs8(^obN#CA%W$^|GRHA= z$3>+)BvTut2QB2!r~@PGNVP*$nCJzW?wTBRT6zTzD3uX6Pr+M}MO_bjJ=Sg(^(C`g z#0*n2Gbt%pB9#7ZAOjV9bz7w483~I0gS4U07Y2SaUFQu0z?>#N@wTYCuki_Qua*jTeRO zar&MYlU1iJU=MNQXF?Xj=x>=lXU+zARH0Zz(nBCaG+a+pNnK>b4D{Ctvu7VFe4)xO zBzUTqR`!3$fss2jr9QScjQBurdJfjxjW|1!$2`@#b;(|pJPIVrjj4u0oS`okaON^H z`T<(ZHw2>%VKG_$qoh^#=M#7kQhKcWR5>udDQ#jKUca7>bTIOc6UR-4o|5~_tHT*c zE!}`&y&s;Q(^hEe=-9y`CglTlno~`gJGZW`V0Vo$7y@P*p0qXC+@i|rs6h_yI&c2` z4%B5*vJsIWWj)zjR-HRvLJJYA@=44!MdG1n2ArWzCQ4oq(%&8uj3R)1Ewuv#afneC zkvA|V#zkJ9hQahq}?u)2C05_+!*(#kutV zP8)6?Pq<*RMHY%|m!|6ggkB`tKuTy(>+ZX=;muYOM1A^MJGJqdTIG!fZm<#&n>LL& zTwXlePL)mbS$#(A_n$MTJ-UssH*|YvT)epV@^o?iNE%_IWkS$?Dk~Xb+N#wg;2e`z z)#cKH1`QfaFWs)|I;ZO7?nX8h^STvf3`J~zO+hsWN2jSPB*tZTSmRL+ZQl$BT`u=w ztS0(S;^u}52xM&QH5b^m%3I9h+u@IpS{bcHY4EuB<|*PlbZRmo4Nrn^90IF@u1UL!UDK2>UZ|aMd8+ zAz=O$Jh}*(MsZ8K654Zovbbc|f_%gcOM8!;0HXU!6a@3V`g%Jpb-41d4TYXKZfRcO zuDcxJIk@noS_S=`7@D}qGXdP7QkmaQ_nCx`)%^D^h{&yjYhZEiv)3fq_}u;GnEz7l zkaiGx7^$Re(`jr8-k+Y^Vk4ptP<;IDsJfte`2CBg>~UF1Wj6`$oY-l^;tlkA zpWmg6sac*oB>{M;xV>phWif}7l8iXoZiLTylzZWkdDq5dL?lp=c3`lxn1m!RF0664 zy2L>!vp`b^Y#$sOalgQ1?k!2=5gN@$2xpX~CA!Tv&qquZ>5RG(WWTI!+qT0A5a7f% zYE{(hDv|N2O)=44M05HC#*R7to~;)V6H^nf39~E2{hkR$#@z#}4jh=dH6KO&B5W-K zP;#V5p>aT-x(+--4wy?1e&)1ll9hly|!(^|AtDi1ktp3-3#l_yhe;O0p& z^QVxGK4CF9VA@3gRRMjYQ`*v+b_VU%J zHzBw1`&qV|ad1>hGerS?1m=$R@#BEdsW7#;Ckt5IkoRk};}IV<$>^k-`WAxA^XgM# zVWt03VHC4>_U*Xp_aeb%+1HnG0~E%J%_+Za<70>)am z_M_fO3zE1uP?6-UK-=Qp_-k6QcaV`nQXn%3z$j2RC^YmUeiDy|`wAvzpnC_^s@F2H zMN7(#u?EYnr=suC06Kvt0g3ty5w3%|%(-`b(V*6GAxmh{EX7L(G@hr62K91Ut8MU5s|!z%XhCf9kw?@xmD924{uX zzi1r`7XcrtW?Rr;my*(2wTZ;-MyDzmYLtoD5ybF4f4dHWZ7I511#{=`MhV~9+h)Lk z(6jx_{kq43L}*coO4d0asC3>ym@XtJP%QXIyeDI<+q$(O?24H2aA0-@;9Tw?DQ(`o 
zj)A(Mp5fDGDG!-C=tCvQ2>0|;rzWDqwN;ts1&~9ta&y;Nt`F|~Zc3-^`VpUfa$3b* z6=fBVE>X(w{nr_pzlGKy5we1pg#DO2IetT;Dtg;SKRbI>yf3%4h<}FuvX$6_dM*3m zl6LFzWld>Yg5ef67@cN@6&f6Wsw?OeYbPDQ315E_;3g3fs7MKb{we5YlKl8DAt{m* zb>x1xWtVbuFVf^dOM~N3yZD|`KT`-{^{3t$&aEN;SuKefU@Xp%@-{l^!>kmSdU50ZhS;=;zn)^kjj;OrKzpG+SK9f zP_yI!#4%|D2r+U%VOS%6^+^(_^-S2WFhbbOC|*j-$}XX~yLbP2-qhj2F|T^kddL=_ zC6X}BrJ*C2Wd@s@w7+Plim%Z&QkmEG-?jCUnrp`fJ#9jjj7zvbKP7F?^0!E1b^5x5jrR97=G!KtQ=oCB>Zoun1dKxKGaX6E4&eiyjzJ@5ltNY&l z^ey-bf*@}xmmR+`Hnuja3}et-N*;jCy)9ApCyCP9tQL{d&e>T%;Q5+AK;65l&dNid zDn3mDQQ$sn(6FIbkGgt>`i&ch0uN07FH!FQ&(5)8D2PNV{E@f&IV#^z zoHAt%`TnN!fU+79|Hs;8UGuzO`|A{slP8+Bt%uvYQmvfs^C#g8FRN&_}rNr<8SxkPW1dju<3xw<&# z(U}rkm`4m18wV{U-k)Dw@w{-wC^Vs2FTVg$)X}}!A#`G#@3B|kD}nwI^?s+ce`@C& zmJRsN{p&2V|DGS!jk|2HdCmWbR$iMZyG$_EWB;`uG-$xz<3|~XY+cj$N$>|KAKb8ON+>%kC_`e=i z$2RTT*R|2Kk92vLvwMPvZPZ;X91&f`%*{0&(h+&CYzs>wz2wri1;=`u`2$^An&!;` zTi8%4h|7~Z)iZA1917Lr>xjyLzAd|u&!^tn-B_d*wqVwc8+9<@6`nJR&^pDA#1@f> z1;7dinlv%V_io;`t2yVDehexSG^{`w!2iIfWy_XAgK-E1Ctl3Wtw~d4Q$$gb%80oW zk&zo!)KKMLgu|iBCiM9qXwZQ;La@E$#QHDv`}H}Mx&w$pdrcT$rbNK%+k&9_wxu^WLLb`w{ z&#{i_)~~--$O9}4_q^vR-;YXgG!*l#_~B4Ar2`>*@LW~AJCy^r42vl9D~y*arKKb0 zfsS7Ecp?NZO;aHiIv&_hltrExXQ8&ASUjTVq%rMriVRo!Rir6Y5kPpBi*!gcZC%e0 z`h^lBvsP1BckiCmwnfB=g6lYW5`00;^WL`r>AFh)Bh<7(r%wNopReyG)gvz%Tx&z3 zltx+cPgj-2S$y4uNt0HSMQ8k&sq5-57<=8!NK+FOez=$l9_;r1Va-7ndzchS0=@6; ztbAXJBU#crVW#GA-+zWsT>pbDS-y(yL#Z5HIDIdRi@)YiqPAHB|26@JT)O@cy8>aH zGPAPc?jAj}*w4?UGas!GoVXZBxmH#da%*)92Yo>WY~|tr5uC>@?hLqLc2IJM?h)mu zQ0pL!h@QH3f!nxrbcb^=WIISN%u9}FBS&VbI`uIF9 zTe>Xtf$7nx@84vE0qk-%`F@Y!)OuOQidrLyd1ZH1 zLPk})80z0?eqRfm#E9k~OIl`9W{-f`79H$AqI=_}O{+O5V&s@%KREszA}5{Q!^P_i zb(Dj8DIGFg&}7>o7fd7dLjfVBKh>n@XOi zcf&Bp&U`T-{_5RwHbE98xA3*N{G}KQChb>sON$)OiVNp1i3JiOq~8sJJ+I#Gef3@IGUd9Tjg304M)$+z zf7?j;c%$ZZR6Zer)>GX2XmqVgKTIB3Jad|IN<(BE2Tv_?Aa%hrBL*~Wx}MXku8=t? 
ztoi}-mJ$D2_^s{4zG4c4Pqt6T8> zqC)B18$5xte*&@9dj8AWBzTz=lig7Z64Y$yx1p+R8Dd_+e$#8xBus?Jse{_4*QfSW z0nwzJm9pMw&YU^2v}Iqvc8Qh#qjTs-8d&Q!s#l+QHL4AMx0)y^-|`pI+9qT-lpSG9 zo1=gE=FPe~&HG$pI>^<>UFlq>@+5ZQgf+LfQ{i}cO=GFtfmqU7HMYvd>bQC1#sE^D z64=k{_51vy7KdAkTtX&fwus@7h~P3HEXK>7w+ophe&cx zp{kz92b8c$(I#Vj2qn?!VZ&$5YRht_Qt3{fI@J?`a5Q%W{TNWsYpc*gSk=W+OFpe> zXP3*M2x=8Nj|{&>f;j=A>3ECb6xOe&c8KfND5ek-btL%FlZzn5@K3#d?eWfQ%LOJi z?BUxQv@CNPkZ~aOH=mwOuND3j>06Qm%EOosc2rq1kag{a3w`Or3ZfNGQE=Qm;s~2L zgVK_AV*zFn%PeoM<+P34Sq=Mf9?jJ*NgG0gf&ySeVs)5cK<9`yJ%&=cLMgJmik-VG z>eQ*jO+iZ(w{GnbCF+Keah@7ur&au3<^qS4icnnor7<~nb74>X0&-UgVZ%{}-3mK3iF2+>`i>IVh0qlS<6cJ(Hg*1CP$g|2fhgU=zTu;HKFP*x z5A8#|gdlY_iVQLksfPMZn)861ijejmaGXjH7`xnJpw_M)3$&oK-h>Q^A3>{$WHHvu?ZWc$Y3E8O ztzy??P!eJ7NpwGVx}~SSms}JfNuR1zdfhj|>nd#BE4567frVQ4WTX~i^?es6B$%~n zcZ zHW{xb{#;zzgrXwJQR2aAwz*-01_C&xPZg|)Uu+d%leIEt2cR>qV~2;;=ElAh=Hz-z zuNbH66+?jdA2oHw@3YSf7Z%!81EH_O!X<+jXnmA^bqB7J4xnZr^snJb4Ihs3?$ZVE zOIGWm7oOXqM43is3S|}s_-E^&3Y|*rgN|#-B?Z#_X%!ezz z$bbOx#38D9A*vhdlW;g@(g|z3+2r|vxp8^j7rZf`pG4b4L)wpcw-?GDFgCC3{UamN zdg~*#w-7DqD&M$m+t|sqtbmBP@SNK{Tg`iULgZV=QWjAkUcP$O{KPY+SwUl5L}gzq zLOzDobG1Q-jwxktZOH3`DBi0nM3{bPOcsIp7*xx_gBf(ZOAH_%95gil6h=} zqQOupiLgkE5Xl%J)1qW3Wge2ELdlQ@C9JY!nG!`&rWGY)A>;mB<$3q>9PfVjKK43} z`##p~M*sis_x)bOd7jsKDGIMunk+gdiZFUj3$Snm*P|Cs4&)0CMep;7n?ZLY1eY*p zG;5r?+Cd%QI^d?=n-QG3bf|;i$?-BZ00W2|2+3_uYf36|O)!5<|0fhMa^$)Zoq+C* zk-IeIa|;xJyV%laFUMefL62wi$Kv@57RZ#S=>0HB8hN>6RDswf0K^L51|89%TQ^7A zT$6P&agp^tCURMId3CPm@_+zkpt(r`6o(LVGCB7F@JEA_I~2JcoMsalY5facEn)W? 
zc?E$`?M971am`LQ5!fmmagiosxJv*L81rn{@O+a&ZA2DRvXR3(m_}>DfgoEc2{3t$)Yst+XBvN!F32(09n|%>f9$3P{K=< z`mg$Nbop)S^bOaoe);WV=HF-0iH?2U>PYG4fLpl0>gqn5A-{%Hy$ ztOUYFN1|WxdL;>x!ULy~-Mb%e8*S+{a~R)8KTj8+RKd&Vd+rnNYrWE%W6m+g-OQHc zbli5uWarim%CC$mq5zP{s91$X7bLR^{gSY<{Iye9YY|*B0JMO4U>Tm>+kaua%lY4^ zlf|v)6O^Nv}L3_g?Go=NPi0}&)?CfGSM>?aS3$L&|UX6IuHkr zMlIlx$7}K!X@!T>A}F%*^I7>3G&wta&1wmfrzj|S*P_hp4yVhv-YmSH%oTsxYlvg= z+f<;B#LBb3=a{IetqE2);2cqy3q;$n19zPsP#^;uRu$3n1_d?3v12RFT~uFU-$sKA zmx}UKm>wvhUf+6cFxf}?MDif+C(8Fk$E4D;pQ2V*GJz(_LJ2yOLebBFK)B6Xr0+vk zZP;Ng*WyLb+OKR!-G6x0K8XeRBJm|E zDMkgyWd`))X1a*80boZ;Kuve3MA}B`svq|kF9T|M|3g$402>vzxz9F4NusAD2JirA z<)UU9SzIn+U-Eof=WOB_WU%A%j=lFLu!gfy<^vGfr9 zL5f?xJG-(R&a`W+nemWTU6gjh5_e{S&GK3(+hil}K_A^On$yCQUK*%zJYMePuQavX zvva3Rek4|6Gx!}pIZRL;XHL?ec$!i!yG&dWkwYZ%>S(~^CukS@{gh`+`HYyEvVYy2;^>OuFjzmciAvx*`F z(zRjHo4|6k4s;%|Gcw$g{}%DOl0`r4-o+-m9-&9XgxrTXy}c2qt5k<~Zh;xwV~PPj zw2C5Lq#~i9HVpnIr87TUyX-72?e@N3T&76fPsm)`TwBk;%G!$it%zj?Lp`3m?(@zs zGNezYV#xQL$zx6L7!I_I6$dZNgdLs=2E%2%Z(7v4ZQDZ-J|}h{VqNx0}C6uG8Z2j&>X!R}S?;efZn2 zBB*mRruWxW9FcqphHkt(+52{VpsFn7(pV72gbdn~FBw*3 zb-dB`f^Ao(!Gm8W3N_c(Rxhn-=-I5<7B{nXlD<35EKaeYNJKc*0KZ3MfKx84R+kY$ zC~7^+fLNpdBsj-3#*6V(!G~0)sI$xR{uyFpo%jJB_$Oo2rp@-a% za)9Mx4gG1Lzy%l#|Fe9YyZaQxZz2pynp{DAC|0b$DwoGtXP6&7frycT}2WifpQ3a~D7mLa@LC%I^r) zZSH%dEodb3{amLcmIKJ_D2e^9bpXwZ-vp-g8BB@q+e|zQX_dkZ1~)vo(-rQq^^Cme zE2dwNU$qWczFhl9nZ}f;V4y8|-Vj2rV)=-$B)s)T+gXPAN6-$z>d}2&0F5-wFX6vs zoF1r5!1I)qKP&-Yjz^!CfFDz5KAOdpf)f5cYUhUh#NVe`>}aEJvX3HOxiNja^WoLs zk;Wg(UlFtbc4NIV@VB$FWiWPpb@h|OEF%zCF-R;;LXbR4(M(|mK#y-i9VNr>eEHyI z=Ts^gMn?66VA=jLt8LR|o1V`LH2b74@l?DLGFs8pmhz5BvdY$No!*38E z7-PB&th$9L|Ady8Ai`%`g6;M1fBZ2veg&p!0_3YL$&Bu*=~|2-g0KM$;WA$wB_f_V zUpSrsEsXQ=4C7_aNOVtH2T&<-VGzZNWt+_pQJmPHmp7ABTca5*StY9yfpJ_^Xm1gG zt1F{gk94u8{NO(Zqv7|3k`KjN~U~k zDFc_%hj;6?m9EFWiWem^f4JhIQ>W_V+DdJ=o+_|qzfoZZ*;#uji-s_b{mZ-{lR-Qm z5<*mQ_W?&`M~i z;2YF@E4&ZBp_*OClXwJ@7_VU$?J0;N%x@{|X0Q+Hj})yc)Kub3`t<43NH{9Qy<4Cn znF9>LA4?2$1ziCP`U`5^0>$ux%KOzZJ~HV*e<=PavA9X_`^G&@+rDAaE1GbQ48hF4 
z@+=@7_>>I`EuCf7&)Kl938x-=%e6p6GV4@18L%4I`;TrA><{Vj7a^g zJ>+*3G}@bnNB@G`_Zl%X?a)Dm?$HWL&!o7FkgKVqgMf_td3-GbvTF#)M6-#6KeUpw z6MO^h`*QCjmkTY<tku@EXk|_X#W3=qYAKtPd_g|nYv8GcL*f$~icqy9&(C59I`;kO>W9Aag*LnM_kp9fHoEKP{qX?HK%`QLy4rLJuS zR2bX6)lRo zB-5MCpNubqY+zdjpn}! zS}vjm+C*2kY5K>tAV5lMTI?izb)EkOdR38O`NVt=vdivT1b}W-t2X#tsMtQ@SN&=h zX>5yHV!2u!e&ux9A}aT`SV&%=+Tb4Z;R2E1XCbc~HA>KA%qu>hBL zZn%g3@d$)F9qQpEkbCDLqmT;5H8EIQ^G~!V*uBHvr1R%+CJ4W)!#hZ;Q`Vf$WvES2 z2z8{(JHBo7AAp){1bz@`SeQ_Jt42eqPG=r1p}l7BK6 z9Do~=s+I36V?DvEHxo4?5u=LEz0I~3-ifv8rPc-G>a_0454WB&u8aYJ$mSU5>!mB0 z-Jv=*aeNG=2D6&2As7-#=>3mCYzs0(9kBU4@U@Tvk>kd{wozZ_tl*5X4khI8Y{g^T z#&Nu0W?{Gs$h6Qmai8^>#xz0F8mJ$G*ADSi{F8=t*n z*RD+jOGi6l|6mcWgyKKkTopLad5S$B?%BKQ>aOp8Fp!e5V6u(YE8ISkIcfNSM0LmE z{4#Ce8VWOy%z)OY=aX*mP>I*#B4#nCxG&kOR<_vD;QAR6I@0|Yp7ZYkofR4u<~(~P zLK?9o6B{_7qEq!-a0m#0Ph>$wXs9OeQp$)%0=q)=hzF7F|AHQ2c~Ns{0PF)9h2HhA zdafzCgeWO(FHQe&N+?A^O2Ak*Qgqr$oBQA#Q)<<~vM_+typEE&Q>H2=-@8DYjJJE~ z{C7JKrf~N$>V?$#Em^!HIFi*BhZ!yR4zI4N>bBzR6860}lfyJ?Y*cP=ZP^tL@_;<< zG0G)O`#vnbpdLb+6~2FUQ+-2!LKG){tq5@c-r4~#`{S5)3N(&YxxhN2km0CXGar`Z z$>6roAHOs8DShyr_`Tt~ZQXIKfAr!726q73Z`d&;%6zsgHWTS;hk7!)`}FBFs0&Z& z#JCCIL~EOiYd%11)QiNy+@_h_WCSxU)pZ`aX&P#`WoLuySiZh^+E`ovWm?iB)3)I% zCR~57n+I_9KsnJ|Xq!^t%GoX{#k*DnLY(HCc)SV@ z{x;JBRTn${B;!ckki{dTg2{Vx_SdEFYg3Dz*in%MjLjyAtX2f)R9|A${Wt}@Nwj`~ zB2fovHM@w1co6X9OcTq{yGBd;Zs(PA;En$HXU4;G{iJ{6nC@8WvaWmk`@CCk_>g+C zT?SMs+BP&C=!Fn;Xvxh*KfW)AkyQr3CZcbfAFh3VF~4M79E2u}FBU~XEWI)F!uwY+?kvR~ zV>Odr3JZ4urWY|F)F1$zbk-Dmbbg0leX<_!&}rKg4qo3CU;FYkwAKY@y)W?w+X}pbZ$|V7Q;}B6FJas<%jaJfP@hcAs7;elI zATn5JmzugSE2s|28X8GZC2mtpz7M1SZ&@nCv!Q|UBwon8sE~ExdCl4f{Gu6}EOtDC zKQ-*&hy%!M9HfA1m;0o1vyHZ&p_^cI+>%!lA^-fP7M@N><8>m3U$CId{G!#1Py>rVWiVSa z>i7@(=T*&BiQeXJ#D}6+CecnQ3?0`;>o3l6Z4HR=()HDi zZ(LpQtsmPewL78qb8g1O7;4@ppwM6`-EOj%+&(C7!|7TZf^hjukORv=!GCk`l6xqY z3-aZ(Mn!jI0bsOj~@pt*z6h(bh{nOp7U849Mkf(eunQB*Q1U4H+VwqJ{Nm~e5P*f(Y9?rfLi7iU6bX?mGAbO4z~J{gDSfWMSY4O8+8ZVy^4%C4uJr%VE91zCh?7`%luMo@*6}3mdI5GpHL+KwUfGQU>)~vuc3vFAnnSa0R4LZ=XiHHLz|^}j 
z_P=36&vFW&mR&FX&~n)9pB9+>vbniAwxP~Vg4nIb&;aza;K*WA4>};O7W<&g%texe z^_tkH$>^!bN1ktL&fyi4zV5q5B#mt7^fKjQj2=?yaVMuWf`mA{J>=(dh4AN`noq#V z^C&N}lM5!EllVi+IRG8N9TsTmFyG#g!6bMnzqmg_O<0g^WoH;ZU4y>wPK-zPg3{^%bwsOgdw59vR@Q^lzC__z#;;i3v(e~WGsSCb|_FV2ddlj>+Q_u*+r|!^cEe6{~Y40$pz|=6MQHUx)4#! z2VF;rfr4V)`t=R)R*TG8YaP+Hmfr6I41*Dz{Oyza?HNfzdrR-`$wH&O@AlbV5ps{o zUwpr3QMY7)tY|sxQb7XCR*^WzeM^u(WNK8OFPURKj%pQz#u|SHN1!f_0UNT+2d-G87GR^(_ zZ!4?|p*QLV6uY2Piv;|ZDSQefTB(CCaJ8zn6?1tmir9@hb>qF+*&0V8IyrCR?|dwt z)(qm_KP_YWQ|8QRTBqvNS+;8rqBcMt!2`t)3F*NJw(aG|yE?2XSsnWhUfOitGP@qB z5yLKMn1V=jVc-rtJ>HQQ|L$bTq3#q=&#UQ%>#<3vZeA&DWLue`R}@5-u^==j5G%{9GGQKFnMLLEsE(LMkV z+?er1H@!qGe0kEs#EC%y-GgijU@ehD+^NUSU+i8z^)O1aN5rwZl9H=A1E)_aZ8hw< zL?mcz+(;xL95=iHSFKVJ=o708CV9lIOtg9XZzylrGA0I7B$$GyZn!U?IPRspYJ-As zjmc#vi;dpy+NYm5Pr>=k`RxjKjnDq)ZC&QcSK{Up2M>Zn@K3boe?b(bDtj2LpgSL1 zzKGv)nJ6wR6%k)efq@q=EFMkUAR>dTzT>_R0qGZ=1r?%fOvKG6d-F>AMzKcXDC^V8 zpnCp9MXPPT_R__R-KcAY2m@r5saXU82MStRQ?F$h%YHFj0??9VcvQ|UP9cg@fG0>6 zUw8HRFB$!$EfGHiam#039-(~{ZX5FXJO2Fq&uW2oEoLhQmiK|7{!8;4L*7H^wtSiR zVGfugGFFxDRRpIVDZVNs<4e4D&gR>^C&+Wfuu{ye>8N--qKA{wAOt&+V4DKg5TwOh zC?w=R()Dm)$38hr2sSwn;2=A(WSAC{2M{a!2fabY3G4xD#hl61?i=X zvaUOGrk9v1)Cpdq)qZ{nL-f2#u(lW1$H;Q}v}rPuV3>TCE)hs+9>eRk6TVE5$dn(W zHk3E-WpO2n0Ga%PZc`(FU6v=rw^4x7F2m5QX6cKHLaS8kI}_hiBrN}*T4UMs{-+Ry zxwxXe$(4tBk-tloJ1KL$SP-@9KCL0gMeNv}&HcTRCxxW&``a-0>3ZKh3EFd?@S~sS+`nRYcF_`(}YF@UeWK&T##$NA%Od) zsb9qSK~$nLmse{)?GZCkUob5YI6}Fs@8XbwME|s4^NH$6<9tpg`ttZ33VI zy_Dg1zWI>1v#SrXpMr+Lh+#^}`>^J$$gjOXKG_Y=oUTXm?% z5E&r+)xo_%iCzHRy(4HEGG6m}R3O)8~RgG0Rm2r&h8^ z-0hcFg-$(jO`;-0LeyUamwot;*xrMGSk^rKW#kGpv$aE};idGQNY&Oq&N+{k0yI`c zs=~lfMK$5j>H@fDqe4YhwGnKK{G_2cGB8oa2p4H^C_j=)>Mdwe6iWj@Cx)^40w=iR z&!?kyuiZd!p>E=@FR^l0k$qV73}~w}Q@?05j$`cKBs|o14`w6A!&+p1>K@P7-wCTV zu~oSP0uM(oY8Nup#gZ17>SxK_v-lKRdr#liG0*E+YG*M!aOKg|E*hZ@)aR{It!4vz zH$+RnBe{`=#w7;ZgbgC02m`c)o=8QJ=>tr5AVdJ?zy?<{FDNmo{Ik9F+%6s7#%X9e z)y}#8KiV3y{#%4~vXBP;MqtS7OvolrzAL%U+{fIm1?5s;WMt&j9-}e+V%f4DYD*Lh 
z2vl<51sooHU^?h8s3dgifDVjGPn`CH>2cmvqXrEc2xM^ON{~rsl%UzuJJfsp__2Ml zo2K~(2kY#0fBuOW*l1QxUwt1mLr09>Y5z~a6i-wx;3h_-9tr-3QW9xKj`PUBw^D73 zu{dv9@HsR#24^-M8!jt-X*`{#e!<|PE$u!^$M|1S>a`oRR7(*ZfE+%t>D|Bop+k{4v5>Jp zu`5x#h_M;>r>%empnQE(M?~rD9keXjPOB-K+cRK+-&`ou=@VX4q8&TD>Q1aY?cEQ>$^Yq3Dn@JyxtavGX53g@|nPUG}A;w1|!VIOn|?Z^?fqZNZJXV8b>J#$EzaT40vd`?w(lZ zT4tgJLLi0{*7dm6+_eRQJTcn`aXc)uGd?_dSzQ3v#OdSel5EGsCrf!s9~11eB{%bLXQ}({ta9X3bX^>eS5qLN|haEmtD@P~2%QP9Vb17GroOpd!kyNc{OILv~2c0^n zJt=C-hDonE$hEffh;MPY;++UN%M>d=-MY%27g3lpucK%fD7LZ}!Qh}E0KC3%@1Fab zMtq5Ia~c*Ucj8T#n+cHzE;jBI5bGR6zsWAl+H;}GQ3c$sFU%2;uPl12Yx;zul%|iP zI~Y_Q(nM5-GK$b?+XdJ=p1}-u>dJU6<+lh{ZYKqqoRLacZa5DV#G;O55u#Kq4QVXf z*xFjF3&cy4gkzQPKR>-#$ZXVEwTnmDGkx1alC*4tpkz=K!JIhdD_BoTn z{bd6t^R&A+*hk+MV@?@9g>pz7l+4w@ioqIlvpZWuI88OO!vFxvs-cQRI%@8S%U}@& zg_+9-1U@cET6eZl2N52LLzT%@q3K3}DrH0F;WZ&7A7h9I^L=y}#zx-)W{iaLcWPdn z56w_AgQdwDA4>Jam(OX;>KutEcIAL&F+3`D7{H|X$59xG{mgu3US!vbjKWjeT9Lqo zIV?|~1< z@z{0A>#r9$)qJIcABd}v@D$cLAOo(tGP3=mal&X7(aUh}cF(r6?+Uh=B_IeP1VFTp^i^AZl0|@nu-K_N9L$fPZ zO=54VT?CZ5v7P8~2_pn~!a#hdflh{(YU8{3d1k%WXaW1fQ^6fdw5`&&p}y1V^C|HO zs8wz(PDU0Mno>IecZuli+~;RwF3gpkjbhP+NW)!d;)wB}3v6#ZYV>D3p;OKxMc+lV zW>^eJWskTDBu!=DZ|(K^`PBn8xb3@C|AE+gg4Ni*9dM_hU^qRQtOgWy6{r>ZTXdB; z#PyZM-zmx3LrO)yLip+Rwd;n{4isauY%(RKFMNl|iuo}Xb!c&(o|FZ=7(yKAD_!XO zJWq`qCNXreTp`b_Koy(K{tOWThKRX1t{~WNg1NIxg3uIq4biZSI4@bx!Cx(AMgLoP z2d#ZfDy8xbcGsa!*i5CR^I|?V?9Lw3o33q6p?Z<3y4Tw~8y*_fMHxI4r)E2<>lk`z z>*yRgHqN@gNb05KloIf)+ir)w7jGvcRsKcLIT1Sj`4DbB4yK7gr@XG*c$o^Lk_0j2-pLZcg8(I{vXLI}yR!Xk;K2K$ z1TMdO{AfZUuh-I2Pq0Y7D3b8p;zRPw2$%Ed5g>GHk;P7!ef-dF|Rv z`ACL(=$z~X#AQy6>EGZB$N0n%6B)S|jXGzyrT_Z3-Pe_@6K7oTOWL~?p`Pr44JKAA zSyX3}`;E~KO(?D1iSFz?Wte+~)73E^HqjNJ-)pfKM`UE0Y!|jxlnT5BC``@&sT3ET zF`ALk$A3|raz{i}2NRguDYG#{O?Ap#ZeMVhEj9hQ%1%>E^ef4)Dy21HrzkqQBsn%- zVyQomHb$Nsm;X}Rp0pJ z>)h_iF8Q5@+>Lg&i?OUeXt~_kBqpPaVYt3YQ0TtK_omKXlN4eU-g-^zfByJT$@a0i z*9)fYX`8kVik=)&DK`F6VX;2-M`|qH z?=k6#>KLV(-R2tVjRpZuP5QDmH~#y#*DH(6`nzwBA&fsqkL1FcPaJ<9Tb<<$%#A-e 
zEhxkyxrTAOIK*c<7kVfjlgatA#HXwKNo(Yx#x5N#7Qa-h*_PhKj+$J%d2`?FB-$~w z8WEIW@#uZDorXkCn*eTNaOLvl;f!+4Sv9l7zfAWVdR%)uyIW-3PycD`u?98$pqi)K z@Ps=xXwju6ePYQs@s0;q+ebV$ouBp4&n@`f$B*Y9rxgFY((WBD)aWb{`esrajL%ql z1|^JtWd{`{<@&%kS!!+i`bGkr91$vc;6M*@xGUzoG=fiGzfPn%h~x03efv=K4D9yh z-&RQ%JO8GfP|J+1zt@wY>HdCCH-uQ2j5!>_mXBCwW}c?YkJ;VbEBFnqYXaB6avMOu})qk7*3u5Wy4YUKYZ4~uw%QWM;tTk;WLu>jxNV_w(K~@sAnpcPu z#iV)qyP{_xys^NtpXs+|=+{rUC39G~TekLeVX$>MI*zm_n+NRG)$sLBHPx8xV*@bVSx~}~m=l3i(?0QMGni41i;@i55i!Xd}%~F}MT~$%C?=r7}lU}`CsprBhhT%)q^_p{5uV7}80UC&5)0Z7{iE|cJZHs$|@UorltOvWa(}ee2saaSmCIKjj5Ydran4}ZW8<8{X86`ixCK~hxNqfL)|-3WIXq{N%M-Smwhfn>0bNzwRoqR;;RjRw8J36c+4<(`IMQs`CWX{SY1(!wlkwP zrD%?kk&!L>pYw-nWS4sP!sk!sx`^Vp^ z@w(44^;VQp6QGGBG-&0dvvu*m&Hp#V;L42~H})q5wkX5Gt9wo?@4u(LicK`f6xmGy zOrQCHPG)AK5DlC>ZW(dl^)$b^*WXrGpBe3_D0^R0F@#0aHRlf5hT>F06ojg@u=OzASe!5V=tT1-yCupv;)Eh(7{ z$VJt2Jf)&hN_~#swgulez9OgJ_qxu1=dw)FTCQnmJ6fZV-2xoFP( zMeX=m31E!(GnSoY#ZYxl+qusfxVXN!lC&8S9&X?ng1YK)%2umJl4?m6CdS5B?l}dv zc!FWXT{d7F*il~Gr5<(74Dc_i;nB>*7s7vRMA~>Y7YfqvU*C`BC$fHdK$iiEViN}~ z7rb%fZDQJ9U3Fjoo(?L3`)q7%ym12oiVbCLmDzynw{MTZ_UG2h@9*2I*Q=*BWk^;p z$z);CRO|5!kuTbHo8Q7DpYynJJAGCt_+W19Xcyq!q%UI|8mDn|Il6h{mMv#>%gU}V z_Y8q|NHA>U7&V9xvKwbPNs&$tPXPdRF)C zM0>quFO!a5Po-`t`b2{?;JY1;_w~{iRmPLPc*d9RIVn4apVCqaZ*jD%VHe9m9Fh$W znRiCC#-_J5EQ>q{TvoAv*(>9y@W5HNBSwT<{uhtnU>vVXBW&4+n65J`o0?U_OQnCv21=8aT zN{S>Tf(=YWY%q+OlK@D}RuF_Ebj|vm30? z<;N6oogzX*53%_?FyEghZCF9I&f zyc$SrmxZs2@9i3`Vl>uo$HD6N?@v{nOMRbYsOIZK!i)w}Iu&E+_*cT+6V0+x_pki! 
z&z{;DB!libMuP@TM3;AJ1>eqL^Z7R=Rb1-o;`ukUU0t%g(7BAncqE@jH|_`NlpKZ2 z@;-^#{xtG;86mCW_Zs)^{UPe(@$Fu5w9dQ9wytGz*?h8Zx1EzRnBch3_76n~9?t`= z!UU$Agm4$?&&1q3F`$>Gno(cRw3 z<#6r#^}G1zMicmjKM9K_oR2pi(T%jh98@y&Rb0lJx?qOOCWQi>IR zBhNmZfhyYVt_xp3oCp?|@pN(AkDj!p$MKTMtLggv`}+ew`u6TgP%!wo{N+int5r}l zxMfcO(8V?)BOfounlE3zUAkzu)B#V@3wURt`U z!D)O~TYKV6*_UUL|8|v%4wSHwXUL`9F(DRQ&eKI)m8xrfO62)EaqlGz&=lOb7pf>J zbH;6tA?wconYw>|<#|Bfqg1Okq;j?>p&3mAjLfG{OTJLAvC+r(Q&(s$I_mB@BD0O2 zilPL5{ap5C+{2@)n=kbapcKRQD%2n~J$)8{e9g@8ut(^vh~WR$Pchd0xy$iNY_90kF%J!FZ zy_!dV-3x~!)oYg~k_#21Sop}X(@23lA zvBZ%ad9Zwb1<%Cu#yG1W*0M3FU@-l%;@c>7q*Kq~@;Asoffeet1ua#!6Vdx%|4cqH z)GQ?yqJ?%_oBB@d`MXk(NYoiHfTGC8%)}(#_WLOIFo%9+++Y52vzEXrOY*9*P%!G> ze>}6=XY`g=-3ln+-A+IR8*VX-XEgtJ`>Zo8F1>J1lRp+X5NK-K^a}Uk-Exfd^)DV= zDP;iz*fRvgItqBE<>0CE zX9F`ZA<1p^B3gSst*F@ZI*sQ*&cImJf7!AD-8$6%$WS>W z1N<3@rabCyS9bo~9MqvwJy8XlBC3ULu^&0o)x{-Tyl>jIJH|!3a z=gJM94oPR6P14D511VM?tyJ@MrEWc@4kn4$T)HQP!<_U+f`nKUXI0YnP8Wf#Xa~md4)s7~Eg)E6^^#WG*+y7;yPq=e(1TO*d%~(fvyXzF>}GzQ!*nDhBqU(s;&X2rWJN^yEm9O;L!ReS z1V&Z)V@AIKnkgV@YwPLlKZ4D{rJ+)9I67_>dXg%ppM`~@tXupjn>^1EHS>Qp{_Ffn z{Fu`5LfKP{(p#t}^va2aa2ZdVBqIn7sOc3eyFS?RWw7)lxdiFUv=AqHuMmsI4I4W0 zFJN7>M>^%S@?W|%p3>xca1*ToQYER0=76A_{B8fvow0R+34Y^7&=o~Fae%_EXlC$_ z;?=!B7N1GUKgZ|DIwt;ZZ71V>{aG0uZcfq~<23rV^cQVcuVH>=VsZ`f$ZarnTc4 z$lpZSx?_Hf;XaMDo;@9G_?B6>%+7tu?B-Wt=3&#$>QSNRfUBH#()z`w)bhTWG>oHw zRJ-RqM-MHT#=NNcOWF&{@0sLh>B>SZK9{a()^pzDzw}2n(Heb>Aje5@^m{$kvqDiOws!7*oXRBAztnPh{w>+8QO{831fHW zZ9bx-@aCJ=>^?>9;PIdOFG~sAF02cpT{=iXfDZpzdT)9=u*a)0YYJKGye=>FjBnX~-PKAeEdmO&i3VRLW;#VV9ycB{by-%CyW^~@)xov!@kSI}aU zMOtNY(KPq%kT@?>wgzsDtgQpYk|HR`i4LQF({|$`@3XnYs6&Ur#Prk4vl{S$6LdIS zVFri+J9kzI-hv5|eHaBv6;E%{!gCfmcY7|3YflxKPa)TCcUz-IP!`kEe_1Jg2+23I z9Q-Su_hJ_YI;(NIYvVwNWeC~4+_wWs#G&{wrSgmKrPJG-aH!$I1i~ zSN?*cw-IeZxZMWEB}Ex(hOKMU8G@{>3%*~o7s-x`Z1paB_>_G^53h5FmB3l=Ij;_3 z6jXcL)-&qVw7umq={s^-p{*L(=fQ$8ndOZ9t+;o9mft>10CeW`FNMBU+2Mw^au7h$ z$u16bdu=SHuHC=dZIFT5b*xmsHOD-D@3T8 
zj~o@ih=N&8Smxv0FF!n-OwKyR6EJPGTPw^-w(L&@%&=f$d8ppi|>7=y0igsO|`}pc!q2}fM90So9nbV3r=$G$>22K@E+2kjLtwOW;%(2~0 zzB8Au{OE)^so7@lRSoa7oqO&eDbSl#6BZ32I)hOigg4G;G@d7k++B}lEnWUmzj(GJ~H4C#}&UFjy5);!xmahj+RGh2*tHb+&^TzAF~wRM4^mP@IUSV!=+S30UeCT6xoS?@ zi%z?;=~W+SiQXS3`k}3`#Lt~xpmL+s7mp2=P2QX z;hb5P_NHVujC0lB9k&!3b}TLTJ6k%7hJlOcbJm~FlZbz$?!YBC=ge0%+=a+VnAZk+ zORP}76;QT?@87@T*0Ql6V=SYX#FaQ=q2}8h21X1PrR&ALH6G20;519A1rTSN2|qp;obQQYDhcXdN4?T_97RJr z8nNVxbA5_^*BD?BeLaEeIFS@Z`t9sBZJNNfsP4wr!L=@_eqI1)es}f%4Wd@YZ10q0&pP(U4Y@)@ktr|N1w_MR8bQzmum-po19lOt4(T=ec6h# zC;$E{o_%qm!riZHG^e(7ZrHHlO+;5`Sg(u{qiB1^KU>I&MS2%`Je2*B)A}v@a141v z93l3e-qmkezH!=pKtZbW9L$YLmhhMP)PNhin3e_$?9DrUc-Lql6I-)T;t6g&dnDq4 zhf6EEvT4C;yO%eb`SROGZfKv?R@+)YmHz(g9QW;vt914ZA;TQenY83l{QOp)PnknT z4sO;|ZJjLbv2$M6SUaJAGOornw%(clc7@K~U!zj!_98Fl4ikK|NcN}c(CFQVlO@i> z^Y=ncSk$jgjG`El4Nc9K$v)|s_fE!SetN6{T|3!7!JkzF&VpX}AFuZ=s5btPpxKXO zj80Sf6&Q=XukWK4k>r`v%B;THB0t^%;nIPF_4h}UHpK5X-KT9}yF?<6NtMN^W2h(n ze4Vov@=-1?5@VALN_My7h^z_-`YzL03EK@PPcFH-zrpXn z@z-(fUHtssxn54j)TiWH>(rtdEf;-R3`(+w11S}ewDJrIcDj1f$n*y9cK`|%wSxay#8%EV`Q6Kqbr*U-d$=b%!=Mqbqun~B9bYjMb@vCND z4r%RKw7d8D#5l!lE)RNlw<)V%o&T|7`fwHdwZ-PK6tJ@j{BP{-*U@;TDaJdFd(U7~ zBU&Bm2Xu)sXcYb=SSsIT^%s-e>wo};9nx}aSR7nmS%e>>TJ`f=L#e~Y&|6W|#4?64 z3;#ogVajcujNaEjhCafZ84&vDtro-Ro)%}Govir!3x-8@pbeRmUfElG*T{Hb3tm)y zG+S_cWS=;}!Z&Ze3tGQX->mfHbv~kJ2;;!!qetIRy@QA*o{UM0d0@8ApDJ1pxm2`1Ar`~PnFUq6bH(vwEzs}|D-MYffJ&~z zhW!ay8iigpmhT1Dwf!e9$pMsn5;nVNjf5cyqdlIenaS`O_{!++q=X5FTvqhLXPC&7 zM`CNI+04j-f@%SIY)ImY&&&e?h(SpcA)fKq?A zIz%Pgznh_BcU{YXaR@1#mn=!~IeKvK-n~cfg5$0H{#D<$q1Id;zYFLz#czV(bDZF% zP-q&1O29ym@+fnUK5c?76zUT;o$x{`1f#in zvI;O(T5PG8PaUZ5*abhhV!?onddt9#FrhiEGT_bC<1^mow3WJt!WNecN#_hx|}+sYIfyX<=2b5YEqvsGe+MSR`^*X zWh_I6z^$hi;4d?X8Rgym<{zd^S$-!CZ008=NeM0{050|5$3z<_g&~FNVby|v=NwQi zE$0ozNO2{3zRT4-@a1@Xlm>k$^i zV!UQCLeY!qE}?JfBUtP1DE_>Rxa*H-%CUC)NP%Y`#uAUl0l#gg zKrdNpg^^wwoqbiI=av_#W&3Y?ZW(kKkH9U%jI&nxxg`dLgmiKFM#E^ZZ?bVyt4CB2 z-h9A#oV0t-E{r4qdlLcOj@<))VD$btX*mu+VmFf~#DI-P7Nz&4??GTPf#Z@}Fm`Ba 
z;pcird!=?GM}$R_-Q``8g0~2}jdIoW?jL+e_B_k4F~WS5-vUISr4`K^)3FK2in+FL zfE&bbSTvt&CZEP_HgN4qOVQ`t!NA?ldFj$LshQVL2Y;}-auSf)&Y9XuBVg!RE;*gD z!PS#mEnEH@xFB%W6fa_YsWUWFyyQ!;LTRswQ12(EV;sg&9+A`P!P1K3O-D3J$s~>0 zhY2iCb{K}QLIKei53gRpea<{eX;8O+P4sKU_DP{>CHlBs--hKGiBU?oZhmSO82K5S_e z#}%%|NH>-%%4p4b13N+@cO`;iQGftGVJ=Wp@&h-a^gcalM(b9Ty!N`!*mGR?D*j~Q zSSNMIB}BI!1_0*IS#s6Yv2wucbBCmB0s}xPvyUY3ERY6JF^WSgIxWPw>SH&k-?4Cb zMA$#drUh0ASgfRkr@u;bK+EJde*A9PvgtO--*ECm`Gyo0KByQTK5)`p)uO}VS=%&d zS)pm3BiYs2bR(nH><}j+4>(0=;`yPcw0sxRA|I;b=HcRD7L-- z+MsDaV#I=JBeD!B+(*&d9Y+in$+F^mA3nuQy869l9EYseyvNqdp*q1>7lnV>H;Vp4 zgXms8AaP^X86w?Ypc2XLFM>MnH*%~bcL)sjU{R3|sPsblzQNmR?G21AEi(m!T<4Rl zYTsh)cGW;tG=Q5zEJRoV8nF1@JPjSQe7Z22S;BN;|JSg76yMy`Dl7ntdUSVu9B$%j zFU+N0z?;DmcPPLmFD+!T%QN?-i#XR(=5Ue-V80pjp28U2kD-w-bO$22u-zc5S{4ri z%!2Syy5Cs5Z_l1RGTtMMl=M-)zTIG&hDc5mj9pYFq%8>hF#zm<1Ng#p;o3ydmx&aH zZy>HJ9H;&RdY6X%pT!B@b!b(n#V~?>7spC2c@@MliajSJU5*SPw+nBf1V)WP9Fl$c zvT2tt$K+|zx}ThElw~8FXj5^%pyBzg1R&|Jb=jY*IPXQ%lYiXjH^+ZE_CwYy$Vqz(EIqL zsZcLjy#%e_?_;i_l_7K_WRb8xy-fdgqgw>b0kj4VZ@06B5zfbjzuKALJ_SkT9C@ER zG?5W)SDVITbb%R zoAVkLQbHGPrV{l@S9vv)YU2>S=wv@VWruJdimyBO@isI+SJEVdERj}`kS=5;C(wav z#D#!gec;E};S#}oeNW(YQT7sTaN)#Mzh~sVCQpW-{y44Mp5f1v)u}~IKt=ogs_W<{ z`!VFtbD6{U?W=s**D9zwORd@81pF`|Dg`_LFn)1AhJQX|ZF(Fe);^3{;W*hZ>yf*P z+hSTbJ8BK9gXIXo4ebOCozMx5#|vJp8?H%)cGTTzm<*nwJuFr!;3)Juxq24K3TD3(K*UW2eZok+qWO) z@375HZ=oHydOi!KM~&J{3)>MUYx-ps`(spQ5ZUuTBZlU33(bq+TrAO+h!lZP{aIz@ zaptO!6c_`S@#|9mgHu=x1O5v*xMzU!^+2()Vf;>pWsah|ifm%JuhD(}KGrjUiZ5XGlh_=K4?e3wZURGOQ;Z zv^*diZEas~Fr@;jM-aO)v7f=H>>i)drkdag9pRVDT4R-^O#I6JnDeuacQ+s+gh2LQ=iGJ z2PuW2S!E0)x|9@JAn337VM!U#x1NXC&&G3fz-?f<7?OSV7J|x$n#Cl|POGu~xtZ*^e59 zxZsr8fZ_O_1ygD6ewA9L$E?ywNdQz~ma>jy!<=Rp;6<;B2p5(hKX(=ENc?>^_adHnvWPtDM1WXl(O2z4N~e% zUhXO)bsFCo##{==RwRPDOPZxliKEcJzFDJ|4}@+z2?myajTLtj(yyt9TMM4U)!KW+ zE?)Es=*{r@nSHG-ne8S~l@=ILQVhIOMpd3eE}%BH275pKM=5X=kY!Jw(YQ6}_&7OB z&4bok)?4y5%;RA&aUT;}G}r|EKr$n$Q?XsigEKsWKjT&86VJcSnh33tkDOE>k-c=s 
zvuMV`q6L#f-c#FPk3S0#CvZ`6iFbJeCFLZwXgr7O*rz6i!jLFgF;x`>H_R4)X(J@w zl=aqDSE_`>+tlabiC*?xx8aGIpW|PpJZXxb$;6(PojVVgx{ZF|sGMK$H>v_q=rwcB zz0J`d)huN{E8#pocyQ(fWoKCf$haMs6 z0>~~9sSYsR^x;r|#xh{fZD5+ji}L0UYNuX!%19x`!OfWQ6^=>tNYW(ZnG8(ANf}Gj z-M3YbGn!rZ1)q8N95q)q_u)5kO0Tp!gnQL<%C69}XFqyfPESpJK`-tC&8)Zdc?bA< zS0aWKZgega7F<}>;utN_%m-;_PI)S3rhWU4tBoH*7)VT7@%8O+fu032C5gHMqXb;b z*3G7Sd;9+iu=vg1hS>q~yS;NS;63l`C1KyX<{|z3SaEtdn`!Zd7I&I|M zo%M}U>0fqDS9iI361?v)kMUW};-$TETJ4SrR274Uh~2wQ?8<3*54KsuyhSy|gX!`Md!RnAcIY!;PpS)S4Xn8=Yh#ro0=F22@N|gqYKfgoklwmw{SDF#o*(9)j zRyBN0q!d1U&2Pl>rnU{k+oisq0^964Yg#?WDiT{|_htv$uKGFwvGsoce_NydWtA&6 ziatb{7W#ynAgCLNeB#u9=z1mF}lTKq5lE3r10$iJJKT3kPk_GApM#FH5p0m zjN$~QY5c-I<8V%^3ENy&EOmZgV%%_F$vc0p@;Xs5C9e1^DkjZo97h@O`y!2_Uu zA_L#u{_1km=F7OJ&3r9^P2}W&&D}u^c?TiO!k2}&?j;3IJqmKl`9nHCL{vl!I?fRl zk!Y$Jm&ipED$-ORDBSCtm2e}0VmPW)dqngFSG%6|xE;XHcjJEvon!*sNXvahv$gM5 z9>MM?MKRk{u{!X*)Z6L|ZoPcjP-!R8K|u9IuZrWq$pI@p*3j3EIsfB(Izz@XKdkEG zLvI?#s|AP}@G9lfD&tjQ2bp}AQ^d8bd})NPco(Pp-sbkpURtF8QzP&Q2`bz`dc)cr z7MWG3*DS?Sz)*E@UqM1u`g@;}fKx+;4rN$Pk5>|WX1(50Tc{9c^wF}FDdSzPX-&d6 z8YJzuZB4lfgQ{*01y>0F52#HZckMKzR1{Y8c33Kr)_;E+;U>I`&;5L}`MvQ}rSZE< zRUyzq`ag}h>0w^TXL)p1wV&u|X!=(ls+eutl62N33)q(oAAQXcE1P_2M50Gl&7eLc8VHMDODwZh8c)ps$V0= za}q6y;Y(k{>=5dzoTz#WW%9y;iHqZS_~>icgS3MpMh+>*OOPwX?R!~JT3zzvyBmKN z-Aje`kEU10s=1ZHCrMdFbr&lyniN`&VnbT(X(96#4UDudLL?|s4e$q{Pf)A2VL8ev zPZhHc{Lj3ah?tkgi!*VeO24iVE~bu>Ls5tO-AN)Wwi&eaG=Y6=y(@&K(%_JY^Z>q@ zRU^p^qASU11;qE~Gv8IS&@k>i(x~G{&&v^Qdc5oL?~t5U)K(rtp|9|Z3pX%E$SQs0 zqD9FDao`Y0&qGbd9*gbnmFuxk&+|Vc+Xe3H0*V2ocN0k4d)Yst5YuYide`)qkYj=Z z(oerY5q?|Bk9-;cP@Or>#6#gBq=TOG9#U!JXPzp=RUFiIA(~QyJ(P}g??*t?o_!39 zSigS#q-B-81b1vYN;DX5f8~R-K8c{#xPlx4K_8lB9V0zlk?ZkaNka^{eEHD_jQsp9>@&9a?V%SyN|o&0HYA_#?%rCR)k0~Xe0Xd z@8tN>HP&%3<1u{Le-{-^0E|J8 zC%Hr9S$6WBA1-;)gn89Cik|BMoHE7ePvH_3vlx($bqUwLIVZrxv>CYMM25bKVT+Q;cDT;*#?E_92Jyy*g zPnt#p?KA7xi5+!f59Tj!$)}blM$%{E;7cB5W`(GvkSv%_ppnp%10v{;m-|x6?*;V6 zd07p)!Kyz2wBkyK#_8R!F*tHi?0jL}7mcbP-etCDBah#}c}j_F212Ef9Jei4(e@vZ 
zV`qV_42TvJliW?1)Oh&tp;S4r;e7jd2l>kVS-l4oS;a6*JoDKF1IxI$+O1n7cLeU7 zxjx+LN^fRG*akoxq;#A`w`ejktx9_q=ln49rsWt5-hl;;K)@z4A~+|rsC-57*fT5j z(SY+vbV&-IN>qY-kQC@YugpCJd=*1uc9ho@iwImW+&LN5V;dY%QzB=ey3Ycyx7|%W z%~luMOOzoqW`*ZAqxlpTNHPEkmJ-`nmwqFkd}3(FoY`HbRF__x&++MZ=h z?ZX)6sBEp_>y9bIQYfNoE-q4_;ZoD1#Bh%NBU^CU8-qUXML587!&uzsM<*4nNgO(Z zIIfl`<79%u%|KDwZEl;ns$xyue2jeG1T%`4u%}O+%pX~eb?l<6^-|J0?_V;6yS2k& z7$XQv=~Sz*u`%h{v*+Y$c?%+==d_se2T}82IEtuZv9Dj`w7-{+>@xclk7mezfBx#g zuTup;M_JiITRWocV_;z5)&1!$b*^hDR`XmB4pA579-S%UKZ{=OG>D_q`4FO>(vv#u z8k$}pm7AnAE}5YJBsddR8Pd(%?0U91iU%o^YNRVLR*8gh3@jDB`h4R-rx<9U-Hc01 zk5_YA*&(||MXW~enVXyI9Oqnfei~|ctDSE|p0*sDLTO1g%+L8| zcb(pY0Ua@n<1Y%6AqM(KZP#fWya{Ah_F~=zYh_RcEXP+{Zb9JGBgE#WKiVGqL(OBN zl~tCP+m^IEB%}J@Tyroe$P^*#-vSK`YggNAK*G5BXJ+A z-;w3e*9eqL{iZ#5d{#9);`85r)NI+mwA}bOwmjIo+y(NL1sUYMq7+V!F^5bBU1UCh zikk_zoS-Wz1q&%TpL6r>viP{GXNYKwWRD-2Pa3%Y4f?`f6RA(g@q2IiGh;#D_kD8# z+6Wffn1Q;R{}*R(0+(anzki>`Ff$D9F^n|^SEQO6OO%qv+~KljsYI4RN?B55BGrsB zO9+=1QY5kzMb>OJV=pb1NjF@`%FzOOW4ugJ!kiwr!F%?ZWr`fL4 zNDvgch`_Dz1cgTmMgViLhz_IDFY5OLFmZb;pB>a#hA|6XC=Ndgz%JG^ta5Mw9_Vl> zC8_hJvn^Vd3;rgfL5MU|VriP++7A6kBYRr>;+o<*8kF@tkVX+N=wBFGLm?ZuTAzADq{2J5G5-h?NJ%3n7T#y$a zbbwXSzZ&>)%Jp`hU?n>2@sxZ!P8PY=RVNN0|-T%HMneRwLMJ zcD4s}tUM)TJEaX}sHoiG3|2mVp2%00);pe|aAq5a%2E>6dcuJQkxNKW%VDjB2BgYg zdZiXfvl663O$?%@??PImR@}{KsVAdlawnPIG@Nql>+oe~M$t!rV^(#|`SjS&NmwxP%r!;WfcFUONFoZ(t(kQH8R zyP^Dqfzk+OMY#`P@)*$!amd`z&|U~%Tt`lOO|)%)te-H;n$DfK+N|ZUjH1wc_wG3Y zqK?RiUztpOaZOYzZCV=r&9Fi&QPR?y!5(hJr?PHwjuF{4<*;Gtyme#-_OQq?pPYI* z^~knHkA4?qhWyX?<94benaR7!p)p)tlL-q}j|JCXakYWr3Me>XucD%&%AYsw&ZxYF%zk4h?@<`BGP7P{-^MpHtmk$qK+c>wqrDWY z+r=1Glcr6tpy4xS^Q+bbcmfhvmY% zzX)ARO>Nw#&-knH)9!)Xh_EBZ#(x~WV6MzUkmTN#531ibrwMZbR5t(I^h%zC!4Ywn z(cjp!5{Z;nUgA_RHL%u=B>ynZ7baq@oDd>o+yKH4i;gCv-bdgtYxRLO2Tey1=&%Nt zRfeL}`^vc)N~3Dp;==Z(qCx@FaAlNp9Pbrv(9CF|RI#8YLeGPpZ6y90YDL_cR##&# zAhXON$(SP*kATF|a1cLRb?i8IbNT5vPki&ov%G^>$&ni5=tXSwqL+P~y&7j5L~hxV 
zBvie4)RSNLkzKzU>q)VCrtNl;lr%`v6SG?v({*QDxNt$1p3K@>+96^h`}&;sKq6$n?qA-`RO9=<&tTHE{E~To;OmMW!q70@d0ZM?5f0~T7Vmi;;Z}it zh^TSMX49-}29;z8fLQb#sm)Gee!(Aef_3`@p<6ILV@%*01cqdJVG(yPh zE9mV`-|q-&dj$w>m2vOj5x5DhlQHC*m$WpX(bxlc;$3#6pl`+7CB29rX)`{ z*)oorR)8dmx)b6IL-gK%vz5A$gAHJ#E75Y~0i%HUPTg%Ky!3(mrW za#T{%hOLGUa>)BMLLJ2|vV+l1=t-px6d^QgqdJ;r6Gd>?MZ?iGr_t`FR5DHkBda3! zk4F1F$nByXht?;i!^cYSV9vqFORImkfUI8Rv_X2uEdf=R0+!dO{7)kcZMR-Z?2S!K z0nj_iw4LaAiO&abm&5=~Q=NDs#ZAD|Oms;^JQ)r_?0xJjYLN8NR9|^V7ffUORV9oV zI^y{Kme<UrkLA|7Skx5tWRr)*bf6{mJuX%GvqoY3#UKHqcGjk4iP6|9i6WrSy}#C=dkqhQG5{o3Nb111zrn5ObBRW9~VVI8l}h<6sHbKYh)P z@?=o+IPxu3XRvEmCG!5)OfmPxmiCkGpD5{ao-(ZU*HDkcU|}UMNR@ix3s;>`itYN$ z_7HU!<(6ol=^Cv2Tb6WP9lq7%HBk(e840xjNvKB4JLF2GM6rl6WYm$zH9UsJR1Jh| znRC}=r?vY~scLB)yz?0u2$ygpCvP&$P8&_WUw&od{{7z;wOMe!twaDir~7W@=;tSJ z(^96Js!`4BMQiPb2&Aj(xGc9^J%<1YB5{cQ4RRByyqZMO>riQ+hQ zX8^(SFtP9fzr+m|fgH=W=|Dl&5wjaHdW+n44#F_Mkxe+3YF_c_DU_=PkdUIvD!P}t z)o@KOurPEW(vl)-h-uy0DE~%DO9q=+w1Tg!?j`;uQju1$i=2p~b7u3*F8u7z$cF1ZS9C9rz zhBJfe&e*|rDrfLt=yo$}!$wMattvf+GP;SAgh;jsMwy%n|Dy2QqhlFO7f5&Qz!w|& z`tv7ObhfKgMZaPqHbVEsw;yjs=LM zixy6wUgB7uHKh?=!_RMi^pWAM?^_JiQ2L=s^P+#w4sFON*})4lp3p*|Y)qitjDz)X z5IY5QZjaY7w-e|4LmwIV`ytYb47{*tApjr-8A{30agd=k9BC$ODlM-ChlyZ*;(WD! 
zpG`E!KnYna4@QQE{%UG2YZdpQRxO;ZAgAzroI}B%u z{MheSbcAX&5Di#%a^uB@JeI=-%l!SPEWUHA7x}+nU2{9$Q)~$4;MKjfY?Hf8dN?Mz z7OI<7&Q(o&aDeUyeJsiW&PU4rY!7W>D3yy=)S1$apSWDe|1-a1WMZTDWTKC} z**};2` z5PZj~gXQR&WpC28f;nE?b<4Vk+1n+4J)Nyvq%WKr@Z27PR!lv_=thhZD)2&+!k4nZ z+X|VRe_)GGKRxy+)eG1CE_933;YITpP&#nIlbqkGf6&|KziIYR7f9}$QU(?=ftZKN za`xuVFcq=jkwS5cpc9>#5uGE>RpWtgANO%*gmT09j|v7!WNz;BvcZez&o5QKTG?hj zRq+2+@vSJ zlJASVSP#)Qc`(mo9$-?m`t>hKs4ANE zxe}ac1qGt|=pE4CwqS`BYG%S6weu|Bb4Nc!vCVH}~ ze*__zdjVL)iJ)Shkag+OfyMsQJU82N_3kU@ou1yIYx-%V2xdjDKgRnul2peefz6Vq z=3FrWZX5ylaaDf`y^={jo8upy3O@)yT5FjdaLPiKr z=}7P#Z$C}!#6sp$1Vtv)KzicGqh;i?7L|sW75w>G1HPU>D(V-6-0eYKOnCe-6frkw>| zkOuy7uRD5I0j1S9>K4n0_$HBQ4N(aaa6Py9Qm9>HsNtWpUygxYuhP|Ud-G_Z%s6s6 z?)Rq0eo-}(a4LpV>7`V$TU)yWa?Bo@Y%j8E-(d}u;X6RfJP>L4Z8o>Hy))mJON*V= z>#O2y;yd+muFO#|u4zAXDAg68Tak3)$vEh_Xai&mtR^|x<|>X1vsSNna}i`7H0*1p zG$*z;p5U-&+^uV$D&C{e0`xv_IcvT&IH`|DCGd;s@sgeRu#1wmP~luB5kS&>!#p5U zYEy)n-oDm$>6PJY*nUd{U*vi~(a|Z?K*m~Iqv)~k%46JCeNEme_-8#%oNiGITpNbN zx>o2T_C%6ppv)41)0$>x9BelBNz8e87j?FX7XQM^#>3YE=-hDY1isg`>!aRn99qf+ z(3l-(I8~-d)UxH`<*kE?{|=goXCfddU)|e4`f!T7L!db#CfC>DO!G%>-dyB%fjOn| zGVH&j-YfLA`Od;DG3ss4JzoLHlW>6*&kM?_s~k zlP=gci2cQAdy!_zjC}Jz=tPZ?0({h096{=N%FPM)S&ng=y7`V>Bs3&72Tap(> zxXI)X5X5!rFbilal*TZ3B6Vs$8f88$hDq11gPC^m^Ap7&PqC%QBER11bJTo&cClOM zFTNoB7u?-SF!w%H2`U3i+!X;Kz|G5$ncV6HMSmso<-qM~4KE1&EtK!-UQ4k*mciq~ zf&GIMg+rF<5w_PU3hkv2w*4j|vwWGVqA<=tSwUW-uDpH!(J=jOF>F~G*wC(lOT^;B zeZzcmrew0(&uM4P+JT1i*0JZUFTmlI&iNEA&;@gAKbC~dWGy=F(lFQ;XPk+MU3ID* zfobC3N`ZK?A>D8S*uaqf6jhA+yGMnkG5G1rx3 zX^RPjPU>sx9UpstBV!lR0~2zw!D{8jialzuZ0S-tG6yB=MyUS9@4M#OkLV=wi@m^6 zv>*59{M!5c#S~F>RUi$kjXINJ>SUpUdCIXN^98hI%e+E(9!ZOg{>03>JH$Pwp&Xua zYu=T(bB^Vs#Q%Wti5oqPf(Kg%yxOn1l(i=LU%_5(E>2svW)2KY?W}%N#nl=OJ^$+9 z!8f~#i}oU{e*}zWR;JCG!u>z2n0LY!4)L(pl01J7$1>95mbWnRyP~*E9rdeZP?L-8;?0YB0RQzOKEQ7K_$aKmidlSs4I_r;x z-MlxyHTHG{?V5DVdz+jNSaf9ClG}?T5ks7t6d1KqpOs2})R&c@iy(|njMWG}1ozy# z_%KiG8Za27;&w_7j@QimN!vi#pG?yz2!tb;jo8&=CG5f2nRunJ;)PN{)2A(W4kw^K zb1c6F<_Ho7=7HDL;5 
z1P22{XX!otC(lplyucP*SP6o0)5Mm?hJn{_1^O2x=oF#|&`7@f&HX=ozopmtwkRy6 z&xhzf^)Q-pZofe>(-3|+pNI7&B(3kY)6OEotuPdVB968@5lkCm{{79j&&eumprp6H zKed(F|0$;r*D$1@`b2o21Sp$>k86slNzNEEW@M9Gt$;w^C72j(A)(7->otTd!oBvZ z$_d)){HE9W-SG9VMeGWWcrZgUguEjEM4vJJ6Dn{$-U3WIz5ei02Is&w#L|#)ycI1! zQc5O@;V7@Zj2tt#XEbo&Z2&NIHFKEv001g;EU!iQ2k&8~6}u|7nSp_Uw?9j(w$o$= z+rf{W$h1VF#nbh>jcC$*=9C{Z`(x-Llek14QG$~C2LDt!l(L7NiM+^}2))Q|Fb^oy z%>_U=yw?!2w^8nkX-KZ5mtJ&w&Y9b{Z;$QOK=J;aajF4t7jS&kRa}nLOH6k>kRmQn z1RPE4A03g$EQX(ku~vc)ww*cQz>IT)^SIy}R&W~jUf_R?JX@K4F12S;cjj>zAk1TM zR)7e>#6MJ88qHa?(kdCQR_R7q$OzuS*(rnbh7KJ%LS{kKF;l)n5AXYfpM=go`;iiB z1Q1mzBbs7;w7XzTX3CHAppor0`oit}$5hn7m5!C7ijsLqmF*YOl=5KlB3C~5*nR}O!=?S*A*C@g z##0^))6VQm9UPF=2I(|jT7M?B<3oa_7GxVEq!rZ}Qrw4X3N-3+D;kS0SGGa3_ogL? zn#B#DFL0W2WAUZuB2LD?DVY>{vwgg{ySC?%Qtn9;Br#3MDZ2eGuM6SH zh%i%rQEOYn?^ye)CsQuPN{m|Z7=Bvx47W--h8L>6Kvb}VKnhtNDpHTB`UFUF0IdDa@_@p;DZ{S9mm?l-N98Y)wC@vbl^bHbCbG$gcm_=1 zCTpfFUG}9mfjlfyL=hn}RS?x@rdU5U!XPWd?=2%AWhczA07w}m6LdSOpU%NKd*(~n zRK+Njd<^C!@@{MECj80pwvyDW86hbE1-JO|*>nn-Il#sPFnZO~%86j4@1=a+evsqJ zv30aTTJ#z5GH3_v!2%Z{7}K>!Y9;9`Yb%z)dtJ7(eT}hzV<2~-pN1cV{Y!FDL+nL! 
zTS!YnqMa zhb2Frtdxz##nTcZ2ElhLkB)kNI|B}1mX=xxhMSrI5|qoLvw)mw{||rYpb`15yxOdj z#y~tJ9^PQQu*@r3Q)KK&slT#?)Z1^*sNNyMKhe?&e}9`MQqXtNr9)X~wDryNSG6T4 zx8GwEr>~SYrnxtw4N)D7lNXP*H6D!!E{Njy+eOlmGUBfy=8JJ=w(Mrc2f5lki7+IU zI~x??Y>IZtVp=s54&|FDFW?mHFI6B#$yq_FH1`iBP2 zcgZ(=`ZLHQ{tM#PB$F)QHLcuOYay=8k_l_Ulx{!)=VV!a`$fqB;X#pT8LJ*5W!cW4 z9_T0e=!p^G$rq9|gA&Kc!)PNd)ua3D6Cp&A7FNIe=?;QE(IKP8Sp*YcVCgOMRExY3 zA%<|8Q>d`p%zk)*e>Iwa_uKQWUWLwx-66I{;(tOPSGbsR<0am7th2(!V&)b$7o6AI zPo8X;gRGdCw=f?nH7$-O`LI$lN{r^n6Iv{CUnTAnR3+WDY()b~3MG;QWB!Znobc?# ziUzGl0!AN(frtTKLxAken!|d4QdQiL*r_nNe#1WF6QldHN>ndJyU%2*=f`eIR0 zvM+QyAO$?>(@^tqB-Pvttj}X*wS2g|(hJr6c5#Xo_k_h$)IPo{5lrqigp~}e4?T+v z@1kfq^eiQgnpX)CqSujmmF&>Q$M(V}iGQZ(G6lT0#gLpn-T0pXA=#MnbX&b1paYP0 zM%sS;rz#~HwOme`4xq&SAfm0jIpis(6YDfFR=(%&Tp^N&;8b|d?=52VIphzRqUL2x zdy2SC2U@e;eT)APKEU|8t9%Sn39lmCY6Q$-KuZAN`VR^|xL=BcKOw~owd`q=3Co|X z+(j}`N6a?K?3jw?et8=*k6Jmbtzg))io^*SG8nSsn9xBYej~-5@_YxB^N;wt!NjxZ z&$k6abKsXIezAi+Y^PoF&x5gSJr5u+ikbf~FK5zhHw#ew6QT`3-Ia44)q)gJu}-sO z$;-icJq!&QeOSKyaRf`lo#B*7jacYh(gy3|>91ciw=GPkF$fkMEuSyS0V4h$zORVb z@$-m&@1sF2AVUlA&*PB@XbPReWDju0^Bxw7W%V5PSNb`y$_m=}@xz8)biB}*)xr#y zB#+PJrf2it>}qO0T;_Y_L36Za7d%qpLeS_0w-kt3AQO!9U+$pn`k*4d;q*O6 z_=$q}@y=^_Hi-QI6!&RwV1!Ag2xvVI4g94f|Q5ZqeOOx3**AvYKHtX_->=(Q25N^>BNzR=J>1t+S$fV1FD#m?jLORVz7zOnUMaA**Gg(0 zku(aeNE9*q`4=$6HBCDNWAH^e`NOh#|FC$tjOv<6b5@UKg!PP%8m^+Z10IX#p=jKQ zzh51z!wU5(0}G2t-5ww<3?cCqogFg@=_12F?`$h0p;Eqz4`xv(+!5a9)Lq9xW-M`H z3wn%+VbV1n1535Za%gk2MxNt$5z1PL{b&*eR((PhdHX5VF z2f2UB0IJMmoU4-pT6r`V1{<#K$SZcHsCPI9L0$zLWZ+OR4iwZHEer7w)?SV9&YOCq zJ6Nd|0CgZ5Aw-xSw9{f44tWOEBxN&~sr}gKIUga2z$p0BaRG zDj6U|NCoa3kk2}CP!qipIGEG_Fm6Zi#zFz~rgaj@z1V%;cjvN#@BS(u9gO5dCMsz0 zuaNu92TbGLL~0Fkh2yZv!dW0a6e3S}RbI!$NR7epA0}}+2eGjG?#$F@ zIwz*rB=23y%GzfX$HPru`K&p#wg`U&)5L8u-DZi^yXqe7Kqx~_ZeP)N*2RBO;agc_ zFzOe6BIIuP>H8`a5;m841sO}8>R;L#EN;YA!?ZD3@7AND>wN0nkHXt%;sBa2q!}ouECq zo)#1&Q-=t^j$D{IbLL?tuw+1yfdk#5y7_x#n2&Hm6=qcR79^>NUZCdLnHG@cqp%FW zf=>PJ2cVaanS16F<;64dXeFaaz&bLbYP}>$4#cE&{$|YfFb!XjzVV5e>B|8$yCE!f 
zVteleB7v8n5`0_64?rA_vQYm4r7?y~5x?{IFS(9o6A=kLeue755%i(jQTlLJ^gp_4 z($_5shfD7G?`q!BGM4eS?=jqnfoQ}E|HFHPO_wj~mrJ%nHD3`idHxk5!laVd^4CRL zin|@BblHBQL73{nfgbqw?VHU&XLxuxvkn^Z_@VJ}#&j@gz*@b^*Td<H?yl~g;7X){$9FD8 z*$q`iuXMBL7rguMCTame-t@$Sg?Vvc*PxB51+q)_4aw6QtqP@~7xK>r2a3-(w+?4dZwyTPcHz~x8f7bm$mDza1@qqGkqPwbXtDnkV>fe=)m~)>dvF4Z`f}(x7b$>{ENeRr z6mu=2y#O-orfZ-$SZF6v?TNqJdz~@5V5-_BZnI}6AOXAyrK!5za$$0y5x_od8WTb? zLz2?WhR-njBV1%w6g%Qo_JCX-Cd7m2lOGp3)8B@XbLP3Cn3l|Vjb~iphJ{nxMR7kzCt+5 zJF#TJC$u?eE4t4*8_67r8VjIagb88MDXX^?T z-+Z)+ycl@`7wR;WXRg<5@CIlzu;&mWMYnNd8=@HroH zT-|i!=*n7pGf`I2ds0mbe2T(i9MVAZ)OUAQimNWzlB15f-6xRg^8Q9MWKtMS7Mo41 zT|UaILr45xPe#Zx8-TC%XWIIsf63SJ?n1ocb>lXXP@|Qmow+wN|6NbR#ezn!Y7PBe zKqtjJMbr#B^uLliJSpyQ)I>@lF`(!v<5m}aZ8@?-4j$??1gh{wQ<^0uQk?O4oyeTx zC^tHRC-_sPiDQCBD>%TZ?P^#}Y6v{dp?(RZ01~das#c0;oJ3o*k$QSSF? z?cW%&YUP+4G0GwE)@+w%`e9oOxh>4!^*zM^GoPOn`C1TCKLWP%Xigk^$Ec!->LH0w$fv>^F52 zbfm*DozCadaY2r;S8~R)$9%v`>IZ96bFR*{A)tbT!kDaB0&+9Sc2un^O+vR)ZlNgu zS__BScIKP(1uhY*dx^r3NTZ^zMEB-FB-2(a>8J$zKy|b-p;%m}#Fm61Jh^?xvY{9z zOG=Y*MLF1qUg^~Rh(|JWLWe}0lk>I)bm=MIM$Y(fP)Hjo1})n6y?Dg)d_1D%O5!ey z>1u7+ZgQJcb#i2z6d#D)hU*AsBFfu0IHv%%hJd-ZYzF5zlq}SYKR}|1YnQ3|FFwy7WJVb?=i3PDVc|L>X8l)Z3G9LF zQ2FmRi)#%*liSqt$PBfjQz7&vS^81YA2~1VZUsel)-@!aJlSjTCUeu0f!eeGppOeB zF~|(6ZikGmV<~aWGYgK<0+k>Ec`{hkyqf~t`z)HkC#f%op9QpS^e=UWmn9K!ZXs$5 zu)O{6aQRz1jWbUhG!$PXtrW$}e})R2CE)@!>JBpJr4&=)QtGjTmZLS~h96ldc$@fg z%0W}eeh$wYTlwEMZ0|oec-4#!wu(_OX-a#w8J^MS7SfilV?)RRD7P10Fo-m$WYC-o zmWtfptW!hkQ0Y(@Tiqjm%}D%3oFj2ZQ{=g-O}|!{3~N!sU!m!`PHhs`LZ+0|m#Z9( z=%4@&(Z~5MG=yqezo>4oH?uvZ<>gDxJ%^dp?I6Dub}^)}TMLHwP)b;Urk%tgn{wKm z#(P}lj*CGcS*5kxNO|Yr4dNUpc0J(Fq#`*kNV3S`?6$sF%17}xRDv-0qi48mqtK@W zOSUvS>M~r1sAo~0|3DiXjaEdTUtv2|$#Ly9Se^z5Y%QiXAmeg67O{a9DlP~|KUDak)WWwqzxKb?SB!5sZ zAq^JK3M4YAM1A)T2kG+#B$G%;H?oh&!Xgt#Nd}0o5K|80rryI=lAc<9*)>3^Q0Qj^ z;kYUri(SC%n}NwV2E#KfIcLQjDfA5ko!~q3j_yU)b`3$UxEhLn#GwXD-$PmIjr1NU 
z^)+Azh3EJ&1LFmrp~i@0bZXv68f=dJNEX`{+J@fIxHD)G>`)S|qp_Yg2BoRxI4bHz&D07@zplikM8oA}$+@M_cxmOy>G8b^4^VR^R12yIdY&!a#E~Ptk${|yBx#CiZG>BC?Ik!O8Keba zK+RDZ;!EuL-_!}nE=x}zDeKoo1r}UKsI%v6dF%<7EIDehD?WZoNvqst2{5Ij7Iy5hOI=%*gAw;jQzo+4wa!Sn{oYd3 zJmj>Ole6AY#Tshafz2}oy(>5y?vGpu`jDf<9S+x@^kS6YF4*&{lyX|6fOFMt7)Za$ zIu|_iftuokFnST9HWbG~Z!v_x1juO*Hxc+0hC;Y*CmU79-6W7m9?*$i1VEpQ&s5za zzLo#PR=zje^P27A^i!ua3@I}naK{{|*^&*xxrmPgWH6B0Vib2k z4$h+yGv;O{g#G$?yEx%X36E%TV&|v5^TT#Dy=ziKAVkpcRK9eLxbol) zk`gmw#E5HRTn%;R>SH*w;%%0dXdj6k3}^9it{=0xcs+vL{_i7*)blji3XA1vq2XHX zdP+q!o>kFkEpkngPsTAsH1&|@0sdHq$z`7aDcHAb$2+N*Api6$pyWr|KRsPR+Lckb zP(DUEhwu0<5!Q*;6{H9TRV>2TUFDBF__@(e=}`XT<P>lNH`TbO#jsbW-! zmw;+|2Vx|=kK9@sw+xvt;MJVP?`GZn(@%}Omt$A~@!n*L2oQ9u#1xXQVG4gL;wRMu zxds|ziq9=j$s`b;*KrrnU<8iZ`0aN(P8_F%gnU%^eg}KO%G!m>2XEp511zUHE~Rp&b&Eh#J}WWuKC}=T_wb zyg?4`bg1DE^<*@U;)!T7wVc!D3`%D{U@Od^+L}@NGT!`ai_S?5BVTMQV`y?JAjFY< zl&X$$Smfgzp%aHlIp&g*E3dZ|-l7 zHMf4We`L&n!y%UtC*&E8XwWw$#dLgxTUpN5tz`CclWDcX17956Dy~|aY?m)>4w!R` z0gSxR&y;3zgpgK;c*ykmBeXy{2(C700O}WSC}Ss=TzkCtV0K1wU`=Ai!pA}&)48Ob zL~iKFlT1$-?9wFmi`zb#-=vmBdHGz`_Hbj#jCP4|*+2cez&Az3WgUuN4QXz5f1M3= ze9z3@_G4!;KAIQ$xzcRLie~+WxkUJ53v^;=gZhrGci(j`B*{q-n>>6D0V9xB0lo?w|rwx_)CsnobQrK(d3I% z*)4J35l+SO|9fSd-V|mUEi}IGxz^72i52^NW+)zQxt8U+S*Q+D89X(4InBQ`T=JIo zj``qusihGkVwg0@*<}>6myoiiGnpcIOw7I`H;w5lmi5D*N%OSxwZKOQE|euCCMEa~ zk!>r(;+3de=+8(fotG69PNh@FwyM1}CQt|6nam@K{Qs@~t-am64CFooFd@kdz-Rn7YhROK1wi z>(tymdf^a8V;o8sDN2A-Pu9RTpzpK`5%^vG4x%#H740)TBW0vc zo(=BF6UD7uQ$cYX_-sXIjpUvr;n6PRwGM%f#j-5$$r9-#U)7TDpLBn;h^{J+qr- zajVQc!!9ch-|ULzI2fe88?m<+`}XgxA+@t=E#mH=In%1my|n_)Q`Q&vDr7iKp4%qwJu%cg6JsL9Si4`J*<^ zpogqti5O}L{5r(rT{h1@K8K0((pyMK_w3u3g}g5+XZaJ~HLsd0v2)%PrM;UR3UhE0 zZ{NxEqwcL5?FZ!|LQpYF-yQuK6GrWLXYihBCKCvF6~ac6p7q*r)lQg{eL!21>}n` z8g8hits??W1x_6@>_Y>rLFH@W2FtD0#TnW9+~S1wjZB%jb5OTdXTH2*g#7kZ5VrWx zr=mN&j~sta`XT){P|`m+yw{JFrpt4h{0*atZ!Q|d%HTyu|NHC1?_Oh>M#wXra(t0X z8OpaZjW&99lk|j#cG7 zU70rR_}*#b8Y#i?W98;`n0IKb^f&;ArRbKI`Q7;8x1~8uV{G|yv}G3*8zXg;FFiOR 
zeCsu8qqVLt0U*Zc)|yUeknVnk$i6VJTG5)R^wt{z=bEKd|a_IE>*tcZQ94XpbSdpC^gHz$PkGinCf zb*r+CGgG|({>v?5n{9x(f10J+QG1?r+229+FN3OCe{tj1Rn>g0C~HQ!G?{MLaq;!B z66Jd{4|@6 zJ$l3}8dtjccJb@{qrIo62fMP;*>t0H$fSGIXJ-ILR-x3~dE`jH1z4*|qQ{2mTN=SB z7R6Fku6YzRM@*`zUg?Uiz2u2ES^SM0NUW48g=3E75Wd|PUy_4dskUM@5w5be$Ival} zpX+0Xx*=_`;v^GLu#M=Jwue0BLW0lt<|xs(ciW^5s|wUUl#2qq-wMC&cwTM>c4)2) z!PL6K>eoJGqBq6#AkRr201eWtQVsWF;#fbHr*>LhRb(Hy5Z8d`=Olz<8?{AK0ooR7 zdH45@pez^(_%ydH36`srFXEf$=v<1K`{g#LpREjY+r+w`#l)!mHQmdg4r1<5!Gf|V zZm;y+h&kZ5T4}?gF;X+>yNiLd)CX~^#!;-Ky)8AjqUiPoEO*~`IN$T~ZobuoKan!Q$^ePuHI zw?Pd9x`MKf;*~#f=9;B10of`7Dkm2wJW6=}LuZ?m1zVTDxWry6{P!OXVxP|=)A%z) zbMQ=aYq^H-H-pHtKA;Rc+1fxVd+LXPn3`}Xqgz>a%x1ie`k>4689xL@A@;M5KbHLD z&q*a$p*U=~GvUcC2EAH*kH#Q!z)R9Z)|kMm4b)#<$)oQ5isl{!`(;5z&wQoWZbM9H zIyP*$4i({j0%ayJUFzj-Z#+Fcs}|J1Sde-*#3ahnLg|}C!tHA~_<|jE+-4=X@louM zNG*nbzuu&#FfO37dy0d{#$MeR&7Yv#!E(`8hzTyfSr!{K1mwL-(nRrIiT4R7QsDzt zqsg?rOB9W;7lE^fH&?0Eo#fofnkO6{+Mk4>b%~Jl&umuZK=aJr@|!IB2z?;kDY84e zFJH4=H>Q|Yvoi8d6ytfBQG-Sz<$7xU+wwuxYlGf)vmZMlbRkVk>v#|-rSCDamlcox z0sV7=Pa5pD)x+J}bs8SmRGbvn&0IUMUz;h5#@1C2s_#Qp;dN@kubs(dqonZ*-gzLa zUy;VYK5{Nq{It}xOWik5@G}YIK)R~LY440#ObIre!5VNHhdp=ddaVwR%>#2*G*q5# za%POu2^3Y??@5#T171KIP|B48r04jT$2j{l(|59?cX|VCF-C^OO?=2Rn0J zqvU|q;O@e)sik{JoSK=`V!&PMZ#0u8)A}S)nO6WQ%C@5Q>9Tn7Vv2itu;`Fet_as(<6CsEs+)#mt7m}Rp@U)OfU45@}y(OBAtl!c!@`KSf&(BJj*|XN>)waix)SVQx;*>c-z(&veV@)KvPPD@xSLC{JR^`4z>4-F(&`D8RXKpnF&YV|ktd!=3>{SWd~MH=xN)c+ z<@IWZe5>FgFC{mpQofCEb*@Qxyl#hS=8(~>HoKW8*lHc#&D{H-U=wp2G=b{xL_dlv9M`xcqZpIOjrgg=SPR?pHKChCcVR_>N_iz&T6O-rPo&-<=ADMmC3(aP9gP^;Wgd`m9Ivy2JUC zOx#?EIIBAi96&Cgf8?fTVn-}dQ&GlNiFE|wRLS~1CSGZuPIaZw4W8LvSmd zGq!4XZXx(|fzUk&uD%N_xsXSYXwZ((7_ELdn5|qG@zJcNmbWk|K>R)9l)JQ%`oIoT zQTSsQ_2Lp@ckb+d;_;EmX1LTyva2mM586b3XR8Gwm!d(kvDJgv6)9A9_BBq@$MBj#CQfRpM<77e2aY1@DKCXa$Ui*X2jp zOkGLn$D~A;@)Mm-9l`PO4cf)?(j*7V-z7*tUw(t=KVDE6xzUjWf|#Pv-YIrdFD5os z{?PY|eVbBR#0(aFeHl^MQ~3B4(>{dLTm0OLqh+c|hK|<8RP_rb7)*D3w$ePH3*H8P zTYkLzhrm7n)cb5su$qhuNUFETm8VeVZkpVYXgAG!Yd7EI8rN@ 
zd)siiSwm%cqq=f~>dS-$+R)VO*w;kmBoPHWb)mg#HXRG*Y_!QyXot!$hPh0{kJR`> zxB2pMQY=e(F!#W@apg#)25?s?BRu}PuH<^dcpScUZ&sVmldXUF>zAFslR(nluhYiH zu$q%4K}wYxXVftbVf_lp|BAp13j(V*Gxk3y=+&9dKJ+8;AXoyqq)^-O7`ug9##McP zw}l96z}-1uXFZv1Z_b|%ij*?S8LOL~Q1jE{bf@rCGnh$vJb((h* zC7lkFWhgCpG*w6Km9>F1I?{`bjOx3mY9&dyzE=98DGzTUI@srPiqqQrX{Rb4Mv<;Q z+QZUmJ!|*keSg7*9tw`T>Hf4vd(dgQk&{Q$y9;KqW9QDDQDYk@6YX2YPiy#z$G0Gv znH=@;|JDPi~ zPK4e#sf}v=1y`fQUSi;+8-tEso?c$nDAb&unAi2nV?1S}pk?@Gi~wv-Z2cws#7;^d zUco~aA`00>i$xx(k7S{lUrK8tHhCEhKIyoQ7DPa^gxMoWZeIZDH2&7-{zc8Da@&`w z(%zd438%w;=;rL;=ll@6k? zK?4}H$A_%Jo%r=2fSj{TnO-NyJ?r_5SlnHpp>b-1r6|p~O-F8Wt7AI}xYd`Q&CPZ* zl*drp`dE4F6sxJcM>TLF8*A>7!r9HZ+4|S3b#Zg|ejnGsX)M|#5R;lG-_#}kbLOS< z1eb`OAzdS<1^ReF1m#m>mrC1g4Ot1=2qOFe@;9L_1SggK=bv*M)?$=*+&RQZsB}p(@ z9d3}HP4DTMAVF%|vL;IXRi5EqR+CkEw6ASGe{ayB%6&(tN&(eaMeTVy?xBA-^iWg` zVlSD|ar8$@Vsrah>gU96Qv>R$7dK6`DF~O`ff(jMpoD*^ZUp{L=d!l8tSHvDEZoiW zj%?x*`v!9>HGc#-tA8uFv6^i-%y81vG&a4I+lKx@1w+)22>vxrf&ew-7R{ysi#@|P z5TT0DjQRkk-)9P8_}ErbmLBe=|H#C35qZs({l>2lhHODHGeGizKqUN6iif_NB)o3Q zVgE=Mm6SUG9#`;E=}T#j$**kmeiPUYynN?-u0wmYZJO`9F?s;(;FS>^`wz~`${$*R{V62^2=*PimV+{b?jtZ$ZjUngF8 z^+1&|FsU|_(s`kccq`M)r9lykgZzV}sgx`ty{ia)jHvf`^)<4V1cq&EcWoy+eSrW- znPVmSKVlC`PL|^C(7YnQmH9UV-#qTn$C$R{voE_}Uj`8^NEX4EEniA~Jo!m``>{J9 zK&9rmMrb=9qK_DTef5bf7usj(h0nv?HbE4ZbL`u@;roDCk!2L*K4-siX!OiIFEdj{ zUI;aatkuf|V)w|BSQkIVYS5I1hK>zJJ|HivCoXkVONgzVt z8-J|MQeZa>&kK@3=Xr_ZDW+iyn{Ocib`Kt>c-Y92TXMQO=5n-EA8iUSO*sP|SsA2JAeOF8Fg3Sd-Mrg^nAm8srhu8|3pwa4?2A(S{^o5 zZI|g5A_aJr=1FQ|W0@o5<>Y7~TP&fiP%`j5h9Tob4}~#D3Lm)(k8wE^@>F?_P4&p; zoN;LwIQ?^Fs~d@Dfo=yqac#wzpyPvm$uBvj0=h`$EO(;#&7GIM!DI#SkXPS6Qr+AK z0Kbf|N_R5o@*TO8+@jKzZKhu2j;^C1>Vq8JPK1P?cj#w&p`-6esn94B*ssylUygyF zQ0E9}%)$q{*?|_r#3q^ASjt~n^%N65ksHi88>W7$ov+&dh_B4*_7=wM5Pm}tEME$o zo#5%=_g;KO2PsuPxa2xeNvFVI8)&~<)LnT@Jv5sS=B!OQ+v}i(Mg8lRvp~N`#1x_W znTt_7J+o9I+ubO&ctv%VH2Lgk+Pi%|h+XRkNS)RstB8$U&3_)Ry4m8F1`{1YhriZJ z%dzw&Vzr5>fs}j^RzaX)BXlR36(QBSy)p*$FU4}`rsc)SWe;R)(h7Mh%vsIK_S2f6!CQP(f 
z0YTPU1Rc!kH=-)J^XCoI-ET_~#@otqmv`QwusG)Pu4F|Ev&Byk!Bnw~G<(v%R0Y+9 zb_w*s87X^_GQXPeswy%qTH}xTK;`XX=tPr7y9HtN$I>=(BZJQGiUsMT>AhgqRaNOI zrfX!cJpif#eg;_`l>hczbv{kkud5p=jt8%ewwAncw#8Rstq>^%k(M+8Xtx!4-2Tq>3#=?i& z;;!d8N4(YQB$^8sIb@N%yE47CQ`oAY_6^c|q`)<~k#+^9nS3-`_6`R#3nAb#0|0ww zpC6xupa4aTT;}hkBMc&|LAYv3wUVUe)wdggNwM25cB#IX3?uK;D zbLqiT3D27c0CDl!-^9yBXf;P+{Xwjv5yTlWYMhCK-9f3L*wmQ}G(@jxVQZ=2#4eQ0g*4Z)63Qh(2r>W*&Zw>aVzYjc%JNr$fkEhqF0-enq#ph8 zx7FvUvZAC7pZ4ydbK)0YC<#y4zy5)Q9_Oh~X;9R66?`ARz7e-wjJ4=gw$RUYwI6#Q z_BLbv&`yP6`Zg!naiU7JZ8zS0inGo`C*@F@VxTTUYlot^ZQ}1XgS9TpgG$eRAI!!__Aen>t5*B8Jjeacm(>rHr;l`(|6!<{v#BE|ueXQ`ckPN+Qgr ziILV9$-TPi*-;aE$){n6fS?#ASPC(kCV7m)1xjGtrXM;`vNrNqoc{V`?R9Hug(%MR zO(0I06qkQ51EQP!kb_3Z+pI7WAmEr{=ovWJ#EO+UPp&g&E zpSc$HM>ZY1UFwzbXB!5OS%bu;yOt&X?2ode;K6_P)^9qX!o9yx@z96~cb{qS=Ff#} z-+2hSp6$8HFKI53S*s6}p@FE>>c~6&CW3*Eu5rQ(CVi66~_N5qYS*VB5_Mla!Z;c*%dHPTP0XB{Q0zP{(@1%5K+3T+C)xu z>-Aw9tk(aQ3L#JoaBt+f*}i$SVbI&ZThLEDY&o+W`cEESac%Yzk?T16%@Vlm8i3}` zP<>k6`&Y=-LI=fUuUhMO<=*&8VN)8K2Etbb4h4zh>T}7^=IEc z(CypD3xX0mj@qn|7>r#r1vCuHVG5puccSmVi`nD!Pw{B=8dW@df6K2Tp9~y2+=B^hK<$VA8tj7+f;2hQQZ6ezz{DR33geepsf@9wg@Yz~OLrODTBkiSDha`Xhf3^xj->71!Sx_F*eah>Z zxf+326rs8Sgd~BEe{7`wL}{)6iL@3QH0+%$WEWFQ`|5xAR7LXJ--6rQ&~2?vZ!lp% zlx#w^)8k?KztR(3c%lIxGqFahEzb*Qx63!>*#6NXsi~5vLp|+I`DD`f2)RmX`ffe+ zw?ArgoM~pt%`S>I=HL~0_FitPh-zTTzu9jYQ#tk3?uEy|eem#1HzatobuCCEhR zG62_swKoe+=R5S$M+cUl(*%I8$kb)mhYoBQ?07qw(Xosg==;n=b8D|mo88L(z~J~j z#qXXhd4C`5R*WOFd-A4>xp z(ZX@U)mE~5@;1GOSILT&hyUz=7$SvNg7MB@SuTC?rW}=~0}vtT5YC2`{o{iP7 z_g9a^P-BAz1^#6|@YdH}PxhZ_UIAovsz!fR?SIz4HE#0V)ykV<^YnN5rXgK7&rQ+l zOW+Q(Myby+7ziE8=wn$tEIm;)H9P)fK;_;IDr&NV=P9qg3-q0RKc0-VTtYbiZAIm` z;OxJZ-2Hkr*wy$5#PoL*j-R4Ems zpgFW{8mqs5`BECo#+mx}Av6HFqqWmq`ngflgy@fzs5|n2M)fO{Jx;qQ++7SrlNIA( zleX1Lmp_`b4g#ps`Kj-?R`$|qiV$T@v>p@km>=TvJb7NjX9r>yo^O{5ePi-dybVyPy`lt;eBg#X(NB%D@UpHE^XF=a4NS(Aa%0 z=v>mUu?eP`L=>d!{eRSNT~4!B3+BX-wGX#EjMuMZ#jZw$34pAe1YlQR`#S=9zW?|C z!JPl!CZzxTL2e43k52Z2@yO(@P_z`PSqySR+#S8;&yu8586~D6>PwHOi21jOj;VE= 
zjnTo5tP1?%;hC3m1KeElJAC*`eNAQ4^bUCStfTMcUg#$N?w#q-`E=yTY0>FrSUo@cW%a?W?@UFq0gHn1Oq|LKGafhJ;*O|l}_}&8ZGlQJ{*RblpD=DDV6SxPGLRJyNIE) zfYjRed0*5keZ3?Qm57cJr6;lP8s9oT)FpopkYmLdsf7i<#|^^s^2YZ!CU>v;z}4H4 zLZ47@qL{KYqXyh3V!%-bT_>i0`fG*zang$|}8GRI( zq=y#F^M>W0@84fqjbHaecxbd->XfH}_`Dbw;2n9jcX^tqi=;m3A7J_ZuYdgqZ@l7$ z_nBtnBT8FZe~)Tmc6cSw#*#h0?$oW|UdCE!5dP2a@&Gd&-F*JnI4G4tll zqx^Mw>hBU^1s>A}7Kvxfedy;w}Kzj@c6*ymXnMU zZ8+>;s;K+)Xq4&VK$i%SGMaH;RN0dHK3So1mxZ+B9NZ0JOToZ?%@(YB2a3w14;F<1 zuaa-ex)69(M9@*~cWwW$m1hdmI~XU#Y`&Y0`Tjl-{w^^6g0aQ30Y5-Ml=L?GGgIYb zV_m%O{1<|sYx+Tu!cVf)Qbzr1O0MZ4Rn!|Y!u-@Bt!*|RepA!=gQn(xl$Pz^-IEIW z%AgOGZQr3HgZ#|on)x>&ce`CMf9yX#>^&H-#HOlMoy<->*@)KZK5H+NJjOB) zFNp%{XNJzpX5l^H&km^q7uTpfFD&{1khMuO(>@f)9{Iwh+0}trmBA!u3!@;p5XNk@ z-|fW(ZlOVV4Eb=FTdC|G>>Yji)6%BXUJd?|W8TzPaG6SZT!!zLJSR|>)CB?!;tf|+ zn!R~YP+%z9T${Jg&eUFwJRODty&Kq%*UxA5@1W$qy*@!*Yak~AnF?_I7W`}KwzvsZ zbzcNTPVsXQqB?Tw(w?$Q@9+5Vivj8`C0$u)Q&3Irpm#H|Sb}^r1D!kSHdzbj^@a=l ziYPI@JINQLFru1yav>@$dqCxc{Pz({X|K{98-Ui$KA?DvY_#YnH|WPV9rkr=ndBBiE@4|>eTxt#i@~u z8widnjS2;07Q&(RGSflfJBHDk-qfihqb+-3gENcT=(Id2#3TqaVe3Uo5MKOYIqW?y zS9nqQM^g8F5HDhOc+2Z40nd_)nbaC2)gKJRl~(8p)K{#F6eH8g2DPwY>&fTeED^SL zbDwO>ZlW~)`9nO(Jt>b0!_sQRx{}tU@T{U-ob`d20=#4Ox=Z>O@cBA+k=A-tkk>NK`|n?zFx#0iBcm3z;+xK0QQG$W(wX{LQ>f0g~iJp~QR zx+3~+GMfy?Y+}wpbZ7AWr60cS{)cj1UFq>I!qGYA+#BvHV?z!6ZA!4YNcz2AUc{-$mWoTDP;#Em=R!GjhnEk>%` z9@tBMd6UE*r746yW#mht&8=`M5!FIq@5JT~eQT+-VtyIyd%WBx=EFy5xe=G!h;wm; zD~T=h%)%fKS?}wYAiQ7}VrulzU_H+CE;<|Ik^H&AM1i^!Y;X8=d{~vTvQ7qbj!c!V zAg3gp(zxNZbHGoaKPP$a`snC|3|Cn@N&?S&D&j)*KCz~mpN~9wFXr%Y_%2XE=*UQk z!}|t%3ScdY^&O(skp{VEbTq(gN113L`-*Fp*$e*$7~>L7l#_@_m@Xen4SD ze*ip=-eKGfyA~8IHLoH22(%&I=m9gQ>4hCMo}Y~n0uF>F1h9lZc-usGX^CCz1%y^G z%eS$=b*o}LPjf35ot>Mh>y?kbqsu$k3UB-F>IC!PYv*hlPvm) zWu6?1#_RVobvsYW#ShS97oj zpxci8N1tMISabb(p282G$=+o@injfFYF?^deG( zU%t+Ar>*d*t@@+3j3M>&`Ib1PAU#E~$%6IzBEs7AR?r7ati^mv@oxIW!9{f~@Du8w z%Y1!dNvx_qHi z;PY%#darB)p&?y2X&2JKmgh zQiN3MQaEc)iZHxp4R>5zFCp1ODgxlSm2YctE$OWd-;PI416o6gKv^5ZrR0}MmeOrJ 
z`|PeIbR3+h_=T|%YylD(`E>@^$PzI0#fS!+C<(-I$bVGgkvmKHh8EuIX`}P5+`6@*D4v4e>5~}pede`%@<+#V)T)HV z*Ws^xSvodqt@0~NnopSMtyo~gvCmBRFnt=)l8=u8@)go-c}5&P=v0O!YQ>n_6_}Wj z73S>%Etn{J3G3x~;;{m&j`sFkn(B^vz-jRC?VIGR52WrMExaw}cq{b6OP*kri%Vmj z`0V7sE2ipzmxYR4g~#@6yvrtySceG`2C*3bqDC{+7d;ew_@0;Nsco3Ah+Zy{X2dga z>%0q;R|Hl?GUyx#`$(}AGIanLi>(mLX|ZH93`@0&tqbZb96N%NM|KURH@v_I^T=YX zAa9wYv~yNFEy;k>r{Q~{?8)Cj!x)`Q&|j>Yk6D~9tOVl2Gf5MUgLkhJNgpP9z|1f( zuZ4)2eBzCd2HXjV9jDa4EJlmKC0r;_eWfhI(1yW;us3NG(knqPG4_rQ^Fu2 z8rR{D236-l$@Wmp2|;HfRB7@8zff^<9wF3AkdZIyk_0M7u? zUBUER@0XAYQAjQX(*d}@C&4CZ5Wn>_W_D6Jj2GR;~um8rh_PFkuAMEVy5ii(Hf&W z>Lgfp1d|?Y8!{0Flx$kcvlCA`?g?c$cJ&&2(kpd}g3p zDo*e7=J(~~|9rF)DuKg5L!6?~QVX+CEKswM zkJD-W7x&&dp|3uvm16uP0ws7}GF;sLPzOvHSCv7;ogv16)@AU^Kex*(e{?@zw3LY52 zLSMX}G46tjX>p*o)>(xs=BgpESf~eHz+?aus(0%b4uYf&}1xBa%trc^jF51*BlAT zRnC%kAmK0_f@ZyaTqA-Z%(Kptprsm$-*VCyAf@tmQ+9myU^J;^$_Fh2J%q7-af+-g zHQNVVgd)&c4c2}BuF-~PO)B&*K;F&sK9GpxU0rV$v<=qcFP31qLLF%Qad=5<|m zuxUs!r4nf*s-R-69oO$oJpv3IPkyZjKcjxkx6gl;u9d0cR;#h;{Io&k1fbe!ANLT; zG$G9Qp%2X9nVyRBaff10JaN#*msOa4OXuIm6+iEe6&o4#7e!PfzK3*6T??#6D*nbL z9WZi)9KIEVgI7b+v`ySIyjA#srOujKa$1DkY~WXUjp$8PA;4m;fsSYtjO)}UuVt6V|CJs}PX)k% zdfHHF)y>?{J-x{jBE5~KUj3$L0pyRVk@e*v zs(5&rK?6?P;n>TjVp_zYA9#Iv3`b%Rt8XW&18v;ahJ$07vg0AZ#fBWby`%(o#rJoH zEl<8M$Hllx73Vz;Ovc79cdPe%lOGWH)~+^d*w?isLwzM-XlN}E!6uQy-dR)Sws_`_Sw^XD7&xp zVD+fj^+uXQ%{CFsn$qt8w7CX8urF4??$Sn5M5812Fv-dP6XQUbHYCR#%x=h8V^Wy7 zVasDg8Aw-&l-zIVGEKs&4w5(j;VN)e#A65V%#~a`O;-^7=26&@>T}MS4Kf=jqv)f1 zq@y(907Ssk-+l6?yaTQ&%N23Ao|SG&vQdp{96Hii$3z>PDzhaEe+*0kc``JkuD7Wz z!z0dGTdva499Xmc+DhJbBf-xBOyrMTOO|UB-FQ=cAA;R*%^3osxm<81W z4DUj1Dp=CYIE_j-N<2?u9;R(k>A>0*J;=kRQ*iLHVHI7ZmyzX?$Y`hnl&hfs#~J}0 ziHvvyrY42|GM=Kvs^L?lJ)1g}ypCX!j!(H4gi$M9Xlid$oHM&z>ZvErit+n6Jq5!- zw(>2TR1S@-TIoUsT;3Q~j1BzWey{c2ir^vBF1He(%zhN3!FME6TxjmWK0GT-jH#K2 z80b}Xvk$=_i3}G;e3~pAhmS{B+!d*4aCGF54i@4GNn$LbP6U`H+eGZsG?4S*SQ3eh zH(fl`El#j1(Zd@?$Yjj*lyg(f4Ng(Qwaozo7%+3GY7*cK+)U}(a43eWVbpCu&cL`P 
z9ItX?9Nb*oK>iwki}AA?~^}0~c-fy-&EDS+Io7Dfy5pe~?s`d}@OFh_rX;7vmGSE?7-Cw%_U3M2# zMVd)y9%f`w7uV|0FfVZc_M4B1JE)t-H{4e7oQe8HO3WRgL0E)7Z-B)-Rt#KL;tlZ@-obp~=(j_0cl@G|r2ts^JA zK42l>&INqBS_V^Vv=SWI6|9mp(65YOGQ3u2vR0Tk!u*C^bjg@U^nwz$V1AH4vQhNn zqT8NYk zBShd|!LpvK*6_3XlJE%c;RGS$RNhk;!7>8|F$So59`m_$Q_(O)5FumhN26715l!)= zQlE1(P1>}p^mR^-;ew+h4vL~j`?zTGtHg{9NQ~+E)X*K(TU#a`oX@5XZ??Y8$tw=W zw!+TDCa1qFy1&f4j-4H`E;(&S8K6dbL;R=8zVqj?WiA>uZEtmY+T1)~$pCCYv@@P- z8JMP``P?|2G=?yP2Y&{dz>yi9 zXt*i7_s4G^#41~bL>T`y6XV@Bv%7?b;0vn&dHrs=B4Vcf)ZNRFMQK>pmHUVd99HLk zDaJ}L%QhL=Y|1TQ4L@K{K}KH_6(wu27x%8?ECg82q6JZx);nBhsO%wv`yY@_jSScQ z-*yGwWSojp-%jkSqOOE=j<3*=7rha%38o~RMtYPKkkgv$VVwO@+;y#`XZlyQi)bx@-TsPi% z6+d*@SPv);tdXo6<; diff --git a/sdk/ai/azure-ai-assistants/samples/assistant-WhEPqpcbmXadRJmCzMUeTi_image_file.png b/sdk/ai/azure-ai-assistants/samples/assistant-WhEPqpcbmXadRJmCzMUeTi_image_file.png deleted file mode 100644 index 02f9bcd170a6abbd4ac83d0be8852797baa3899e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 181757 zcmeFZ2UnHX7B)&WcF|Z7P>4!X5m7*-Yp?;*dlwau4$^xx3c&)1NRcK=kuJT1f{K9B z`=*F=HcepD!TT(8#`oPZ?q9fP3{Mh8c=vkOn)7LM`6$R;q2J21m5z>%Uh3*aB|5q- zGIVqsxi@db?+6@`%f&y0>@I28DO(xYIU3wCq?0wUvo^P~GdH>Mr-R`gTN5iwKAy8Y zeB6KDwzIRg6+U&!;{SXBkJTOHQ?;}b8otRE>#Lf!baX6b{rXuOB>aDDEAyDI)%uW*VoqJ=w``*wZ9rcFtIJ=khm_wsI4;4$UfjP{ zKD&>rr-tKK@@R8XlHEu{q@`&|gs8L972ku3c6M0-0RgwtHB>TgNj-S*AVDoftDx75 zS+wh-&Jg$hN4_j#iG3A*)UozVX=&+zw6rwoAnt_Ys)=vpB80bY+csmzCrh0jX(TK4 zU>k$o;y!T<6Yd7*+?V{~I3vE=h zwt4FjXX9`0?h3oizKI^Izrx!5M)CEuYMRA8J@SylDY@Nu@=J1tUZpc`BSGB_*2^lsUUxCyNu6u$S2!oB zr6l5~40pRus$vOevP#BEB*|}HdbGc?r-!EPkfNR2(wwB8()fU;@a0EeMfUCbBPn{t zS~fN|`8E29D#rxO+Y|(Q+%mBUv6r7P4dkU^7wqK^h^3?ZTYYIDSbr8j@i+&^r-a4E z#>Q;hA-VC+eC5V?`BuJK(btw;1^cW|C?4?mx$1c7@@1`f5xe1+OEjwM+e_AJ($Wu= zl$7k&R-DPrCNegtZ3JVb?Nq0wc?fO>I@C;4F!>(KVMy4Tb^tg z!S_hQF4<5ZCx0S&{w!yOA8RZ5+jt-4u>;uMv6H1g^?E#i7Mr%FrqsW_lH|TTx1mXz z&bq6}d4_Cs>g@WRq6)YCnm=0)RPEEvzjJA*CZh5EKbx{|Rql3Q==HXgP^Xl7mg>F= 
zWJ}RV|Lf@G$6WqlKd^cY{)Z)1lGIM-wnjwUtneMdK5FPLcGcCIXi88PwjT}jSR7!G zYVea-8_tOg+4|jWWo2>Z#W|Z-YH51ovqGBY=E?iHHD2PtCCUWzxU4M>*p8o$ESK^_+aQFqUd4%CkG+KEJ$elZ+`2JE==u4ipQP+lsy92X%Y?bPuk{myZ%p zY0oq^o9N6hnD2HKaGAZ0;!_wa7B3&w^8MqZ=2B13+$ME@tTw+z2fzFBcy0l9dSkV2 ziJK-R$D%REg2Io|M1M$F<@FUmo_9enb`JeTM;Ee3@F%6>0@^12~bCdsTdHBs@kS9ELP%ap2cpRDgIh>Z^Jfzo9`bEy82B0_@Rg`5*Qhoib8a*$j7Pw^DeRZPTSFr z?4(amnAQKZ*g&W5KHuf&rKd^`nB^ZX(O+z{Wy8*zG{i_bPi){yN*Bon-YZo78~-Yjn^t|QloX^h+cParaB*EVF)@*rl1g_yDJG`7je(~r zPBv^76)Zuo*u~PMHCW#xp>VQ9`?ueIvrI_)lZ~Byq>?jA<>JMQIWwfz^71P9`1nZ6 z$W%L&+*{*0bF}6G9UU*juS45|N%@5;$aEv_0qK2)xFCV?)Yv-~^%9U{K=ch+i4Ehb1H=q!@h;lDe_Bx}0rX%R=_OFiuiQhr)rOzS5s-g`a#6nFZ@8 z=9p`u?8{=!)y6xrQ+zgT*|J60VJxI2S@X4OlG+F=M2jJhUeTNQNEhk8PfyT2BBnDc z1J1a(+ca%c$5yO~5Q!Iaox9yr;_lVTqhFHDpyzZU)9C9Nw)MVD0#zdhk@FK>NmyRy zDjlC?G~&Sp7l$ogN}oI;{Qdnc{G<%40&NmbRtKLFob_@jrxzT)yNSix#revzt_Qcq z+w-WGCA!WU8X*Oald1S&R4-;rg~|YScEd55pDQzdq^PYbqhy-}V~vx1_Hge1Jhdaw!@Q@Yt5KFK1LcRD5s?b#9IKwx zR(;Pd-Yg@OUXE)aFUc3%?b-HqqN|X;(S2#wW@%#|gT9+0-O1y^(=*+JttjNfOliz#_vmj0c;rXTT9+Wwh|+oMYQ_&5tr?&q{=z>SD=R80 zEIqIhf&Pwhb6>*-Ysst?Pa^AE|LT%fv5S3VheF*@->zM|TyToS%)<|G{>ZeGn;MRu5?m6r)iJ8 zX!Mpn9Jg^UTUyg}@?RN8I$zE-u2p-&Brx6eDXnx(3*G(X+eq`7zT6%UzJu~?2{=?P z=yUsa@BVHb3N-$}+xvv5sLoD_mA5rf5~R+UzQ4EOLER1Dm~fjbJF)j3kRFP&=as>1 znRfx--ywu+;3^8^K|MN3k#nX|ZRB`P&N^#dKouGaVxms|9k1yp%z~re-v7g5`h5A2 zghwauSO2FIeF^gpt4p*c92a^+tndp_=L}S%=N7L5k7k=TH!S?d6|>C`|+T-LW#kOmUO>LoBMv8uh{vp$ok{ITfu~klUzXmPtKo zsEJ*ZZHliikCZG=*oJUDj|j6YkV-3_dl7U>KNX!fzc4gl+C$u9q=9{PNaANYvzHU< zvdb#1^$>@2`_>&hT27=Dy&8&itx<>!3X1!=G9=-3U)a$Ml%-_h`?f-sH#j?%t<*rq zHjV>9Ji1!zH|^m6a+6v8(tA=A2g9tGTnuP4GmRpXMX&KvJ{i0UOsT6=Af41f>?S)K z+i+~KUX=~k_}IeYBBRUTBR{`Z;4hujcwRTW9j}oBd+KPPpG3`B#j2;%1J!0z-`;B# zIi+vhe&E~SelU$ZuUp6H=oI?V6Quc!zVHT;2B92xP4V{aH!}8yJjSk1-LP(6yfiuC`7Bm{$0Bi`%|ro0O}khHA`k2WC*&c zw*Q55>R)?DkDITU$HfY6JYY;l;OL27q^mN>B2mz|Vr>m)zXlBbXxh(9ym8 z^C|ZRI=Y8NBaQJV*x5fS%;6~p7t5~UDQ5AnOjY2nRez}ey~U2&@^xaPf+EwA(?UY+ 
zyyMb->pq8ac2lfMSJg;W3#4a0dkw$7ICkL>69_n?i!(IQ6f46}m{BxWl6m9f)^dMW z!A|RcNNtG}b7dN|b^Km|2c{p|Qs_9@GWa#DOU+XaR8ioXph?5UwY9Yk89a_Q18$M0j1Ocf4t6Q(zpk>-oG`jjV$=_ ziJ5=~NoU1TT~pu}g2JN5TC}ZHOFdSnb%q|DD!JYIs3Tm+7Cf3gn99U^{cnSXNz|`K znx4m4+i8&BuL}EFSSF%dAR^>>=X+|RHT9F1NZOyI=FH&NW&i-UW(u~#>O!AiN4{-* z_82-=J?Ku~9V=DM*qE5xAY!Ds6CMxFr}{?W0!m!9da*Bl^yQ~dpFVu_sPW??234?j zvY%&rboB|NVC}PEu8YFE++#T$rU2 zs!`|LeGh8!gYY)_KN#8B*+q^*EFg@={`-e5*ws=cB_+r8ON!7qCUk;}oqoKU1JY<~ z)A(3Xk>vE_`*H1@_}^Ef3}{`Be71wdsGPhNvA z4C8U{j4n^0JqV-ypopJhWo3PSN?#94cfx-%V`0IGn$CUfm=th2v<>A)`%iW@XnS+| zIX|!&t4#Xh&IeS>z@y{;@rGi1hM{V#^gaQLj;52#gQS+#O8guKE|tD^Ef9b1vzvYI z?%l7j;?z&fR{Yza+ZN~9jRZJMcBee!ls_&cq>19J_VMAq=2w><9ch;|zz&%kYk$pW zTr-T~Kw4{}O2Vr!tCIEn0&S+~|8e#9iF4;k{5yGk<>kiBFF&0WR+8>#z;eNGI5INFe+BBB?E+FZ`;yznc_VI z07y2&54CsKZ`wZNd*JjD`JrbbgpqByC={30{$LZkH4UeWSJ-z zUd&kmgsdam3(Ox|h>GXhx^=4(peMMx#X~A5$WI5DH>97}d3onz#nz$Z@G@{@A&^pSr!gZhpyXrPid+4=tOH;RPQ=#7ncIHRGrYi2mh&id{J-7rZxH! z5)Bz;7%$bK8hmaP?W+ZfP%;DG%#p)~FW_u9CaNacegEKNK9r=DowzhNp0qqsC=YTc z*IaBjqEx&%7}hcZ3O4dYu-l*Q&^hb#=t*@@iwx$SRZ?}@4KkW~%e`sTw9-+$lP1Uy z7P&fPn>1SFcE#ZUXq?o_`kb9&RrJHF>+ADtRwavZ1|sH$U8%_#%jTK3V5?O8rEIxV zni&^?73)EFvY^O(4duIHToZ1SXO8bBU^lEl8>a}$3+~>%XODpG;I)&w1u78eb_|(7 zrot(V15=W!8je0mwGJxxqHn}Y*VS`0czcMWvHq+j@bT0K7o3jd;_`@dHh<~6{dmxl zs+*Lgp4M?PtnpH*fLY7!?jq-oT&t+AXSt!|{LPJ1%-Y@>5KQh%31}=ievA@7k!dL( z2R*cGDgx(-zi_qXtZybzx2CTJPV=3+;&&|v=g*!xYuR~Qu;?;q%S`y#bA3W9v7u8? 
zYL5vBH1~=U)eabopVasS<%*F4+oiPy;7am6$GZw!doxh_8_^<1-g52+JBadJ(*!oF z$*J%M`5b=0pjQVmtzSjkcFFSGo&JC0u7$n=$rlpk*syV9BJ>%T=}J!31SR(Pmf(ax z{hT%T&81fys_?}+>&^_-s)IE9E*k;b(mw2w9|q~MPFN%52`iulbxkfxJfg2iTwI(8 z$6C3T5XW5heO3rgEio2~@I*5*0k6UDN0hsx_{pPgwGP+6-o~xi?GRNx3a(OV(@w}o zEVIS({A6pENn;iiE$=OF@g|jXExY*d^j{_TZp6i6->mouz4^0PP$Zqyn}p}-1-rSq zZ5NZKjkP!Wa}P{>@Wb{pL%nEeBQ=l+kKPqYD{o~>NF~*#p5m$r_lu8>HNleohhlTP z9JEVXJpnJ@phI|d$DpY`pS+md`TK?qjBzs=Rt#(u+n!{}$}am>U7|WQG&HaovgH9H z>RT>~iV|Y#P;AoWLQNrMsf0Swk`gAq*uSUTx*Ot^y^yAKz|!oL;e z1~$mC?jYSkI4Qx@I|&I19a$#NBAvc(DF!NWDCf~T^}Vl8;Kql| zvLU>ODPhe@pDtB`(+s1c<}wR&3zE(8?(TX49CPzMu`!Q-&oz#6R|6ZdU9#odMp>{P z*z%KXeUykOyu*R1d-@JHZrtEUF?e!FM9p<>tnzH=m8z@0@GuhaobjQ2#=^xrtW;Hc zv?+bhKq!fhHavb6wU-b?yi60E#rE!n;4=ZzWG9gd{({%w9?eLL$XM=>l9|L@>~egR zls?fvO}|+&3UZijHjnL1gJRTwE{k9U+VE>XQ*>Zw%cQ`kD*R4U*HiRM#VL~?AZMoN zz z7>)k7j~KFv6f76D=Qsvp2k0?uQO1!1o}>8RIx-M>?%P%hGYue3?c&Azlu^eIhn5{K z*aTh=v1i?}Ws4#fl)k!u_WFBTS^awL3F+^fH*c2Qv7GpTMcf)HbJ0XqB9PLgogt5VT+GoqY-&x zrt|g9xFS8l-{{_%|MGG!scC7w=%Q}}951vOe)0F;bcK460C?1fuYF|Y(f=LFPoP`~ z9V{rnk$xK_gGs`psBw$({zsR<#Ol%QEL$J>1ls&l0)K5sE5n{WuZc~D#jw5e=rJLZ zsK;OZJHPey5t(+m@S zPSXf@T~1+}I&j#*sPhs4&MfY;_2zrTGad~!HL2i{WTC;d*yon+4|=uIJ<*X}&FkLx zL@?33J>x-#7vQbf+Cra$k9H^?bYv>TdAvj5q)r~Mz7$`S`&=m8nVVeOHg6_IS^44+ zyc$-Ig$qiTl^Sq9#(iZkT)42qN;)n+p88x}uO71KbW;mz<1n7;EZgNT9JLqd0Ac*91{5{v>B*D~*EsIv!k z*o{wITu)w%7TC4uD7W=y)a-uLoOIXoADSbjK>thyn_OD-P=P-9wWMe>S9L}#{6J?s zP$P*_IymcCnrZIJC55AvlveVhFTH$wxDacu*xXe{v9-KMuM5m;OFNH&TJqd2eCyg( z=`+6OIrCMJl3jb&7vIjfpgUb>;I@RVv6W_S=bcB&R6D#+G2*6B?MzJ*O#(8R`EOr$ z+;RHmr|sk2#f9%$Bd|!|-g00CHcrJUI2r9MGmemgv8KXB8v`QpZHG*uYP9cQebKlS zQ^wdWcJicbTG8~wlBLlT-aa}YTD5aJ9u>ZaT%a@Cqg{)ph@|~$7e{?{K#D&TgRpB) z51Nu4Rha3l`R!rZQPeD9kCnW}@;$`Y03#2GdHhIT(rddSz)CWM0O@I6+fD09&blZ`hj;BZjQtiD5 z!7@I}ec53Z=YT+i>=gn)iBAAoq0^uk50}yHq=E9;`$-+ls~N(rIYmXGyy-EsV2#Vs zQfw?HQKz?-h2v+bPadGPn?P+Nd+)~0n<>?#lB2;nrV8!{I+>07xl%E4!x5l~G*Wnm ze_@e>!JD8+!Q@H-8sGv)0?)`QRJ^GMJnv8(Z8&cDrwBM5sU9(pnX4dXH=%2$0L8u0 
zNWTGt@1d`64)r7l76lJq)b+6y`<@!@&7y=QCk{8bE$PINPm@a4mdwiQ_+)~(6+vNV z(S$m(Pf$yaO9936&2FDZ%|CarJ0AXfzGdc7*%$ov@M_o%**u|~cTT++rw>fdC_E6R zerde;>nMaGhvK%p<%Pg7&?eGOy?3&n4Qx%;REEc>TIevYf`cX1fSs1*Fs}9M-S*%6 z8YW16?I+#<3GI7B!Ow-hov6tAN}uX1u}kU?Id&QxDj*sZF@E@+r*DA^y)Wp*K};p2 z4m~)N$FosZaHQ9pwFUIRNHza8DCo5 z6_^bI=#>Ii-8v9Vq~+yXVOi49H%9QCb83Q7?Q^?bWMDSfFNME+>FX?XZ1|?UUO4nK zn8{g(gVwKKZ%=pzF@0WNev&NZMu;7XIKNf5X!(;$Si=dhZH0Ceui(j;S`>?%Y-rS0 zdGruf?4s@Ee{jNXf{a5b+F7ga@FBM8GV6sY17Z%~CBg!1`un%_c4YGcD^JsC&p+O% zCl85rVDc)JM(N12>Fex1(L>1BEBGW1hN(OFN??Lht+h*ZNUB{vOD?Ddmjr*C@crl~E~1CJ0X+p8xVArBRD z27BD+6YxOLdNZ_J26$>B{U{u#mzS3d`M+3MqLv(mV^gJ$v?9<*J$xVfo@yvm z4ckr?y#%BE6uW8tv`g3Tbaa`&Je|FozL5cm+PSIPt-WtbU~CDwKNXxAtSO%FP72m@ zshoJ_LXL3TO+T{GOlVrs+vaM;m#FF^a1pRa4<6UN*{*PuXqN~u2AZ0 zBpM)`y_kT&4&*6AK4QsPlx~9LcUXC4{bxP|2L52YB^zYgo)Jqa7;yXuagI&uf24X` z?-Ya$m-&elie;fJ(OVDSy#-exzx(YPC=Gq}!otGsv9@%FEslghYG%fH?PN_wz~T0X ze|J6f@kyUZGRie=KF-e0&RBIuf*O8{BNaev1XU$N`x9sq5L^p*#`XaKu`K46Y1G07 ztbk#j!Wgg=Sp+0k$a@2~Gt{sI5i{*7$VR;Bdh1-W1u<>6S7%%GbSI44so&111Pha9=fLpggvy(j=sXES zI3Y*Kf#Ev3bcOoJ5oZRH#_f4A-T%3KsEDmc{X}mR=fiP)iPT7=%d^3!S6J$az%7_1 z4~2)IH!S>!=g;M+Y&X1GM2}Qpl|)#Yp?AbQ4*QMR7hMWI9v&VABXMDNsE}xFaxT3O z7?K5z5XhlHwlPHw5b8d6eE6oG+}5SGzC-{zVsM;hi*15Cw}<1J0(8&u!JUwmkf~~l zmfYkHiQ3CH63$;%#fiKiYR9j+k8dwSz*uek6=EKs;Np0(+U6!zK+L zZj&}TqKofHgS*hd?!L6J@CJa~WhlZ?1qPd>-#(&e1CJ1UFgH6MtJd-pg0w~6g*KuV z&ldzZ?`VZGEgODL5e}_j(ZtYDJj6lGXB@H($Oy!u(rM{yBM1e-Apf8%?F6Z1;plZe zZZl7!lYy%pBs~W>s6$+aoga8KGh({j@E%T0eekPbalM8vm_C=~v@~l%Bx$B|)-Sb3 z(-flbEzM7+0XWwvctB~BjTF^{cBzKFj^@9o3oA=fO`|O-)TvxO1DiRw&bfIP{ApK5 znEsFNA5}ArWF>Fp@kyo*e#GOAc{rl#QRwRjkNNK2UT8ORLr_2c#z#A>foX5qgO&_@ zjb3Y|QchHUVQoGdb|RGTa+@P*1RDA*0$XK88sC~zIA>6H-z`_56t&n$Fxh}eb*N6r zRFQBVaUz)nEt9dG697mS<*tSC8TxZh8+(@mV+QUov^3Y&` zj_&-^=h>U?Uz=e0{<;1^6nwjUl`Hu0v*?9G3$r%RVTd?TZcTJw2ZpZ}gjIWO)12?| zEG4g6=TxNk{rkg^9?pu!7D1jUWVw2$51WHj+drl(8D$`z}eyzfxA ziNdby3?v+p+6P){BYa^H<=bJ>4ABAF7rxs5kORA5fE`ngJK%DIU<-A9_W?VRM+L9e 
zx`l=1Zq0pHIe_IG_j(6&`9Zf+-;;w@O2ii=Kcs8JYSHXPb5RxW&1&sq@HmIjU75g2y}ZFzaBRAg|jK=K^*Z1vKcagRaL07r;29-3?flP zh;~s?u+1!Lb|Vog!I#IqHn57j<#jz72yf#Mk@j`{{PfIj3B;#NxiV$5s?q@PsDST$ zfdmygv&6MLMS|ma1lrzGFv@lvz5xr(Qx9+PeILjtjI#_Q^yHNl-7bBU@oh3e~NO9^0?F$L3YCvGut9pY5+z)eB|o_fDP z@Zmhkzy3PNA#FH-Eeo-%exh&zEkVJ}t*F|kC3*T) zqF&RCDj=0&(Up7!UHQ&m8^K2j*3O9xMIrwi&WOU5qUiV11X{mUMTHG~8F;Gyi zj3|$#%8>!~1pQJE9@@93`ozpPZ~+T>kTbDDjIcl5N_xgK7xAyv_Tu{b z`p`}(3PxUG>8UQG3GsIhmT@S<()#6JYs!h;-!~}kT&$gcC)Q)3cT-_0K-*;!|AEUE zgFV%L=n?jcWw7gYBDeL5C4-&$7I(ID6zsqFy#kMCz7T#B-4lLQVagiPBBXB0&3m9& zsgm#~(HqE4A$~`gpp}ERYewJbV&3?of1ziRNMhsjo>8zQ7vH&YLfv7vxb#20LX<-Y z79ZYrqBNu;TQgL+I`Iwhk@(-IP~9lBwXq4d^0FIYTB_8BsD{z@;&bB_^A& z1%iY?+HTDwJS3X~f)Vb)`uf$YU!g;I1PY|WA+2*R;@T2E!GRldg#Q2=gjW`;Qe zQFx62$}_pd-MI_|J&R}3u(~?dnco;YFj+jW7d%*-#%xtoU$F$w|DG ziH0YCi%jH6V#A-_PG9IeV-)W?cns!mWBHtFidHl4{eVE5yWQ=aIaUQD9OZU{f25}l zBlDx|b1M{jVX^hW7rS{e4K(sZMw-I`Ao5ccSWv;8C2XK=NAkJy%sB zr!^suf79_<_uR%Fn3U6PrZY%C*^f9Gg>^cvEZEGSee?J8W6Ga;6_nadT9QLcR_C;- zc5)Q$kmY_F;b%NwSPicaYH>C~*t#AR) zBf*voET~ysvN*_$!dYwK!w3tIq-L7&CV`NuExD4z55oN;r+OlZSnp<~Y(B{bxJJ0kt;@hg%KoAlsW9P_u>? zM3N4^=|-1*UCtc;*Vd9E%;$Zp#t-L!ikMxl+)w;J3CnfaQ1+ntBmy+55^Na6t@#Qv z%em}M#EEA~91X<<3X5B6=^^tpllHmqLyr;8X|(QaLs9{m;ah?|$lPo%_66*!ANjqx zEDP^ZQlvWYSf7f#cIa^ce=Npk=W?&a`t-Yha;LewW#$&KnHoU8^Ggx#ls|{V! 
zVayow6`8O$aR+=OTEce0i#TRfUd^hr=?PFEcv57l@}AnD!f9CXunOI|sGh20lPKs9 z)i+OcMNbZDBNqn_V#{ikh zrouPXss?$u`Qko{{131B99~~}q4e<$Rn7CQzyZicoMBTeXvkE7FTP#!Mn7QV7k}>O zG$~i(ps&XZC#%BqT$16S76Xu__{W%<+^yh8~;)S^XT%B);xeG&K6!Dy| z_94pZgu#F^HUS=CVnGIVf6(m0v**yE6v%JYz4n{r#K`No6z&;5Po>vGdjzU24I{f{h5PVBa zZ$zV&^HOe_0XSf0#WZ5ajvWprpZl-nR8Yn|GTT^Y@c5=L?pGw6Q+ncLnSuDD{C3fjzmxc=8)_lV>mZoJ#)%)r29DKrLT4t>4eNl8m* zFmJsMDGhZmt(scx^4l_A15!D*rp#SWx$Kd_1Q@wSghuicg1t|sYj7jA@VW78NZFvd zk~{~c_FerS>ya{V2EBuJ+mDNui^eO8@k^M<34%(r51j| zLKapA6VJMD;gymQE<&^4rvJ#wk-RHl+H@6iO_hBP1BfK#4 zcKcv%RgIy}oQmC!G(9{jmqmNV$E7YZmSWLWpbDd{PPD8!(xu_s|?bCg1GKy$275$KQNAK7ZZ8CpJf{Z-D)D5^r0i za$cXK4yIY;!p}ux9}N{GC;hS)L`dNN{L_6*Os4AlSy?Ar>~D>a98rF-bzit%cQdn= zZ;(xs7ZNVz57?!a07P~4f{#MS%A#tPJw=V|Hxom($P`~5z5UpEBBm-^@XAog=0(Fc(vp z0stJJQ>lj86na5UTgp#S`!6TnKv@3p@grj#W~J06twf!l>zWgMLBjJi07|9~Ue7f* z5<>zm#z2%W84m*;9}d#Ctenr6HL#}feYJY*q~$T+8=jND?=oX6&VqM# zZX$m5RyNh!)_{&q$6)QpRg}o-&-EXlplsyZAMzc|EWR7Q zhZ0YTFmSbLsK4~&UeT7mv8m0A&Zdr_ev} zI2B41WtEt;ND#Gvzo$X@Q-+0KJ0rVAea~m}o7`!WetbX1EQ)$TnB4Bf8po}?E#CEw zN)3yjG^5N5iixcjP@jHce31UrdHTOT%Jt^{r?DDsgvJDbk<7iLugS}+L&_$DQzVLz zS~$@F3&l+E+N1W65x(N?eRJZPO}``YgEmhTQKOo0A|WyA^<*0R$?UXU!DNQl%@E8H z0R9ZlPUjSl+~V-aQ^x9$9Ghf&*@JBuN)*aXtzZP4X@ZN3EXG0a;)5iz6}yBnuFg5P zVI;R z?1$?vV5_UZ0VH!e&yTB0BWgQ6FBsVX#h}Ye??{m@U?zQ|bC+Por(J)q90O)2c}L`8 zG^$C*+?lgEvh4Ka&%I6gGIB#FQ8gNg6bAsT?Yc<_%Tn&#*wBFd%w>ztJVj(o>)|H5 z=+aVuYjwU6#a#ufUNJ(AZtgJUqTP z_>d6Bf^-kICaKq+v^OT=;>S;)P(-C#GT^O9l2Qz*e|(N6RDBj>Tf-K@5@K>N0Gh^6 zE7zf07(0C)N2p8{7>44qcE@hVWXZ;rXh@MnTEQ^^b`;Wh`P zdDz)6VUwqe?AY5ca1!aIYiu%vP$q$Qn|9`<8+`T#XC!&;kzSTN82G*MMmqpf-VMj2 ze4IpAtmMv5cVE|0Z!O|mb=IOI=H0vV!7T$@05b)y;p51&y3amsl-zGLeuH&R{^DTv z5QC$HvuAPUlhcmHX|3h{Pd9wk?xqkP?aysLJFJ-7omc5Q?pOx2Y=_y%;3(-FchA+8 z7156Mx)+WOuln8W{+WKLxV?EAVyM~)n^o9>r_R@yag&kzXkLMdkOTw9EvD4_D> zHbvRLDZ7RmU@g-XO0tz0EYW1WP9g)yn2spP8yJ1VBck7qz&H{Jj1qNj!05|d=OXgT z4d^o4Vsw4If-+_K`5zHEi41y~IIgN0IEm5yom=3cREvc9^h-byuMx~JGp=^+iN}Aww(0mTKfY-?E?CiSy1jA~@GirLi 
zmvVZJLLNIFQ0|G@z5WD-1IE712WK9e&woX#+Ar%UG*$eqW(4TpkO=@N+{Ep>KA#V* zLK%RGc9mK2+?Ndb{p?`MNR4GhbI-(i%8@Y?IO@6Npq6)$^=wYnr3go0)&%sRY@BLy z3AAynN_2I33bsy;4BMTde18jFXlJnOLMfxQRhTx#t1OD<=HZcpw|~N)HW2r6_jYz& z$BGibz7E>E{?t`&oDIu2<>aJ8oUoBu4TZCpNR*W-#E%|PGvrIrNy*Z`M5xba~%>-wo+gZ5R(Y<_Imq|3s1~Lf;)p;10t|bHUp7$r^ zck!Sb@Qy?7gpHS{QUPE4kX~MmMQAdN6f;4za6=UkQLLgy^^v1TN$xbbMYYZMpwNwl zWSrUW4SU)!0I=cC(E%4CPm@U_Oft4)U}q-6(35L@`r?J@fdV>4hW9ve$;)B?ndi+J zhBCpULckmb1vqRFt!Py^v%oz`@J+n7{Un3vkd4>YjVEyYmzD;A$__$C`Q5;Lg~R7v zS1Mk;`V*cll9UP{AK`k)SyWnb30^5rxBkMH2=YSw-U#2vk7uu&t;vH^|MI>aC%Yj{ z+@G85v2wRS9dGTjzGxk(385N}Vh$Q>|Q-L{{* z4iZL6Ben?f9j8^)9f*~I%qxliwPfHhwbMUP4>?HV9UIZ}CRQA&_3=?289|t)(jg)| zrea^D72t{n%lWrgAXjb8;!J_=MFzS%-WvRqZH}aF0fuVFcc0$+!s4C*CWBZ!SAK{# z+fRMFa8j?RO}H4dC{S;|7cj2l!A?d#B8klOoLolCn9MkIS(noj>vR1Yt?tUfoK|8j{b3Z@7|IihQC&B6Vo|#0X zn`gujVRF(sBG=6K9K^(%;-AYm&>Xk#{HTKw9LwS6vOwBlrel4gAC$|>4eg0=kz!`* zsPsaG891JdEK|f5Lyy>&teYdrnF>$;yvsz@WWN8nRVn}!$+X7`BTjD?Vu{a~Q(1-E z672u$=T?YzILP~hB`V@^oD(O}9Z2LlIvJrhm+wBDI;m#h+dTG@#UMT99|M8)jYR_g zwg_kgizeVSn4%G4F2PBO=eI?6@OCRDm=EDZlMo(hNhU)SpfCd$)JQpryCH}HrIjot z{`tbbEA!BxEsOy|1skg|$y-}+XW*fI2OJnU@prVBr>mc(D>n5|t`UJ0muFaUAr5mn zGV+g{eay^Bj@`}`y=D-AG35|V!W+8tIDF$?y_qSQT$&iwlM$35E<*V%>lskF)&2 znJ>vO*9h6NU&2R-qg;Un~cF#FdOE86pz)V2i-fa;7u5|ip-$2U`ilMv{eXc zL1G>#v;eg=Lqe;y7bcFMDSBwMIkLkKmfVNnB}j;w!ESA%&8i-dyi%x)#U2kZAW;oB z_jpgtEDC$B6-ApPSh7KQb{{*1cuxwDm7o2aZK`%|9I%Aonc{nK+pn zW0LG6Z_EPzbK9-Yl`E0kps9SStAQF+~hh@RoX{K_nDsDr||M~5_Szi zxRRPWI^iU`alq>{BPNsTP$I{89*RK=a2_oqN<498lo{MG^2P5sp(CjY?wfH)JQ^F# zD?U^icPdn!NXuJk(p1}EZ*W|>g;^(y#rJb%cNDFuDY;B@ zwHSXV!zDgCu zCLwKXH$=~=$_Xxi_3k-KDIAqJ;0eYm1l5PE^~jd_($mvp`EMfX#31!RV3)Iup6k}P zt1MY!-sfvjQqWUo6lB7cLzZK5ZJG+3paqRtF2vzt3Jg~Wkn$;@PfbPU;f0`;CD(~J zAN)X=bPru}ozipmJqh0co7m2d?$3=illdg=(u|P_ z$#(eN7!I6lnkE(UaJ$5xfBte?o5 zNCDN21*Gew8&>bfSeJOkHXZc!dXhI9zEOOuGQgp$l0q-!~|Q-teAEFFVnqf>fWyb zR#N&hihF|*^ufl0@o1Y%17fRi5ResAwesHQ_V=hPvRQOs2HJlamsFt}8r)$UH 
zwV4fDa3JOi2zzI7r4QV%88aJkWZI{fim1)6;cB21Azfo|LTKVQxXd6{%8XAt_0X+7vcA=ydh!zqlMhtgttMY!PBk$%%b?ibxju z5IoTolzzC2k$?U5S7&?wL6`+?mIrD{JV3!q;MBjrOCIZc{|_L7c06Q?ahLehh0yIr z@$&=%W07aazmW8BI8zM|s2*_qD`L7yI^*gkavx2hp5(SNq&9Cm4IdU4FHs1H#Yj;W zrsqfofegMuW*t#lHwDPV4>!AT-Wm0_oky9p*d8g247I?#)nVE%2v$t)1j4rpxv5nD z@2;cUTgAx08*(s(n+|6@3lAp;n!+A$G~jC+jhhjg>LkD8cX0j9@Ils?tLW3@rkNg( z<*y3j*Wdm9*Q&?w7vV9~f2C6~k^hf}hM8r`IQgm%Gr=dld66CO( zjy5!MrHqD#hA%A_d1;a)5?`5;G;0UmB?d9M9RdQq3NfP@DA4=}FeUdZbih%=t#?-TkvEIW zRp96|v)8k+8$pcDlUD_X0m^txim@jbZ>{lHW4JYjU~qy&bl_eq5I1H2DLr$ipR1U_#iyopt$bZ_F<_nfk_ts)37I?^V);}YDctAM z@ATs%1DS*+^KH--6yT;LR~*Jap4?-BDG_1k=@=sYfStGaUnBhVxAhzMPKhXGWg-8`n>C3=BDry5dmvSM zw)0<%ebsNh*_Ir>m((AyGBU|En%%0G`HOj#u1r6&-)ur*YsTDIzG2ib0&zs6F}ruc z==+rmY<~cGsX(=`U-~U#>;oVJG9aYVB0)^JDtSPRS}4MENa@j5Pt$8g<;D*`OJ@CA zZeCU+11P%kxFQW-T#Oby_KMy~QcoMU?QdiE$kV>SeqbYR-$TbGSIA|X<0;~#3v4e#i6cYD zl787_utv-^ha6}S`8NQzVGD`b=+wm_9HHb z7)ILuWFV~__bWAG^aerUGl)N^B6OyVE!l)n!2>TZrrn>&{4&bF37Rt@s$G+T(5nUu zc9Lthh^GZvA;l1rBMnfb?NE@R)v6I59$i6$*E0+S8dre%6NL;lM_9n_4t|1w!~YfpBEaGl`@BA#H?R~)*k(KJ z;^oVUu;rM9?cR{qQ>^9$p$}%+CDfQ4=8ZW(=MLo`*U^2m`0v8M*XaKwg-vdKDUAQ~ zCoUo*BccIkcskK^PLQjyx(^9EP9&q)@lgz>!hhh}1(HrKz~nXNI^RIFm#)e?;EcsE z_tqw{5TsKln}osF#6cy^kT+`?$EA(+I3*;WDWwF3nOu;7aI_HZqTvNHyhD5ta#3DO z2GAop-k2_c#1b1A9&X-R1F#O`#3nzQ|{_-C(D z-}moX*1dmWoUBgR3Y+se=o(&mzf*XI`yHVZxVnLS5M0I9^7QDH03YN)_<_mOmSXGsCv7Y!9;sz#fzy(=Tv0h1aJ%EpxNzAXtHM5zb_EKjd^@v+t#fC zh|#0_X1aHHBE0U^>-)5jj9=p|H$#R?HY7nQ1m}mW*9iXUt5j0zoqX2aPg*JFc8S1h z)=tKe5Jpi(H}$%;r&3IcRq3+)<(w_O8$uUOLlZ*ZYCpOB5wi34QeAdUKL(7^A{A0) zBZkZOK07#R$Wt+TaPOagXrMg@T%HwJ_ueB09@X)Z(QX;J%AF{OB>H6szly>9b!L5$ zvn{zp2v@b`2K~DA4j^ZH`NRA$i+jCbK_S%qe=~^>E}4P5)9z!yDgpGMFm|o%41hOm z)Z0M#UNcFZ!7B3q(8uW9;|!D$*+yHIJ67;ovMan z-c<<|Ng#J*rt>T9y(Hov1c_nC3g}ckf)Td*cKc+@@98LXob!bZYHI5Wl+vm0m*G|cSW(cMJ% zjds9`+-dY2_nh7)5meAW6?i$`GZ0v0;c!>YZM1_@h72z~nMl^hl<(!VKgf_XJU99Q z(6${`Ys(7@!@#1gWW-6SR-DW=0r{z9o2q)J>4O2{(g7j8&D@ZtkEmS-#k_72@! 
z)nhgr8UG*l-UO`1we25X88fr7O_gY&$y_wcY)MITX)qMcqCusUd8}9z8Whblr9mWf zlLnel6sc6wKoY9&cP^oxZFrvdd*1Imj{ouR*!!s6rnS27>%Ok@{7vV=DL1~fC_p`Z z_eK4ad)DB=s9Yp}g2P+Cm;f@J!Q{94Mp}JKgkV~ddl@bsp+B&W*PGWsuYsLsll!35 zh0knGftT~Hw_1w`2^2aSiLy(v%B5sxk=^E|D;FKmvWeTqgkYO^jRSDLjL^2rS`-m; zXyGctw}{Wg)0358iB2_UAk@eC5K|YSd^!}n^?W4y3`3tW6*V&-nxMECcA4Z-gZqm?}1<(8=r8Oig3BjT^gq&vCT)Y z3jPor`DSpV@ImBk_*ox8Vf0`96K`OPe>!b^%4X;%K;0%uH?D_3wzO{c8vKrrAd!fK zxJRdDMVF0&ha9wFgHs!d&p{p)kmp?vvDcC>bFJW>^ILl8GO9$Es-ox zH~ZZP899DDwQ}{S=){L^^ixNaVq%TuIdrbIYCG~_*KUPN+h|wL98_^AMjzNsvfkxU zwtdzsFe&I!)}np0eSD1KW1#4!x|Xb*_%&x5SO7l}4A@G|tS9RH3%F&*P|xXH-gE#v z^A4w}4^SDPOjC!ev@mex2&DcdN6&;|ppb}ZG%HI8ibe(WrW!gzbv(CJR+7kLZT;1T z#8Rr_0gJ5$enUn54(FFK*vToc(FOu;l2x~~&Gs!w$J(a2O>ar-5R`tt4fmQL*x*Fd z!Mx)g`-&I+1vygIp<^WA6Bl|daDEh$$~$)KI9bPg6YmpZ7!3g0QO{oZ!=49PhmJGs zVI0#7dOcGJ&03d~L)ka9)v&>lZ^Af_G+YSUXRRB8;wlBHDg<6oy+=Y8iE`|i(I{q(DIgH4 zMUW@#`ZjIcW?Q&T>TV63{NO|NgdI4(C2>C#i6g-6Ahe%q141{`cP-3OTnznE6w;+= zog&49Aut}BRqou{Amp89O^Ok$U2H`G``b=VzEsGk4>>4G60+P?(20xU9 zLqupoj517iwoxd+T1ZYbapMZXRMteeM(0mR<;U6|nm*#W(SlA=3<=urs=^&~iT>Jn-% z&;Xt=(^Zer)dp8iYf9$4@uSeSlgUQh^i2B%#V?_@s|Dm&{4NwJPj^n#NYKAh&nrJg$=6uk^iWaW(=|H_VryQg z+E}Ggx|-D#L=U$Ugfwuyp(XXifFvl5m}0Haw|$GX!Wn+Kyn5fl8%cOEiVnRP7-$RS zeK2xT5$J^HRRxMJ()A@xo1>2g6$XaxU@QLa5H9@M8d%K3Qbootr`%ZdzEK5OD*5nC zY&<&yeth~nu!h52Lt&*91^o0C9xD>C@-l5nx;1N#qwt$%G_;M_%=*MW?c1sGMff#^F5{&_JJf*2*a{EEft9!#$@=!k7zd6L*vh!60A^C# zL#E|0Io$T|-_KJ66LJ(GbExj0D?bO$frci9-)GRbzpA|9i|jMbe;n8f^RZ(?LWx?G z)?C_Ku_>!!2c(V&33GyA!vVF>5pWvP9<2pHQK%nwa(1Yu_s{vz_2>NVjO)rsR>0{< z)Ii}ExFk5XI9%Jm&F_5svy8zbJ@)s-f9YR@&zQV43fU-+BCwZz0y95`k56BICjEcT zhak^;$`NGPAHjBSinm7FG`fdi!Ylw9uLJJ^!p8>dgv3Mik|?*s2t}eKShOG2wISbs zTJiy!hG3sEs{5yZ{iK}FsbGJ6&sCG3B4U4h{CB?5)MnGS!jGta)I||c2@=8(>ht~G z(d*5Yj~gTN5_BCL`X6lOyDmXU#}pR!=yQkG_px8NY1Et%FGtN8h|l!zx39ZDTa7sQ zqSsHW82k0UeE&@<#?r4kcpWm;LVXzWcc*Tcepg__aD1kJ`MSUE{#?N)_xG`TFKbua z{JbBTrczQms0Rro{MYw1(#vhgIBrPW(%2U7T2xEe=->V z#y2V&YX>k~7ovu4wi(ShjJLJ_H*fJy`licTGS9f}{hOMs0ECLb=ZUJP$p+IsetcB) 
z|LIy9OnDUmNl}sCtRHpE)nTNEtK`UIoPWQ4{q6Iv6Ik&F1hG});5u(PprZBg@|!Y; zSKj2oXZrWs*WW(N{^KV955B?K4w|OpA!_;GzZj5LLb4+J?_7+JyHdCq6^b~nWMTB5 z`Zw47DSTYk{C{))eaHXx>u;aiIQ9CEyTQ=k!`UEzeCOZz#;x*#VGPDL70BJ!F645> z1z+=ri|@Jw5soiwr!z)g<~}>9nfq*Ce5QW~c7NUdIe8%Yw|aau4aV*U6>nu_?cer? zUfdC2vI;^MzOz!ez7Nmc-#4SCPMx>uviAH?cnOZ9Y4J_{~Pme_I4jPO>*Fev7>7smw-5e+G=^Whezmo zM)Ks95H}9^6uZobxB++xrKhDzuBpCC49u zw}^J#Cb-D?(-kV~0BVo}nXqTHNezh>y&l!Ue`Tsj3SvMG#4nL3D}+&y14O3eCdz6> zIf-HkkdqvZ)saI^nr{6NRG_G>to>qvohl_X0wm4{yt~OS2ABrTcmZTU0lsLCPcx|r zB4Gv)AOV*y-$gBCpMn)n6PanuaBAI36QiM-i46-Aq&^Yg53GenAlHZ$Kp8Jtr2l>0 z;ES85R(Zue>~40!o)n=hdTn){cwKyzk6Sd2MNJo3{LocZY3(E~7cT9jE7kz?4=xJ_ zAY6w`Z9>OE9ss4lLZIdz&UJlClYl7R$HEU)HNnE->nQAkXc>}*`cX|uh(BFdS~L_0 z1VMd;mqo9QYA}22efRm7$t6L73Jcb)W|J)tw?-Fgo6BA~e$q*Eytj(){$gL>1>IsX zwA-euPP2n>H(!GkNTT6Lig*i8iY1Y4k5M%1*r=x!oxvC!TqeY#T#xtobJS$9gTN)4 zVNJF_=j(Z)0s>BodaeX=P7VlLM-bldcvDsbC>%C*8x#leB-cm08QjBSx@33SL7Ebf zQ~#cVXn=c4_$I*tKq1ezL`B+dAHry~eX%59mM-^us%FLkrMfAPg01iGcJYHRvJat^ zU*ENM%^J}wtuZ;eTXGOuZEZhnX;hWF^H*Pn5Qkl{^rDZq^c&2aRiu zFBr-rk?*Zb(Llsrf`+hhSkKrNzhSX69b`eDT8}KhDf+v!?6tP=WC%VCJJPT%97udl zNk5-lz%1|ZO9R_b+y!l93`FnO91zE3-mwTV_rPByi=P1gHp^3mmxhzG*wFL(GGvaN z4{i}e^+>rlN^O7$N{oo8$J<=KY#H?z_!)q4ebFa`5CZ8PaJm+G2*q7K2$87&0^lUE z(;vJASc(Wpt=c2vaXV?)6*eCuJv1gCuFe{0f32y>nDxk=cO#$Wpo(V~1923I9{6ArWA(1IvHDXZ)8fYKIaUx|P%6~2!r?AY->C*-NZlB9# z&-eS&Nb)hzFhHU{l``maA3={EQ&Caj6fJ@SfI=`x@eJYJYHV9}5**~N0I0K45wcR2 zo5iUiv(UX2-rrlbQu8Gv3MPQUDnL-@4&P9a3Jwu8y6pQ$#!5(3XaJA&^fkDUl&Xa{1PKj{raCWvhStf=c^~=w6TXDO2 z{iT`Kntr(w=tM2AR(bK%XahRc19pTQXr>8Rb4l~{cpRr0y?^a_ZljMn_8%H`!a92N z=#H~ZgaDXnSfWy-7?5f>Yz{(2!ejxy7@l+ieK1lY)ZjTs9m$(pS!*#-?n_s7duz!= zDMztaU1y}9hx8v(SC8TZ#Y5DdzP#HC-q%hjMf~6O6uX4-kRjvxBy3oGf5Cx6E7$MR z%YsRCWVH6F%Zhp_%fjs}K3hD!k;+a84cXIIKs_<4qsGw7!nE-9^;vqtmVUDamom?_ zS-+ItYmsH?GjH_a`K1P95;OlUy}LkfcB$bckL4SEj=rq9yI^dGMV8v>#BjmgO6IB$ z%R?SiZbIhy$tNLeU_9RBbA7Xqd7kRA+Y6p-@_8ImztIQS_30BQHfWZ#URPY+{Pj=F z8yjpPeAIt*soMJ_A+v!(%Z+^St-K=^!e=eAbno-7^2%BaTu96?VSUfmUNj8DK 
zQW)z5dZ9<)53qlJG~czCbdn^Au`*7D`!Wpc036hr>Ri7_P^MX%E)#}A=pwg+}cZpNt1MZ4+E>L2hIDK+Y5?Dt9ybW3* zHKX%_&BWLZ6CB5OOJ9NoT@QrfiW02acf-#IfcOsv&&py$)7E7|tY1leVR&Fr5R*mk zzX_i2VaQr$jP0z+_N*|%}k;A z8i6sQ?p7H}TSBf$&(G(2HSBlKoUB6CViai8m3+hCpMb|-yI4Pj{Carf7?MHra-e+z zsvd`t4YbSRh|ZhiVKG7-8=ulN1*3-c%yqLhA?__U6xzD$NLIAcRv;Vfq5We)IUc5l z`@Ur$m;K+rdlQ)|?M-mq5rL9|34c)HO-6#X2ChzywKIM+Vb}@K&9NSlrDx$<;Elry zfLkd!PXVY{{3$K49a<*J%;%&C`kHRDwC!^#X^SGKnK9%&HNmw8h2t-q&yS8NTqW^1 z)57pr7uN#$thttprJ~*CaqL+9E?KDB0kT|H0*@eEA_dtN$sFo!utmM84%fSu*xH3{ zUb?6lOC|!@ML#~~jAjS72${Wn1d^deuN?SYRM#2It9c=#J^B=78Jqc`ph@U#1muX* zr|McRYAT9sc+tX$0~UEY#pXy0=P*SX8z%Ut!<1eOv*JFbNguqJ?HvZoT(#ACs7Kd# zyP)A0Kmuu!G~QjK-&u$d(v) zSb~{C{QUC3?B_nP227^A%Q%Y=W*TT$OtViSKt>F&0q=bky46`M(w2k2j*H%j&73So zOd*U->^oqnl>~)ty+^90R_G#9;pDALBvl3Onoj7Up%d5*pL*5B@%-At$0(3i*71El zNCDv0RpvF)09yGRUr*s^aNk9Z3jLat(VYim-Z!x*Xjt9+*OJN#G%~7OW>JHparSkkkO; z`115U06TTa4wk6&u)7A24){>b6O`Ox1H0|+1-y_F7*J`yqj_6lHl)pjA0e>uW^b%{#HB?ev>jk@u1P6@@dU^nmVyy{YhOG%OF?;)rX zLAi#uhsH6!sWyXUkkroP*nT>g|IUX7PZ@wTY+-faH1XZo3}H3j@R@I)+nO#L+KukV zdW1TOteKD-DZrHR2*YAXMZ}trxeh=S5E!>YTurb_s@VCS6hi?_6TKi_G$!>QBulsj zHZN}XhjK@|9&(g(6r_gYo$CErN$t=WK$!8a`_WqgEFIQSw$2sf3y`dZp>CX2;(%v_ zVl4=YC9N_zTOn+5q0cm z%V&}$YD*E%i zd~%SXKTB9x;BVx|$&`*{06F~YS9Hfu(YGGg;wv2Y`)_pP)tt_MG-K7^q0B#Sg&2Xb zc443AGtf=Ltp#wc0Dc&mXFRJALu@mI(N@Vl@W*RraKa#e{5J&V$o>cziADxf=bmyN z9S%N*UjI*zj_Z=bIHTh_iN`sV3#I9w9slRLs>f@Syq%IqdVKsl-*BPvK7n&dY@DNPN)SA_`|Cn z`cL8bp?7*bc_<*i!tur*o4AKf=6k_`;~|?FH3}QbfImB&Mt)hj!s=jI7zre#5aLf zG~}pi!JqMa-K{VOQrrn-L9YRYk)jaNOpLN~ChQ(qf(BXw-I#H`eupvaU~?3OE7CeX z_}nQ0pf?8Z0KGWD!;}a{KpE_Am?d7LltSGM61Z6q-OoLZRCghO6TpR&NQnt#i!&sm zpn;}~h;M(n3;_!hvpjgEL!dILrIb&aLxSPW&~Zqs*O0S={tjn2AUR)13yX4@B(4;l zOMqBPbkFzdIs*{zJ}?|)9j7Sa9dI5I;o18FoIJoM7I2TihAc(ge z79u@=p$e03X8vp@hS!XNSpdrDILkH?Q+CEDlwir-2A)cBx8r-a_8>($UDiy;E)T07FVi_l^d0?rl# zaUct0(C~U5a1^#@Z-BbLHsy0%-IMrGur@PUbINl~l4p<82P5EXE_0Dl zE2U(PP9%jK#9lN5(e53&dC=-!8s4!G48XJP>AX)8i$<}79~?3i8nc||MUUI0-A-7F 
z2;|tD6ya2OBM!YICBC;HiHa~&N}^QCz`0eu{nHjFCeVkB#9R;$>qiHZM+U-k!}T>w zN5~pBO!+zUP%#I535D^Zo6#bouim_5#LY1D3TBNe44D}ir}^o9e@5Qa-w@{L-Y1h2 zyJOMZxes5+&UYS?iT!oXQma|RHo2PQ}%o*x^A6Br4l&kf0as&ixzCLH6k1m!N1oX12Z|@@gN!E( zdjwoPMI^$H4`Rf!0tuz`mwP7l4F2tF4iJ1^1crhUb}zz#b6+hkMPPY-7fqqQ<3KqB zR-JDQ^|rxe3{=&-QHo+C;5(Jj?L{HY5kMFpG9ZJXCN{xxPpOZ&hTU)`i)5=D6KJ~! zVmUe%p|eD4$<-Kv`V)>2caRDEMJu4XdMgKZC112TIdJC~1_a&>iL%Hs(Mo&sbaii8 z%Y$5;v~%J?wS^;BRWcV7!)K6N7qn*Lt_IR%b}e1H)K6zCbb@c{GQ3}(PEgXUiw5v3 z-V8ukE=qU<1cN)brTD5mZBaT^Jb&?J+OO@aDRwMZ0}ai5KqeKcN&*akSUWh9nFW`6twmXYcXitfFO>>9K6!w)9@#&@22M#`XQO#vPAiY~~1g@Z_sd|Fz?KADyPM+;2bL^ekP1){NIr;fAI730N z>p%q%bbRP_poKKV?_fI}2?%yr$+p34CU}yKt`_^@K>_fl6$)8(I8=Q)p-6s-26(L# zil^kI+FE6D2cQOGvbsK(caaz4PCYsN5J%250bB_q+U6Kr*m16T&oes83XMq>7zrm@3+3P8Np79N3RCx&W+bb}zJ;y%o4g_A!M1CFH(AU%)n zk}iG6ZgA4BKS85)ueJNJys~s+EqKovoy$~lFo_J5@_SKVuP|@S_0?EwY(rkt;jUW^ z?Phu>d^TpB|0Y=bnRx95&>-DBa#GQ2vuPr%F(9ymB|z@YIu5$$9?sO{iax%2%UP4l*&it0WrXsNE=k zjfx*7G~)cbQNISRMoKlB1^dvCkF%pdfs7eFdUHp)onQ(A#^ArXEtLv3LbtvYNN8&p z1p5-cp+L|bV&d^_gdkj^Ljb~>3n+G~wQ7I>l2TY`vvmB2OZ|WRXwX&k!;x(h3K~-< zl(#j8g39Z=z*%KnzClhjFag8QM?AqLzdnZt^pw**%>|g9l)6Gq|#}C_( zKWgzrp4%_yjkX$R+pquf31@C?KR01{$5r?9>)fUHiqyU48hg+68voHrbGS_>W-v@| zGPwHi@(1lXae&eKS>V+9{ZSi&>!)>e=#DE1uiIT8T_bL-I;Q4=u(ULsHmw@b)}4)A zow10D=V!GzrlR`U=~hX$8<6O$+4yz%PEaLwAu1_19?7`-+hHj|BEuT1 zE=(5SmXoYg2K|Jm#(Qkp5K6vn^vRQq(cX5=QQ;xX~$`IHvAddU& z92qA%JPPi2#8J4Nhk&K zYoe}KDI}}p*$f+ z#q&C}BaVoBnMTJOg(;(a22n91J5EhmIe@qgI6%X8lwGYk9|?v2I{Yc|-1Qn7AtEu! 
zOQ5+KzT58hpc$`&PARFFn1qu9v8t-7e_CX0c=&CaA`0V1zcyJu4<%3uWX=aOL6ejB zmHXK}HF3m@LDT5Vw;?GCgm6X&gFnwODQS=_{4o5$>Jb3_d&EnZd3+u#G4<|bdS!T0 z@zRH)Mtt7E`D3;p=piOnTM@4|4Npi3XoZ4`W_W%c(WGHiI!f?LM_RwZL&q`A(A%Eu>1L$vizhi=-^U@H)P^oNnr@QhY(K>x(rp4G5=-vVFjs z;TJ^B9ZGEDeBVPCwXckKpwi#6D#RItyVnqm@s%g#)>3RS};5gCSaz>Y`w$Un&JXkRXOV*{guM9p+O&@ zLmUwBYqYQ05>Y;MY@vBI1_w+XSW8RG7_g4y+Cz&1>aN$ ze|it%WQ0tUYoK)^=%$6w?E@)|b7f7hEwSvc7k0fEJ;unvc;TMACijqkeB-1g(0~_A zY3W>qhpnPK>udcOU5kHtHx3$=CN|iO;$Ea5tGPSP0P9W8Kd)g2e`Z`+48saB%RgRE z-sn0RN`HLFYXv}#zeI56%#|i4l9p$<_NV&#@>IRTeed@Qnkwy{_x;;4*qRR&^_0TY zUGmSa4dm=ed*tQ!$lq+3$2ct0Cxri4&&{ZNZ>kvT-5bo@-~T`V#(`ByLl~}B?2{)e z>;^HI)0lXhV}AWMwa~d+j@#2z+3Hx;_PMIr{?)tovRMP!qS+0D88y8h!TV;I1JvV# z^Y-}nuW-wH6t+&Ab77*QM2(D2{`SHwOVML6_MiIr@vkHWkUK&9MR7U4?#;AS?*plg zt0Vb!!Ab0!@yE6BkHztiZ+u-XhMixS^pZkCM}GT~cj7Ec?XGd}pB!5vl2$(3BVE8h zuf|UDgrT;jCPTtQqzBF$Fi*cXM8_}6x$k1TokH$=3zPfblU%Tx))gY12}=6pe2HGH<%x7gcxrj*&WnbKZNmi>aTAzeqrtl z>jLA%Ox3(fjTjqei-;&_YhzpG`ns+D>{b48pZ^=*@IrH2x}5)u{f{G7E-8BtGkeFi zJ~th7djc$;&HtkA=iE2$3$I?evQPEpJP-2x0rLD6cF=0@ajE{|pa?4M&ybgxut0Pr z8k%#=_n|a$ z(6a7xV&1&J%4T8l?(g^SEZ#Mj;ZlEwk;9nzaRD4T(Av*BrG)RDi zg@q$*A0QrwV(}<6XPEbV0fD82F!_m-WET~)D%{Bv#cD3}coxb><#27~-Li<+O08mW zkFG52_zVSgwx6F~^g3bWr~zP5K~NeA_m3YRN^*Mk6~oNG^z2_8aD>vnXhacU~ba9qDc|K4vB>G6HIbqBg*X|O`&Qm2%w}NsvV&pVFy92 z51dyE?Pf8s(klpxpyVz|NlA*gqm(0PwIUa{_EN=LzRXndqzF>7$l6k!jG@yhuh!>TA6h;922s!zj@*@ zD^SlmNBk2|*J+*$Pm#9%=y93rH>A&_PdG0+b|MCav)GRcRCQyRGFAtcTExX=#h+y_ z`I{^0B&QVTRA}8NQ};pTI|@bT^jgeFM6?SnB-=ts~Hjkc1%>*6*)f+*1s)5cxeA>ye+%aifb6M7 z`>2M^gUOjOjR=LlB7JTzb#gj_Kn?GL_DO32~&g3|Ok~EjDiVtX3m)JcgBL(uGPOEiJybh9}P;xh-6mK